-rw-r--r--.mailmap4
-rw-r--r--Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl29
-rw-r--r--Documentation/devicetree/bindings/arc/archs-pct.txt2
-rw-r--r--Documentation/devicetree/bindings/arc/pct.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/qca,ath79-pll.txt6
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.txt4
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-net.txt7
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt22
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt12
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt38
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt7
-rw-r--r--Documentation/filesystems/cramfs.txt2
-rw-r--r--Documentation/filesystems/tmpfs.txt2
-rw-r--r--Documentation/filesystems/vfs.txt4
-rw-r--r--Documentation/input/event-codes.txt4
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/power/runtime_pm.txt4
-rw-r--r--Documentation/sysctl/vm.txt19
-rw-r--r--Documentation/usb/gadget_multi.txt2
-rw-r--r--Documentation/x86/protection-keys.txt27
-rw-r--r--Documentation/x86/x86_64/mm.txt6
-rw-r--r--MAINTAINERS50
-rw-r--r--Makefile7
-rw-r--r--arch/arc/Kconfig3
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi8
-rw-r--r--arch/arc/configs/axs103_defconfig1
-rw-r--r--arch/arc/configs/axs103_smp_defconfig1
-rw-r--r--arch/arc/include/asm/fb.h19
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h36
-rw-r--r--arch/arc/kernel/entry-arcv2.S10
-rw-r--r--arch/arc/kernel/entry-compact.S3
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arc/mm/init.c4
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir5221.dts5
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi4
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts5
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts17
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi2
-rw-r--r--arch/arm/boot/dts/dm814x-clocks.dtsi243
-rw-r--r--arch/arm/boot/dts/dra62x-clocks.dtsi26
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi18
-rw-r--r--arch/arm/boot/dts/meson8.dtsi57
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi12
-rw-r--r--arch/arm/boot/dts/omap4.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi14
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts1
-rw-r--r--arch/arm/boot/dts/r8a7791-porter.dts14
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi5
-rw-r--r--arch/arm/configs/u8500_defconfig3
-rw-r--r--arch/arm/include/asm/cputype.h2
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm/kernel/calls.S4
-rw-r--r--arch/arm/kernel/setup.c4
-rw-r--r--arch/arm/kvm/arm.c59
-rw-r--r--arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c5
-rw-r--r--arch/arm/mach-omap2/clockdomains7xx_data.c2
-rw-r--r--arch/arm/mach-omap2/id.c4
-rw-r--r--arch/arm/mach-omap2/io.c4
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm34xx.c23
-rw-r--r--arch/arm/mach-omap2/soc.h1
-rw-r--r--arch/arm/mach-pxa/devices.c1
-rw-r--r--arch/arm/mach-sa1100/Kconfig10
-rw-r--r--arch/arm/mach-shmobile/timer.c28
-rw-r--r--arch/arm/mach-uniphier/platsmp.c2
-rw-r--r--arch/arm/mm/dma-mapping.c3
-rw-r--r--arch/arm/mm/flush.c4
-rw-r--r--arch/arm/mm/proc-v7.S10
-rw-r--r--arch/arm64/boot/dts/broadcom/vulcan.dtsi15
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi20
-rw-r--r--arch/arm64/include/asm/kvm_arm.h10
-rw-r--r--arch/arm64/include/asm/kvm_asm.h2
-rw-r--r--arch/arm64/include/asm/kvm_host.h7
-rw-r--r--arch/arm64/include/asm/sysreg.h3
-rw-r--r--arch/arm64/kernel/head.S13
-rw-r--r--arch/arm64/kernel/smp_spin_table.c11
-rw-r--r--arch/arm64/kvm/hyp/s2-setup.c45
-rw-r--r--arch/m68k/coldfire/gpio.c8
-rw-r--r--arch/m68k/configs/amiga_defconfig13
-rw-r--r--arch/m68k/configs/apollo_defconfig13
-rw-r--r--arch/m68k/configs/atari_defconfig13
-rw-r--r--arch/m68k/configs/bvme6000_defconfig13
-rw-r--r--arch/m68k/configs/hp300_defconfig13
-rw-r--r--arch/m68k/configs/mac_defconfig13
-rw-r--r--arch/m68k/configs/multi_defconfig13
-rw-r--r--arch/m68k/configs/mvme147_defconfig13
-rw-r--r--arch/m68k/configs/mvme16x_defconfig13
-rw-r--r--arch/m68k/configs/q40_defconfig13
-rw-r--r--arch/m68k/configs/sun3_defconfig13
-rw-r--r--arch/m68k/configs/sun3x_defconfig13
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h2
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/mips/alchemy/common/dbdma.c4
-rw-r--r--arch/mips/alchemy/devboards/db1000.c18
-rw-r--r--arch/mips/alchemy/devboards/db1550.c4
-rw-r--r--arch/mips/ath79/clock.c44
-rw-r--r--arch/mips/bcm47xx/sprom.c4
-rw-r--r--arch/mips/boot/compressed/Makefile7
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c14
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c2
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/configs/ci20_defconfig14
-rw-r--r--arch/mips/dec/int-handler.S2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/include/asm/cpu-info.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-generic/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h2
-rw-r--r--arch/mips/include/asm/mips-cm.h2
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h2
-rw-r--r--arch/mips/include/asm/pci/bridge.h18
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h2
-rw-r--r--arch/mips/include/asm/sgiarcs.h4
-rw-r--r--arch/mips/include/asm/sn/ioc3.h2
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h2
-rw-r--r--arch/mips/include/asm/uaccess.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c12
-rw-r--r--arch/mips/kernel/module-rela.c19
-rw-r--r--arch/mips/kernel/module.c19
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/mips/kernel/pm-cps.c2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smp.c12
-rw-r--r--arch/mips/kernel/traps.c46
-rw-r--r--arch/mips/kernel/unaligned.c51
-rw-r--r--arch/mips/kvm/tlb.c2
-rw-r--r--arch/mips/kvm/trap_emul.c2
-rw-r--r--arch/mips/math-emu/ieee754dp.c6
-rw-r--r--arch/mips/math-emu/ieee754sp.c6
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c5
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/nios2/lib/memset.c2
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/Kconfig.debug4
-rw-r--r--arch/parisc/Makefile4
-rw-r--r--arch/parisc/include/asm/ftrace.h18
-rw-r--r--arch/parisc/include/asm/uaccess.h11
-rw-r--r--arch/parisc/kernel/Makefile4
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/cache.c2
-rw-r--r--arch/parisc/kernel/entry.S93
-rw-r--r--arch/parisc/kernel/ftrace.c146
-rw-r--r--arch/parisc/kernel/head.S9
-rw-r--r--arch/parisc/kernel/module.c8
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c10
-rw-r--r--arch/parisc/kernel/traps.c3
-rw-r--r--arch/parisc/lib/fixup.S6
-rw-r--r--arch/parisc/mm/fault.c1
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h1
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h2
-rw-r--r--arch/powerpc/kernel/prom.c26
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c4
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/hypfs/inode.c4
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h28
-rw-r--r--arch/s390/include/asm/pci.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/seccomp.h2
-rw-r--r--arch/s390/include/asm/tlbflush.h9
-rw-r--r--arch/s390/lib/spinlock.c1
-rw-r--r--arch/s390/mm/gmap.c4
-rw-r--r--arch/s390/mm/init.c3
-rw-r--r--arch/s390/mm/mmap.c6
-rw-r--r--arch/s390/mm/pgalloc.c85
-rw-r--r--arch/s390/pci/pci_dma.c16
-rw-r--r--arch/sh/include/asm/smp.h5
-rw-r--r--arch/sh/include/asm/topology.h2
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c2
-rw-r--r--arch/sh/kernel/topology.c4
-rw-r--r--arch/x86/boot/compressed/Makefile14
-rw-r--r--arch/x86/boot/compressed/head_32.S28
-rw-r--r--arch/x86/boot/compressed/head_64.S8
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c4
-rw-r--r--arch/x86/events/amd/core.c2
-rw-r--r--arch/x86/events/intel/core.c1
-rw-r--r--arch/x86/events/intel/lbr.c6
-rw-r--r--arch/x86/events/intel/pt.c75
-rw-r--r--arch/x86/events/intel/pt.h3
-rw-r--r--arch/x86/events/intel/rapl.c1
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/msr-index.h8
-rw-r--r--arch/x86/include/asm/perf_event.h4
-rw-r--r--arch/x86/kernel/apic/vector.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-genpool.c4
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--arch/x86/kernel/head_32.S6
-rw-r--r--arch/x86/kvm/cpuid.c1
-rw-r--r--arch/x86/kvm/hyperv.c5
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/mmu.c12
-rw-r--r--arch/x86/kvm/mmu.h9
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c4
-rw-r--r--arch/x86/kvm/x86.c30
-rw-r--r--arch/x86/mm/setup_nx.c5
-rw-r--r--arch/x86/xen/apic.c12
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/spinlock.c6
-rw-r--r--block/bio.c12
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-settings.c12
-rw-r--r--block/blk-sysfs.c8
-rw-r--r--block/cfq-iosched.c2
-rw-r--r--block/compat_ioctl.c4
-rw-r--r--block/ioctl.c4
-rw-r--r--block/partition-generic.c21
-rw-r--r--crypto/rsa-pkcs1pad.c12
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/bcma/main.c17
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/rbd.c58
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/bus/uniphier-system-bus.c2
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c1
-rw-r--r--drivers/clocksource/tango_xtal.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq_governor.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c215
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/talitos.c87
-rw-r--r--drivers/dma/dw/core.c34
-rw-r--r--drivers/dma/edma.c63
-rw-r--r--drivers/dma/hsu/hsu.c13
-rw-r--r--drivers/dma/hsu/hsu.h3
-rw-r--r--drivers/dma/omap-dma.c26
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/sb_edac.c32
-rw-r--r--drivers/extcon/extcon-palmas.c3
-rw-r--r--drivers/firmware/efi/arm-init.c18
-rw-r--r--drivers/firmware/efi/vars.c37
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/firmware/qemu_fw_cfg.c24
-rw-r--r--drivers/gpio/gpio-pca953x.c3
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpiolib.c133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c69
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c16
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c14
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c31
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/Makefile6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c31
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c42
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c6
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c13
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c123
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c154
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h46
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c79
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c8
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-lenovo.c16
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c1
-rw-r--r--drivers/hid/hid-wiimote-modules.c14
-rw-r--r--drivers/hid/usbhid/hid-core.c73
-rw-r--r--drivers/hid/wacom_sys.c102
-rw-r--r--drivers/hid/wacom_wac.c11
-rw-r--r--drivers/hid/wacom_wac.h8
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c24
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c7
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c1
-rw-r--r--drivers/i2c/i2c-core.c10
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c39
-rw-r--r--drivers/idle/intel_idle.c97
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c7
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/max1363.c12
-rw-r--r--drivers/iio/gyro/bmg160_core.c9
-rw-r--r--drivers/iio/health/max30100.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig3
-rw-r--r--drivers/iio/industrialio-buffer.c1
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c24
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c55
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h2
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/misc/arizona-haptics.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/misc/twl4030-vibra.c1
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/tablet/gtco.c10
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu.c22
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/iommu/rockchip-iommu.c8
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/lguest/interrupts_and_traps.c6
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/mailbox/mailbox-test.c16
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/md/bitmap.c21
-rw-r--r--drivers/md/dm-cache-metadata.c64
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c52
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c63
-rw-r--r--drivers/media/usb/au0828/au0828.h9
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c7
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c20
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c20
-rw-r--r--drivers/misc/cxl/context.c7
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/cxl/native.c31
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/lkdtm.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/card/block.c18
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c81
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c25
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c22
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mmc/host/sdhci.c39
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/nand/nand_base.c10
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/dsa/mv88e6xxx.c34
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c5
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c40
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c157
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/macsec.c65
-rw-r--r--drivers/net/phy/spi_ks8995.c2
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c177
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/nvdimm/btt.c2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/core.c41
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c52
-rw-r--r--drivers/nvme/host/pci.c31
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/pci/access.c42
-rw-r--r--drivers/pci/host/pci-imx6.c20
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/perf/arm_pmu.c15
-rw-r--r--drivers/phy/phy-rockchip-dp.c7
-rw-r--r--drivers/phy/phy-rockchip-emmc.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c35
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-single.c6
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c14
-rw-r--r--drivers/pinctrl/sh-pfc/core.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/platform/x86/hp_accel.c6
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c48
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c1
-rw-r--r--drivers/scsi/cxlflash/main.c138
-rw-r--r--drivers/scsi/cxlflash/main.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c33
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/sd.c49
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c11
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-omap2-mcspi.c62
-rw-r--r--drivers/spi/spi-rockchip.c16
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c16
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c10
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h10
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c30
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c44
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c54
-rw-r--r--drivers/staging/olpc_dcon/Kconfig35
-rw-r--r--drivers/staging/olpc_dcon/Makefile6
-rw-r--r--drivers/staging/olpc_dcon/TODO9
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c813
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h111
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c205
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c161
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig1
-rw-r--r--drivers/staging/rdma/hfi1/TODO2
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c91
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.c40
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.h3
-rw-r--r--drivers/staging/rdma/hfi1/qp.c2
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.c11
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c33
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/mtk_thermal.c3
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/tty/pty.c79
-rw-r--r--drivers/tty/serial/8250/8250_port.c11
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/uartlite.c8
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/core/config.c16
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/dwc2/gadget.c23
-rw-r--r--drivers/usb/dwc3/core.c71
-rw-r--r--drivers/usb/dwc3/debugfs.c13
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c12
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c17
-rw-r--r--drivers/usb/gadget/composite.c10
-rw-r--r--drivers/usb/gadget/function/f_fs.c9
-rw-r--r--drivers/usb/gadget/function/f_midi.c17
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c14
-rw-r--r--drivers/usb/gadget/udc/udc-core.c6
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c13
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c72
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c6
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/mct_u232.c9
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/uas.c21
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/video/fbdev/amba-clcd.c15
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c12
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c11
-rw-r--r--drivers/xen/events/events_base.c28
-rw-r--r--fs/9p/vfs_addr.c18
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/9p/vfs_super.c2
-rw-r--r--fs/affs/file.c26
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/file.c4
-rw-r--r--fs/afs/mntpt.c6
-rw-r--r--fs/afs/super.c4
-rw-r--r--fs/afs/write.c26
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/btrfs/check-integrity.c64
-rw-r--r--fs/btrfs/compression.c84
-rw-r--r--fs/btrfs/ctree.c12
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c14
-rw-r--r--fs/btrfs/extent-tree.c25
-rw-r--r--fs/btrfs/extent_io.c266
-rw-r--r--fs/btrfs/extent_io.h6
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c51
-rw-r--r--fs/btrfs/free-space-cache.c30
-rw-r--r--fs/btrfs/inode-map.c10
-rw-r--r--fs/btrfs/inode.c104
-rw-r--r--fs/btrfs/ioctl.c86
-rw-r--r--fs/btrfs/lzo.c32
-rw-r--r--fs/btrfs/qgroup.c63
-rw-r--r--fs/btrfs/raid56.c28
-rw-r--r--fs/btrfs/reada.c30
-rw-r--r--fs/btrfs/relocation.c17
-rw-r--r--fs/btrfs/scrub.c24
-rw-r--r--fs/btrfs/send.c16
-rw-r--r--fs/btrfs/struct-funcs.c4
-rw-r--r--fs/btrfs/tests/extent-io-tests.c44
-rw-r--r--fs/btrfs/tests/free-space-tests.c2
-rw-r--r--fs/btrfs/tree-log.c137
-rw-r--r--fs/btrfs/volumes.c14
-rw-r--r--fs/btrfs/zlib.c38
-rw-r--r--fs/buffer.c100
-rw-r--r--fs/cachefiles/rdwr.c38
-rw-r--r--fs/ceph/addr.c114
-rw-r--r--fs/ceph/caps.c2
-rw-r--r--fs/ceph/dir.c4
-rw-r--r--fs/ceph/file.c32
-rw-r--r--fs/ceph/inode.c6
-rw-r--r--fs/ceph/mds_client.c8
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/super.c8
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/cifssmb.c16
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c96
-rw-r--r--fs/cifs/inode.c10
-rw-r--r--fs/configfs/mount.c4
-rw-r--r--fs/cramfs/README26
-rw-r--r--fs/cramfs/inode.c32
-rw-r--r--fs/crypto/crypto.c59
-rw-r--r--fs/dax.c34
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/devpts/inode.c100
-rw-r--r--fs/direct-io.c26
-rw-r--r--fs/dlm/lowcomms.c8
-rw-r--r--fs/ecryptfs/crypto.c22
-rw-r--r--fs/ecryptfs/inode.c8
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/main.c8
-rw-r--r--fs/ecryptfs/mmap.c44
-rw-r--r--fs/ecryptfs/read_write.c14
-rw-r--r--fs/efivarfs/super.c4
-rw-r--r--fs/exofs/dir.c30
-rw-r--r--fs/exofs/inode.c34
-rw-r--r--fs/exofs/namei.c4
-rw-r--r--fs/ext2/dir.c36
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext4/crypto.c61
-rw-r--r--fs/ext4/dir.c4
-rw-r--r--fs/ext4/ext4.h33
-rw-r--r--fs/ext4/file.c16
-rw-r--r--fs/ext4/inline.c18
-rw-r--r--fs/ext4/inode.c176
-rw-r--r--fs/ext4/mballoc.c40
-rw-r--r--fs/ext4/move_extent.c27
-rw-r--r--fs/ext4/page-io.c18
-rw-r--r--fs/ext4/readpage.c14
-rw-r--r--fs/ext4/super.c65
-rw-r--r--fs/ext4/symlink.c4
-rw-r--r--fs/ext4/xattr.c32
-rw-r--r--fs/f2fs/data.c68
-rw-r--r--fs/f2fs/debug.c6
-rw-r--r--fs/f2fs/dir.c4
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/file.c84
-rw-r--r--fs/f2fs/inline.c10
-rw-r--r--fs/f2fs/namei.c16
-rw-r--r--fs/f2fs/node.c10
-rw-r--r--fs/f2fs/recovery.c2
-rw-r--r--fs/f2fs/segment.c16
-rw-r--r--fs/f2fs/super.c108
-rw-r--r--fs/freevxfs/vxfs_immed.c4
-rw-r--r--fs/freevxfs/vxfs_lookup.c12
-rw-r--r--fs/freevxfs/vxfs_subr.c2
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fscache/page.c10
-rw-r--r--fs/fuse/dev.c26
-rw-r--r--fs/fuse/file.c72
-rw-r--r--fs/fuse/inode.c16
-rw-r--r--fs/gfs2/aops.c44
-rw-r--r--fs/gfs2/bmap.c12
-rw-r--r--fs/gfs2/file.c16
-rw-r--r--fs/gfs2/meta_io.c4
-rw-r--r--fs/gfs2/quota.c14
-rw-r--r--fs/gfs2/rgrp.c5
-rw-r--r--fs/hfs/bnode.c12
-rw-r--r--fs/hfs/btree.c20
-rw-r--r--fs/hfs/inode.c8
-rw-r--r--fs/hfsplus/bitmap.c2
-rw-r--r--fs/hfsplus/bnode.c90
-rw-r--r--fs/hfsplus/btree.c22
-rw-r--r--fs/hfsplus/inode.c8
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hfsplus/xattr.c6
-rw-r--r--fs/hostfs/hostfs_kern.c18
-rw-r--r--fs/hugetlbfs/inode.c10
-rw-r--r--fs/isofs/compress.c36
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/jffs2/debug.c8
-rw-r--r--fs/jffs2/file.c23
-rw-r--r--fs/jffs2/fs.c8
-rw-r--r--fs/jffs2/gc.c8
-rw-r--r--fs/jffs2/nodelist.c8
-rw-r--r--fs/jffs2/write.c7
-rw-r--r--fs/jfs/jfs_metapage.c42
-rw-r--r--fs/jfs/jfs_metapage.h4
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/kernfs/mount.c4
-rw-r--r--fs/libfs.c24
-rw-r--r--fs/logfs/dev_bdev.c2
-rw-r--r--fs/logfs/dev_mtd.c10
-rw-r--r--fs/logfs/dir.c12
-rw-r--r--fs/logfs/file.c26
-rw-r--r--fs/logfs/readwrite.c20
-rw-r--r--fs/logfs/segment.c28
-rw-r--r--fs/logfs/super.c16
-rw-r--r--fs/minix/dir.c18
-rw-r--r--fs/minix/namei.c4
-rw-r--r--fs/mpage.c22
-rw-r--r--fs/ncpfs/dir.c10
-rw-r--r--fs/ncpfs/ncplib_kernel.h2
-rw-r--r--fs/nfs/blocklayout/blocklayout.c24
-rw-r--r--fs/nfs/blocklayout/blocklayout.h4
-rw-r--r--fs/nfs/client.c8
-rw-r--r--fs/nfs/dir.c10
-rw-r--r--fs/nfs/direct.c8
-rw-r--r--fs/nfs/file.c20
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h6
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/objlayout/objio_osd.c2
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs.c6
-rw-r--r--fs/nfs/read.c16
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nilfs2/bmap.c2
-rw-r--r--fs/nilfs2/btnode.c10
-rw-r--r--fs/nilfs2/dir.c32
-rw-r--r--fs/nilfs2/gcinode.c2
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/nilfs2/mdt.c14
-rw-r--r--fs/nilfs2/namei.c4
-rw-r--r--fs/nilfs2/page.c18
-rw-r--r--fs/nilfs2/recovery.c4
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/ntfs/aops.c50
-rw-r--r--fs/ntfs/aops.h4
-rw-r--r--fs/ntfs/attrib.c28
-rw-r--r--fs/ntfs/bitmap.c10
-rw-r--r--fs/ntfs/compress.c77
-rw-r--r--fs/ntfs/dir.c56
-rw-r--r--fs/ntfs/file.c56
-rw-r--r--fs/ntfs/index.c14
-rw-r--r--fs/ntfs/inode.c12
-rw-r--r--fs/ntfs/lcnalloc.c6
-rw-r--r--fs/ntfs/logfile.c16
-rw-r--r--fs/ntfs/mft.c34
-rw-r--r--fs/ntfs/ntfs.h2
-rw-r--r--fs/ntfs/super.c72
-rw-r--r--fs/ocfs2/alloc.c28
-rw-r--r--fs/ocfs2/aops.c50
-rw-r--r--fs/ocfs2/cluster/heartbeat.c10
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/file.c14
-rw-r--r--fs/ocfs2/mmap.c6
-rw-r--r--fs/ocfs2/ocfs2.h20
-rw-r--r--fs/ocfs2/quota_global.c11
-rw-r--r--fs/ocfs2/refcounttree.c24
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/orangefs/dir.c4
-rw-r--r--fs/orangefs/inode.c26
-rw-r--r--fs/orangefs/orangefs-bufmap.c4
-rw-r--r--fs/orangefs/orangefs-debugfs.c3
-rw-r--r--fs/orangefs/orangefs-utils.c8
-rw-r--r--fs/orangefs/protocol.h33
-rw-r--r--fs/orangefs/xattr.c19
-rw-r--r--fs/overlayfs/super.c33
-rw-r--r--fs/pipe.c6
-rw-r--r--fs/proc/task_mmu.c35
-rw-r--r--fs/proc/vmcore.c4
-rw-r--r--fs/pstore/inode.c4
-rw-r--r--fs/qnx6/dir.c16
-rw-r--r--fs/qnx6/inode.c4
-rw-r--r--fs/qnx6/qnx6.h2
-rw-r--r--fs/quota/dquot.c13
-rw-r--r--fs/ramfs/inode.c4
-rw-r--r--fs/reiserfs/file.c4
-rw-r--r--fs/reiserfs/inode.c44
-rw-r--r--fs/reiserfs/ioctl.c4
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/tail_conversion.c4
-rw-r--r--fs/reiserfs/xattr.c18
-rw-r--r--fs/seq_file.c7
-rw-r--r--fs/splice.c32
-rw-r--r--fs/squashfs/block.c4
-rw-r--r--fs/squashfs/cache.c18
-rw-r--r--fs/squashfs/decompressor.c2
-rw-r--r--fs/squashfs/file.c24
-rw-r--r--fs/squashfs/file_direct.c22
-rw-r--r--fs/squashfs/lz4_wrapper.c8
-rw-r--r--fs/squashfs/lzo_wrapper.c8
-rw-r--r--fs/squashfs/page_actor.c4
-rw-r--r--fs/squashfs/page_actor.h2
-rw-r--r--fs/squashfs/super.c2
-rw-r--r--fs/squashfs/symlink.c6
-rw-r--r--fs/squashfs/xz_wrapper.c4
-rw-r--r--fs/squashfs/zlib_wrapper.c4
-rw-r--r--fs/sync.c4
-rw-r--r--fs/sysv/dir.c18
-rw-r--r--fs/sysv/namei.c4
-rw-r--r--fs/ubifs/file.c54
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/udf/file.c6
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/ufs/balloc.c6
-rw-r--r--fs/ufs/dir.c32
-rw-r--r--fs/ufs/inode.c4
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--fs/ufs/util.c4
-rw-r--r--fs/ufs/util.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c4
-rw-r--r--fs/xfs/xfs_aops.c22
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_mount.h4
-rw-r--r--fs/xfs/xfs_pnfs.c4
-rw-r--r--fs/xfs/xfs_super.c8
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/drm/ttm/ttm_bo_api.h2
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/ceph/auth.h10
-rw-r--r--include/linux/ceph/libceph.h4
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/cgroup-defs.h1
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/dcache.h10
-rw-r--r--include/linux/devpts_fs.h38
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/fscrypto.h9
-rw-r--r--include/linux/huge_mm.h5
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/mlx4/device.h7
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/port.h6
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h70
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/nfs_page.h6
-rw-r--r--include/linux/nilfs2_fs.h4
-rw-r--r--include/linux/pagemap.h32
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/pmem.h22
-rw-r--r--include/linux/rculist_nulls.h39
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/thermal.h4
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/media/videobuf2-core.h8
-rw-r--r--include/net/act_api.h1
-rw-r--r--include/net/cls_cgroup.h7
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/mac80211.h2
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/sctp/sctp.h6
-rw-r--r--include/net/sctp/structs.h8
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/switchdev.h4
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/rdma/ib.h16
-rw-r--r--include/scsi/scsi_device.h25
-rw-r--r--include/sound/hda_i915.h5
-rw-r--r--include/sound/hda_regmap.h2
-rw-r--r--include/trace/events/btrfs.h89
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/if_macsec.h4
-rw-r--r--include/uapi/linux/usb/ch9.h2
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h30
-rw-r--r--include/uapi/linux/virtio_config.h2
-rw-r--r--include/video/imx-ipu-v3.h7
-rw-r--r--ipc/mqueue.c4
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cgroup.c7
-rw-r--r--kernel/cpu.c33
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/events/core.c55
-rw-r--r--kernel/events/uprobes.c8
-rw-r--r--kernel/futex.c27
-rw-r--r--kernel/irq/ipi.c1
-rw-r--r--kernel/kcov.c3
-rw-r--r--kernel/kexec_core.c7
-rw-r--r--kernel/locking/lockdep.c37
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/qspinlock_stat.h8
-rw-r--r--kernel/resource.c13
-rw-r--r--kernel/workqueue.c29
-rw-r--r--lib/assoc_array.c4
-rw-r--r--lib/lz4/lz4defs.h25
-rw-r--r--lib/stackdepot.c4
-rw-r--r--lib/test_bpf.c229
-rw-r--r--mm/backing-dev.c4
-rw-r--r--mm/fadvise.c8
-rw-r--r--mm/filemap.c126
-rw-r--r--mm/gup.c54
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/madvise.c6
-rw-r--r--mm/memcontrol.c37
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/memory.c95
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/mincore.c8
-rw-r--r--mm/nommu.c46
-rw-r--r--mm/page-writeback.c12
-rw-r--r--mm/page_io.c8
-rw-r--r--mm/readahead.c20
-rw-r--r--mm/rmap.c2
-rw-r--r--mm/shmem.c130
-rw-r--r--mm/swap.c19
-rw-r--r--mm/swap_state.c12
-rw-r--r--mm/swapfile.c12
-rw-r--r--mm/truncate.c40
-rw-r--r--mm/userfaultfd.c4
-rw-r--r--mm/vmscan.c30
-rw-r--r--mm/zswap.c4
-rw-r--r--net/bridge/br_mdb.c124
-rw-r--r--net/bridge/br_multicast.c8
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/ceph/auth.c8
-rw-r--r--net/ceph/auth_none.c71
-rw-r--r--net/ceph/auth_none.h3
-rw-r--r--net/ceph/auth_x.c21
-rw-r--r--net/ceph/auth_x.h1
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/osd_client.c6
-rw-r--r--net/ceph/pagelist.c4
-rw-r--r--net/ceph/pagevec.c30
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/core/sock.c9
-rw-r--r--net/decnet/dn_route.c9
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/fou.c6
-rw-r--r--net/ipv4/gre_offload.c8
-rw-r--r--net/ipv4/ip_gre.c13
-rw-r--r--net/ipv4/netfilter/arptable_filter.c6
-rw-r--r--net/ipv4/route.c19
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_output.c16
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv6/addrconf.c70
-rw-r--r--net/ipv6/datagram.c169
-rw-r--r--net/ipv6/ip6_output.c8
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/udp.c1
-rw-r--r--net/l2tp/l2tp_ip.c8
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/mac80211/chan.c4
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/sta_info.c14
-rw-r--r--net/mac80211/sta_info.h1
-rw-r--r--net/mac80211/tdls.c43
-rw-r--r--net/mac80211/tx.c13
-rw-r--r--net/mac80211/vht.c30
-rw-r--r--net/mpls/af_mpls.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/openvswitch/conntrack.c1
-rw-r--r--net/packet/af_packet.c3
-rw-r--r--net/rds/cong.c4
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/rds/ib_recv.c2
-rw-r--r--net/rds/page.c4
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sctp/output.c3
-rw-r--r--net/sctp/outqueue.c15
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sctp/sm_sideeffect.c36
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c10
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c3
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c4
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/socklib.c6
-rw-r--r--net/sunrpc/xdr.c50
-rw-r--r--net/switchdev/switchdev.c6
-rw-r--r--net/tipc/core.c1
-rw-r--r--net/tipc/core.h3
-rw-r--r--net/tipc/name_distr.c35
-rw-r--r--net/vmw_vsock/vmci_transport.c9
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--samples/bpf/Makefile12
-rw-r--r--samples/bpf/bpf_helpers.h26
-rw-r--r--samples/bpf/map_perf_test_user.c1
-rw-r--r--samples/bpf/spintest_kern.c2
-rw-r--r--samples/bpf/tracex2_kern.c4
-rw-r--r--samples/bpf/tracex4_kern.c2
-rw-r--r--scripts/asn1_compiler.c2
-rw-r--r--sound/hda/ext/hdac_ext_stream.c5
-rw-r--r--sound/hda/hdac_device.c10
-rw-r--r--sound/hda/hdac_i915.c77
-rw-r--r--sound/hda/hdac_regmap.c40
-rw-r--r--sound/isa/sscape.c2
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/hda_intel.c59
-rw-r--r--sound/pci/hda/patch_cirrus.c14
-rw-r--r--sound/pci/hda/patch_hdmi.c12
-rw-r--r--sound/pci/hda/patch_realtek.c12
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c1
-rw-r--r--sound/soc/codecs/Kconfig1
-rw-r--r--sound/soc/codecs/arizona.c12
-rw-r--r--sound/soc/codecs/arizona.h2
-rw-r--r--sound/soc/codecs/cs35l32.c17
-rw-r--r--sound/soc/codecs/cs47l24.c3
-rw-r--r--sound/soc/codecs/hdac_hdmi.c94
-rw-r--r--sound/soc/codecs/nau8825.c126
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5640.h36
-rw-r--r--sound/soc/codecs/wm5102.c5
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/codecs/wm8997.c2
-rw-r--r--sound/soc/codecs/wm8998.c2
-rw-r--r--sound/soc/intel/Kconfig1
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c42
-rw-r--r--sound/soc/intel/skylake/skl-topology.h8
-rw-r--r--sound/soc/intel/skylake/skl.c32
-rw-r--r--sound/soc/soc-dapm.c7
-rw-r--r--sound/usb/Kconfig4
-rw-r--r--sound/usb/Makefile2
-rw-r--r--sound/usb/card.c14
-rw-r--r--sound/usb/card.h3
-rw-r--r--sound/usb/media.c318
-rw-r--r--sound/usb/media.h72
-rw-r--r--sound/usb/mixer.h3
-rw-r--r--sound/usb/mixer_maps.c14
-rw-r--r--sound/usb/pcm.c28
-rw-r--r--sound/usb/quirks-table.h1
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/stream.c2
-rw-r--r--sound/usb/usbaudio.h6
-rw-r--r--tools/objtool/Documentation/stack-validation.txt38
-rw-r--r--tools/objtool/builtin-check.c97
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.c117
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/reuseport_dualstack.c208
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c22
-rw-r--r--virt/kvm/arm/arch_timer.c49
-rw-r--r--virt/kvm/arm/pmu.c3
1211 files changed, 13051 insertions, 7850 deletions
diff --git a/.mailmap b/.mailmap
index 90c0aefc276d..c156a8b4d845 100644
--- a/.mailmap
+++ b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
 Filipe Lautert <filipe@icewall.org>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
@@ -79,6 +82,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
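Entries of this form map a contributor's older addresses onto one canonical identity, so tools such as git shortlog and git log --use-mailmap fold them into a single author. A quick way to verify a mapping from a tree carrying this .mailmap, using one of the addresses added above:

    $ git check-mailmap "Frank Rowand <frowand@mvista.com>"
    Frank Rowand <frowand.list@gmail.com>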
diff --git a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
index 7ac7d7262bb7..3c3514815cd5 100644
--- a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
+++ b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
@@ -1,23 +1,18 @@
-What:		/sys/devices/platform/<i2c-demux-name>/cur_master
+What:		/sys/devices/platform/<i2c-demux-name>/available_masters
 Date:		January 2016
 KernelVersion:	4.6
 Contact:	Wolfram Sang <wsa@the-dreams.de>
 Description:
+		Reading the file will give you a list of masters which can be
+		selected for a demultiplexed bus. The format is
+		"<index>:<name>". Example from a Renesas Lager board:
 
-This file selects the active I2C master for a demultiplexed bus.
+		0:/i2c@e6500000 1:/i2c@e6508000
 
-Write 0 there for the first master, 1 for the second etc. Reading the file will
-give you a list with the active master marked. Example from a Renesas Lager
-board:
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-* 0 - /i2c@9
-  1 - /i2c@e6520000
-  2 - /i2c@e6530000
-
-root@Lager:~# echo 2 > /sys/devices/platform/i2c@8/cur_master
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-  0 - /i2c@9
-  1 - /i2c@e6520000
-* 2 - /i2c@e6530000
+What:		/sys/devices/platform/<i2c-demux-name>/current_master
+Date:		January 2016
+KernelVersion:	4.6
+Contact:	Wolfram Sang <wsa@the-dreams.de>
+Description:
+		This file selects/shows the active I2C master for a demultiplexed
+		bus. It uses the <index> value from the file 'available_masters'.
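Taken together, the two attributes are used like this; a minimal sketch assuming the Renesas Lager example above (the platform device name and the master list are board-specific):

    root@Lager:~# cat /sys/devices/platform/<i2c-demux-name>/available_masters
    0:/i2c@e6500000 1:/i2c@e6508000
    root@Lager:~# echo 1 > /sys/devices/platform/<i2c-demux-name>/current_master

Writing an <index> taken from 'available_masters' switches the demultiplexed bus over to that master.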
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87c640..e4b9dcee6d41 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
 
 The ARC HS can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
 It also supports overflow interrupts.
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b9588444f20..4e874d9a38a6 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
 
 The ARC700 can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
 
 Note that:
  * The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index ccc62f145306..3f0cbbb8395f 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
 			    can be one of:
 			    "allwinner,sun6i-a31"
 			    "allwinner,sun8i-a23"
-			    "arm,psci"
 			    "arm,realview-smp"
 			    "brcm,bcm-nsp-smp"
 			    "brcm,brahma-b15"
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
index e0fc2c11dd00..241fb0545b9e 100644
--- a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
+++ b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
 The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following
+- compatible: has to be "qca,<soctype>-pll" and one of the following
   fallbacks:
   - "qca,ar7100-pll"
   - "qca,ar7240-pll"
@@ -21,8 +21,8 @@ Optional properties:
 
 Example:
 
-	memory-controller@18050000 {
-		compatible = "qca,ar9132-ppl", "qca,ar9130-pll";
+	pll-controller@18050000 {
+		compatible = "qca,ar9132-pll", "qca,ar9130-pll";
 		reg = <0x18050000 0x20>;
 
 		clock-names = "ref";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc52e64..0b4a85fe2d86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@ RK3xxx SoCs.
 Required properties :
 
 - reg : Offset and length of the register set for the device
-- compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
-  "rockchip,rk3288-i2c".
+- compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+  "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
 - interrupts : interrupt number
 - clocks : parent clock
 
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 5ca79290eabf..32eaaca04d9b 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+  order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
   "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
42 <&ethsys CLK_ETHSYS_GP2>, 43 <&ethsys CLK_ETHSYS_GP2>,
43 <&ethsys CLK_ETHSYS_GP1>; 44 <&ethsys CLK_ETHSYS_GP1>;
44 clock-names = "ethif", "esw", "gp2", "gp1"; 45 clock-names = "ethif", "esw", "gp2", "gp1";
45 interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>; 46 interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
47 GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
48 GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
46 power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; 49 power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
47 resets = <&ethsys MT2701_ETHSYS_ETH_RST>; 50 resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
48 reset-names = "eth"; 51 reset-names = "eth";
diff --git a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
index 50c4f9b00adf..e3b4809fbe82 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
@@ -8,15 +8,19 @@ Required properties:
8 of memory mapped region. 8 of memory mapped region.
9- clock-names: from common clock binding: 9- clock-names: from common clock binding:
10 Required elements: "24m" 10 Required elements: "24m"
11- rockchip,grf: phandle to the syscon managing the "general register files"
12- #phy-cells : from the generic PHY bindings, must be 0; 11- #phy-cells : from the generic PHY bindings, must be 0;
13 12
14Example: 13Example:
15 14
16edp_phy: edp-phy { 15grf: syscon@ff770000 {
17 compatible = "rockchip,rk3288-dp-phy"; 16 compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
18 rockchip,grf = <&grf>; 17
19 clocks = <&cru SCLK_EDP_24M>; 18...
20 clock-names = "24m"; 19
21 #phy-cells = <0>; 20 edp_phy: edp-phy {
21 compatible = "rockchip,rk3288-dp-phy";
22 clocks = <&cru SCLK_EDP_24M>;
23 clock-names = "24m";
24 #phy-cells = <0>;
25 };
22}; 26};
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index 61916f15a949..555cb0f40690 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -3,17 +3,23 @@ Rockchip EMMC PHY
3 3
4Required properties: 4Required properties:
5 - compatible: rockchip,rk3399-emmc-phy 5 - compatible: rockchip,rk3399-emmc-phy
6 - rockchip,grf : phandle to the syscon managing the "general
7 register files"
8 - #phy-cells: must be 0 6 - #phy-cells: must be 0
9 - reg: PHY configure reg address offset in "general 7 - reg: PHY register address offset and length in "general
10 register files" 8 register files"
11 9
12Example: 10Example:
13 11
14emmcphy: phy { 12
15 compatible = "rockchip,rk3399-emmc-phy"; 13grf: syscon@ff770000 {
16 rockchip,grf = <&grf>; 14 compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
17 reg = <0xf780>; 15 #address-cells = <1>;
18 #phy-cells = <0>; 16 #size-cells = <1>;
17
18...
19
20 emmcphy: phy@f780 {
21 compatible = "rockchip,rk3399-emmc-phy";
22 reg = <0xf780 0x20>;
23 #phy-cells = <0>;
24 };
19}; 25};
diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
index 08a4a32c8eb0..0326154c7925 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
@@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
134mfio81 dreq0, mips_trace_data, eth_debug 134mfio81 dreq0, mips_trace_data, eth_debug
135mfio82 dreq1, mips_trace_data, eth_debug 135mfio82 dreq1, mips_trace_data, eth_debug
136mfio83 mips_pll_lock, mips_trace_data, usb_debug 136mfio83 mips_pll_lock, mips_trace_data, usb_debug
137mfio84 sys_pll_lock, mips_trace_data, usb_debug 137mfio84 audio_pll_lock, mips_trace_data, usb_debug
138mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug 138mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
139mfio86 bt_pll_lock, mips_trace_data, sdhost_debug 139mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
140mfio87 rpu_v_pll_lock, dreq2, socif_debug 140mfio87 sys_pll_lock, dreq2, socif_debug
141mfio88 rpu_l_pll_lock, dreq3, socif_debug 141mfio88 wifi_pll_lock, dreq3, socif_debug
142mfio89 audio_pll_lock, dreq4, dreq5 142mfio89 bt_pll_lock, dreq4, dreq5
143tck 143tck
144trstn 144trstn
145tdi 145tdi
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 3f6a524cc5ff..32f4a2d6d0b3 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -1,13 +1,16 @@
1== Amlogic Meson pinmux controller == 1== Amlogic Meson pinmux controller ==
2 2
3Required properties for the root node: 3Required properties for the root node:
4 - compatible: "amlogic,meson8-pinctrl" or "amlogic,meson8b-pinctrl" 4 - compatible: one of "amlogic,meson8-cbus-pinctrl"
5 "amlogic,meson8b-cbus-pinctrl"
6 "amlogic,meson8-aobus-pinctrl"
7 "amlogic,meson8b-aobus-pinctrl"
5 - reg: address and size of registers controlling irq functionality 8 - reg: address and size of registers controlling irq functionality
6 9
7=== GPIO sub-nodes === 10=== GPIO sub-nodes ===
8 11
9The 2 power domains of the controller (regular and always-on) are 12The GPIO bank for the controller is represented as a sub-node and it acts as a
10represented as sub-nodes and each of them acts as a GPIO controller. 13GPIO controller.
11 14
12Required properties for sub-nodes are: 15Required properties for sub-nodes are:
13 - reg: should contain address and size for mux, pull-enable, pull and 16 - reg: should contain address and size for mux, pull-enable, pull and
@@ -18,10 +21,6 @@ Required properties for sub-nodes are:
18 - gpio-controller: identifies the node as a gpio controller 21 - gpio-controller: identifies the node as a gpio controller
19 - #gpio-cells: must be 2 22 - #gpio-cells: must be 2
20 23
21Valid sub-node names are:
22 - "banks" for the regular domain
23 - "ao-bank" for the always-on domain
24
25=== Other sub-nodes === 24=== Other sub-nodes ===
26 25
27Child nodes without the "gpio-controller" represent some desired 26Child nodes without the "gpio-controller" represent some desired
@@ -45,7 +44,7 @@ pinctrl-bindings.txt
45=== Example === 44=== Example ===
46 45
47 pinctrl: pinctrl@c1109880 { 46 pinctrl: pinctrl@c1109880 {
48 compatible = "amlogic,meson8-pinctrl"; 47 compatible = "amlogic,meson8-cbus-pinctrl";
49 reg = <0xc1109880 0x10>; 48 reg = <0xc1109880 0x10>;
50 #address-cells = <1>; 49 #address-cells = <1>;
51 #size-cells = <1>; 50 #size-cells = <1>;
@@ -61,15 +60,6 @@ pinctrl-bindings.txt
61 #gpio-cells = <2>; 60 #gpio-cells = <2>;
62 }; 61 };
63 62
64 gpio_ao: ao-bank@c1108030 {
65 reg = <0xc8100014 0x4>,
66 <0xc810002c 0x4>,
67 <0xc8100024 0x8>;
68 reg-names = "mux", "pull", "gpio";
69 gpio-controller;
70 #gpio-cells = <2>;
71 };
72
73 nand { 63 nand {
74 mux { 64 mux {
75 groups = "nand_io", "nand_io_ce0", "nand_io_ce1", 65 groups = "nand_io", "nand_io_ce0", "nand_io_ce1",
@@ -79,18 +69,4 @@ pinctrl-bindings.txt
79 function = "nand"; 69 function = "nand";
80 }; 70 };
81 }; 71 };
82
83 uart_ao_a {
84 mux {
85 groups = "uart_tx_ao_a", "uart_rx_ao_a",
86 "uart_cts_ao_a", "uart_rts_ao_a";
87 function = "uart_ao";
88 };
89
90 conf {
91 pins = "GPIOAO_0", "GPIOAO_1",
92 "GPIOAO_2", "GPIOAO_3";
93 bias-disable;
94 };
95 };
96 }; 72 };
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index 1068ffce9f91..fdde63a5419c 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -15,9 +15,10 @@ Required properties:
15 is the rtc tick interrupt. The number of cells representing an interrupt 15 is the rtc tick interrupt. The number of cells representing an interrupt
16 depends on the parent interrupt controller. 16 depends on the parent interrupt controller.
17- clocks: Must contain a list of phandle and clock specifier for the rtc 17- clocks: Must contain a list of phandle and clock specifier for the rtc
18 and source clocks. 18 clock and in the case of an s3c6410 compatible controller, also
19- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the 19 a source clock.
20 same order as the clocks property. 20- clock-names: Must contain "rtc" and, for an s3c6410 compatible controller,
21 a "rtc_src" sorted in the same order as the clocks property.
21 22
22Example: 23Example:
23 24
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0ab957..4006298f6707 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
38which the timestamp reverts to 1970, i.e. moves backwards in time. 38which the timestamp reverts to 1970, i.e. moves backwards in time.
39 39
40Currently, cramfs must be written and read with architectures of the 40Currently, cramfs must be written and read with architectures of the
41same endianness, and can be read only by kernels with PAGE_CACHE_SIZE 41same endianness, and can be read only by kernels with PAGE_SIZE
42== 4096. At least the latter of these is a bug, but it hasn't been 42== 4096. At least the latter of these is a bug, but it hasn't been
43decided what the best fix is. For the moment if you have larger pages 43decided what the best fix is. For the moment if you have larger pages
44you can just change the #define in mkcramfs.c, so long as you don't 44you can just change the #define in mkcramfs.c, so long as you don't
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e1505f17..d9c11d25bf02 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
60 default is half of your physical RAM without swap. If you 60 default is half of your physical RAM without swap. If you
61 oversize your tmpfs instances the machine will deadlock 61 oversize your tmpfs instances the machine will deadlock
62 since the OOM handler will not be able to free that memory. 62 since the OOM handler will not be able to free that memory.
63nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE. 63nr_blocks: The same as size, but in blocks of PAGE_SIZE.
64nr_inodes: The maximum number of inodes for this instance. The default 64nr_inodes: The maximum number of inodes for this instance. The default
65 is half of the number of your physical RAM pages, or (on a 65 is half of the number of your physical RAM pages, or (on a
66 machine with highmem) the number of lowmem RAM pages, 66 machine with highmem) the number of lowmem RAM pages,
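A worked example of the units involved: with 4 KiB pages, nr_blocks=25600 caps
an instance at 25600 * 4096 bytes = 100 MiB. A minimal C sketch of such a
mount (the mount point and limits are only illustrative):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* tmpfs capped at 25600 pages (100 MiB) and 10000 inodes */
		if (mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
			  "nr_blocks=25600,nr_inodes=10000") < 0)
			perror("mount");
		return 0;
	}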
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b02a7d598258..4164bd6397a2 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -708,9 +708,9 @@ struct address_space_operations {
708 from the address space. This generally corresponds to either a 708 from the address space. This generally corresponds to either a
709 truncation, punch hole or a complete invalidation of the address 709 truncation, punch hole or a complete invalidation of the address
710 space (in the latter case 'offset' will always be 0 and 'length' 710 space (in the latter case 'offset' will always be 0 and 'length'
711 will be PAGE_CACHE_SIZE). Any private data associated with the page 711 will be PAGE_SIZE). Any private data associated with the page
712 should be updated to reflect this truncation. If offset is 0 and 712 should be updated to reflect this truncation. If offset is 0 and
713 length is PAGE_CACHE_SIZE, then the private data should be released, 713 length is PAGE_SIZE, then the private data should be released,
714 because the page must be able to be completely discarded. This may 714 because the page must be able to be completely discarded. This may
715 be done by calling the ->releasepage function, but in this case the 715 be done by calling the ->releasepage function, but in this case the
716 release MUST succeed. 716 release MUST succeed.
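As a rough sketch of the rule above (hypothetical filesystem; both helper
functions are made up for illustration), an ->invalidatepage implementation
might look like:

	static void myfs_invalidatepage(struct page *page, unsigned int offset,
					unsigned int length)
	{
		/* Forget private bookkeeping for the truncated byte range. */
		myfs_forget_range(page, offset, length);

		/* Complete invalidation: private data must be released. */
		if (offset == 0 && length == PAGE_SIZE)
			myfs_releasepage(page, 0);	/* may not fail here */
	}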
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 3f0f5ce3338b..36ea940e5bb9 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
173 proximity of the device and while the value of the BTN_TOUCH code is 0. If 173 proximity of the device and while the value of the BTN_TOUCH code is 0. If
174 the input device may be used freely in three dimensions, consider ABS_Z 174 the input device may be used freely in three dimensions, consider ABS_Z
175 instead. 175 instead.
176 - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
177 proximity and set to 0 when the tool leaves detectable proximity.
178 BTN_TOOL_<name> signals the type of tool that is currently detected by the
179 hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
176 180
177* ABS_MT_<name>: 181* ABS_MT_<name>:
178 - Used to describe multitouch input events. Please see 182 - Used to describe multitouch input events. Please see
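As an illustration of the convention above, a hypothetical pen driver
(assuming an already-registered struct input_dev) might report proximity
like this:

	/* Pen enters detectable proximity, hovering but not touching: */
	input_report_key(idev, BTN_TOOL_PEN, 1);
	input_report_abs(idev, ABS_DISTANCE, distance);
	input_report_key(idev, BTN_TOUCH, 0);
	input_sync(idev);

	/* Pen leaves detectable proximity: */
	input_report_key(idev, BTN_TOOL_PEN, 0);
	input_sync(idev);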
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ecc74fa4bfde..0b3de80ec8f6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -4077,6 +4077,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
4077 sector if the number is odd); 4077 sector if the number is odd);
4078 i = IGNORE_DEVICE (don't bind to this 4078 i = IGNORE_DEVICE (don't bind to this
4079 device); 4079 device);
4080 j = NO_REPORT_LUNS (don't use report luns
4081 command, uas only);
4080 l = NOT_LOCKABLE (don't try to lock and 4082 l = NOT_LOCKABLE (don't try to lock and
4081 unlock ejectable media); 4083 unlock ejectable media);
4082 m = MAX_SECTORS_64 (don't transfer more 4084 m = MAX_SECTORS_64 (don't transfer more
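For example, to apply the new NO_REPORT_LUNS quirk to a device with
(hypothetical) vendor ID 0bda and product ID 1234, boot with:

	usb-storage.quirks=0bda:1234:j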
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 7328cf85236c..1fd1fbe9ce95 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -586,6 +586,10 @@ drivers to make their ->remove() callbacks avoid races with runtime PM directly,
586but also it allows of more flexibility in the handling of devices during the 586but also it allows of more flexibility in the handling of devices during the
587removal of their drivers. 587removal of their drivers.
588 588
589In its ->remove() callback, a driver should undo the runtime PM changes it
590made in ->probe(). Usually this means calling pm_runtime_disable(),
591pm_runtime_dont_use_autosuspend() etc.
592
589The user space can effectively disallow the driver of the device to power manage 593The user space can effectively disallow the driver of the device to power manage
590it at run time by changing the value of its /sys/devices/.../power/control 594it at run time by changing the value of its /sys/devices/.../power/control
591attribute to "on", which causes pm_runtime_forbid() to be called. In principle, 595attribute to "on", which causes pm_runtime_forbid() to be called. In principle,
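A minimal sketch of this probe/remove pairing for a platform driver
(assuming ->probe() enabled runtime PM and autosuspend; error handling
omitted):

	static int foo_probe(struct platform_device *pdev)
	{
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		/* Undo the runtime PM setup done in ->probe(). */
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		return 0;
	}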
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb0368459da3..34a5fece3121 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
581"Zone Order" orders the zonelists by zone type, then by node within each 581"Zone Order" orders the zonelists by zone type, then by node within each
582zone. Specify "[Zz]one" for zone order. 582zone. Specify "[Zz]one" for zone order.
583 583
584Specify "[Dd]efault" to request automatic configuration. Autoconfiguration 584Specify "[Dd]efault" to request automatic configuration.
585will select "node" order in following case. 585
586(1) if the DMA zone does not exist or 586On 32-bit, the Normal zone needs to be preserved for allocations accessible
587(2) if the DMA zone comprises greater than 50% of the available memory or 587by the kernel, so "zone" order will be selected.
588(3) if any node's DMA zone comprises greater than 70% of its local memory and 588
589 the amount of local memory is big enough. 589On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
590 590order will be selected.
591Otherwise, "zone" order will be selected. Default order is recommended unless 591
592this is causing problems for your system/application. 592Default order is recommended unless this is causing problems for your
593system/application.
593 594
594============================================================== 595==============================================================
595 596
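For example, zone order can be requested explicitly through the
numa_zonelist_order sysctl this section documents (run as root):

	echo zone > /proc/sys/vm/numa_zonelist_order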
diff --git a/Documentation/usb/gadget_multi.txt b/Documentation/usb/gadget_multi.txt
index 7d66a8636cb5..5faf514047e9 100644
--- a/Documentation/usb/gadget_multi.txt
+++ b/Documentation/usb/gadget_multi.txt
@@ -43,7 +43,7 @@ For the gadget to work under Windows two conditions have to be met:
43First of all, Windows needs to detect the gadget as a USB composite 43First of all, Windows needs to detect the gadget as a USB composite
44gadget which on its own has some conditions[4]. If they are met, 44gadget which on its own has some conditions[4]. If they are met,
45Windows lets USB Generic Parent Driver[5] handle the device which then 45Windows lets USB Generic Parent Driver[5] handle the device which then
46tries to much drivers for each individual interface (sort of, don't 46tries to match drivers for each individual interface (sort of, don't
47get into too many details). 47get into too many details).
48 48
49The good news is: you do not have to worry about most of the 49The good news is: you do not have to worry about most of the
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
new file mode 100644
index 000000000000..c281ded1ba16
--- /dev/null
+++ b/Documentation/x86/protection-keys.txt
@@ -0,0 +1,27 @@
1Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
2which will be found on future Intel CPUs.
3
4Memory Protection Keys provides a mechanism for enforcing page-based
5protections, but without requiring modification of the page tables
6when an application changes protection domains. It works by
7dedicating 4 previously ignored bits in each page table entry to a
8"protection key", giving 16 possible keys.
9
10There is also a new user-accessible register (PKRU) with two separate
11bits (Access Disable and Write Disable) for each key. Being a CPU
12register, PKRU is inherently thread-local, potentially giving each
13thread a different set of protections from every other thread.
14
15There are two new instructions (RDPKRU/WRPKRU) for reading and writing
16to the new register. The feature is only available in 64-bit mode,
17even though there is theoretically space in the PAE PTEs. These
18permissions are enforced on data access only and have no effect on
19instruction fetches.
20
21=========================== Config Option ===========================
22
23This config option adds approximately 1.5kb of text and 50 bytes of
24data to the executable. A workload which does large O_DIRECT reads
25of holes in XFS files was run to exercise get_user_pages_fast(). No
26performance delta was observed with the config option
27enabled or disabled.
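A user-space sketch of the register access described above. The .byte
sequences are the RDPKRU/WRPKRU opcodes (0f 01 ee / 0f 01 ef); key number 5
is an arbitrary example, and this only executes on PKU-capable hardware:

	static inline unsigned int rdpkru(void)
	{
		unsigned int pkru, edx, ecx = 0;

		/* rdpkru: PKRU -> eax; ecx must be 0 */
		asm volatile(".byte 0x0f,0x01,0xee"
			     : "=a" (pkru), "=d" (edx) : "c" (ecx));
		return pkru;
	}

	static inline void wrpkru(unsigned int pkru)
	{
		/* wrpkru: eax -> PKRU; ecx and edx must be 0 */
		asm volatile(".byte 0x0f,0x01,0xef"
			     : : "a" (pkru), "c" (0), "d" (0));
	}

	int main(void)
	{
		/* For key k, bit 2*k is Access Disable and bit 2*k+1 is
		 * Write Disable. Deny writes through pages tagged key 5: */
		wrpkru(rdpkru() | (1u << (2 * 5 + 1)));
		return 0;
	}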
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index c518dce7da4d..5aa738346062 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
19ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space 19ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
20... unused hole ... 20... unused hole ...
21ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 21ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
22ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space 22ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
23ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls 23ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
24ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole 24ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
25 25
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
31the processes using the page fault handler, with init_level4_pgt as 31the processes using the page fault handler, with init_level4_pgt as
32reference. 32reference.
33 33
34Current X86-64 implementations only support 40 bits of address space, 34Current X86-64 implementations support up to 46 bits of address space (64 TB),
35but we support up to 46 bits. This expands into MBZ space in the page tables. 35which is our current limit. This expands into MBZ space in the page tables.
36 36
37We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual 37We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
38memory window (this size is arbitrary, it can be raised later if needed). 38memory window (this size is arbitrary, it can be raised later if needed).
diff --git a/MAINTAINERS b/MAINTAINERS
index 1c32f8a3d6c4..42e65d128d01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4302,7 +4302,7 @@ F: drivers/net/ethernet/agere/
4302 4302
4303ETHERNET BRIDGE 4303ETHERNET BRIDGE
4304M: Stephen Hemminger <stephen@networkplumber.org> 4304M: Stephen Hemminger <stephen@networkplumber.org>
4305L: bridge@lists.linux-foundation.org 4305L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
4306L: netdev@vger.kernel.org 4306L: netdev@vger.kernel.org
4307W: http://www.linuxfoundation.org/en/Net:Bridge 4307W: http://www.linuxfoundation.org/en/Net:Bridge
4308S: Maintained 4308S: Maintained
@@ -5751,7 +5751,7 @@ R: Don Skidmore <donald.c.skidmore@intel.com>
5751R: Bruce Allan <bruce.w.allan@intel.com> 5751R: Bruce Allan <bruce.w.allan@intel.com>
5752R: John Ronciak <john.ronciak@intel.com> 5752R: John Ronciak <john.ronciak@intel.com>
5753R: Mitch Williams <mitch.a.williams@intel.com> 5753R: Mitch Williams <mitch.a.williams@intel.com>
5754L: intel-wired-lan@lists.osuosl.org 5754L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
5755W: http://www.intel.com/support/feedback.htm 5755W: http://www.intel.com/support/feedback.htm
5756W: http://e1000.sourceforge.net/ 5756W: http://e1000.sourceforge.net/
5757Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ 5757Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
@@ -6027,7 +6027,7 @@ F: include/scsi/*iscsi*
6027 6027
6028ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR 6028ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
6029M: Or Gerlitz <ogerlitz@mellanox.com> 6029M: Or Gerlitz <ogerlitz@mellanox.com>
6030M: Sagi Grimberg <sagig@mellanox.com> 6030M: Sagi Grimberg <sagi@grimberg.me>
6031M: Roi Dayan <roid@mellanox.com> 6031M: Roi Dayan <roid@mellanox.com>
6032L: linux-rdma@vger.kernel.org 6032L: linux-rdma@vger.kernel.org
6033S: Supported 6033S: Supported
@@ -6037,7 +6037,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
6037F: drivers/infiniband/ulp/iser/ 6037F: drivers/infiniband/ulp/iser/
6038 6038
6039ISCSI EXTENSIONS FOR RDMA (ISER) TARGET 6039ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
6040M: Sagi Grimberg <sagig@mellanox.com> 6040M: Sagi Grimberg <sagi@grimberg.me>
6041T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 6041T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
6042L: linux-rdma@vger.kernel.org 6042L: linux-rdma@vger.kernel.org
6043L: target-devel@vger.kernel.org 6043L: target-devel@vger.kernel.org
@@ -6252,8 +6252,8 @@ S: Maintained
6252F: tools/testing/selftests 6252F: tools/testing/selftests
6253 6253
6254KERNEL VIRTUAL MACHINE (KVM) 6254KERNEL VIRTUAL MACHINE (KVM)
6255M: Gleb Natapov <gleb@kernel.org>
6256M: Paolo Bonzini <pbonzini@redhat.com> 6255M: Paolo Bonzini <pbonzini@redhat.com>
6256M: Radim Krčmář <rkrcmar@redhat.com>
6257L: kvm@vger.kernel.org 6257L: kvm@vger.kernel.org
6258W: http://www.linux-kvm.org 6258W: http://www.linux-kvm.org
6259T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git 6259T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -6400,7 +6400,7 @@ F: mm/kmemleak.c
6400F: mm/kmemleak-test.c 6400F: mm/kmemleak-test.c
6401 6401
6402KPROBES 6402KPROBES
6403M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> 6403M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
6404M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6404M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
6405M: "David S. Miller" <davem@davemloft.net> 6405M: "David S. Miller" <davem@davemloft.net>
6406M: Masami Hiramatsu <mhiramat@kernel.org> 6406M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -7576,7 +7576,7 @@ F: drivers/infiniband/hw/nes/
7576 7576
7577NETEM NETWORK EMULATOR 7577NETEM NETWORK EMULATOR
7578M: Stephen Hemminger <stephen@networkplumber.org> 7578M: Stephen Hemminger <stephen@networkplumber.org>
7579L: netem@lists.linux-foundation.org 7579L: netem@lists.linux-foundation.org (moderated for non-subscribers)
7580S: Maintained 7580S: Maintained
7581F: net/sched/sch_netem.c 7581F: net/sched/sch_netem.c
7582 7582
@@ -8712,6 +8712,8 @@ F: drivers/pinctrl/sh-pfc/
8712 8712
8713PIN CONTROLLER - SAMSUNG 8713PIN CONTROLLER - SAMSUNG
8714M: Tomasz Figa <tomasz.figa@gmail.com> 8714M: Tomasz Figa <tomasz.figa@gmail.com>
8715M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
8716M: Sylwester Nawrocki <s.nawrocki@samsung.com>
8715L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8717L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8716L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 8718L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
8717S: Maintained 8719S: Maintained
@@ -9140,6 +9142,13 @@ T: git git://github.com/KrasnikovEugene/wcn36xx.git
9140S: Supported 9142S: Supported
9141F: drivers/net/wireless/ath/wcn36xx/ 9143F: drivers/net/wireless/ath/wcn36xx/
9142 9144
9145QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
9146M: Gabriel Somlo <somlo@cmu.edu>
9147M: "Michael S. Tsirkin" <mst@redhat.com>
9148L: qemu-devel@nongnu.org
9149S: Maintained
9150F: drivers/firmware/qemu_fw_cfg.c
9151
9143RADOS BLOCK DEVICE (RBD) 9152RADOS BLOCK DEVICE (RBD)
9144M: Ilya Dryomov <idryomov@gmail.com> 9153M: Ilya Dryomov <idryomov@gmail.com>
9145M: Sage Weil <sage@redhat.com> 9154M: Sage Weil <sage@redhat.com>
@@ -10586,6 +10595,14 @@ L: linux-tegra@vger.kernel.org
10586S: Maintained 10595S: Maintained
10587F: drivers/staging/nvec/ 10596F: drivers/staging/nvec/
10588 10597
10598STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
10599M: Jens Frederich <jfrederich@gmail.com>
10600M: Daniel Drake <dsd@laptop.org>
10601M: Jon Nettleton <jon.nettleton@gmail.com>
10602W: http://wiki.laptop.org/go/DCON
10603S: Maintained
10604F: drivers/staging/olpc_dcon/
10605
10589STAGING - REALTEK RTL8712U DRIVERS 10606STAGING - REALTEK RTL8712U DRIVERS
10590M: Larry Finger <Larry.Finger@lwfinger.net> 10607M: Larry Finger <Larry.Finger@lwfinger.net>
10591M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>. 10608M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
@@ -11054,6 +11071,15 @@ S: Maintained
11054F: drivers/clk/ti/ 11071F: drivers/clk/ti/
11055F: include/linux/clk/ti.h 11072F: include/linux/clk/ti.h
11056 11073
11074TI ETHERNET SWITCH DRIVER (CPSW)
11075M: Mugunthan V N <mugunthanvnm@ti.com>
11076R: Grygorii Strashko <grygorii.strashko@ti.com>
11077L: linux-omap@vger.kernel.org
11078L: netdev@vger.kernel.org
11079S: Maintained
11080F: drivers/net/ethernet/ti/cpsw*
11081F: drivers/net/ethernet/ti/davinci*
11082
11057TI FLASH MEDIA INTERFACE DRIVER 11083TI FLASH MEDIA INTERFACE DRIVER
11058M: Alex Dubov <oakad@yahoo.com> 11084M: Alex Dubov <oakad@yahoo.com>
11059S: Maintained 11085S: Maintained
@@ -12205,9 +12231,9 @@ S: Maintained
12205F: drivers/media/tuners/tuner-xc2028.* 12231F: drivers/media/tuners/tuner-xc2028.*
12206 12232
12207XEN HYPERVISOR INTERFACE 12233XEN HYPERVISOR INTERFACE
12208M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
12209M: Boris Ostrovsky <boris.ostrovsky@oracle.com> 12234M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
12210M: David Vrabel <david.vrabel@citrix.com> 12235M: David Vrabel <david.vrabel@citrix.com>
12236M: Juergen Gross <jgross@suse.com>
12211L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12237L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12212T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git 12238T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
12213S: Supported 12239S: Supported
@@ -12219,16 +12245,16 @@ F: include/xen/
12219F: include/uapi/xen/ 12245F: include/uapi/xen/
12220 12246
12221XEN HYPERVISOR ARM 12247XEN HYPERVISOR ARM
12222M: Stefano Stabellini <stefano.stabellini@eu.citrix.com> 12248M: Stefano Stabellini <sstabellini@kernel.org>
12223L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12249L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12224S: Supported 12250S: Maintained
12225F: arch/arm/xen/ 12251F: arch/arm/xen/
12226F: arch/arm/include/asm/xen/ 12252F: arch/arm/include/asm/xen/
12227 12253
12228XEN HYPERVISOR ARM64 12254XEN HYPERVISOR ARM64
12229M: Stefano Stabellini <stefano.stabellini@eu.citrix.com> 12255M: Stefano Stabellini <sstabellini@kernel.org>
12230L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12256L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12231S: Supported 12257S: Maintained
12232F: arch/arm64/xen/ 12258F: arch/arm64/xen/
12233F: arch/arm64/include/asm/xen/ 12259F: arch/arm64/include/asm/xen/
12234 12260
diff --git a/Makefile b/Makefile
index 173437debc87..7466de60ddc7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc6
5NAME = Blurry Fish Butt 5NAME = Charred Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
1008prepare: prepare0 prepare-objtool 1008prepare: prepare0 prepare-objtool
1009 1009
1010ifdef CONFIG_STACK_VALIDATION 1010ifdef CONFIG_STACK_VALIDATION
1011 has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0) 1011 has_libelf := $(call try-run,\
1012 echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
1012 ifeq ($(has_libelf),1) 1013 ifeq ($(has_libelf),1)
1013 objtool_target := tools/objtool FORCE 1014 objtool_target := tools/objtool FORCE
1014 else 1015 else
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 208aae071b37..ec4791ea6911 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,8 +35,10 @@ config ARC
35 select NO_BOOTMEM 35 select NO_BOOTMEM
36 select OF 36 select OF
37 select OF_EARLY_FLATTREE 37 select OF_EARLY_FLATTREE
38 select OF_RESERVED_MEM
38 select PERF_USE_VMALLOC 39 select PERF_USE_VMALLOC
39 select HAVE_DEBUG_STACKOVERFLOW 40 select HAVE_DEBUG_STACKOVERFLOW
41 select HAVE_GENERIC_DMA_COHERENT
40 42
41config MIGHT_HAVE_PCI 43config MIGHT_HAVE_PCI
42 bool 44 bool
@@ -593,7 +595,6 @@ config PCI_SYSCALL
593 def_bool PCI 595 def_bool PCI
594 596
595source "drivers/pci/Kconfig" 597source "drivers/pci/Kconfig"
596source "drivers/pci/pcie/Kconfig"
597 598
598endmenu 599endmenu
599 600
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index ab5d5701e11d..44a578c10732 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -47,14 +47,6 @@
47 clocks = <&apbclk>; 47 clocks = <&apbclk>;
48 clock-names = "stmmaceth"; 48 clock-names = "stmmaceth";
49 max-speed = <100>; 49 max-speed = <100>;
50 mdio0 {
51 #address-cells = <1>;
52 #size-cells = <0>;
53 compatible = "snps,dwmac-mdio";
54 phy1: ethernet-phy@1 {
55 reg = <1>;
56 };
57 };
58 }; 50 };
59 51
60 ehci@0x40000 { 52 ehci@0x40000 {
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index f8b396c9aedb..491b3b5f22bd 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -42,6 +42,7 @@ CONFIG_DEVTMPFS=y
42# CONFIG_STANDALONE is not set 42# CONFIG_STANDALONE is not set
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 43# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44# CONFIG_FIRMWARE_IN_KERNEL is not set 44# CONFIG_FIRMWARE_IN_KERNEL is not set
45CONFIG_BLK_DEV_LOOP=y
45CONFIG_SCSI=y 46CONFIG_SCSI=y
46CONFIG_BLK_DEV_SD=y 47CONFIG_BLK_DEV_SD=y
47CONFIG_NETDEVICES=y 48CONFIG_NETDEVICES=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 56128ea2b748..b25ee73b2e79 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -43,6 +43,7 @@ CONFIG_DEVTMPFS=y
43# CONFIG_STANDALONE is not set 43# CONFIG_STANDALONE is not set
44# CONFIG_PREVENT_FIRMWARE_BUILD is not set 44# CONFIG_PREVENT_FIRMWARE_BUILD is not set
45# CONFIG_FIRMWARE_IN_KERNEL is not set 45# CONFIG_FIRMWARE_IN_KERNEL is not set
46CONFIG_BLK_DEV_LOOP=y
46CONFIG_SCSI=y 47CONFIG_SCSI=y
47CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
48CONFIG_NETDEVICES=y 49CONFIG_NETDEVICES=y
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
new file mode 100644
index 000000000000..bd3f68c9ddfc
--- /dev/null
+++ b/arch/arc/include/asm/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f751eebf..d1ec7f6b31e0 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
18#define STATUS_AD_MASK (1<<STATUS_AD_BIT) 18#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
19#define STATUS_IE_MASK (1<<STATUS_IE_BIT) 19#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
20 20
21/* status32 Bits as encoded/expected by CLRI/SETI */
22#define CLRI_STATUS_IE_BIT 4
23
24#define CLRI_STATUS_E_MASK 0xF
25#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
26
21#define AUX_USER_SP 0x00D 27#define AUX_USER_SP 0x00D
22#define AUX_IRQ_CTRL 0x00E 28#define AUX_IRQ_CTRL 0x00E
23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ 29#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
100 : 106 :
101 : "memory"); 107 : "memory");
102 108
109 /* To be compatible with irq_save()/irq_restore()
110 * encode the irq bits as expected by CLRI/SETI
111 * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
112 */
113 temp = (1 << 5) |
114 ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
115 (temp & CLRI_STATUS_E_MASK);
103 return temp; 116 return temp;
104} 117}
105 118
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
108 */ 121 */
109static inline int arch_irqs_disabled_flags(unsigned long flags) 122static inline int arch_irqs_disabled_flags(unsigned long flags)
110{ 123{
111 return !(flags & (STATUS_IE_MASK)); 124 return !(flags & CLRI_STATUS_IE_MASK);
112} 125}
113 126
114static inline int arch_irqs_disabled(void) 127static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
128 141
129#else 142#else
130 143
144#ifdef CONFIG_TRACE_IRQFLAGS
145
146.macro TRACE_ASM_IRQ_DISABLE
147 bl trace_hardirqs_off
148.endm
149
150.macro TRACE_ASM_IRQ_ENABLE
151 bl trace_hardirqs_on
152.endm
153
154#else
155
156.macro TRACE_ASM_IRQ_DISABLE
157.endm
158
159.macro TRACE_ASM_IRQ_ENABLE
160.endm
161
162#endif
131.macro IRQ_DISABLE scratch 163.macro IRQ_DISABLE scratch
132 clri 164 clri
165 TRACE_ASM_IRQ_DISABLE
133.endm 166.endm
134 167
135.macro IRQ_ENABLE scratch 168.macro IRQ_ENABLE scratch
169 TRACE_ASM_IRQ_ENABLE
136 seti 170 seti
137.endm 171.endm
138 172
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c1264607bbff..7a1c124ff021 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
69 69
70 clri ; To make status32.IE agree with CPU internal state 70 clri ; To make status32.IE agree with CPU internal state
71 71
72 lr r0, [ICAUSE] 72#ifdef CONFIG_TRACE_IRQFLAGS
73 TRACE_ASM_IRQ_DISABLE
74#endif
73 75
76 lr r0, [ICAUSE]
74 mov blink, ret_from_exception 77 mov blink, ret_from_exception
75 78
76 b.d arch_do_IRQ 79 b.d arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
169 172
170.Lrestore_regs: 173.Lrestore_regs:
171 174
175	# Interrupts are actually disabled from this point on, but will get
176	# re-enabled after we return from the interrupt/exception.
177	# But the irq tracer needs to be told now...
178 TRACE_ASM_IRQ_ENABLE
179
172 ld r0, [sp, PT_status32] ; U/K mode at time of entry 180 ld r0, [sp, PT_status32] ; U/K mode at time of entry
173 lr r10, [AUX_IRQ_ACT] 181 lr r10, [AUX_IRQ_ACT]
174 182
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 431433929189..0cb0abaa0479 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@ END(call_do_page_fault)
341 341
342.Lrestore_regs: 342.Lrestore_regs:
343 343
344	# Interrupts are actually disabled from this point on, but will get
345	# re-enabled after we return from the interrupt/exception.
346	# But the irq tracer needs to be told now...
344 TRACE_ASM_IRQ_ENABLE 347 TRACE_ASM_IRQ_ENABLE
345 348
346 lr r10, [status32] 349 lr r10, [status32]
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d7709e3930a3..9e5eddbb856f 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
628 628
629 /* kernel reading from page with U-mapping */ 629 /* kernel reading from page with U-mapping */
630 phys_addr_t paddr = (unsigned long)page_address(page); 630 phys_addr_t paddr = (unsigned long)page_address(page);
631 unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; 631 unsigned long vaddr = page->index << PAGE_SHIFT;
632 632
633 if (addr_not_cache_congruent(paddr, vaddr)) 633 if (addr_not_cache_congruent(paddr, vaddr))
634 __flush_dcache_page(paddr, vaddr); 634 __flush_dcache_page(paddr, vaddr);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fbf4f22..5487d0b97400 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
13#ifdef CONFIG_BLK_DEV_INITRD 13#ifdef CONFIG_BLK_DEV_INITRD
14#include <linux/initrd.h> 14#include <linux/initrd.h>
15#endif 15#endif
16#include <linux/of_fdt.h>
16#include <linux/swap.h> 17#include <linux/swap.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/highmem.h> 19#include <linux/highmem.h>
@@ -136,6 +137,9 @@ void __init setup_arch_memory(void)
136 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 137 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
137#endif 138#endif
138 139
140 early_init_fdt_reserve_self();
141 early_init_fdt_scan_reserved_mem();
142
139 memblock_dump_all(); 143 memblock_dump_all();
140 144
141 /*----------------- node/zones setup --------------------------*/ 145 /*----------------- node/zones setup --------------------------*/
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index 6c667fb35449..4e28d87e9356 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -470,9 +470,12 @@
470}; 470};
471 471
472&cpsw_emac0 { 472&cpsw_emac0 {
473 phy_id = <&davinci_mdio>, <0>;
474 phy-mode = "rmii"; 473 phy-mode = "rmii";
475 dual_emac_res_vlan = <1>; 474 dual_emac_res_vlan = <1>;
475 fixed-link {
476 speed = <100>;
477 full-duplex;
478 };
476}; 479};
477 480
478&cpsw_emac1 { 481&cpsw_emac1 {
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 55ca9c7dcf6a..0467846b4cc3 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -860,7 +860,7 @@
860 ti,no-idle-on-init; 860 ti,no-idle-on-init;
861 reg = <0x50000000 0x2000>; 861 reg = <0x50000000 0x2000>;
862 interrupts = <100>; 862 interrupts = <100>;
863 dmas = <&edma 52>; 863 dmas = <&edma 52 0>;
864 dma-names = "rxtx"; 864 dma-names = "rxtx";
865 gpmc,num-cs = <7>; 865 gpmc,num-cs = <7>;
866 gpmc,num-waitpins = <2>; 866 gpmc,num-waitpins = <2>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 6e4f5af3d8f8..ba580a9da390 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -207,7 +207,7 @@
207 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>, 207 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
208 <&edma_tptc2 0>; 208 <&edma_tptc2 0>;
209 209
210 ti,edma-memcpy-channels = <32 33>; 210 ti,edma-memcpy-channels = <58 59>;
211 }; 211 };
212 212
213 edma_tptc0: tptc@49800000 { 213 edma_tptc0: tptc@49800000 {
@@ -884,7 +884,7 @@
884 gpmc: gpmc@50000000 { 884 gpmc: gpmc@50000000 {
885 compatible = "ti,am3352-gpmc"; 885 compatible = "ti,am3352-gpmc";
886 ti,hwmods = "gpmc"; 886 ti,hwmods = "gpmc";
887 dmas = <&edma 52>; 887 dmas = <&edma 52 0>;
888 dma-names = "rxtx"; 888 dma-names = "rxtx";
889 clocks = <&l3s_gclk>; 889 clocks = <&l3s_gclk>;
890 clock-names = "fck"; 890 clock-names = "fck";
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 83dfafaaba1b..d5dd72047a7e 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -794,3 +794,8 @@
794 tx-num-evt = <32>; 794 tx-num-evt = <32>;
795 rx-num-evt = <32>; 795 rx-num-evt = <32>;
796}; 796};
797
798&synctimer_32kclk {
799 assigned-clocks = <&mux_synctimer32k_ck>;
800 assigned-clock-parents = <&clkdiv32k_ick>;
801};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 0a5fc5d02ce2..4168eb9dd369 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -99,13 +99,6 @@
99 #cooling-cells = <2>; 99 #cooling-cells = <2>;
100 }; 100 };
101 101
102 extcon_usb1: extcon_usb1 {
103 compatible = "linux,extcon-usb-gpio";
104 id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
105 pinctrl-names = "default";
106 pinctrl-0 = <&extcon_usb1_pins>;
107 };
108
109 hdmi0: connector { 102 hdmi0: connector {
110 compatible = "hdmi-connector"; 103 compatible = "hdmi-connector";
111 label = "hdmi"; 104 label = "hdmi";
@@ -349,12 +342,6 @@
349 >; 342 >;
350 }; 343 };
351 344
352 extcon_usb1_pins: extcon_usb1_pins {
353 pinctrl-single,pins = <
354 DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
355 >;
356 };
357
358 tpd12s015_pins: pinmux_tpd12s015_pins { 345 tpd12s015_pins: pinmux_tpd12s015_pins {
359 pinctrl-single,pins = < 346 pinctrl-single,pins = <
360 DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ 347 DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
@@ -706,10 +693,6 @@
706 pinctrl-0 = <&usb1_pins>; 693 pinctrl-0 = <&usb1_pins>;
707}; 694};
708 695
709&omap_dwc3_1 {
710 extcon = <&extcon_usb1>;
711};
712
713&omap_dwc3_2 { 696&omap_dwc3_2 {
714 extcon = <&extcon_usb2>; 697 extcon = <&extcon_usb2>;
715}; 698};
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 3710755c6d76..85d2c377c332 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
117 }; 117 };
118 118
119 /* USB part of the eSATA/USB 2.0 port */ 119 /* USB part of the eSATA/USB 2.0 port */
120 usb@50000 { 120 usb@58000 {
121 status = "okay"; 121 status = "okay";
122 }; 122 };
123 123
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi
index e0ea6a93a22e..792a64ee0df7 100644
--- a/arch/arm/boot/dts/dm814x-clocks.dtsi
+++ b/arch/arm/boot/dts/dm814x-clocks.dtsi
@@ -4,6 +4,157 @@
4 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
5 */ 5 */
6 6
7&pllss {
8 /*
9	 * See TRM "2.6.10 Connected Outputs of DPLLS" and
10 * "2.6.11 Connected Outputs of DPLLJ". Only clkout is
11 * connected except for hdmi and usb.
12 */
13 adpll_mpu_ck: adpll@40 {
14 #clock-cells = <1>;
15 compatible = "ti,dm814-adpll-s-clock";
16 reg = <0x40 0x40>;
17 clocks = <&devosc_ck &devosc_ck &devosc_ck>;
18 clock-names = "clkinp", "clkinpulow", "clkinphif";
19 clock-output-names = "481c5040.adpll.dcoclkldo",
20 "481c5040.adpll.clkout",
21 "481c5040.adpll.clkoutx2",
22 "481c5040.adpll.clkouthif";
23 };
24
25 adpll_dsp_ck: adpll@80 {
26 #clock-cells = <1>;
27 compatible = "ti,dm814-adpll-lj-clock";
28 reg = <0x80 0x30>;
29 clocks = <&devosc_ck &devosc_ck>;
30 clock-names = "clkinp", "clkinpulow";
31 clock-output-names = "481c5080.adpll.dcoclkldo",
32 "481c5080.adpll.clkout",
33 "481c5080.adpll.clkoutldo";
34 };
35
36 adpll_sgx_ck: adpll@b0 {
37 #clock-cells = <1>;
38 compatible = "ti,dm814-adpll-lj-clock";
39 reg = <0xb0 0x30>;
40 clocks = <&devosc_ck &devosc_ck>;
41 clock-names = "clkinp", "clkinpulow";
42 clock-output-names = "481c50b0.adpll.dcoclkldo",
43 "481c50b0.adpll.clkout",
44 "481c50b0.adpll.clkoutldo";
45 };
46
47 adpll_hdvic_ck: adpll@e0 {
48 #clock-cells = <1>;
49 compatible = "ti,dm814-adpll-lj-clock";
50 reg = <0xe0 0x30>;
51 clocks = <&devosc_ck &devosc_ck>;
52 clock-names = "clkinp", "clkinpulow";
53 clock-output-names = "481c50e0.adpll.dcoclkldo",
54 "481c50e0.adpll.clkout",
55 "481c50e0.adpll.clkoutldo";
56 };
57
58 adpll_l3_ck: adpll@110 {
59 #clock-cells = <1>;
60 compatible = "ti,dm814-adpll-lj-clock";
61 reg = <0x110 0x30>;
62 clocks = <&devosc_ck &devosc_ck>;
63 clock-names = "clkinp", "clkinpulow";
64 clock-output-names = "481c5110.adpll.dcoclkldo",
65 "481c5110.adpll.clkout",
66 "481c5110.adpll.clkoutldo";
67 };
68
69 adpll_isp_ck: adpll@140 {
70 #clock-cells = <1>;
71 compatible = "ti,dm814-adpll-lj-clock";
72 reg = <0x140 0x30>;
73 clocks = <&devosc_ck &devosc_ck>;
74 clock-names = "clkinp", "clkinpulow";
75 clock-output-names = "481c5140.adpll.dcoclkldo",
76 "481c5140.adpll.clkout",
77 "481c5140.adpll.clkoutldo";
78 };
79
80 adpll_dss_ck: adpll@170 {
81 #clock-cells = <1>;
82 compatible = "ti,dm814-adpll-lj-clock";
83 reg = <0x170 0x30>;
84 clocks = <&devosc_ck &devosc_ck>;
85 clock-names = "clkinp", "clkinpulow";
86 clock-output-names = "481c5170.adpll.dcoclkldo",
87 "481c5170.adpll.clkout",
88 "481c5170.adpll.clkoutldo";
89 };
90
91 adpll_video0_ck: adpll@1a0 {
92 #clock-cells = <1>;
93 compatible = "ti,dm814-adpll-lj-clock";
94 reg = <0x1a0 0x30>;
95 clocks = <&devosc_ck &devosc_ck>;
96 clock-names = "clkinp", "clkinpulow";
97 clock-output-names = "481c51a0.adpll.dcoclkldo",
98 "481c51a0.adpll.clkout",
99 "481c51a0.adpll.clkoutldo";
100 };
101
102 adpll_video1_ck: adpll@1d0 {
103 #clock-cells = <1>;
104 compatible = "ti,dm814-adpll-lj-clock";
105 reg = <0x1d0 0x30>;
106 clocks = <&devosc_ck &devosc_ck>;
107 clock-names = "clkinp", "clkinpulow";
108 clock-output-names = "481c51d0.adpll.dcoclkldo",
109 "481c51d0.adpll.clkout",
110 "481c51d0.adpll.clkoutldo";
111 };
112
113 adpll_hdmi_ck: adpll@200 {
114 #clock-cells = <1>;
115 compatible = "ti,dm814-adpll-lj-clock";
116 reg = <0x200 0x30>;
117 clocks = <&devosc_ck &devosc_ck>;
118 clock-names = "clkinp", "clkinpulow";
119 clock-output-names = "481c5200.adpll.dcoclkldo",
120 "481c5200.adpll.clkout",
121 "481c5200.adpll.clkoutldo";
122 };
123
124 adpll_audio_ck: adpll@230 {
125 #clock-cells = <1>;
126 compatible = "ti,dm814-adpll-lj-clock";
127 reg = <0x230 0x30>;
128 clocks = <&devosc_ck &devosc_ck>;
129 clock-names = "clkinp", "clkinpulow";
130 clock-output-names = "481c5230.adpll.dcoclkldo",
131 "481c5230.adpll.clkout",
132 "481c5230.adpll.clkoutldo";
133 };
134
135 adpll_usb_ck: adpll@260 {
136 #clock-cells = <1>;
137 compatible = "ti,dm814-adpll-lj-clock";
138 reg = <0x260 0x30>;
139 clocks = <&devosc_ck &devosc_ck>;
140 clock-names = "clkinp", "clkinpulow";
141 clock-output-names = "481c5260.adpll.dcoclkldo",
142 "481c5260.adpll.clkout",
143 "481c5260.adpll.clkoutldo";
144 };
145
146 adpll_ddr_ck: adpll@290 {
147 #clock-cells = <1>;
148 compatible = "ti,dm814-adpll-lj-clock";
149 reg = <0x290 0x30>;
150 clocks = <&devosc_ck &devosc_ck>;
151 clock-names = "clkinp", "clkinpulow";
152 clock-output-names = "481c5290.adpll.dcoclkldo",
153 "481c5290.adpll.clkout",
154 "481c5290.adpll.clkoutldo";
155 };
156};
157
7&pllss_clocks { 158&pllss_clocks {
8 timer1_fck: timer1_fck { 159 timer1_fck: timer1_fck {
9 #clock-cells = <0>; 160 #clock-cells = <0>;
@@ -23,6 +174,24 @@
23 reg = <0x2e0>; 174 reg = <0x2e0>;
24 }; 175 };
25 176
177	/* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from the audio PLL */
178 cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
179 #clock-cells = <0>;
180 compatible = "ti,mux-clock";
181 clocks = <&adpll_video0_ck 1
182 &adpll_video1_ck 1
183 &adpll_audio_ck 1>;
184 ti,bit-shift = <1>;
185 reg = <0x2e8>;
186 };
187
188 /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
189 cpsw_125mhz_gclk: cpsw_125mhz_gclk {
190 #clock-cells = <0>;
191 compatible = "fixed-clock";
192 clock-frequency = <125000000>;
193 };
194
26 sysclk18_ck: sysclk18_ck { 195 sysclk18_ck: sysclk18_ck {
27 #clock-cells = <0>; 196 #clock-cells = <0>;
28 compatible = "ti,mux-clock"; 197 compatible = "ti,mux-clock";
@@ -79,37 +248,6 @@
79 compatible = "fixed-clock"; 248 compatible = "fixed-clock";
80 clock-frequency = <1000000000>; 249 clock-frequency = <1000000000>;
81 }; 250 };
82
83 sysclk4_ck: sysclk4_ck {
84 #clock-cells = <0>;
85 compatible = "fixed-clock";
86 clock-frequency = <222000000>;
87 };
88
89 sysclk6_ck: sysclk6_ck {
90 #clock-cells = <0>;
91 compatible = "fixed-clock";
92 clock-frequency = <100000000>;
93 };
94
95 sysclk10_ck: sysclk10_ck {
96 #clock-cells = <0>;
97 compatible = "fixed-clock";
98 clock-frequency = <48000000>;
99 };
100
101 cpsw_125mhz_gclk: cpsw_125mhz_gclk {
102 #clock-cells = <0>;
103 compatible = "fixed-clock";
104 clock-frequency = <125000000>;
105 };
106
107 cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
108 #clock-cells = <0>;
109 compatible = "fixed-clock";
110 clock-frequency = <250000000>;
111 };
112
113}; 251};
114 252
115&prcm_clocks { 253&prcm_clocks {
@@ -138,6 +276,49 @@
138 clock-div = <78125>; 276 clock-div = <78125>;
139 }; 277 };
140 278
279 /* L4_HS 220 MHz*/
280 sysclk4_ck: sysclk4_ck {
281 #clock-cells = <0>;
282 compatible = "ti,fixed-factor-clock";
283 clocks = <&adpll_l3_ck 1>;
284 ti,clock-mult = <1>;
285 ti,clock-div = <1>;
286 };
287
288 /* L4_FWCFG */
289 sysclk5_ck: sysclk5_ck {
290 #clock-cells = <0>;
291 compatible = "ti,fixed-factor-clock";
292 clocks = <&adpll_l3_ck 1>;
293 ti,clock-mult = <1>;
294 ti,clock-div = <2>;
295 };
296
297 /* L4_LS 110 MHz */
298 sysclk6_ck: sysclk6_ck {
299 #clock-cells = <0>;
300 compatible = "ti,fixed-factor-clock";
301 clocks = <&adpll_l3_ck 1>;
302 ti,clock-mult = <1>;
303 ti,clock-div = <2>;
304 };
305
306 sysclk8_ck: sysclk8_ck {
307 #clock-cells = <0>;
308 compatible = "ti,fixed-factor-clock";
309 clocks = <&adpll_usb_ck 1>;
310 ti,clock-mult = <1>;
311 ti,clock-div = <1>;
312 };
313
314 sysclk10_ck: sysclk10_ck {
315 compatible = "ti,divider-clock";
316 reg = <0x324>;
317 ti,max-div = <7>;
318 #clock-cells = <0>;
319 clocks = <&adpll_usb_ck 1>;
320 };
321
141 aud_clkin0_ck: aud_clkin0_ck { 322 aud_clkin0_ck: aud_clkin0_ck {
142 #clock-cells = <0>; 323 #clock-cells = <0>;
143 compatible = "fixed-clock"; 324 compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/dra62x-clocks.dtsi b/arch/arm/boot/dts/dra62x-clocks.dtsi
index 6f98dc8df9dd..0e49741747ef 100644
--- a/arch/arm/boot/dts/dra62x-clocks.dtsi
+++ b/arch/arm/boot/dts/dra62x-clocks.dtsi
@@ -6,6 +6,32 @@
6 6
7#include "dm814x-clocks.dtsi" 7#include "dm814x-clocks.dtsi"
8 8
9/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
10&adpll_hdvic_ck {
11 status = "disabled";
12};
13
14&adpll_l3_ck {
15 status = "disabled";
16};
17
18&adpll_dss_ck {
19 status = "disabled";
20};
21
22/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
23&sysclk4_ck {
24 clocks = <&adpll_isp_ck 1>;
25};
26
27&sysclk5_ck {
28 clocks = <&adpll_isp_ck 1>;
29};
30
31&sysclk6_ck {
32 clocks = <&adpll_isp_ck 1>;
33};
34
9/* 35/*
10 * Compared to dm814x, dra62x has different shifts and more mux options. 36 * Compared to dm814x, dra62x has different shifts and more mux options.
11 * Please add the extra options for sysclk_14 and 16 if really needed. 37 * Please add the extra options for sysclk_14 and 16 if really needed.
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index d0bae06b7eb7..ef2164a99d0f 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -98,12 +98,20 @@
98 clock-frequency = <32768>; 98 clock-frequency = <32768>;
99 }; 99 };
100 100
101 sys_32k_ck: sys_32k_ck { 101 sys_clk32_crystal_ck: sys_clk32_crystal_ck {
102 #clock-cells = <0>; 102 #clock-cells = <0>;
103 compatible = "fixed-clock"; 103 compatible = "fixed-clock";
104 clock-frequency = <32768>; 104 clock-frequency = <32768>;
105 }; 105 };
106 106
107 sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
108 #clock-cells = <0>;
109 compatible = "fixed-factor-clock";
110 clocks = <&sys_clkin1>;
111 clock-mult = <1>;
112 clock-div = <610>;
113 };
114
107 virt_12000000_ck: virt_12000000_ck { 115 virt_12000000_ck: virt_12000000_ck {
108 #clock-cells = <0>; 116 #clock-cells = <0>;
109 compatible = "fixed-clock"; 117 compatible = "fixed-clock";
@@ -2170,4 +2178,12 @@
2170 ti,bit-shift = <22>; 2178 ti,bit-shift = <22>;
2171 reg = <0x0558>; 2179 reg = <0x0558>;
2172 }; 2180 };
2181
2182 sys_32k_ck: sys_32k_ck {
2183 #clock-cells = <0>;
2184 compatible = "ti,mux-clock";
2185 clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
2186 ti,bit-shift = <8>;
2187 reg = <0x6c4>;
2188 };
2173}; 2189};
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index a2ddcb8c545a..45619f6162c5 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -91,8 +91,8 @@
91 clock-frequency = <141666666>; 91 clock-frequency = <141666666>;
92 }; 92 };
93 93
94 pinctrl: pinctrl@c1109880 { 94 pinctrl_cbus: pinctrl@c1109880 {
95 compatible = "amlogic,meson8-pinctrl"; 95 compatible = "amlogic,meson8-cbus-pinctrl";
96 reg = <0xc1109880 0x10>; 96 reg = <0xc1109880 0x10>;
97 #address-cells = <1>; 97 #address-cells = <1>;
98 #size-cells = <1>; 98 #size-cells = <1>;
@@ -108,29 +108,6 @@
108 #gpio-cells = <2>; 108 #gpio-cells = <2>;
109 }; 109 };
110 110
111 gpio_ao: ao-bank@c1108030 {
112 reg = <0xc8100014 0x4>,
113 <0xc810002c 0x4>,
114 <0xc8100024 0x8>;
115 reg-names = "mux", "pull", "gpio";
116 gpio-controller;
117 #gpio-cells = <2>;
118 };
119
120 uart_ao_a_pins: uart_ao_a {
121 mux {
122 groups = "uart_tx_ao_a", "uart_rx_ao_a";
123 function = "uart_ao";
124 };
125 };
126
127 i2c_ao_pins: i2c_mst_ao {
128 mux {
129 groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
130 function = "i2c_mst_ao";
131 };
132 };
133
134 spi_nor_pins: nor { 111 spi_nor_pins: nor {
135 mux { 112 mux {
136 groups = "nor_d", "nor_q", "nor_c", "nor_cs"; 113 groups = "nor_d", "nor_q", "nor_c", "nor_cs";
@@ -157,4 +134,34 @@
 			};
 		};
 
+	pinctrl_aobus: pinctrl@c8100084 {
+		compatible = "amlogic,meson8-aobus-pinctrl";
+		reg = <0xc8100084 0xc>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		gpio_ao: ao-bank@c1108030 {
+			reg = <0xc8100014 0x4>,
+			      <0xc810002c 0x4>,
+			      <0xc8100024 0x8>;
+			reg-names = "mux", "pull", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		uart_ao_a_pins: uart_ao_a {
+			mux {
+				groups = "uart_tx_ao_a", "uart_rx_ao_a";
+				function = "uart_ao";
+			};
+		};
+
+		i2c_ao_pins: i2c_mst_ao {
+			mux {
+				groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
+				function = "i2c_mst_ao";
+			};
+		};
+	};
 }; /* end of / */
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 8bad5571af46..2bfe401a4da9 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -155,8 +155,8 @@
 		reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
 	};
 
-	pinctrl: pinctrl@c1109880 {
-		compatible = "amlogic,meson8b-pinctrl";
+	pinctrl_cbus: pinctrl@c1109880 {
+		compatible = "amlogic,meson8b-cbus-pinctrl";
 		reg = <0xc1109880 0x10>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -171,6 +171,14 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
+	};
+
+	pinctrl_aobus: pinctrl@c8100084 {
+		compatible = "amlogic,meson8b-aobus-pinctrl";
+		reg = <0xc8100084 0xc>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
 
 		gpio_ao: ao-bank@c1108030 {
 			reg = <0xc8100014 0x4>,
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 2bd9c83300b2..421fe9f8a9eb 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -70,7 +70,7 @@
 		compatible = "arm,cortex-a9-twd-timer";
 		clocks = <&mpu_periphclk>;
 		reg = <0x48240600 0x20>;
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_EDGE_RISING)>;
 		interrupt-parent = <&gic>;
 	};
 
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index ef5330578431..8193139d0d87 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/clock/qcom,gcc-msm8974.h>
 #include "skeleton.dtsi"
 
@@ -460,8 +460,6 @@
 			clock-names = "core", "iface";
 			#address-cells = <1>;
 			#size-cells = <0>;
-			dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
-			dma-names = "tx", "rx";
 		};
 
 		spmi_bus: spmi@fc4cf000 {
@@ -479,16 +477,6 @@
 			interrupt-controller;
 			#interrupt-cells = <4>;
 		};
-
-		blsp2_dma: dma-controller@f9944000 {
-			compatible = "qcom,bam-v1.4.0";
-			reg = <0xf9944000 0x19000>;
-			interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP2_AHB_CLK>;
-			clock-names = "bam_clk";
-			#dma-cells = <1>;
-			qcom,ee = <0>;
-		};
 	};
 
 	smd {
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 0ad71b81d3a2..cc6e28f81fe4 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -661,6 +661,7 @@
 };
 
 &pcie_bus_clk {
+	clock-frequency = <100000000>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6c08314427d6..a9285d9a57cd 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -143,19 +143,11 @@
 };
 
 &pfc {
-	pinctrl-0 = <&scif_clk_pins>;
-	pinctrl-names = "default";
-
 	scif0_pins: serial0 {
 		renesas,groups = "scif0_data_d";
 		renesas,function = "scif0";
 	};
 
-	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
-	};
-
 	ether_pins: ether {
 		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
 		renesas,function = "eth";
@@ -229,11 +221,6 @@
 	status = "okay";
 };
 
-&scif_clk {
-	clock-frequency = <14745600>;
-	status = "okay";
-};
-
 &ether {
 	pinctrl-0 = <&ether_pins &phy1_pins>;
 	pinctrl-names = "default";
@@ -414,6 +401,7 @@
 };
 
 &pcie_bus_clk {
+	clock-frequency = <100000000>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 6439f0569fe2..1cd1b6a3a72a 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1083,9 +1083,8 @@
 		pcie_bus_clk: pcie_bus_clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-frequency = <100000000>;
+			clock-frequency = <0>;
 			clock-output-names = "pcie_bus";
-			status = "disabled";
 		};
 
 		/* External SCIF clock */
@@ -1094,7 +1093,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* External USB clock - can be overridden by the board */
@@ -1112,7 +1110,6 @@
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
 			clock-output-names = "can_clk";
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 07055eacbb0f..a691d590fbd1 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -63,6 +63,9 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_BU21013=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_RMI4_CORE=y
+CONFIG_RMI4_I2C=y
+CONFIG_RMI4_F11=y
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b23c6c81c9ad..1ee94c716a7f 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
 	int feature = (features >> field) & 15;
 
 	/* feature registers are signed values */
-	if (feature > 8)
+	if (feature > 7)
 		feature -= 16;
 
 	return feature;
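The fix above hinges on how a 4-bit two's-complement field decodes: raw values 8..15 stand for -8..-1, so the wrap test must be `feature > 7`; the old `feature > 8` test left the encoding 8 (binary 1000, i.e. -8) undecoded. A minimal userspace sketch of the same arithmetic, for illustration only (not the kernel implementation):

#include <assert.h>

/* Sketch of 4-bit two's-complement field extraction. */
static int extract_signed_field(unsigned int features, int field)
{
	int feature = (features >> field) & 15;

	/* a 4-bit signed field spans -8..7, so raw 8..15 wrap to -8..-1 */
	if (feature > 7)
		feature -= 16;
	return feature;
}

int main(void)
{
	assert(extract_signed_field(0x8 << 4, 4) == -8);	/* raw 8: the case the old test missed */
	assert(extract_signed_field(0x7 << 4, 4) == 7);
	assert(extract_signed_field(0xf << 4, 4) == -1);
	return 0;
}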
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7b84657fba35..194b69923389 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (392)
+#define __NR_syscalls  (396)
 
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 5dd2528e9e45..2cb9dc770e1d 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -418,6 +418,8 @@
 #define __NR_membarrier		(__NR_SYSCALL_BASE+389)
 #define __NR_mlock2		(__NR_SYSCALL_BASE+390)
 #define __NR_copy_file_range	(__NR_SYSCALL_BASE+391)
+#define __NR_preadv2		(__NR_SYSCALL_BASE+392)
+#define __NR_pwritev2		(__NR_SYSCALL_BASE+393)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index dfc7cd6851ad..703fa0f3cd8f 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,8 +399,10 @@
 		CALL(sys_execveat)
 		CALL(sys_userfaultfd)
 		CALL(sys_membarrier)
-		CALL(sys_mlock2)
+/* 390 */	CALL(sys_mlock2)
 		CALL(sys_copy_file_range)
+		CALL(sys_preadv2)
+		CALL(sys_pwritev2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
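The `syscalls_padding` expression above rounds the table up to the next multiple of four entries, which is also why `__NR_syscalls` moves from 392 to 396 once preadv2 and pwritev2 take slots 392 and 393 (394 entries, padded to 396). A small standalone check of that arithmetic, assuming nothing beyond standard C:

#include <stdio.h>

int main(void)
{
	/* 392 original entries plus preadv2/pwritev2 */
	unsigned int nr_syscalls = 394;
	/* same rounding as .equ syscalls_padding in calls.S */
	unsigned int padded = (nr_syscalls + 3) & ~3u;

	printf("%u entries -> %u slots (%u padding)\n",
	       nr_syscalls, padded, padded - nr_syscalls);	/* 394 -> 396 (2) */
	return 0;
}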
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 139791ed473d..2c4bea39cf22 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
 	pr_info("CPU: div instructions available: patching division code\n");
 
 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
+	asm ("" : "+g" (fn_addr));
 	((u32 *)fn_addr)[0] = udiv_instruction();
 	((u32 *)fn_addr)[1] = bx_lr_instruction();
 	flush_icache_range(fn_addr, fn_addr + 8);
 
 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
+	asm ("" : "+g" (fn_addr));
 	((u32 *)fn_addr)[0] = sdiv_instruction();
 	((u32 *)fn_addr)[1] = bx_lr_instruction();
 	flush_icache_range(fn_addr, fn_addr + 8);
@@ -510,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
 	 */
 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
-	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
 		elf_hwcap &= ~HWCAP_SWP;
 }
 
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 6accd66d26f0..dded1b763c16 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
 	kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reinit(void)
+{
+	if (is_kernel_in_hyp_mode()) {
+		/*
+		 * cpu_init_stage2() is safe to call even if the PM
+		 * event was cancelled before the CPU was reset.
+		 */
+		cpu_init_stage2(NULL);
+	} else {
+		if (__hyp_get_vectors() == hyp_default_vectors)
+			cpu_init_hyp_mode(NULL);
+	}
+}
+
 static int hyp_init_cpu_notify(struct notifier_block *self,
 			       unsigned long action, void *cpu)
 {
 	switch (action) {
 	case CPU_STARTING:
 	case CPU_STARTING_FROZEN:
-		if (__hyp_get_vectors() == hyp_default_vectors)
-			cpu_init_hyp_mode(NULL);
-		break;
+		cpu_hyp_reinit();
 	}
 
 	return NOTIFY_OK;
@@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 					   unsigned long cmd,
 					   void *v)
 {
-	if (cmd == CPU_PM_EXIT &&
-	    __hyp_get_vectors() == hyp_default_vectors) {
-		cpu_init_hyp_mode(NULL);
+	if (cmd == CPU_PM_EXIT) {
+		cpu_hyp_reinit();
 		return NOTIFY_OK;
 	}
 
@@ -1101,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
 {
 	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
+static void __init hyp_cpu_pm_exit(void)
+{
+	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
 #else
 static inline void hyp_cpu_pm_init(void)
 {
 }
+static inline void hyp_cpu_pm_exit(void)
+{
+}
 #endif
 
 static void teardown_common_resources(void)
@@ -1128,6 +1146,20 @@ static int init_subsystems(void)
 	int err;
 
 	/*
+	 * Register CPU Hotplug notifier
+	 */
+	err = register_cpu_notifier(&hyp_init_cpu_nb);
+	if (err) {
+		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
+		return err;
+	}
+
+	/*
+	 * Register CPU lower-power notifier
+	 */
+	hyp_cpu_pm_init();
+
+	/*
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
@@ -1166,6 +1198,8 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+	unregister_cpu_notifier(&hyp_init_cpu_nb);
+	hyp_cpu_pm_exit();
 }
 
 static int init_vhe_mode(void)
@@ -1270,19 +1304,6 @@ static int init_hyp_mode(void)
 	free_boot_hyp_pgd();
 #endif
 
-	cpu_notifier_register_begin();
-
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-
-	cpu_notifier_register_done();
-
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-		goto out_err;
-	}
-
-	hyp_cpu_pm_init();
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index a5edd7d60266..3d039ef021e0 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx(
 	if (!pdata)
 		pdata = &default_esdhc_pdata;
 
-	return imx_add_platform_device(data->devid, data->id, res,
-			ARRAY_SIZE(res), pdata, sizeof(*pdata));
+	return imx_add_platform_device_dmamask(data->devid, data->id, res,
+			ARRAY_SIZE(res), pdata, sizeof(*pdata),
+			DMA_BIT_MASK(32));
 }
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 7581e036bda6..ef9ed36e8a61 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = {
 	.cm_inst	  = DRA7XX_CM_CORE_AON_IPU_INST,
 	.clkdm_offs	  = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
 	.dep_bit	  = DRA7XX_IPU_STATDEP_SHIFT,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain mpu1_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index d85c24918c17..2abd53ae3e7a 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -669,9 +669,9 @@ void __init dra7xxx_check_revision(void)
 		case 0:
 			omap_revision = DRA722_REV_ES1_0;
 			break;
+		case 1:
 		default:
-			/* If we have no new revisions */
-			omap_revision = DRA722_REV_ES1_0;
+			omap_revision = DRA722_REV_ES2_0;
 			break;
 		}
 		break;
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3c87e40650cf..49de4dd227be 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
 void __init dra7xx_map_io(void)
 {
 	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 /*
@@ -736,7 +737,8 @@ void __init omap5_init_late(void)
 #ifdef CONFIG_SOC_DRA7XX
 void __init dra7xx_init_early(void)
 {
-	omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
+	omap2_set_globals_tap(DRA7XX_CLASS,
+			      OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
 	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
 	omap2_control_base_init();
 	omap4_pm_init_early();
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f397bd6bd6e3..2c04f2741476 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void)
  */
 static void irq_save_context(void)
 {
+	/* DRA7 has no SAR to save */
+	if (soc_is_dra7xx())
+		return;
+
 	if (!sar_base)
 		sar_base = omap4_get_sar_ram_base();
 
@@ -290,6 +294,9 @@ static void irq_sar_clear(void)
 {
 	u32 val;
 	u32 offset = SAR_BACKUP_STATUS_OFFSET;
+	/* DRA7 has no SAR to save */
+	if (soc_is_dra7xx())
+		return;
 
 	if (soc_is_omap54xx())
 		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b6d62e4cdfdd..2af6ff63e3b4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
 	    (sf & SYSC_HAS_CLOCKACTIVITY))
 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-	/* If the cached value is the same as the new value, skip the write */
-	if (oh->_sysc_cache != v)
-		_write_sysconfig(v, oh);
+	_write_sysconfig(v, oh);
 
 	/*
 	 * Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
 		_set_master_standbymode(oh, idlemode, &v);
 	}
 
-	_write_sysconfig(v, oh);
+	/* If the cached value is the same as the new value, skip the write */
+	if (oh->_sysc_cache != v)
+		_write_sysconfig(v, oh);
 }
 
 /**
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index 39736ad2a754..df8327713d06 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -582,9 +582,11 @@ static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
 	.user	= OCP_USER_MPU,
 };
 
+/* USB needs udelay 1 after reset at least on hp t410, use 2 for margin */
 static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
 	.rev_offs	= 0x0,
 	.sysc_offs	= 0x10,
+	.srst_udelay	= 2,
 	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
 			  SYSC_HAS_SOFTRESET,
 	.idlemodes	= SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2dbd3785ee6f..d44e0e2f1106 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -198,7 +198,6 @@ void omap_sram_idle(void)
 	int per_next_state = PWRDM_POWER_ON;
 	int core_next_state = PWRDM_POWER_ON;
 	int per_going_off;
-	int core_prev_state;
 	u32 sdrc_pwr = 0;
 
 	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -278,16 +277,20 @@ void omap_sram_idle(void)
 		sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
 	/* CORE */
-	if (core_next_state < PWRDM_POWER_ON) {
-		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
-		if (core_prev_state == PWRDM_POWER_OFF) {
-			omap3_core_restore_context();
-			omap3_cm_restore_context();
-			omap3_sram_restore_context();
-			omap2_sms_restore_context();
-		}
+	if (core_next_state < PWRDM_POWER_ON &&
+	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
+		omap3_core_restore_context();
+		omap3_cm_restore_context();
+		omap3_sram_restore_context();
+		omap2_sms_restore_context();
+	} else {
+		/*
+		 * In off-mode resume path above, omap3_core_restore_context
+		 * also handles the INTC autoidle restore done here so limit
+		 * this to non-off mode resume paths so we don't do it twice.
+		 */
+		omap3_intc_resume_idle();
 	}
-	omap3_intc_resume_idle();
 
 	pwrdm_post_transition(NULL);
 
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 70df8f6cddcc..364418c78bf3 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -489,6 +489,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define DRA752_REV_ES2_0	(DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
+#define DRA722_REV_ES2_0	(DRA7XX_CLASS | (0x22 << 16) | (0x20 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 913a319c7b00..fffb697bbf0e 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -1235,5 +1235,6 @@ static struct platform_device pxa2xx_pxa_dma = {
 void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
 {
 	pxa_dma_pdata.dma_channels = nb_channels;
+	pxa_dma_pdata.nb_requestors = nb_requestors;
 	pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
 }
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index c6f6ed1cbed0..36e3c79f4973 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -61,10 +61,7 @@ config SA1100_H3100
 	select MFD_IPAQ_MICRO
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
-	  H3100 handheld computer. Information about this machine and the
-	  Linux port to this machine can be found at:
-
-	  <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100>
+	  H3100 handheld computer.
 
 config SA1100_H3600
 	bool "Compaq iPAQ H3600/H3700"
@@ -73,10 +70,7 @@ config SA1100_H3600
 	select MFD_IPAQ_MICRO
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
-	  H3600 handheld computer. Information about this machine and the
-	  Linux port to this machine can be found at:
-
-	  <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
+	  H3600 and H3700 handheld computers.
 
 config SA1100_BADGE4
 	bool "HP Labs BadgePAD 4"
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index ad008e4b0c49..67d79f9c6bad 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
 void __init shmobile_init_delay(void)
 {
 	struct device_node *np, *cpus;
-	bool is_a7_a8_a9 = false;
-	bool is_a15 = false;
+	unsigned int div = 0;
 	bool has_arch_timer = false;
 	u32 max_freq = 0;
 
@@ -55,27 +54,22 @@ void __init shmobile_init_delay(void)
 		if (!of_property_read_u32(np, "clock-frequency", &freq))
 			max_freq = max(max_freq, freq);
 
-		if (of_device_is_compatible(np, "arm,cortex-a8") ||
-		    of_device_is_compatible(np, "arm,cortex-a9")) {
-			is_a7_a8_a9 = true;
-		} else if (of_device_is_compatible(np, "arm,cortex-a7")) {
-			is_a7_a8_a9 = true;
-			has_arch_timer = true;
-		} else if (of_device_is_compatible(np, "arm,cortex-a15")) {
-			is_a15 = true;
+		if (of_device_is_compatible(np, "arm,cortex-a8")) {
+			div = 2;
+		} else if (of_device_is_compatible(np, "arm,cortex-a9")) {
+			div = 1;
+		} else if (of_device_is_compatible(np, "arm,cortex-a7") ||
+			   of_device_is_compatible(np, "arm,cortex-a15")) {
+			div = 1;
 			has_arch_timer = true;
 		}
 	}
 
 	of_node_put(cpus);
 
-	if (!max_freq)
+	if (!max_freq || !div)
 		return;
 
-	if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
-		if (is_a7_a8_a9)
-			shmobile_setup_delay_hz(max_freq, 1, 3);
-		else if (is_a15)
-			shmobile_setup_delay_hz(max_freq, 2, 4);
-	}
+	if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+		shmobile_setup_delay_hz(max_freq, 1, div);
 }
diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c
index 69141357afe8..db04142f88bc 100644
--- a/arch/arm/mach-uniphier/platsmp.c
+++ b/arch/arm/mach-uniphier/platsmp.c
@@ -120,7 +120,7 @@ static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
 	if (ret)
 		return ret;
 
-	uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, sizeof(SZ_4));
+	uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
 	if (!uniphier_smp_rom_boot_rsv2) {
 		pr_err("failed to map ROM_BOOT_RSV2 register\n");
 		return -ENOMEM;
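The old argument, `sizeof(SZ_4)`, is the size of the constant's type (an int), not the constant itself; both happen to evaluate to 4, so the fix corrects intent rather than behaviour. A quick illustration, assuming SZ_4 is defined as in include/linux/sizes.h:

#include <stdio.h>

#define SZ_4	0x00000004	/* as in include/linux/sizes.h */

int main(void)
{
	/* sizeof(SZ_4) == sizeof(int), equal to SZ_4 only by coincidence */
	printf("sizeof(SZ_4) = %zu, SZ_4 = %d\n", sizeof(SZ_4), SZ_4);
	return 0;
}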
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index deac58d5f1f7..c941e93048ad 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (!mask)
 		return NULL;
 
-	buf = kzalloc(sizeof(*buf), gfp);
+	buf = kzalloc(sizeof(*buf),
+		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
 		return NULL;
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d0ba3551d49a..3cced8455727 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(page_to_pfn(page),
-				page->index << PAGE_CACHE_SHIFT);
+				page->index << PAGE_SHIFT);
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	 * data in the current VM view associated with this page.
 	 * - aliasing VIPT: we only need to find one mapping of this page.
 	 */
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0f8963a7e7d9..6fcaac8e200f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -281,12 +281,12 @@ __v7_ca17mp_setup:
 	bl	v7_invalidate_l1
 	ldmia	r12, {r1-r6, lr}
 #ifdef CONFIG_SMP
+	orr	r10, r10, #(1 << 6)		@ Enable SMP/nAMP mode
 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
-	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
-	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
-	orreq	r0, r0, #(1 << 6)		@ Enable SMP/nAMP mode
-	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
-	mcreq	p15, 0, r0, c1, c0, 1
+	ALT_UP(mov	r0, r10)		@ fake it for UP
+	orr	r10, r10, r0			@ Set required bits
+	teq	r10, r0				@ Were they already set?
+	mcrne	p15, 0, r10, c1, c0, 1		@ No, update register
 #endif
 	b	__v7_setup_cont
 
diff --git a/arch/arm64/boot/dts/broadcom/vulcan.dtsi b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
index c49b5a85809c..85820e2bca9d 100644
--- a/arch/arm64/boot/dts/broadcom/vulcan.dtsi
+++ b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
@@ -108,12 +108,15 @@
 			reg = <0x0 0x30000000 0x0 0x10000000>;
 			reg-names = "PCI ECAM";
 
-			/* IO 0x4000_0000 - 0x4001_0000 */
-			ranges = <0x01000000 0 0x40000000 0 0x40000000 0 0x00010000
-			/* MEM 0x4800_0000 - 0x5000_0000 */
-				  0x02000000 0 0x48000000 0 0x48000000 0 0x08000000
-			/* MEM64 pref 0x6_0000_0000 - 0x7_0000_0000 */
-				  0x43000000 6 0x00000000 6 0x00000000 1 0x00000000>;
+			/*
+			 * PCI ranges:
+			 *   IO		no supported
+			 *   MEM	0x4000_0000 - 0x6000_0000
+			 *   MEM64 pref	0x40_0000_0000 - 0x60_0000_0000
+			 */
+			ranges =
+			  <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
+			   0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map =
 			      /* addr  pin  ic  icaddr  icintr */
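Each entry in the new `ranges` uses the standard PCI three-address-cell plus two-size-cell layout: a flags cell (0x02000000 marks 32-bit memory space, 0x43000000 marks 64-bit prefetchable memory), a 64-bit PCI address, a 64-bit CPU address, and a 64-bit size, each split into hi/lo cells. A sketch of how the hi/lo cells combine, assuming plain C:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* second entry above: the MEM64 prefetchable window's hi/lo cells */
	uint32_t addr_hi = 0x40, addr_lo = 0x00000000;
	uint32_t size_hi = 0x20, size_lo = 0x00000000;
	uint64_t addr = ((uint64_t)addr_hi << 32) | addr_lo;
	uint64_t size = ((uint64_t)size_hi << 32) | size_lo;

	/* prints 0x4000000000 - 0x6000000000, matching the comment in the diff */
	printf("MEM64 pref window: 0x%llx - 0x%llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)(addr + size));
	return 0;
}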
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
index 727ae5f8c4e7..b0ed44313a5b 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
@@ -70,7 +70,6 @@
 		i2c3 = &i2c3;
 		i2c4 = &i2c4;
 		i2c5 = &i2c5;
-		i2c6 = &i2c6;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index e682a3f52791..651c9d9d2d54 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -201,15 +201,12 @@
 
 		i2c2: i2c@58782000 {
 			compatible = "socionext,uniphier-fi2c";
-			status = "disabled";
 			reg = <0x58782000 0x80>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			interrupts = <0 43 4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_i2c2>;
 			clocks = <&i2c_clk>;
-			clock-frequency = <100000>;
+			clock-frequency = <400000>;
 		};
 
 		i2c3: i2c@58783000 {
@@ -227,12 +224,15 @@
 
 		i2c4: i2c@58784000 {
 			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
 			reg = <0x58784000 0x80>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			interrupts = <0 45 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c4>;
 			clocks = <&i2c_clk>;
-			clock-frequency = <400000>;
+			clock-frequency = <100000>;
 		};
 
 		i2c5: i2c@58785000 {
@@ -245,16 +245,6 @@
 			clock-frequency = <400000>;
 		};
 
-		i2c6: i2c@58786000 {
-			compatible = "socionext,uniphier-fi2c";
-			reg = <0x58786000 0x80>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <0 26 4>;
-			clocks = <&i2c_clk>;
-			clock-frequency = <400000>;
-		};
-
 		system_bus: system-bus@58c00000 {
 			compatible = "socionext,uniphier-system-bus";
 			status = "disabled";
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0e391dbfc420..3f29887995bc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -124,7 +124,9 @@
 #define VTCR_EL2_SL0_LVL1	(1 << 6)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
-#define VTCR_EL2_VS		19
+#define VTCR_EL2_VS_SHIFT	19
+#define VTCR_EL2_VS_8BIT	(0 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_VS_16BIT	(1 << VTCR_EL2_VS_SHIFT)
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -149,8 +151,7 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
@@ -161,8 +162,7 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif
 
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index eb7490d232a0..40a0a24e6c98 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
 
 #endif
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b7e82a795ac9..f5c6bd2541ef 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
 static inline void __cpu_init_stage2(void)
 {
-	kvm_call_hyp(__init_stage2_translation);
+	u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+	WARN_ONCE(parange < 40,
+		  "PARange is %d bits, unsupported configuration!", parange);
 }
 
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1a78d6e2a78b..12874164b0ae 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -141,6 +141,9 @@
 #define ID_AA64MMFR1_VMIDBITS_SHIFT	4
 #define ID_AA64MMFR1_HADBS_SHIFT	0
 
+#define ID_AA64MMFR1_VMIDBITS_8		0
+#define ID_AA64MMFR1_VMIDBITS_16	2
+
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_UAO_SHIFT		4
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4203d5f257bc..85da0f599cd6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -588,6 +588,15 @@ set_hcr:
 	msr	vpidr_el2, x0
 	msr	vmpidr_el2, x1
 
+	/*
+	 * When VHE is not in use, early init of EL2 and EL1 needs to be
+	 * done here.
+	 * When VHE _is_ in use, EL1 will not be used in the host and
+	 * requires no configuration, and all non-hyp-specific EL2 setup
+	 * will be done via the _EL1 system register aliases in __cpu_setup.
+	 */
+	cbnz	x2, 1f
+
 	/* sctlr_el1 */
 	mov	x0, #0x0800			// Set/clear RES{1,0} bits
 CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
 	/* Coprocessor traps. */
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0			// Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
 
 	.macro	update_early_cpu_boot_status status, tmp1, tmp2
 	mov	\tmp2, #\status
-	str_l	\tmp2, __early_cpu_boot_status, \tmp1
+	adr_l	\tmp1, __early_cpu_boot_status
+	str	\tmp2, [\tmp1]
 	dmb	sy
 	dc	ivac, \tmp1			// Invalidate potentially stale cache line
 	.endm
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index aef3605a8c47..18a71bcd26ee 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
 	struct device_node *dn;
+	int ret;
 
 	dn = of_get_cpu_node(cpu, NULL);
 	if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
 	/*
 	 * Determine the address from which the CPU is polling.
 	 */
-	if (of_property_read_u64(dn, "cpu-release-addr",
-				 &cpu_release_addr[cpu])) {
+	ret = of_property_read_u64(dn, "cpu-release-addr",
+				   &cpu_release_addr[cpu]);
+	if (ret)
 		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
 		       cpu);
 
-		return -1;
-	}
+	of_node_put(dn);
 
-	return 0;
+	return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index bfc54fd82797..bcbe761a5a3d 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -20,9 +20,10 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
 {
 	u64 val = VTCR_EL2_FLAGS;
+	u64 parange;
 	u64 tmp;
 
 	/*
@@ -30,14 +31,50 @@ void __hyp_text __init_stage2_translation(void)
 	 * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
 	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
 	 */
-	val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+	parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+	val |= parange << 16;
+
+	/* Compute the actual PARange... */
+	switch (parange) {
+	case 0:
+		parange = 32;
+		break;
+	case 1:
+		parange = 36;
+		break;
+	case 2:
+		parange = 40;
+		break;
+	case 3:
+		parange = 42;
+		break;
+	case 4:
+		parange = 44;
+		break;
+	case 5:
+	default:
+		parange = 48;
+		break;
+	}
+
+	/*
+	 * ... and clamp it to 40 bits, unless we have some braindead
+	 * HW that implements less than that. In all cases, we'll
+	 * return that value for the rest of the kernel to decide what
+	 * to do.
+	 */
+	val |= 64 - (parange > 40 ? 40 : parange);
 
 	/*
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
 	 * bit in VTCR_EL2.
 	 */
-	tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
-	val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
+	val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
+			VTCR_EL2_VS_16BIT :
+			VTCR_EL2_VS_8BIT;
 
 	write_sysreg(val, vtcr_el2);
+
+	return parange;
 }
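The switch above implements the ID_AA64MMFR0_EL1.PARange encoding (fields 0..5 map to 32/36/40/42/44/48 bits of physical address) before clamping the stage-2 input size to 40 bits. A hedged userspace sketch of the same decode, as an illustration rather than the kernel code itself:

#include <stdio.h>

/* PARange encodings as handled by the switch in __init_stage2_translation() */
static unsigned int decode_parange(unsigned int field)
{
	static const unsigned int bits[] = { 32, 36, 40, 42, 44, 48 };

	return field < 6 ? bits[field] : 48;	/* 5 and above are treated as 48 */
}

int main(void)
{
	for (unsigned int f = 0; f < 6; f++) {
		unsigned int pa = decode_parange(f);

		/* the kernel clamps to 40 bits before folding the size into VTCR_EL2 */
		printf("PARange %u -> %u bits (clamped: %u)\n",
		       f, pa, pa > 40 ? 40 : pa);
	}
	return 0;
}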
diff --git a/arch/m68k/coldfire/gpio.c b/arch/m68k/coldfire/gpio.c
index 8832083e1cb8..b515809be2b9 100644
--- a/arch/m68k/coldfire/gpio.c
+++ b/arch/m68k/coldfire/gpio.c
@@ -158,11 +158,6 @@ static int mcfgpio_to_irq(struct gpio_chip *chip, unsigned offset)
 		return -EINVAL;
 }
 
-static struct bus_type mcfgpio_subsys = {
-	.name		= "gpio",
-	.dev_name	= "gpio",
-};
-
 static struct gpio_chip mcfgpio_chip = {
 	.label		= "mcfgpio",
 	.request	= mcfgpio_request,
@@ -178,8 +173,7 @@ static struct gpio_chip mcfgpio_chip = {
 
 static int __init mcfgpio_sysinit(void)
 {
-	gpiochip_add_data(&mcfgpio_chip, NULL);
-	return subsys_system_register(&mcfgpio_subsys, NULL);
+	return gpiochip_add_data(&mcfgpio_chip, NULL);
 }
 
 core_initcall(mcfgpio_sysinit);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index d1fc4796025e..3ee6976f6088 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-amiga"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -64,7 +63,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -359,6 +359,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -452,6 +453,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -468,6 +470,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -549,6 +552,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -557,7 +561,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -565,12 +568,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -594,7 +594,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 9bfe8be3658c..e96787ffcbce 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-apollo"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -411,6 +412,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -427,6 +429,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -508,6 +511,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -516,7 +520,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -524,12 +527,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -553,7 +553,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index ebdcfae55580..083fe6beac14 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-atari"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -350,6 +350,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -432,6 +433,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -448,6 +450,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -529,6 +532,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -537,7 +541,6 @@ CONFIG_TEST_FIRMWARE=m
537CONFIG_TEST_UDELAY=m 541CONFIG_TEST_UDELAY=m
538CONFIG_TEST_STATIC_KEYS=m 542CONFIG_TEST_STATIC_KEYS=m
539CONFIG_EARLY_PRINTK=y 543CONFIG_EARLY_PRINTK=y
540CONFIG_ENCRYPTED_KEYS=m
541CONFIG_CRYPTO_RSA=m 544CONFIG_CRYPTO_RSA=m
542CONFIG_CRYPTO_MANAGER=y 545CONFIG_CRYPTO_MANAGER=y
543CONFIG_CRYPTO_USER=m 546CONFIG_CRYPTO_USER=m
@@ -545,12 +548,9 @@ CONFIG_CRYPTO_CRYPTD=m
545CONFIG_CRYPTO_MCRYPTD=m 548CONFIG_CRYPTO_MCRYPTD=m
546CONFIG_CRYPTO_TEST=m 549CONFIG_CRYPTO_TEST=m
547CONFIG_CRYPTO_CCM=m 550CONFIG_CRYPTO_CCM=m
548CONFIG_CRYPTO_GCM=m
549CONFIG_CRYPTO_CHACHA20POLY1305=m 551CONFIG_CRYPTO_CHACHA20POLY1305=m
550CONFIG_CRYPTO_CTS=m
551CONFIG_CRYPTO_LRW=m 552CONFIG_CRYPTO_LRW=m
552CONFIG_CRYPTO_PCBC=m 553CONFIG_CRYPTO_PCBC=m
553CONFIG_CRYPTO_XTS=m
554CONFIG_CRYPTO_KEYWRAP=m 554CONFIG_CRYPTO_KEYWRAP=m
555CONFIG_CRYPTO_XCBC=m 555CONFIG_CRYPTO_XCBC=m
556CONFIG_CRYPTO_VMAC=m 556CONFIG_CRYPTO_VMAC=m
@@ -574,7 +574,6 @@ CONFIG_CRYPTO_SEED=m
574CONFIG_CRYPTO_SERPENT=m 574CONFIG_CRYPTO_SERPENT=m
575CONFIG_CRYPTO_TEA=m 575CONFIG_CRYPTO_TEA=m
576CONFIG_CRYPTO_TWOFISH=m 576CONFIG_CRYPTO_TWOFISH=m
577CONFIG_CRYPTO_ZLIB=m
578CONFIG_CRYPTO_LZO=m 577CONFIG_CRYPTO_LZO=m
579CONFIG_CRYPTO_842=m 578CONFIG_CRYPTO_842=m
580CONFIG_CRYPTO_LZ4=m 579CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 8acc65e54995..475130c06dcb 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-bvme6000"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 0c6a3d52b26e..4339658c200f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-hp300"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -413,6 +414,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -429,6 +431,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -510,6 +513,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -518,7 +522,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -526,12 +529,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -555,7 +555,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 12a8a6cb32f4..831cc8c3a2e2 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mac"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -61,7 +60,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -357,6 +357,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -435,6 +436,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -451,6 +453,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -532,6 +535,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -540,7 +544,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -548,12 +551,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -577,7 +577,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 64ff2dcb34c8..6377afeb522b 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-multi"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -71,7 +70,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -295,7 +293,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -390,6 +390,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -515,6 +516,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -531,6 +533,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -612,6 +615,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -620,7 +624,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -628,12 +631,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -657,7 +657,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 07fc6abcfe0c..4304b3d56262 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme147"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -59,7 +58,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -280,7 +278,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -339,6 +339,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 69903ded88f7..074bda4094ff 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme16x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index bd8401686dde..07b9fa8d7f2e 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-q40"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -346,6 +346,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -426,6 +427,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -442,6 +444,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -523,6 +526,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -531,7 +535,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -539,12 +542,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -568,7 +568,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 5f9fb3ab9636..36e6fae02d45 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -509,7 +513,6 @@ CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -517,12 +520,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -546,7 +546,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5d1c674530e2..903acf929511 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@ CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@ CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@ CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -510,7 +514,6 @@ CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -518,12 +521,9 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -547,7 +547,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index bafaff6dcd7b..a857d82ec509 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		377
+#define NR_syscalls		379
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 0ca729665f29..9fe674bf911f 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -382,5 +382,7 @@
 #define __NR_membarrier		374
 #define __NR_mlock2		375
 #define __NR_copy_file_range	376
+#define __NR_preadv2		377
+#define __NR_pwritev2		378
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 8bb94261ff97..d6fd6d9ced24 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -397,3 +397,5 @@ ENTRY(sys_call_table)
	.long sys_membarrier
	.long sys_mlock2		/* 375 */
	.long sys_copy_file_range
+	.long sys_preadv2
+	.long sys_pwritev2
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 745695db5ba0..f2f264b5aafe 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
	au1x_dma_chan_t *cp;
 
	/*
-	 * We do the intialization on the first channel allocation.
+	 * We do the initialization on the first channel allocation.
	 * We have to wait because of the interrupt handler initialization
	 * which can't be done successfully during board set up.
	 */
@@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
	dp->dscr_source1 = dscr->dscr_source1;
	dp->dscr_cmd1 = dscr->dscr_cmd1;
	nbytes = dscr->dscr_cmd1;
-	/* Allow the caller to specifiy if an interrupt is generated */
+	/* Allow the caller to specify if an interrupt is generated */
	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
	ctp->chan_ptr->ddma_dbell = 0;
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index bdeed9d13c6f..433c4b9a9f0a 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
	if (board == BCSR_WHOAMI_DB1500) {
		c0 = AU1500_GPIO2_INT;
		c1 = AU1500_GPIO5_INT;
-		d0 = AU1500_GPIO0_INT;
-		d1 = AU1500_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3;	/* GPIO number, NOT irq! */
		s0 = AU1500_GPIO1_INT;
		s1 = AU1500_GPIO4_INT;
	} else if (board == BCSR_WHOAMI_DB1100) {
		c0 = AU1100_GPIO2_INT;
		c1 = AU1100_GPIO5_INT;
-		d0 = AU1100_GPIO0_INT;
-		d1 = AU1100_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3;	/* GPIO number, NOT irq! */
		s0 = AU1100_GPIO1_INT;
		s1 = AU1100_GPIO4_INT;
 
@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
	} else if (board == BCSR_WHOAMI_DB1000) {
		c0 = AU1000_GPIO2_INT;
		c1 = AU1000_GPIO5_INT;
-		d0 = AU1000_GPIO0_INT;
-		d1 = AU1000_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3;	/* GPIO number, NOT irq! */
		s0 = AU1000_GPIO1_INT;
		s1 = AU1000_GPIO4_INT;
		platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
	} else if ((board == BCSR_WHOAMI_PB1500) ||
		   (board == BCSR_WHOAMI_PB1500R2)) {
		c0 = AU1500_GPIO203_INT;
-		d0 = AU1500_GPIO201_INT;
+		d0 = 1;	/* GPIO number, NOT irq! */
		s0 = AU1500_GPIO202_INT;
		twosocks = 0;
		flashsize = 64;
@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
	 */
	} else if (board == BCSR_WHOAMI_PB1100) {
		c0 = AU1100_GPIO11_INT;
-		d0 = AU1100_GPIO9_INT;
+		d0 = 9;	/* GPIO number, NOT irq! */
		s0 = AU1100_GPIO10_INT;
		twosocks = 0;
		flashsize = 64;
@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
	} else
		return 0; /* unknown board, no further dev setup to do */
 
-	irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
	irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
	irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
 
@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
		 c0, d0, /*s0*/0, 0, 0);
 
	if (twosocks) {
-		irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
		irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
		irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
 
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index b518f029f5e7..1c01d6eadb08 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
		AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
		AU1000_PCMCIA_IO_PHYS_ADDR,
		AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
-		AU1550_GPIO3_INT, AU1550_GPIO0_INT,
+		AU1550_GPIO3_INT, 0,
		/*AU1550_GPIO21_INT*/0, 0, 0);
 
	db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
		AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
		AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
		AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
-		AU1550_GPIO5_INT, AU1550_GPIO1_INT,
+		AU1550_GPIO5_INT, 1,
		/*AU1550_GPIO22_INT*/0, 0, 1);
 
	platform_device_register(&db1550_nand_dev);
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index eb5117ced95a..618dfd735eed 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -26,8 +26,7 @@
 #include "common.h"
 
 #define AR71XX_BASE_FREQ	40000000
-#define AR724X_BASE_FREQ	5000000
-#define AR913X_BASE_FREQ	5000000
+#define AR724X_BASE_FREQ	40000000
 
 static struct clk *clks[3];
 static struct clk_onecell_data clk_data = {
@@ -103,8 +102,8 @@ static void __init ar724x_clocks_init(void)
	div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
	freq = div * ref_rate;
 
-	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
-	freq *= div;
+	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
+	freq /= div;
 
	cpu_rate = freq;
 
@@ -123,39 +122,6 @@ static void __init ar724x_clocks_init(void)
	clk_add_alias("uart", NULL, "ahb", NULL);
 }
 
-static void __init ar913x_clocks_init(void)
-{
-	unsigned long ref_rate;
-	unsigned long cpu_rate;
-	unsigned long ddr_rate;
-	unsigned long ahb_rate;
-	u32 pll;
-	u32 freq;
-	u32 div;
-
-	ref_rate = AR913X_BASE_FREQ;
-	pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
-
-	div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
-	freq = div * ref_rate;
-
-	cpu_rate = freq;
-
-	div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
-	ddr_rate = freq / div;
-
-	div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
-	ahb_rate = cpu_rate / div;
-
-	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
-
-	clk_add_alias("wdt", NULL, "ahb", NULL);
-	clk_add_alias("uart", NULL, "ahb", NULL);
-}
-
 static void __init ar933x_clocks_init(void)
 {
	unsigned long ref_rate;
@@ -443,10 +409,8 @@ void __init ath79_clocks_init(void)
 {
	if (soc_is_ar71xx())
		ar71xx_clocks_init();
-	else if (soc_is_ar724x())
+	else if (soc_is_ar724x() || soc_is_ar913x())
		ar724x_clocks_init();
-	else if (soc_is_ar913x())
-		ar913x_clocks_init();
	else if (soc_is_ar933x())
		ar933x_clocks_init();
	else if (soc_is_ar934x())
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 959c145a0a2c..ca7ad131d057 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -714,11 +714,11 @@ void bcm47xx_sprom_register_fallbacks(void)
 {
 #if defined(CONFIG_BCM47XX_SSB)
	if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
-		pr_warn("Failed to registered ssb SPROM handler\n");
+		pr_warn("Failed to register ssb SPROM handler\n");
 #endif
 
 #if defined(CONFIG_BCM47XX_BCMA)
	if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
-		pr_warn("Failed to registered bcma SPROM handler\n");
+		pr_warn("Failed to register bcma SPROM handler\n");
 #endif
 }
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 4eff1ef02eff..309d2ad67e4d 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -39,10 +39,11 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)		   += $(obj)/uart-alchemy.o
 endif
 
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
 
-$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
-$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
	$(call cmd,shipped)
 
 targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index adb33e355043..56035e5b7008 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -82,7 +82,7 @@
		};
 
		gisb-arb@400000 {
-			compatible = "brcm,bcm7400-gisb-arb";
+			compatible = "brcm,bcm7435-gisb-arb";
			reg = <0x400000 0xdc>;
			native-endian;
			interrupt-parent = <&sun_l2_intc>;
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 3ad4ba9b12fd..3c2ed9ee5b2f 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -83,7 +83,7 @@
		};
 
		pll: pll-controller@18050000 {
-			compatible = "qca,ar9132-ppl",
+			compatible = "qca,ar9132-pll",
				"qca,ar9130-pll";
			reg = <0x18050000 0x20>;
 
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index e535ee3c26a4..4f1540e5f963 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -18,7 +18,7 @@
		reg = <0x0 0x2000000>;
	};
 
-	extosc: oscillator {
+	extosc: ref {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <40000000>;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
index e59d1b79f24c..2f415d9d0f3c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	gmx_rx_int_en.s.pause_drp = 1;
	/* Skipping gmx_rx_int_en.s.reserved_16_18 */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -89,7 +89,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	/*gmx_rx_int_en.s.phy_spd = 1; */
	/*gmx_rx_int_en.s.phy_link = 1; */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	/*gmx_rx_int_en.s.phy_spd = 1; */
	/*gmx_rx_int_en.s.phy_link = 1; */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	/*gmx_rx_int_en.s.phy_spd = 1; */
	/*gmx_rx_int_en.s.phy_link = 1; */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	/*gmx_rx_int_en.s.phy_spd = 1; */
	/*gmx_rx_int_en.s.phy_link = 1; */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	/*gmx_rx_int_en.s.phy_spd = 1; */
	/*gmx_rx_int_en.s.phy_link = 1; */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
	gmx_rx_int_en.s.pause_drp = 1;
	/* Skipping gmx_rx_int_en.s.reserved_16_18 */
	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 87be167a7a6a..676fab50dd2b 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void)
189 /* 189 /*
190 * Set the size of the PKO command buffers to an odd number of 190 * Set the size of the PKO command buffers to an odd number of
191 * 64bit words. This allows the normal two word send to stay 191 * 64bit words. This allows the normal two word send to stay
192 * aligned and never span a comamnd word buffer. 192 * aligned and never span a command word buffer.
193 */ 193 */
194 config.u64 = 0; 194 config.u64 = 0;
195 config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL; 195 config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
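
The comment fixed above documents a real sizing rule: PKO command buffers hold an odd number of 64-bit words so that a two-word send command never straddles a buffer boundary. A minimal model of why that works, assuming (our assumption, not stated in the diff) that the last word of each buffer is reserved as a link to the next one, leaving an even number of usable words:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        const int size = 9;          /* odd number of 64-bit words */
        const int usable = size - 1; /* last word assumed to be a link */
        int start;

        /* Two-word commands packed back to back always start at an
         * even offset within the usable area, so they never cross
         * into the next buffer. */
        for (start = 0; start + 2 <= 1000; start += 2)
            assert(start % usable + 2 <= usable);

        printf("no two-word command spans a %d-word buffer\n", size);
        return 0;
    }
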
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index b7fa9ae28c36..42412ba0f3bf 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -331,7 +331,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
331 } 331 }
332 332
333 if (!(avail_coremask & (1 << coreid))) { 333 if (!(avail_coremask & (1 << coreid))) {
334 /* core not available, assume, that catched by simple-executive */ 334 /* core not available, assume it's caught by simple-executive */
335 cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); 335 cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
336 cvmx_write_csr(CVMX_CIU_PP_RST, 0); 336 cvmx_write_csr(CVMX_CIU_PP_RST, 0);
337 } 337 }
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 4e36b6e1869c..43e0ba24470c 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y
17CONFIG_IKCONFIG_PROC=y 17CONFIG_IKCONFIG_PROC=y
18CONFIG_LOG_BUF_SHIFT=14 18CONFIG_LOG_BUF_SHIFT=14
19CONFIG_CGROUPS=y 19CONFIG_CGROUPS=y
20CONFIG_MEMCG=y
21CONFIG_CGROUP_SCHED=y
20CONFIG_CGROUP_FREEZER=y 22CONFIG_CGROUP_FREEZER=y
21CONFIG_CGROUP_DEVICE=y
22CONFIG_CPUSETS=y 23CONFIG_CPUSETS=y
24CONFIG_CGROUP_DEVICE=y
23CONFIG_CGROUP_CPUACCT=y 25CONFIG_CGROUP_CPUACCT=y
24CONFIG_MEMCG=y
25CONFIG_MEMCG_KMEM=y
26CONFIG_CGROUP_SCHED=y
27CONFIG_NAMESPACES=y 26CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 27CONFIG_USER_NS=y
29CONFIG_CC_OPTIMIZE_FOR_SIZE=y 28CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y
52# CONFIG_ALLOW_DEV_COREDUMP is not set 51# CONFIG_ALLOW_DEV_COREDUMP is not set
53CONFIG_DMA_CMA=y 52CONFIG_DMA_CMA=y
54CONFIG_CMA_SIZE_MBYTES=32 53CONFIG_CMA_SIZE_MBYTES=32
54CONFIG_MTD=y
55CONFIG_MTD_NAND=y
56CONFIG_MTD_NAND_JZ4780=y
57CONFIG_MTD_UBI=y
58CONFIG_MTD_UBI_FASTMAP=y
55CONFIG_NETDEVICES=y 59CONFIG_NETDEVICES=y
56# CONFIG_NET_VENDOR_ARC is not set 60# CONFIG_NET_VENDOR_ARC is not set
57# CONFIG_NET_CADENCE is not set 61# CONFIG_NET_CADENCE is not set
@@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y
103# CONFIG_PROC_PAGE_MONITOR is not set 107# CONFIG_PROC_PAGE_MONITOR is not set
104CONFIG_TMPFS=y 108CONFIG_TMPFS=y
105CONFIG_CONFIGFS_FS=y 109CONFIG_CONFIGFS_FS=y
106# CONFIG_MISC_FILESYSTEMS is not set 110CONFIG_UBIFS_FS=y
107# CONFIG_NETWORK_FILESYSTEMS is not set 111# CONFIG_NETWORK_FILESYSTEMS is not set
108CONFIG_NLS=y 112CONFIG_NLS=y
109CONFIG_NLS_CODEPAGE_437=y 113CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 8c6f508e59de..d7b99180c6e1 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -5,7 +5,7 @@
5 * Written by Ralf Baechle and Andreas Busse, modified for DECstation 5 * Written by Ralf Baechle and Andreas Busse, modified for DECstation
6 * support by Paul Antoine and Harald Koerfgen. 6 * support by Paul Antoine and Harald Koerfgen.
7 * 7 *
8 * completly rewritten: 8 * completely rewritten:
9 * Copyright (C) 1998 Harald Koerfgen 9 * Copyright (C) 1998 Harald Koerfgen
10 * 10 *
11 * Rewritten extensively for controller-driven IRQ support 11 * Rewritten extensively for controller-driven IRQ support
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 5537b94572b2..0d75b5a0bad4 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -9,7 +9,7 @@
9 * PROM library functions for acquiring/using memory descriptors given to us 9 * PROM library functions for acquiring/using memory descriptors given to us
10 * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set 10 * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
11 * because on some machines like SGI IP27 the ARC memory configuration data 11 * because on some machines like SGI IP27 the ARC memory configuration data
12 * completly bogus and alternate easier to use mechanisms are available. 12 * completely bogus and alternate easier to use mechanisms are available.
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index e7dc785a91ca..af12c1f9f1a8 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -102,7 +102,7 @@ extern void cpu_probe(void);
102extern void cpu_report(void); 102extern void cpu_report(void);
103 103
104extern const char *__cpu_name[]; 104extern const char *__cpu_name[];
105#define cpu_name_string() __cpu_name[smp_processor_id()] 105#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
106 106
107struct seq_file; 107struct seq_file;
108struct notifier_block; 108struct notifier_block;
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index cf92fe733995..c4873e8594ef 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -141,7 +141,7 @@ octeon_main_processor:
141.endm 141.endm
142 142
143/* 143/*
144 * Do SMP slave processor setup necessary before we can savely execute C code. 144 * Do SMP slave processor setup necessary before we can safely execute C code.
145 */ 145 */
146 .macro smp_slave_setup 146 .macro smp_slave_setup
147 .endm 147 .endm
diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
index 13b0751b010a..a229297c880b 100644
--- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
@@ -16,7 +16,7 @@
16 .endm 16 .endm
17 17
18/* 18/*
19 * Do SMP slave processor setup necessary before we can savely execute C code. 19 * Do SMP slave processor setup necessary before we can safely execute C code.
20 */ 20 */
21 .macro smp_slave_setup 21 .macro smp_slave_setup
22 .endm 22 .endm
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index cf4384bfa846..b0b7261ff3ad 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -11,7 +11,7 @@
11#define __ASM_MACH_IP27_IRQ_H 11#define __ASM_MACH_IP27_IRQ_H
12 12
13/* 13/*
14 * A hardwired interrupt number is completly stupid for this system - a 14 * A hardwired interrupt number is completely stupid for this system - a
15 * large configuration might have thousands if not tenthousands of 15 * large configuration might have thousands if not tenthousands of
16 * interrupts. 16 * interrupts.
17 */ 17 */
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index b087cb83da3a..f992c1db876b 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -81,7 +81,7 @@
81 .endm 81 .endm
82 82
83/* 83/*
84 * Do SMP slave processor setup necessary before we can savely execute C code. 84 * Do SMP slave processor setup necessary before we can safely execute C code.
85 */ 85 */
86 .macro smp_slave_setup 86 .macro smp_slave_setup
87 GET_NASID_ASM t1 87 GET_NASID_ASM t1
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index bf8c3e1860e7..7c7708a23baa 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -27,7 +27,7 @@ enum jz_gpio_function {
27 27
28/* 28/*
29 Usually a driver for a SoC component has to request several gpio pins and 29 Usually a driver for a SoC component has to request several gpio pins and
30 configure them as funcion pins. 30 configure them as function pins.
31 jz_gpio_bulk_request can be used to ease this process. 31 jz_gpio_bulk_request can be used to ease this process.
32 Usually one would do something like: 32 Usually one would do something like:
33 33
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b196825a1de9..d4635391c36a 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base;
28 * This function returns the physical base address of the Coherence Manager 28 * This function returns the physical base address of the Coherence Manager
29 * global control block, or 0 if no Coherence Manager is present. It provides 29 * global control block, or 0 if no Coherence Manager is present. It provides
30 * a default implementation which reads the CMGCRBase register where available, 30 * a default implementation which reads the CMGCRBase register where available,
31 * and may be overriden by platforms which determine this address in a 31 * and may be overridden by platforms which determine this address in a
32 * different way by defining a function with the same prototype except for the 32 * different way by defining a function with the same prototype except for the
33 * name mips_cm_phys_base (without underscores). 33 * name mips_cm_phys_base (without underscores).
34 */ 34 */
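
The comment above ("may be overridden by platforms ... defining a function with the same prototype except for the name") describes a link-time override contract. A sketch of the weak-alias idiom the kernel typically uses for this; the alias form is our assumption (the header only states the contract), and phys_addr_t is stubbed to keep the sketch self-contained:

    typedef unsigned long phys_addr_t;   /* stand-in for the kernel type */

    /* Default: in the real code this reads the CMGCRBase register. */
    phys_addr_t __mips_cm_phys_base(void)
    {
        return 0x1fbf8000;               /* placeholder value */
    }

    /* Weak alias: a platform overrides it simply by defining a strong
     * mips_cm_phys_base() with the same prototype. */
    phys_addr_t mips_cm_phys_base(void)
        __attribute__((weak, alias("__mips_cm_phys_base")));
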
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
index 1f6ea8352ca9..20621e1ca238 100644
--- a/arch/mips/include/asm/mips-r2-to-r6-emul.h
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -79,7 +79,7 @@ struct r2_decoder_table {
79}; 79};
80 80
81 81
82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
83 const char *str); 83 const char *str);
84 84
85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR 85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index f7dd17d0dc22..f4f1996e0fac 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -33,7 +33,7 @@
33/* Packet buffers */ 33/* Packet buffers */
34#define CVMX_FPA_PACKET_POOL (0) 34#define CVMX_FPA_PACKET_POOL (0)
35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE 35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
36/* Work queue entrys */ 36/* Work queue entries */
37#define CVMX_FPA_WQE_POOL (1) 37#define CVMX_FPA_WQE_POOL (1)
38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE 38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
39/* PKO queue command buffers */ 39/* PKO queue command buffers */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 19e139c9f337..3e982e0c397e 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -189,7 +189,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
189static inline void *cvmx_phys_to_ptr(uint64_t physical_address) 189static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
190{ 190{
191 if (sizeof(void *) == 8) { 191 if (sizeof(void *) == 8) {
192 /* Just set the top bit, avoiding any TLB uglyness */ 192 /* Just set the top bit, avoiding any TLB ugliness */
193 return CASTPTR(void, 193 return CASTPTR(void,
194 CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 194 CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
195 physical_address)); 195 physical_address));
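
The "just set the top bit" in the corrected comment refers to the MIPS64 XKPHYS window: physical memory is directly visible at fixed virtual addresses, so no TLB entry is needed. A hedged sketch of the round trip (64-bit only; the cache-attribute bits that normally live in bits 61:59 are omitted for simplicity):

    #include <stdint.h>

    #define XKPHYS_BASE 0x8000000000000000ULL  /* top bit = XKPHYS segment */

    static inline void *phys_to_ptr(uint64_t phys)
    {
        /* OR the physical address into the unmapped segment */
        return (void *)(uintptr_t)(XKPHYS_BASE | phys);
    }

    static inline uint64_t ptr_to_phys(const void *ptr)
    {
        /* strip the segment bit to recover the physical address */
        return (uint64_t)(uintptr_t)ptr & ~XKPHYS_BASE;
    }
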
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 8d7a63b52ac7..3206245d1ed6 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s {
269 union { 269 union {
270 u32 cmd_word; 270 u32 cmd_word;
271 struct { 271 struct {
272 u32 didn:4, /* Destination ID */ 272 u32 didn:4, /* Destination ID */
273 sidn:4, /* Source ID */ 273 sidn:4, /* Source ID */
274 pactyp:4, /* Packet type */ 274 pactyp:4, /* Packet type */
275 tnum:5, /* Trans Number */ 275 tnum:5, /* Trans Number */
276 coh:1, /* Coh Transacti */ 276 coh:1, /* Coh Transaction */
277 ds:2, /* Data size */ 277 ds:2, /* Data size */
278 gbr:1, /* GBR enable */ 278 gbr:1, /* GBR enable */
279 vbpm:1, /* VBPM message */ 279 vbpm:1, /* VBPM message */
280 error:1, /* Error occurred */ 280 error:1, /* Error occurred */
281 barr:1, /* Barrier op */ 281 barr:1, /* Barrier op */
282 rsvd:8; 282 rsvd:8;
283 } berr_st; 283 } berr_st;
284 } berr_un; 284 } berr_un;
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 59920b345942..4a9c99050c13 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -147,7 +147,7 @@ struct hpc3_ethregs {
147#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */ 147#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
148#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */ 148#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
149#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */ 149#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
150#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */ 150#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
151 151
152 u32 _unused2[0x1000/4 - 8]; /* padding */ 152 u32 _unused2[0x1000/4 - 8]; /* padding */
153 153
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 26ddfff28c8e..105a9479ac5f 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -144,7 +144,7 @@ struct linux_tinfo {
144struct linux_vdirent { 144struct linux_vdirent {
145 ULONG namelen; 145 ULONG namelen;
146 unsigned char attr; 146 unsigned char attr;
147 char fname[32]; /* XXX imperical, should be a define */ 147 char fname[32]; /* XXX empirical, should be a define */
148}; 148};
149 149
150/* Other stuff for files. */ 150/* Other stuff for files. */
@@ -179,7 +179,7 @@ struct linux_finfo {
179 enum linux_devtypes dtype; 179 enum linux_devtypes dtype;
180 unsigned long namelen; 180 unsigned long namelen;
181 unsigned char attr; 181 unsigned char attr;
182 char name[32]; /* XXX imperical, should be define */ 182 char name[32]; /* XXX empirical, should be define */
183}; 183};
184 184
185/* This describes the vector containing function pointers to the ARC 185/* This describes the vector containing function pointers to the ARC
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index e33f0363235b..feb385180f87 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -355,7 +355,7 @@ struct ioc3_etxd {
355#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */ 355#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
356#define SSCR_RESET 0x80000000 /* reset DMA channels */ 356#define SSCR_RESET 0x80000000 /* reset DMA channels */
357 357
358/* all producer/comsumer pointers are the same bitfield */ 358/* all producer/consumer pointers are the same bitfield */
359#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */ 359#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
360#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */ 360#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
361#define PROD_CONS_PTR_OFF 3 361#define PROD_CONS_PTR_OFF 3
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 5998b13e9764..57ece90f8cf1 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -628,7 +628,7 @@ typedef union h1_icrbb_u {
628/* 628/*
629 * Values for field imsgtype 629 * Values for field imsgtype
630 */ 630 */
631#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ 631#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
632#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ 632#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
633#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */ 633#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
634#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ 634#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 095ecafe6bd3..7f109d4f64a4 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -95,7 +95,7 @@ static inline bool eva_kernel_access(void)
95} 95}
96 96
97/* 97/*
98 * Is a address valid? This does a straighforward calculation rather 98 * Is a address valid? This does a straightforward calculation rather
99 * than tests. 99 * than tests.
100 * 100 *
101 * Address valid if: 101 * Address valid if:
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3129795de940..24ad815c7f38 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,16 +381,18 @@
381#define __NR_membarrier (__NR_Linux + 358) 381#define __NR_membarrier (__NR_Linux + 358)
382#define __NR_mlock2 (__NR_Linux + 359) 382#define __NR_mlock2 (__NR_Linux + 359)
383#define __NR_copy_file_range (__NR_Linux + 360) 383#define __NR_copy_file_range (__NR_Linux + 360)
384#define __NR_preadv2 (__NR_Linux + 361)
385#define __NR_pwritev2 (__NR_Linux + 362)
384 386
385/* 387/*
386 * Offset of the last Linux o32 flavoured syscall 388 * Offset of the last Linux o32 flavoured syscall
387 */ 389 */
388#define __NR_Linux_syscalls 360 390#define __NR_Linux_syscalls 362
389 391
390#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 392#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
391 393
392#define __NR_O32_Linux 4000 394#define __NR_O32_Linux 4000
393#define __NR_O32_Linux_syscalls 360 395#define __NR_O32_Linux_syscalls 362
394 396
395#if _MIPS_SIM == _MIPS_SIM_ABI64 397#if _MIPS_SIM == _MIPS_SIM_ABI64
396 398
@@ -719,16 +721,18 @@
719#define __NR_membarrier (__NR_Linux + 318) 721#define __NR_membarrier (__NR_Linux + 318)
720#define __NR_mlock2 (__NR_Linux + 319) 722#define __NR_mlock2 (__NR_Linux + 319)
721#define __NR_copy_file_range (__NR_Linux + 320) 723#define __NR_copy_file_range (__NR_Linux + 320)
724#define __NR_preadv2 (__NR_Linux + 321)
725#define __NR_pwritev2 (__NR_Linux + 322)
722 726
723/* 727/*
724 * Offset of the last Linux 64-bit flavoured syscall 728 * Offset of the last Linux 64-bit flavoured syscall
725 */ 729 */
726#define __NR_Linux_syscalls 320 730#define __NR_Linux_syscalls 322
727 731
728#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 732#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
729 733
730#define __NR_64_Linux 5000 734#define __NR_64_Linux 5000
731#define __NR_64_Linux_syscalls 320 735#define __NR_64_Linux_syscalls 322
732 736
733#if _MIPS_SIM == _MIPS_SIM_NABI32 737#if _MIPS_SIM == _MIPS_SIM_NABI32
734 738
@@ -1061,15 +1065,17 @@
1061#define __NR_membarrier (__NR_Linux + 322) 1065#define __NR_membarrier (__NR_Linux + 322)
1062#define __NR_mlock2 (__NR_Linux + 323) 1066#define __NR_mlock2 (__NR_Linux + 323)
1063#define __NR_copy_file_range (__NR_Linux + 324) 1067#define __NR_copy_file_range (__NR_Linux + 324)
1068#define __NR_preadv2 (__NR_Linux + 325)
1069#define __NR_pwritev2 (__NR_Linux + 326)
1064 1070
1065/* 1071/*
1066 * Offset of the last N32 flavoured syscall 1072 * Offset of the last N32 flavoured syscall
1067 */ 1073 */
1068#define __NR_Linux_syscalls 324 1074#define __NR_Linux_syscalls 326
1069 1075
1070#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1076#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1071 1077
1072#define __NR_N32_Linux 6000 1078#define __NR_N32_Linux 6000
1073#define __NR_N32_Linux_syscalls 324 1079#define __NR_N32_Linux_syscalls 326
1074 1080
1075#endif /* _UAPI_ASM_UNISTD_H */ 1081#endif /* _UAPI_ASM_UNISTD_H */
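
The hunk wires preadv2/pwritev2 into all three MIPS ABIs and bumps each per-ABI syscall count by two. A hedged userspace sketch of deriving and invoking the new o32 number directly, for a libc that does not yet wrap it; the argument split (32-bit offset halves plus flags) is our reading of the o32 convention, so treat it as illustrative rather than portable:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* o32 numbering from the hunk: __NR_O32_Linux (4000) + slot 361 */
    #ifndef __NR_preadv2
    #define __NR_preadv2 (4000 + 361)
    #endif

    int main(void)
    {
        char buf[64];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        int fd = open("/etc/hostname", O_RDONLY);
        long n;

        if (fd < 0)
            return 1;
        /* fd, vec, vlen, pos_low, pos_high, flags */
        n = syscall(__NR_preadv2, fd, &iov, 1, 0, 0, 0);
        printf("preadv2 returned %ld\n", n);
        close(fd);
        return 0;
    }
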
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f43d4e..760217bbb2fa 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -24,7 +24,7 @@ static char *cm2_tr[8] = {
24 "0x04", "cpc", "0x06", "0x07" 24 "0x04", "cpc", "0x06", "0x07"
25}; 25};
26 26
27/* CM3 Tag ECC transation type */ 27/* CM3 Tag ECC transaction type */
28static char *cm3_tr[16] = { 28static char *cm3_tr[16] = {
29 [0x0] = "ReqNoData", 29 [0x0] = "ReqNoData",
30 [0x1] = "0x1", 30 [0x1] = "0x1",
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 1f5aac7f9ec3..3fff89ae760b 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -940,42 +940,42 @@ repeat:
940 switch (rt) { 940 switch (rt) {
941 case tgei_op: 941 case tgei_op:
942 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) 942 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
943 do_trap_or_bp(regs, 0, "TGEI"); 943 do_trap_or_bp(regs, 0, 0, "TGEI");
944 944
945 MIPS_R2_STATS(traps); 945 MIPS_R2_STATS(traps);
946 946
947 break; 947 break;
948 case tgeiu_op: 948 case tgeiu_op:
949 if (regs->regs[rs] >= MIPSInst_UIMM(inst)) 949 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
950 do_trap_or_bp(regs, 0, "TGEIU"); 950 do_trap_or_bp(regs, 0, 0, "TGEIU");
951 951
952 MIPS_R2_STATS(traps); 952 MIPS_R2_STATS(traps);
953 953
954 break; 954 break;
955 case tlti_op: 955 case tlti_op:
956 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) 956 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
957 do_trap_or_bp(regs, 0, "TLTI"); 957 do_trap_or_bp(regs, 0, 0, "TLTI");
958 958
959 MIPS_R2_STATS(traps); 959 MIPS_R2_STATS(traps);
960 960
961 break; 961 break;
962 case tltiu_op: 962 case tltiu_op:
963 if (regs->regs[rs] < MIPSInst_UIMM(inst)) 963 if (regs->regs[rs] < MIPSInst_UIMM(inst))
964 do_trap_or_bp(regs, 0, "TLTIU"); 964 do_trap_or_bp(regs, 0, 0, "TLTIU");
965 965
966 MIPS_R2_STATS(traps); 966 MIPS_R2_STATS(traps);
967 967
968 break; 968 break;
969 case teqi_op: 969 case teqi_op:
970 if (regs->regs[rs] == MIPSInst_SIMM(inst)) 970 if (regs->regs[rs] == MIPSInst_SIMM(inst))
971 do_trap_or_bp(regs, 0, "TEQI"); 971 do_trap_or_bp(regs, 0, 0, "TEQI");
972 972
973 MIPS_R2_STATS(traps); 973 MIPS_R2_STATS(traps);
974 974
975 break; 975 break;
976 case tnei_op: 976 case tnei_op:
977 if (regs->regs[rs] != MIPSInst_SIMM(inst)) 977 if (regs->regs[rs] != MIPSInst_SIMM(inst))
978 do_trap_or_bp(regs, 0, "TNEI"); 978 do_trap_or_bp(regs, 0, 0, "TNEI");
979 979
980 MIPS_R2_STATS(traps); 980 MIPS_R2_STATS(traps);
981 981
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 2b70723071c3..9083d63b765c 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -109,9 +109,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
109 struct module *me) 109 struct module *me)
110{ 110{
111 Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; 111 Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
112 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
112 Elf_Sym *sym; 113 Elf_Sym *sym;
113 u32 *location; 114 u32 *location;
114 unsigned int i; 115 unsigned int i, type;
115 Elf_Addr v; 116 Elf_Addr v;
116 int res; 117 int res;
117 118
@@ -134,9 +135,21 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
134 return -ENOENT; 135 return -ENOENT;
135 } 136 }
136 137
137 v = sym->st_value + rel[i].r_addend; 138 type = ELF_MIPS_R_TYPE(rel[i]);
139
140 if (type < ARRAY_SIZE(reloc_handlers_rela))
141 handler = reloc_handlers_rela[type];
142 else
143 handler = NULL;
138 144
139 res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); 145 if (!handler) {
146 pr_err("%s: Unknown relocation type %u\n",
147 me->name, type);
148 return -EINVAL;
149 }
150
151 v = sym->st_value + rel[i].r_addend;
152 res = handler(me, location, v);
140 if (res) 153 if (res)
141 return res; 154 return res;
142 } 155 }
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 1833f5171ccd..f9b2936d598d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -197,9 +197,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
197 struct module *me) 197 struct module *me)
198{ 198{
199 Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; 199 Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
200 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
200 Elf_Sym *sym; 201 Elf_Sym *sym;
201 u32 *location; 202 u32 *location;
202 unsigned int i; 203 unsigned int i, type;
203 Elf_Addr v; 204 Elf_Addr v;
204 int res; 205 int res;
205 206
@@ -223,9 +224,21 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
223 return -ENOENT; 224 return -ENOENT;
224 } 225 }
225 226
226 v = sym->st_value; 227 type = ELF_MIPS_R_TYPE(rel[i]);
228
229 if (type < ARRAY_SIZE(reloc_handlers_rel))
230 handler = reloc_handlers_rel[type];
231 else
232 handler = NULL;
227 233
228 res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); 234 if (!handler) {
235 pr_err("%s: Unknown relocation type %u\n",
236 me->name, type);
237 return -EINVAL;
238 }
239
240 v = sym->st_value;
241 res = handler(me, location, v);
229 if (res) 242 if (res)
230 return res; 243 return res;
231 } 244 }
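
Both relocation paths (RELA in module-rela.c above and REL here) now validate the ELF relocation type before indexing the handler table, returning -EINVAL for unknown types instead of jumping through an out-of-range pointer. A self-contained sketch of the same guard pattern, with made-up handler names:

    #include <errno.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    typedef int (*reloc_handler)(unsigned int *location, unsigned long v);

    static int apply_none(unsigned int *location, unsigned long v) { return 0; }
    static int apply_32(unsigned int *location, unsigned long v)
    {
        *location += v;
        return 0;
    }

    /* Sparse table: gaps are NULL, just like out-of-range types. */
    static reloc_handler handlers[] = {
        [0] = apply_none,
        [2] = apply_32,
    };

    static int apply_one(unsigned int type, unsigned int *loc, unsigned long v)
    {
        reloc_handler h = type < ARRAY_SIZE(handlers) ? handlers[type] : NULL;

        if (!h) {
            fprintf(stderr, "unknown relocation type %u\n", type);
            return -EINVAL;   /* fail the module load, don't crash */
        }
        return h(loc, v);
    }

    int main(void)
    {
        unsigned int word = 0;

        printf("type 2 -> %d, word=%u\n", apply_one(2, &word, 5), word);
        printf("type 7 -> %d\n", apply_one(7, &word, 5));
        return 0;
    }
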
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d7b8dd43147a..9bc1191b1ab0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -530,7 +530,7 @@ static void mipspmu_enable(struct pmu *pmu)
530 530
531/* 531/*
532 * MIPS performance counters can be per-TC. The control registers can 532 * MIPS performance counters can be per-TC. The control registers can
533 * not be directly accessed accross CPUs. Hence if we want to do global 533 * not be directly accessed across CPUs. Hence if we want to do global
534 * control, we need cross CPU calls. on_each_cpu() can help us, but we 534 * control, we need cross CPU calls. on_each_cpu() can help us, but we
535 * can not make sure this function is called with interrupts enabled. So 535 * can not make sure this function is called with interrupts enabled. So
536 * here we pause local counters and then grab a rwlock and leave the 536 * here we pause local counters and then grab a rwlock and leave the
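
The corrected comment states a constraint worth spelling out: per-TC counter control registers can only be poked from their own CPU, so global start/stop must be broadcast. on_each_cpu(), which the comment mentions, is the generic broadcast primitive, but it must not be called with interrupts disabled, which is why this driver instead pauses local counters and serialises through a rwlock. A minimal sketch of the broadcast primitive itself (kernel context; names ours):

    #include <linux/smp.h>

    static void pause_local_counters_ipi(void *info)
    {
        /* Runs on each online CPU with IRQs disabled: touch only
         * this CPU's private counter control registers here. */
    }

    static void global_counters_pause(void)
    {
        /* wait=1: return only once every CPU has run the callback */
        on_each_cpu(pause_local_counters_ipi, NULL, 1);
    }
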
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289977cc..fa3f9ebad8f4 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -472,7 +472,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
472 /* 472 /*
473 * Disable all but self interventions. The load from COHCTL is defined 473 * Disable all but self interventions. The load from COHCTL is defined
474 * by the interAptiv & proAptiv SUMs as ensuring that the operation 474 * by the interAptiv & proAptiv SUMs as ensuring that the operation
475 * resulting from the preceeding store is complete. 475 * resulting from the preceding store is complete.
476 */ 476 */
477 uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); 477 uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
478 uasm_i_sw(&p, t0, 0, r_pcohctl); 478 uasm_i_sw(&p, t0, 0, r_pcohctl);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eddd5fd6fdfa..92880cee449e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -615,7 +615,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
615 * allows us to only worry about whether an FP mode switch is in 615 * allows us to only worry about whether an FP mode switch is in
616 * progress when FP is first used in a tasks time slice. Pretty much all 616 * progress when FP is first used in a tasks time slice. Pretty much all
617 * of the mode switch overhead can thus be confined to cases where mode 617 * of the mode switch overhead can thus be confined to cases where mode
618 * switches are actually occuring. That is, to here. However for the 618 * switches are actually occurring. That is, to here. However for the
619 * thread performing the mode switch it may take a while... 619 * thread performing the mode switch it may take a while...
620 */ 620 */
621 if (num_online_cpus() > 1) { 621 if (num_online_cpus() > 1) {
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a56317444bda..d01fe53a6638 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -596,3 +596,5 @@ EXPORT(sys_call_table)
596 PTR sys_membarrier 596 PTR sys_membarrier
597 PTR sys_mlock2 597 PTR sys_mlock2
598 PTR sys_copy_file_range /* 4360 */ 598 PTR sys_copy_file_range /* 4360 */
599 PTR sys_preadv2
600 PTR sys_pwritev2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b2dc14610d0..6b73ecc02597 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -434,4 +434,6 @@ EXPORT(sys_call_table)
434 PTR sys_membarrier 434 PTR sys_membarrier
435 PTR sys_mlock2 435 PTR sys_mlock2
436 PTR sys_copy_file_range /* 5320 */ 436 PTR sys_copy_file_range /* 5320 */
437 PTR sys_preadv2
438 PTR sys_pwritev2
437 .size sys_call_table,.-sys_call_table 439 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2bf5c8593d91..71f99d5f7a06 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -424,4 +424,6 @@ EXPORT(sysn32_call_table)
424 PTR sys_membarrier 424 PTR sys_membarrier
425 PTR sys_mlock2 425 PTR sys_mlock2
426 PTR sys_copy_file_range 426 PTR sys_copy_file_range
427 PTR compat_sys_preadv2 /* 6325 */
428 PTR compat_sys_pwritev2
427 .size sysn32_call_table,.-sysn32_call_table 429 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index c5b759e584c7..91b43eea2d5a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -579,4 +579,6 @@ EXPORT(sys32_call_table)
579 PTR sys_membarrier 579 PTR sys_membarrier
580 PTR sys_mlock2 580 PTR sys_mlock2
581 PTR sys_copy_file_range /* 4360 */ 581 PTR sys_copy_file_range /* 4360 */
582 PTR compat_sys_preadv2
583 PTR compat_sys_pwritev2
582 .size sys32_call_table,.-sys32_call_table 584 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 37708d9af638..27cb638f0824 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -243,6 +243,18 @@ static int __init mips_smp_ipi_init(void)
243 struct irq_domain *ipidomain; 243 struct irq_domain *ipidomain;
244 struct device_node *node; 244 struct device_node *node;
245 245
246 /*
247 * In some cases like qemu-malta, it is desired to try SMP with
248 * a single core. Qemu-malta has no GIC, so an attempt to set any IPIs
249 * would cause a BUG_ON() to be triggered since there's no ipidomain.
250 *
251 * Since IPIs aren't really required for a single core system, skip the
252 * initialisation, which should generally keep any such configurations
253 * happy and only fail hard when trying to truly run SMP.
254 */
255 if (cpumask_weight(cpu_possible_mask) == 1)
256 return 0;
257
246 node = of_irq_find_parent(of_root); 258 node = of_irq_find_parent(of_root);
247 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); 259 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
248 260
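
The added early return makes IPI setup a no-op on single-CPU configurations, so a missing IPI irqdomain (as on QEMU Malta without a GIC) can no longer trip the BUG_ON further down. The shape of the guard, as a hedged kernel-context sketch with the surrounding lookup elided:

    #include <linux/cpumask.h>

    static int __init mips_smp_ipi_init_sketch(void)
    {
        /* Uniprocessor: IPIs are never needed, so succeed without
         * requiring an IPI irqdomain to exist at all. */
        if (cpumask_weight(cpu_possible_mask) == 1)
            return 0;

        /* ... find the IPI domain and reserve the IPIs ... */
        return 0;
    }
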
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bf14da9f3e33..ae0c89d23ad7 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -56,6 +56,7 @@
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/ptrace.h> 57#include <asm/ptrace.h>
58#include <asm/sections.h> 58#include <asm/sections.h>
59#include <asm/siginfo.h>
59#include <asm/tlbdebug.h> 60#include <asm/tlbdebug.h>
60#include <asm/traps.h> 61#include <asm/traps.h>
61#include <asm/uaccess.h> 62#include <asm/uaccess.h>
@@ -871,7 +872,7 @@ out:
871 exception_exit(prev_state); 872 exception_exit(prev_state);
872} 873}
873 874
874void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 875void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
875 const char *str) 876 const char *str)
876{ 877{
877 siginfo_t info = { 0 }; 878 siginfo_t info = { 0 };
@@ -928,7 +929,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
928 default: 929 default:
929 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 930 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
930 die_if_kernel(b, regs); 931 die_if_kernel(b, regs);
931 force_sig(SIGTRAP, current); 932 if (si_code) {
933 info.si_signo = SIGTRAP;
934 info.si_code = si_code;
935 force_sig_info(SIGTRAP, &info, current);
936 } else {
937 force_sig(SIGTRAP, current);
938 }
932 } 939 }
933} 940}
934 941
@@ -1012,7 +1019,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
1012 break; 1019 break;
1013 } 1020 }
1014 1021
1015 do_trap_or_bp(regs, bcode, "Break"); 1022 do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1016 1023
1017out: 1024out:
1018 set_fs(seg); 1025 set_fs(seg);
@@ -1054,7 +1061,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
1054 tcode = (opcode >> 6) & ((1 << 10) - 1); 1061 tcode = (opcode >> 6) & ((1 << 10) - 1);
1055 } 1062 }
1056 1063
1057 do_trap_or_bp(regs, tcode, "Trap"); 1064 do_trap_or_bp(regs, tcode, 0, "Trap");
1058 1065
1059out: 1066out:
1060 set_fs(seg); 1067 set_fs(seg);
@@ -1115,19 +1122,7 @@ no_r2_instr:
1115 if (unlikely(compute_return_epc(regs) < 0)) 1122 if (unlikely(compute_return_epc(regs) < 0))
1116 goto out; 1123 goto out;
1117 1124
1118 if (get_isa16_mode(regs->cp0_epc)) { 1125 if (!get_isa16_mode(regs->cp0_epc)) {
1119 unsigned short mmop[2] = { 0 };
1120
1121 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1122 status = SIGSEGV;
1123 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1124 status = SIGSEGV;
1125 opcode = mmop[0];
1126 opcode = (opcode << 16) | mmop[1];
1127
1128 if (status < 0)
1129 status = simulate_rdhwr_mm(regs, opcode);
1130 } else {
1131 if (unlikely(get_user(opcode, epc) < 0)) 1126 if (unlikely(get_user(opcode, epc) < 0))
1132 status = SIGSEGV; 1127 status = SIGSEGV;
1133 1128
@@ -1142,6 +1137,18 @@ no_r2_instr:
1142 1137
1143 if (status < 0) 1138 if (status < 0)
1144 status = simulate_fp(regs, opcode, old_epc, old31); 1139 status = simulate_fp(regs, opcode, old_epc, old31);
1140 } else if (cpu_has_mmips) {
1141 unsigned short mmop[2] = { 0 };
1142
1143 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1144 status = SIGSEGV;
1145 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1146 status = SIGSEGV;
1147 opcode = mmop[0];
1148 opcode = (opcode << 16) | mmop[1];
1149
1150 if (status < 0)
1151 status = simulate_rdhwr_mm(regs, opcode);
1145 } 1152 }
1146 1153
1147 if (status < 0) 1154 if (status < 0)
@@ -1492,6 +1499,7 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
1492 */ 1499 */
1493asmlinkage void do_watch(struct pt_regs *regs) 1500asmlinkage void do_watch(struct pt_regs *regs)
1494{ 1501{
1502 siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1495 enum ctx_state prev_state; 1503 enum ctx_state prev_state;
1496 u32 cause; 1504 u32 cause;
1497 1505
@@ -1512,7 +1520,7 @@ asmlinkage void do_watch(struct pt_regs *regs)
1512 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { 1520 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1513 mips_read_watch_registers(); 1521 mips_read_watch_registers();
1514 local_irq_enable(); 1522 local_irq_enable();
1515 force_sig(SIGTRAP, current); 1523 force_sig_info(SIGTRAP, &info, current);
1516 } else { 1524 } else {
1517 mips_clear_watch_registers(); 1525 mips_clear_watch_registers();
1518 local_irq_enable(); 1526 local_irq_enable();
@@ -2214,7 +2222,7 @@ void __init trap_init(void)
2214 2222
2215 /* 2223 /*
2216 * Copy the generic exception handlers to their final destination. 2224 * Copy the generic exception handlers to their final destination.
2217 * This will be overriden later as suitable for a particular 2225 * This will be overridden later as suitable for a particular
2218 * configuration. 2226 * configuration.
2219 */ 2227 */
2220 set_handler(0x180, &except_vec3_generic, 0x80); 2228 set_handler(0x180, &except_vec3_generic, 0x80);
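
The thread running through these traps.c hunks: do_trap_or_bp() grows an si_code parameter so that breakpoints (TRAP_BRKPT) and watchpoints (TRAP_HWBKPT) deliver a precise siginfo, while callers passing 0 keep the old bare SIGTRAP. A hedged sketch of just the delivery split (kernel context of this era; printing and die_if_kernel() paths elided):

    #include <linux/sched.h>
    #include <linux/signal.h>
    #include <asm/siginfo.h>

    static void deliver_trap_signal(int si_code)
    {
        siginfo_t info = { 0 };

        if (si_code) {
            /* precise reason available, e.g. TRAP_BRKPT */
            info.si_signo = SIGTRAP;
            info.si_code = si_code;
            force_sig_info(SIGTRAP, &info, current);
        } else {
            /* legacy path: SIGTRAP with no specific si_code */
            force_sig(SIGTRAP, current);
        }
    }
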
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 490cea569d57..5c62065cbf22 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
885{ 885{
886 union mips_instruction insn; 886 union mips_instruction insn;
887 unsigned long value; 887 unsigned long value;
888 unsigned int res; 888 unsigned int res, preempted;
889 unsigned long origpc; 889 unsigned long origpc;
890 unsigned long orig31; 890 unsigned long orig31;
891 void __user *fault_addr = NULL; 891 void __user *fault_addr = NULL;
@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1226 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) 1226 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
1227 goto sigbus; 1227 goto sigbus;
1228 1228
1229 /* 1229 do {
1230 * Disable preemption to avoid a race between copying 1230 /*
1231 * state from userland, migrating to another CPU and 1231 * If we have live MSA context keep track of
1232 * updating the hardware vector register below. 1232 * whether we get preempted in order to avoid
1233 */ 1233 * the register context we load being clobbered
1234 preempt_disable(); 1234 * by the live context as it's saved during
1235 1235 * preemption. If we don't have live context
1236 res = __copy_from_user_inatomic(fpr, addr, 1236 * then it can't be saved to clobber the value
1237 sizeof(*fpr)); 1237 * we load.
1238 if (res) 1238 */
1239 goto fault; 1239 preempted = test_thread_flag(TIF_USEDMSA);
1240 1240
1241 /* 1241 res = __copy_from_user_inatomic(fpr, addr,
1242 * Update the hardware register if it is in use by the 1242 sizeof(*fpr));
1243 * task in this quantum, in order to avoid having to 1243 if (res)
1244 * save & restore the whole vector context. 1244 goto fault;
1245 */
1246 if (test_thread_flag(TIF_USEDMSA))
1247 write_msa_wr(wd, fpr, df);
1248 1245
1249 preempt_enable(); 1246 /*
1247 * Update the hardware register if it is in use
1248 * by the task in this quantum, in order to
1249 * avoid having to save & restore the whole
1250 * vector context.
1251 */
1252 preempt_disable();
1253 if (test_thread_flag(TIF_USEDMSA)) {
1254 write_msa_wr(wd, fpr, df);
1255 preempted = 0;
1256 }
1257 preempt_enable();
1258 } while (preempted);
1250 break; 1259 break;
1251 1260
1252 case msa_st_op: 1261 case msa_st_op:
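
The rewritten MSA load path above is a lock-free retry loop: the userspace copy runs preemptibly, and the hardware vector register is only written under preempt_disable(); if the task had live MSA context and might have been preempted between copy and commit, the whole step is redone, since a context switch would have saved stale state over the freshly loaded value. The generic shape of the pattern (MIPS kernel context; helper name ours):

    #include <linux/preempt.h>
    #include <linux/thread_info.h>
    #include <linux/uaccess.h>

    static int load_vector_reg(void *state, const void __user *src, size_t len)
    {
        int preempted;

        do {
            /* Only live context can be clobbered by a switch that
             * saves over it; remember whether that was possible. */
            preempted = test_thread_flag(TIF_USEDMSA);

            if (copy_from_user(state, src, len))
                return -EFAULT;

            preempt_disable();
            if (test_thread_flag(TIF_USEDMSA)) {
                /* still live on this CPU: commit to hardware, e.g.
                 * write_msa_wr(wd, state, df); */
                preempted = 0;
            }
            preempt_enable();
        } while (preempted);

        return 0;
    }
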
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a08c43946247..e0e1d0a611fc 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -632,7 +632,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
632 632
633 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); 633 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
634 634
635 /* Alocate new kernel and user ASIDs if needed */ 635 /* Allocate new kernel and user ASIDs if needed */
636 636
637 local_irq_save(flags); 637 local_irq_save(flags);
638 638
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index ad988000563f..c4038d2a724c 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -500,7 +500,7 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
500 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); 500 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
501 501
502 /* 502 /*
503 * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) 503 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
504 */ 504 */
505 kvm_write_c0_guest_intctl(cop0, 0xFC000000); 505 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
506 506
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index ad3c73436777..47d26c805eac 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -97,7 +97,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
97{ 97{
98 assert(xm); /* we don't gen exact zeros (probably should) */ 98 assert(xm); /* we don't gen exact zeros (probably should) */
99 99
100 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */ 100 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */
101 assert(xm & (DP_HIDDEN_BIT << 3)); 101 assert(xm & (DP_HIDDEN_BIT << 3));
102 102
103 if (xe < DP_EMIN) { 103 if (xe < DP_EMIN) {
@@ -165,7 +165,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
165 /* strip grs bits */ 165 /* strip grs bits */
166 xm >>= 3; 166 xm >>= 3;
167 167
168 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 168 assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
169 assert(xe >= DP_EMIN); 169 assert(xe >= DP_EMIN);
170 170
171 if (xe > DP_EMAX) { 171 if (xe > DP_EMAX) {
@@ -198,7 +198,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
198 ieee754_setcx(IEEE754_UNDERFLOW); 198 ieee754_setcx(IEEE754_UNDERFLOW);
199 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); 199 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
200 } else { 200 } else {
201 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 201 assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
202 assert(xm & DP_HIDDEN_BIT); 202 assert(xm & DP_HIDDEN_BIT);
203 203
204 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 204 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index def00ffc50fc..e0b2c450b963 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -97,7 +97,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
97{ 97{
98 assert(xm); /* we don't gen exact zeros (probably should) */ 98 assert(xm); /* we don't gen exact zeros (probably should) */
99 99
100 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */ 100 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */
101 assert(xm & (SP_HIDDEN_BIT << 3)); 101 assert(xm & (SP_HIDDEN_BIT << 3));
102 102
103 if (xe < SP_EMIN) { 103 if (xe < SP_EMIN) {
@@ -163,7 +163,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
163 /* strip grs bits */ 163 /* strip grs bits */
164 xm >>= 3; 164 xm >>= 3;
165 165
166 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 166 assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
167 assert(xe >= SP_EMIN); 167 assert(xe >= SP_EMIN);
168 168
169 if (xe > SP_EMAX) { 169 if (xe > SP_EMAX) {
@@ -196,7 +196,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
196 ieee754_setcx(IEEE754_UNDERFLOW); 196 ieee754_setcx(IEEE754_UNDERFLOW);
197 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); 197 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
198 } else { 198 } else {
199 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 199 assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
200 assert(xm & SP_HIDDEN_BIT); 200 assert(xm & SP_HIDDEN_BIT);
201 201
202 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); 202 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index dc7c5a5214a9..026cb59a914d 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void)
158 return 1; 158 return 1;
159} 159}
160 160
161/* XXX Check with wje if the Indy caches can differenciate between 161/* XXX Check with wje if the Indy caches can differentiate between
162 writeback + invalidate and just invalidate. */ 162 writeback + invalidate and just invalidate. */
163static struct bcache_ops indy_sc_ops = { 163static struct bcache_ops indy_sc_ops = {
164 .bc_enable = indy_sc_enable, 164 .bc_enable = indy_sc_enable,
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5037d5868cef..c17d7627f872 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -19,6 +19,7 @@
19#include <asm/cpu.h> 19#include <asm/cpu.h>
20#include <asm/cpu-type.h> 20#include <asm/cpu-type.h>
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#include <asm/hazards.h>
22#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
23#include <asm/pgtable.h> 24#include <asm/pgtable.h>
24#include <asm/tlb.h> 25#include <asm/tlb.h>
@@ -486,6 +487,10 @@ static void r4k_tlb_configure(void)
486 * be set to fixed-size pages. 487 * be set to fixed-size pages.
487 */ 488 */
488 write_c0_pagemask(PM_DEFAULT_MASK); 489 write_c0_pagemask(PM_DEFAULT_MASK);
490 back_to_back_c0_hazard();
491 if (read_c0_pagemask() != PM_DEFAULT_MASK)
492 panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
493
489 write_c0_wired(0); 494 write_c0_wired(0);
490 if (current_cpu_type() == CPU_R10000 || 495 if (current_cpu_type() == CPU_R10000 ||
491 current_cpu_type() == CPU_R12000 || 496 current_cpu_type() == CPU_R12000 ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5a04b6f5c6fb..84c6e3fda84a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -12,7 +12,7 @@
12 * Copyright (C) 2011 MIPS Technologies, Inc. 12 * Copyright (C) 2011 MIPS Technologies, Inc.
13 * 13 *
14 * ... and the days got worse and worse and now you see 14 * ... and the days got worse and worse and now you see
15 * I've gone completly out of my mind. 15 * I've gone completely out of my mind.
16 * 16 *
17 * They're coming to take me a away haha 17 * They're coming to take me a away haha
18 * they're coming to take me a away hoho hihi haha 18 * they're coming to take me a away hoho hihi haha
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 8d0eb2643248..f1f88291451e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2000 by Silicon Graphics, Inc. 7 * Copyright (C) 2000 by Silicon Graphics, Inc.
8 * Copyright (C) 2004 by Christoph Hellwig 8 * Copyright (C) 2004 by Christoph Hellwig
9 * 9 *
10 * On SGI IP27 the ARC memory configuration data is completly bogus but 10 * On SGI IP27 the ARC memory configuration data is completely bogus but
11 * alternate easier to use mechanisms are available. 11 * alternate easier to use mechanisms are available.
12 */ 12 */
13#include <linux/init.h> 13#include <linux/init.h>
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
index c2cfcb121e34..2fcefe720283 100644
--- a/arch/nios2/lib/memset.c
+++ b/arch/nios2/lib/memset.c
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
68 "=r" (charcnt), /* %1 Output */ 68 "=r" (charcnt), /* %1 Output */
69 "=r" (dwordcnt), /* %2 Output */ 69 "=r" (dwordcnt), /* %2 Output */
70 "=r" (fill8reg), /* %3 Output */ 70 "=r" (fill8reg), /* %3 Output */
71 "=r" (wrkrega) /* %4 Output */ 71 "=&r" (wrkrega) /* %4 Output only */
72 : "r" (c), /* %5 Input */ 72 : "r" (c), /* %5 Input */
73 "0" (s), /* %0 Input/Output */ 73 "0" (s), /* %0 Input/Output */
74 "1" (count) /* %1 Input/Output */ 74 "1" (count) /* %1 Input/Output */
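
The one-character nios2 fix above ("=r" to "=&r") marks the scratch register as earlyclobber: it is written before the asm has finished consuming its inputs, so the compiler must not allocate it to the same register as any input. A minimal x86-64 GCC illustration of the same hazard, offered as our own example rather than the nios2 code:

    /* Computes a + b + c; tmp is written while b and c are still
     * needed, so both outputs carry '&' (earlyclobber). Dropping the
     * '&' would let GCC overlap tmp with an input and corrupt it. */
    static inline unsigned long add3(unsigned long a, unsigned long b,
                                     unsigned long c)
    {
        unsigned long out, tmp;

        asm("mov %[a], %[t]\n\t"
            "add %[b], %[t]\n\t"
            "mov %[t], %[o]\n\t"
            "add %[c], %[o]"
            : [o] "=&r" (out), [t] "=&r" (tmp)
            : [a] "r" (a), [b] "r" (b), [c] "r" (c));
        return out;
    }
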
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bd3c873951a1..88cfaa8af78e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -4,8 +4,8 @@ config PARISC
4 select ARCH_MIGHT_HAVE_PC_PARPORT 4 select ARCH_MIGHT_HAVE_PC_PARPORT
5 select HAVE_IDE 5 select HAVE_IDE
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_FUNCTION_TRACER if 64BIT 7 select HAVE_FUNCTION_TRACER
8 select HAVE_FUNCTION_GRAPH_TRACER if 64BIT 8 select HAVE_FUNCTION_GRAPH_TRACER
9 select ARCH_WANT_FRAME_POINTERS 9 select ARCH_WANT_FRAME_POINTERS
10 select RTC_CLASS 10 select RTC_CLASS
11 select RTC_DRV_GENERIC 11 select RTC_DRV_GENERIC
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index bc989e522a04..68b7cbd0810a 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -2,9 +2,13 @@ menu "Kernel hacking"
2 2
3source "lib/Kconfig.debug" 3source "lib/Kconfig.debug"
4 4
5config TRACE_IRQFLAGS_SUPPORT
6 def_bool y
7
5config DEBUG_RODATA 8config DEBUG_RODATA
6 bool "Write protect kernel read-only data structures" 9 bool "Write protect kernel read-only data structures"
7 depends on DEBUG_KERNEL 10 depends on DEBUG_KERNEL
11 default y
8 help 12 help
9 Mark the kernel read-only data as write-protected in the pagetables, 13 Mark the kernel read-only data as write-protected in the pagetables,
10 in order to catch accidental (and incorrect) writes to such const 14 in order to catch accidental (and incorrect) writes to such const
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 965a0999fc4c..75cb451b1f03 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -62,9 +62,7 @@ cflags-y += -mdisable-fpregs
62 62
63# Without this, "ld -r" results in .text sections that are too big 63# Without this, "ld -r" results in .text sections that are too big
64# (> 0x40000) for branches to reach stubs. 64# (> 0x40000) for branches to reach stubs.
65ifndef CONFIG_FUNCTION_TRACER 65cflags-y += -ffunction-sections
66 cflags-y += -ffunction-sections
67endif
68 66
69# Use long jumps instead of long branches (needed if your linker fails to 67# Use long jumps instead of long branches (needed if your linker fails to
70# link a too big vmlinux executable). Not enabled for building modules. 68# link a too big vmlinux executable). Not enabled for building modules.
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 544ed8ef87eb..24cd81d58d70 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -4,23 +4,7 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5extern void mcount(void); 5extern void mcount(void);
6 6
7/* 7#define MCOUNT_INSN_SIZE 4
8 * Stack of return addresses for functions of a thread.
9 * Used in struct thread_info
10 */
11struct ftrace_ret_stack {
12 unsigned long ret;
13 unsigned long func;
14 unsigned long long calltime;
15};
16
17/*
18 * Primary handler of a function return.
19 * It relays on ftrace_return_to_handler.
20 * Defined in entry.S
21 */
22extern void return_to_handler(void);
23
24 8
25extern unsigned long return_address(unsigned int); 9extern unsigned long return_address(unsigned int);
26 10
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index d4dd6e58682c..7955e43f3f3f 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -44,20 +44,18 @@ static inline long access_ok(int type, const void __user * addr,
44#define LDD_USER(ptr) BUILD_BUG() 44#define LDD_USER(ptr) BUILD_BUG()
45#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr) 45#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr)
46#define STD_USER(x, ptr) __put_user_asm64(x, ptr) 46#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
47#define ASM_WORD_INSN ".word\t"
48#else 47#else
49#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr) 48#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr)
50#define LDD_USER(ptr) __get_user_asm("ldd", ptr) 49#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
51#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr) 50#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr)
52#define STD_USER(x, ptr) __put_user_asm("std", x, ptr) 51#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
53#define ASM_WORD_INSN ".dword\t"
54#endif 52#endif
55 53
56/* 54/*
57 * The exception table contains two values: the first is an address 55 * The exception table contains two values: the first is the relative offset to
58 * for an instruction that is allowed to fault, and the second is 56 * the address of the instruction that is allowed to fault, and the second is
59 * the address to the fixup routine. Even on a 64bit kernel we could 57 * the relative offset to the address of the fixup routine. Since relative
60 * use a 32bit (unsigned int) address here. 58 * addresses are used, 32bit values are sufficient even on a 64bit kernel.
61 */ 59 */
62 60
63#define ARCH_HAS_RELATIVE_EXTABLE 61#define ARCH_HAS_RELATIVE_EXTABLE
@@ -77,6 +75,7 @@ struct exception_table_entry {
77 */ 75 */
78struct exception_data { 76struct exception_data {
79 unsigned long fault_ip; 77 unsigned long fault_ip;
78 unsigned long fault_gp;
80 unsigned long fault_space; 79 unsigned long fault_space;
81 unsigned long fault_addr; 80 unsigned long fault_addr;
82}; 81};
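
The rewritten comment above captures the point of ARCH_HAS_RELATIVE_EXTABLE: each field stores target minus field address, so a signed 32-bit offset spans +/-2 GiB around the table itself no matter where a 64-bit kernel is loaded. A self-contained model of storing and recovering the two addresses:

    #include <stdint.h>
    #include <stdio.h>

    struct exception_table_entry {
        int32_t insn;    /* offset from &insn to the faulting insn */
        int32_t fixup;   /* offset from &fixup to the fixup routine */
    };

    static uintptr_t ex_insn_addr(const struct exception_table_entry *e)
    {
        return (uintptr_t)&e->insn + e->insn;
    }

    static uintptr_t ex_fixup_addr(const struct exception_table_entry *e)
    {
        return (uintptr_t)&e->fixup + e->fixup;
    }

    int main(void)
    {
        static char insn_site, fixup_site;       /* stand-in targets */
        static struct exception_table_entry e;   /* same storage area */

        e.insn  = (int32_t)((uintptr_t)&insn_site  - (uintptr_t)&e.insn);
        e.fixup = (int32_t)((uintptr_t)&fixup_site - (uintptr_t)&e.fixup);
        printf("insn ok: %d, fixup ok: %d\n",
               ex_insn_addr(&e) == (uintptr_t)&insn_site,
               ex_fixup_addr(&e) == (uintptr_t)&fixup_site);
        return 0;
    }
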
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index ff87b4603e3d..69a11183d48d 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -15,11 +15,7 @@ ifdef CONFIG_FUNCTION_TRACER
15# Do not profile debug and lowlevel utilities 15# Do not profile debug and lowlevel utilities
16CFLAGS_REMOVE_ftrace.o = -pg 16CFLAGS_REMOVE_ftrace.o = -pg
17CFLAGS_REMOVE_cache.o = -pg 17CFLAGS_REMOVE_cache.o = -pg
18CFLAGS_REMOVE_irq.o = -pg
19CFLAGS_REMOVE_pacache.o = -pg
20CFLAGS_REMOVE_perf.o = -pg 18CFLAGS_REMOVE_perf.o = -pg
21CFLAGS_REMOVE_traps.o = -pg
22CFLAGS_REMOVE_unaligned.o = -pg
23CFLAGS_REMOVE_unwind.o = -pg 19CFLAGS_REMOVE_unwind.o = -pg
24endif 20endif
25 21
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index d2f62570a7b1..78d30d2ea2d8 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -299,6 +299,7 @@ int main(void)
299#endif 299#endif
300 BLANK(); 300 BLANK();
301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
302 DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
302 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 303 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
303 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); 304 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
304 BLANK(); 305 BLANK();
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 91c2a39cd5aa..67001277256c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
 	if (!mapping)
 		return;
 
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 
 	/* We have carefully arranged in arch_get_unmapped_area() that
 	 * *any* mappings of a file are always congruently mapped (whether
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 623496c11756..39127d3e70e5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1970,43 +1970,98 @@ pt_regs_ok:
 	b	intr_restore
 	copy	%r25,%r16
 
-	.import schedule,code
 syscall_do_resched:
-	BL	schedule,%r2
+	load32	syscall_check_resched,%r2 /* if resched, we start over again */
+	load32	schedule,%r19
+	bv	%r0(%r19)		/* jumps to schedule() */
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
 	nop
 #endif
-	b	syscall_check_resched	/* if resched, we start over again */
-	nop
 ENDPROC(syscall_exit)
 
 
 #ifdef CONFIG_FUNCTION_TRACER
+
 	.import ftrace_function_trampoline,code
-ENTRY(_mcount)
-	copy	%r3, %arg2
+	.align L1_CACHE_BYTES
+	.globl mcount
+	.type  mcount, @function
+ENTRY(mcount)
+_mcount:
+	.export _mcount,data
+	.proc
+	.callinfo caller,frame=0
+	.entry
+	/*
+	 * The 64bit mcount() function pointer needs 4 dwords, of which the
+	 * first two are free.  We optimize it here and put 2 instructions for
+	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
+	 * have all on one L1 cacheline.
+	 */
 	b	ftrace_function_trampoline
+	copy	%r3, %arg2	/* caller original %sp */
+ftrace_stub:
+	.globl ftrace_stub
+	.type  ftrace_stub, @function
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
+	bv	%r0(%rp)
+#endif
 	nop
-ENDPROC(_mcount)
+#ifdef CONFIG_64BIT
+	.dword mcount
+	.dword 0 /* code in head.S puts value of global gp here */
+#endif
+	.exit
+	.procend
+ENDPROC(mcount)
 
+	.align 8
+	.globl return_to_handler
+	.type  return_to_handler, @function
 ENTRY(return_to_handler)
-	load32	return_trampoline, %rp
-	copy	%ret0, %arg0
-	copy	%ret1, %arg1
-	b	ftrace_return_to_handler
-	nop
-return_trampoline:
-	copy	%ret0, %rp
-	copy	%r23, %ret0
-	copy	%r24, %ret1
+	.proc
+	.callinfo caller,frame=FRAME_SIZE
+	.entry
+	.export parisc_return_to_handler,data
+parisc_return_to_handler:
+	copy %r3,%r1
+	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
+	copy %sp,%r3
+	STREGM %r1,FRAME_SIZE(%sp)
+	STREG %ret0,8(%r3)
+	STREG %ret1,16(%r3)
 
-.globl ftrace_stub
-ftrace_stub:
+#ifdef CONFIG_64BIT
+	loadgp
+#endif
+
+	/* call ftrace_return_to_handler(0) */
+#ifdef CONFIG_64BIT
+	ldo -16(%sp),%ret1	/* Reference param save area */
+#endif
+	BL ftrace_return_to_handler,%r2
+	ldi 0,%r26
+	copy %ret0,%rp
+
+	/* restore original return values */
+	LDREG 8(%r3),%ret0
+	LDREG 16(%r3),%ret1
+
+	/* return from function */
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
 	bv	%r0(%rp)
-	nop
+#endif
+	LDREGM -FRAME_SIZE(%sp),%r3
+	.exit
+	.procend
 ENDPROC(return_to_handler)
+
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSTACKS
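
The hunk's own comment says the 64-bit mcount function pointer needs 4 dwords, two of them free. A rough C view of the PA-RISC 64-bit function descriptor the trick relies on (field names follow the kernel's Elf64_Fdesc, but treat this sketch as illustration rather than a definitive ABI reference):

	/* Illustrative layout of the descriptor _mcount doubles as. */
	struct parisc64_fdesc {
		unsigned long dummy[2];	/* unused by callers through the FPTR:
					 * the patch packs the mcount() and
					 * ftrace_stub() instructions here */
		unsigned long addr;	/* entry point: the ".dword mcount" */
		unsigned long gp;	/* global pointer: head.S fills this
					 * via "std %dp,0x18(%r10)" */
	};

Overlaying code on the two unused dwords keeps the descriptor, the call into the trampoline, and ftrace_stub() all on a single L1 cacheline.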
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 559d400f9385..b13f9ec6f294 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -1,6 +1,6 @@
 /*
  * Code for tracing calls in Linux kernel.
- * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  *
  * based on code for x86 which is:
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
@@ -13,104 +13,21 @@
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
+#include <asm/assembly.h>
 #include <asm/sections.h>
 #include <asm/ftrace.h>
 
 
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)
-			dereference_function_descriptor(&panic);
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long retval0,
-				       unsigned long retval1)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = local_clock();
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)
-			dereference_function_descriptor(&panic);
-	}
-
-	/* HACK: we hand over the old functions' return values
-	   in %r23 and %r24. Assembly in entry.S will take care
-	   and move those to their final registers %ret0 and %ret1 */
-	asm(	"copy %0, %%r23 \n\t"
-		"copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
 	unsigned long old;
-	unsigned long long calltime;
 	struct ftrace_graph_ent trace;
+	extern int parisc_return_to_handler;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		return;
@@ -119,64 +36,47 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 
 	old = *parent;
-	*parent = (unsigned long)
-			dereference_function_descriptor(&return_to_handler);
 
-	if (unlikely(!__kernel_text_address(old))) {
-		ftrace_graph_stop();
-		*parent = old;
-		WARN_ON(1);
-		return;
-	}
-
-	calltime = local_clock();
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
 
-	if (push_return_trace(old, calltime,
-				self_addr, &trace.depth) == -EBUSY) {
-		*parent = old;
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
 		return;
-	}
 
-	trace.func = self_addr;
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+				     0 ) == -EBUSY)
+		return;
 
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
-		*parent = old;
-	}
+	/* activate parisc_return_to_handler() as return point */
+	*parent = (unsigned long) &parisc_return_to_handler;
 }
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-
-void ftrace_function_trampoline(unsigned long parent,
+void notrace ftrace_function_trampoline(unsigned long parent,
 				unsigned long self_addr,
 				unsigned long org_sp_gr3)
 {
-	extern ftrace_func_t ftrace_trace_function;
+	extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
+	extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
 
 	if (ftrace_trace_function != ftrace_stub) {
-		ftrace_trace_function(parent, self_addr);
+		/* struct ftrace_ops *op, struct pt_regs *regs); */
+		ftrace_trace_function(parent, self_addr, NULL, NULL);
 		return;
 	}
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (ftrace_graph_entry && ftrace_graph_return) {
-		unsigned long sp;
+	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+	    ftrace_graph_entry != ftrace_graph_entry_stub) {
 		unsigned long *parent_rp;
 
-		asm volatile ("copy %%r30, %0" : "=r"(sp));
-		/* sanity check: is stack pointer which we got from
-		   assembler function in entry.S in a reasonable
-		   range compared to current stack pointer? */
-		if ((sp - org_sp_gr3) > 0x400)
-			return;
-
 		/* calculate pointer to %rp in stack */
-		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
 		/* sanity check: parent_rp should hold parent */
 		if (*parent_rp != parent)
 			return;
 
 		prepare_ftrace_return(parent_rp, self_addr);
 		return;
 	}
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 75aa0db9f69e..bbbe360b458f 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -129,6 +129,15 @@ $pgt_fill_loop:
 	/* And the stack pointer too */
 	ldo	THREAD_SZ_ALGN(%r6),%sp
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+	.import _mcount,data
+	/* initialize mcount FPTR */
+	/* Get the global data pointer */
+	loadgp
+	load32	PA(_mcount), %r10
+	std	%dp,0x18(%r10)
+#endif
+
 #ifdef CONFIG_SMP
 	/* Set the smp rendezvous address into page zero.
 	** It would be safer to do this in init_smp_config() but
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index b9d75d9fa9ac..a0ecdb4abcc8 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -660,6 +660,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 		}
 		*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
 		break;
+	case R_PARISC_PCREL32:
+		/* 32-bit PC relative address */
+		*loc = val - dot - 8 + addend;
+		break;
 
 	default:
 		printk(KERN_ERR "module %s: Unknown relocation: %u\n",
@@ -788,6 +792,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 			CHECK_RELOC(val, 22);
 			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
 			break;
+		case R_PARISC_PCREL32:
+			/* 32-bit PC relative address */
+			*loc = val - dot - 8 + addend;
+			break;
 		case R_PARISC_DIR64:
 			/* 64-bit effective address */
 			*loc64 = val + addend;
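
Both hunks add the same R_PARISC_PCREL32 handler, where dot is the address of the word being patched. A tiny sketch of the computation (the helper name is hypothetical; the -8 bias reflects that PA-RISC PC-relative values are measured from dot + 8, i.e. two instructions ahead):

	/* Illustrative only: recompute a 32-bit PC-relative relocation. */
	static unsigned int pcrel32(unsigned long sym_val, unsigned long dot,
				    long addend)
	{
		return (unsigned int)(sym_val + addend - (dot + 8));
	}

This is the relocation type the new relative exception table emits, which is why module loading needs to learn it in the same series.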
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 568b2c61ea02..3cad8aadc69e 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups */
-extern void fixup_get_user_skip_1(void);
-extern void fixup_get_user_skip_2(void);
-extern void fixup_put_user_skip_1(void);
-extern void fixup_put_user_skip_2(void);
+/* Global fixups - defined as int to avoid creation of function pointers */
+extern int fixup_get_user_skip_1;
+extern int fixup_get_user_skip_2;
+extern int fixup_put_user_skip_1;
+extern int fixup_put_user_skip_2;
 EXPORT_SYMBOL(fixup_get_user_skip_1);
 EXPORT_SYMBOL(fixup_get_user_skip_2);
 EXPORT_SYMBOL(fixup_put_user_skip_1);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 16e0735e2f46..97d6b208e129 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -795,6 +795,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
 	if (fault_space == 0 && !faulthandler_disabled())
 	{
+		/* Clean up and return if in exception table. */
+		if (fixup_exception(regs))
+			return;
 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
 		parisc_terminate("Kernel Fault", regs, code, fault_address);
 	}
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index 536ef66bb94b..1052b747e011 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -26,6 +26,7 @@
 
 #ifdef CONFIG_SMP
 	.macro  get_fault_ip t1 t2
+	loadgp
 	addil LT%__per_cpu_offset,%r27
 	LDREG RT%__per_cpu_offset(%r1),\t1
 	/* t2 = smp_processor_id() */
@@ -40,14 +41,19 @@
 	LDREG RT%exception_data(%r1),\t1
 	/* t1 = this_cpu_ptr(&exception_data) */
 	add,l \t1,\t2,\t1
+	/* %r27 = t1->fault_gp - restore gp */
+	LDREG EXCDATA_GP(\t1), %r27
 	/* t1 = t1->fault_ip */
 	LDREG EXCDATA_IP(\t1), \t1
 	.endm
 #else
 	.macro  get_fault_ip t1 t2
+	loadgp
 	/* t1 = this_cpu_ptr(&exception_data) */
 	addil LT%exception_data,%r27
 	LDREG RT%exception_data(%r1),\t2
+	/* %r27 = t2->fault_gp - restore gp */
+	LDREG EXCDATA_GP(\t2), %r27
 	/* t1 = t2->fault_ip */
 	LDREG EXCDATA_IP(\t2), \t1
 	.endm
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 26fac9c671c9..16dbe81c97c9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -145,6 +145,7 @@ int fixup_exception(struct pt_regs *regs)
 	struct exception_data *d;
 	d = this_cpu_ptr(&exception_data);
 	d->fault_ip = regs->iaoq[0];
+	d->fault_gp = regs->gr[27];
 	d->fault_space = regs->isr;
 	d->fault_addr = regs->ior;
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b96877..6b3e7c6ee096 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -22,7 +22,7 @@
 #include <linux/swap.h>
 #include <linux/unistd.h>
 #include <linux/nodemask.h>	/* for node_online_map */
-#include <linux/pagemap.h>	/* for release_pages and page_cache_release */
+#include <linux/pagemap.h>	/* for release_pages */
 #include <linux/compat.h>
 
 #include <asm/pgalloc.h>
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df70aa20..2fc5d4db503c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
 SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d45605..cf12c580f6b2 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		380
+#define NR_syscalls		382
 
 #define __NR__exit __NR_exit
 
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 8dde19962a5b..f63c96cd3608 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
 					0x00000040
 
+/* Reserved - do not use		0x00000004 */
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
 
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d45b08..e9f5f41aa55a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
 #define __NR_membarrier		365
 #define __NR_mlock2		378
 #define __NR_copy_file_range	379
+#define __NR_preadv2		380
+#define __NR_pwritev2		381
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
+	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
 	unsigned char	pabit;		/* bit number (big-endian) */
 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-	{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
-	{0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
-	{CPU_FTR_CTRL, 0, 0, 0, 3, 0},
-	{CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
-	{CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
-	{0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
+	{CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
+	{CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
+	{CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
+	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
 	/*
-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-	 * which is 0 if the kernel doesn't support TM.
+	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+	 * we don't want to turn on TM here, so we use the *_COMP versions
+	 * which are 0 if the kernel doesn't support TM.
 	 */
-	{CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
+	{CPU_FTR_TM_COMP, 0, 0,
+	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 		if (bit ^ fp->invert) {
 			cur_cpu_spec->cpu_features |= fp->cpu_features;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features |= fp->mmu_features;
 		} else {
 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 		}
 	}
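
Note why every table entry had to be touched: the entries use positional initializers, so adding the cpu_user_ftrs2 column silently shifts every value that follows it, and the old REAL_LE entry already had one initializer too few. A defensive restyling with designated initializers would make such mistakes impossible; this sketch is purely hypothetical, upstream keeps the positional form:

	/* Hypothetical alternative, not what the patch does. */
	static struct ibm_pa_feature ibm_pa_features[] __initdata = {
		{ .cpu_features  = CPU_FTR_REAL_LE,
		  .cpu_user_ftrs = PPC_FEATURE_TRUE_LE,
		  .pabyte = 5, .pabit = 0, .invert = 0 },
	};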
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dfa863876778..6ca5f0525e57 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = SPUFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_fs_info = info;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index aad23e3dff2c..bf24ab188921 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -4,6 +4,9 @@ config MMU
 config ZONE_DMA
 	def_bool y
 
+config CPU_BIG_ENDIAN
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 0f3da2cb2bd6..255c7eec4481 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->uid = current_uid();
 	sbi->gid = current_gid();
 	sb->s_fs_info = sbi;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = HYPFS_MAGIC;
 	sb->s_op = &hypfs_s_ops;
 	if (hypfs_parse_options(data, sb))
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad9545b41..081b2ad99d73 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
 	spinlock_t list_lock;
 	struct list_head pgtable_list;
 	struct list_head gmap_list;
-	unsigned long asce_bits;
+	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
 	/* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d321469eeda7..c837b79b455d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 #endif
-	if (mm->context.asce_limit == 0) {
+	switch (mm->context.asce_limit) {
+	case 1UL << 42:
+		/*
+		 * forked 3-level task, fall through to set new asce with new
+		 * mm->pgd
+		 */
+	case 0:
 		/* context created by exec, set asce limit to 4TB */
-		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
 		mm->context.asce_limit = STACK_TOP_MAX;
-	} else if (mm->context.asce_limit == (1UL << 31)) {
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+		break;
+	case 1UL << 53:
+		/* forked 4-level task, set new asce with new mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+		break;
+	case 1UL << 31:
+		/* forked 2-level compat task, set new asce with new mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+		/* pgd_alloc() did not increase mm->nr_pmds */
 		mm_inc_nr_pmds(mm);
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	S390_lowcore.user_asce = mm->context.asce;
 	if (current->thread.mm_segment.ar4)
 		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+	S390_lowcore.user_asce = next->context.asce;
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
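
Every case in the new switch composes the full ASCE the same way: page-table origin plus length, user bits, and a type that encodes the number of levels. A minimal sketch of the shared pattern (the helper name is an assumption; the macros are the ones used in the hunks):

	/* Illustrative only: the composition each case repeats. */
	static unsigned long make_asce(struct mm_struct *mm,
				       unsigned long asce_type)
	{
		/* asce_type is _ASCE_TYPE_SEGMENT (2-level, compat),
		 * _ASCE_TYPE_REGION3 (3-level) or _ASCE_TYPE_REGION2
		 * (4-level) */
		return __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
		       _ASCE_USER_BITS | asce_type;
	}

Precomputing the complete ASCE in mm->context.asce is what lets set_user_asce() and the TLB-flush helpers below drop the old "asce_bits | __pa(pgd)" recomputation.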
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b6bfa169a002..535a46d46d28 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -44,7 +44,8 @@ struct zpci_fmb {
 	u64 rpcit_ops;
 	u64 dma_rbytes;
 	u64 dma_wbytes;
-} __packed __aligned(64);
+	u64 pad[2];
+} __packed __aligned(128);
 
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9b3d9b6099f2..da34cb6b1f3b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
52 return _REGION2_ENTRY_EMPTY; 52 return _REGION2_ENTRY_EMPTY;
53} 53}
54 54
55int crst_table_upgrade(struct mm_struct *, unsigned long limit); 55int crst_table_upgrade(struct mm_struct *);
56void crst_table_downgrade(struct mm_struct *, unsigned long limit); 56void crst_table_downgrade(struct mm_struct *);
57 57
58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
59{ 59{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d6fd22ea270d..18cdede1aeda 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ 175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
176 regs->psw.addr = new_psw; \ 176 regs->psw.addr = new_psw; \
177 regs->gprs[15] = new_stackp; \ 177 regs->gprs[15] = new_stackp; \
178 crst_table_downgrade(current->mm, 1UL << 31); \ 178 crst_table_downgrade(current->mm); \
179 execve_tail(); \ 179 execve_tail(); \
180} while (0) 180} while (0)
181 181
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
index 781a9cf9b002..e10f8337367b 100644
--- a/arch/s390/include/asm/seccomp.h
+++ b/arch/s390/include/asm/seccomp.h
@@ -13,4 +13,6 @@
 #define __NR_seccomp_exit_32	__NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif	/* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7c3eaa..a2e6ef32e054 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte((unsigned long) init_mm.pgd |
-				 init_mm.context.asce_bits);
+		__tlb_flush_idte(init_mm.context.asce);
 	else
 		__tlb_flush_global();
 }
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_TLB_LC)
-		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
-				       init_mm.context.asce_bits);
+		__tlb_flush_idte_local(init_mm.context.asce);
 	else
 		__tlb_flush_local();
 }
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
-				 mm->context.asce_bits);
+		__tlb_flush_asce(mm, mm->context.asce);
 	else
 		__tlb_flush_full(mm);
 }
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4549c964589..e5f50a7d2f4e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 		local_irq_restore(flags);
+		continue;
 	}
 	/* Check if the lock owner is running. */
 	if (first_diag && cpu_is_preempted(~owner)) {
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 69247b4dcc43..cace818d86eb 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -23,7 +23,7 @@
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	if ((from | to | len) & (PMD_SIZE - 1))
 		return -EINVAL;
 	if (len == 0 || from + len < from || to + len < to ||
-	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
 		return -EINVAL;
 
 	flush = 0;
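
The -1 matters because the limits are treated as highest usable addresses while from + len is one past the last mapped byte. A worked example with illustrative values (the concrete limit is assumed here for the arithmetic only):

	/* Segment whose last byte sits exactly at the limit. */
	unsigned long asce_end = (1UL << 42) - 1;	/* assumed inclusive max */
	unsigned long to  = asce_end - PMD_SIZE + 1;
	unsigned long len = PMD_SIZE;

	/* old: to + len     > asce_end  ->  asce_end + 1 > asce_end  -> -EINVAL */
	/* new: to + len - 1 > asce_end  ->  asce_end     > asce_end  -> accepted */

The wrap-around case (from + len overflowing to a small value) is still caught by the separate "from + len < from" test on the line above.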
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7b0451397d6..2489b2e917c8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION3_ENTRY_EMPTY;
 	}
-	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+	S390_lowcore.kernel_asce = init_mm.context.asce;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
 	vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 45c4daa49930..89cf09e5f168 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 	if (!(flags & MAP_FIXED))
 		addr = 0;
 	if ((addr + len) >= TASK_SIZE)
-		return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+		return crst_table_upgrade(current->mm);
 	return 0;
 }
 
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		return area;
 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+		rc = crst_table_upgrade(mm);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		return area;
 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+		rc = crst_table_upgrade(mm);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f6c3de26cda8..e8b5962ac12a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
 	__tlb_flush_local();
 }
 
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
 {
 	unsigned long *table, *pgd;
-	unsigned long entry;
-	int flush;
 
-	BUG_ON(limit > TASK_MAX_SIZE);
-	flush = 0;
-repeat:
+	/* upgrade should only happen from 3 to 4 levels */
+	BUG_ON(mm->context.asce_limit != (1UL << 42));
+
 	table = crst_table_alloc(mm);
 	if (!table)
 		return -ENOMEM;
+
 	spin_lock_bh(&mm->page_table_lock);
-	if (mm->context.asce_limit < limit) {
-		pgd = (unsigned long *) mm->pgd;
-		if (mm->context.asce_limit <= (1UL << 31)) {
-			entry = _REGION3_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-		} else {
-			entry = _REGION2_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 53;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION2;
-		}
-		crst_table_init(table, entry);
-		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-		mm->pgd = (pgd_t *) table;
-		mm->task_size = mm->context.asce_limit;
-		table = NULL;
-		flush = 1;
-	}
+	pgd = (unsigned long *) mm->pgd;
+	crst_table_init(table, _REGION2_ENTRY_EMPTY);
+	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+	mm->pgd = (pgd_t *) table;
+	mm->context.asce_limit = 1UL << 53;
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+	mm->task_size = mm->context.asce_limit;
 	spin_unlock_bh(&mm->page_table_lock);
-	if (table)
-		crst_table_free(mm, table);
-	if (mm->context.asce_limit < limit)
-		goto repeat;
-	if (flush)
-		on_each_cpu(__crst_table_upgrade, mm, 0);
+
+	on_each_cpu(__crst_table_upgrade, mm, 0);
 	return 0;
 }
 
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
+	/* downgrade should only happen from 3 to 2 levels (compat only) */
+	BUG_ON(mm->context.asce_limit != (1UL << 42));
+
 	if (current->active_mm == mm) {
 		clear_user_asce();
 		__tlb_flush_mm(mm);
 	}
-	while (mm->context.asce_limit > limit) {
-		pgd = mm->pgd;
-		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
-		case _REGION_ENTRY_TYPE_R2:
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-			break;
-		case _REGION_ENTRY_TYPE_R3:
-			mm->context.asce_limit = 1UL << 31;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_SEGMENT;
-			break;
-		default:
-			BUG();
-		}
-		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-		mm->task_size = mm->context.asce_limit;
-		crst_table_free(mm, (unsigned long *) pgd);
-	}
+
+	pgd = mm->pgd;
+	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+	mm->context.asce_limit = 1UL << 31;
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+	mm->task_size = mm->context.asce_limit;
+	crst_table_free(mm, (unsigned long *) pgd);
+
 	if (current->active_mm == mm)
 		set_user_asce(mm);
 }
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e595e89eac65..1ea8c07eab84 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 	zdev->dma_table = dma_alloc_cpu_table();
 	if (!zdev->dma_table) {
 		rc = -ENOMEM;
-		goto out_clean;
+		goto out;
 	}
 
 	/*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
 	if (!zdev->iommu_bitmap) {
 		rc = -ENOMEM;
-		goto out_reg;
+		goto free_dma_table;
 	}
 
 	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 				(u64) zdev->dma_table);
 	if (rc)
-		goto out_reg;
-	return 0;
+		goto free_bitmap;
 
-out_reg:
+	return 0;
+free_bitmap:
+	vfree(zdev->iommu_bitmap);
+	zdev->iommu_bitmap = NULL;
+free_dma_table:
 	dma_free_cpu_table(zdev->dma_table);
-out_clean:
+	zdev->dma_table = NULL;
+out:
 	return rc;
 }
 
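
The rework turns a single catch-all label into a ladder that unwinds in reverse allocation order and clears the pointers so later teardown cannot double-free. A generic sketch of the shape (all names here are hypothetical, kept only to show the idiom):

	/* Illustrative only: the unwind-ladder error-handling idiom. */
	static int demo_init(struct demo *d)
	{
		int rc;

		d->table = demo_alloc_table();
		if (!d->table) {
			rc = -ENOMEM;
			goto out;
		}
		d->bitmap = demo_alloc_bitmap();
		if (!d->bitmap) {
			rc = -ENOMEM;
			goto free_table;
		}
		rc = demo_register(d);
		if (rc)
			goto free_bitmap;

		return 0;
	free_bitmap:
		demo_free_bitmap(d->bitmap);
		d->bitmap = NULL;	/* guard against double free */
	free_table:
		demo_free_table(d->table);
		d->table = NULL;
	out:
		return rc;
	}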
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1baf0ba96242..c9f8bbdb1bf8 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -34,11 +34,6 @@ enum {
 DECLARE_PER_CPU(int, cpu_state);
 
 void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(const struct cpumask *mask);
-
-void local_timer_interrupt(void);
-void local_timer_setup(unsigned int cpu);
-void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index b0a282d65f6a..358e3f516ef6 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -17,7 +17,7 @@
 
 #define mc_capable()    (1)
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 4a298808789c..839612c8a0a0 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -73,8 +73,6 @@ static void shx3_prepare_cpus(unsigned int max_cpus)
 {
 	int i;
 
-	local_timer_setup(0);
-
 	BUILD_BUG_ON(SMP_MSG_NR >= 8);
 
 	for (i = 0; i < SMP_MSG_NR; i++)
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 772caffba22f..c82912a61d74 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 cpumask_t cpu_core_map[NR_CPUS];
 EXPORT_SYMBOL(cpu_core_map);
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+static cpumask_t cpu_coregroup_map(int cpu)
 {
 	/*
 	 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
@@ -30,7 +30,7 @@ static cpumask_t cpu_coregroup_map(unsigned int cpu)
 	return *cpu_possible_mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_core_map[cpu];
 }
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6915ff2bd996..8774cb23064f 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -26,7 +26,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -40,6 +40,18 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
+ifeq ($(CONFIG_RELOCATABLE),y)
+# If kernel is relocatable, build compressed kernel as PIE.
+ifeq ($(CONFIG_X86_32),y)
+LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+else
+# To build 64-bit compressed kernel as PIE, we disable relocation
+# overflow check to avoid relocation overflow error with a new linker
+# command-line option, -z noreloc-overflow.
+LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
+endif
+endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y	:= mkpiggy
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 8ef964ddc18e..0256064da8da 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -31,6 +31,34 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
+ * relocation to get the symbol address in PIC.  When the compressed x86
+ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
+ * relocations to their fixed symbol addresses.  However, when the
+ * compressed x86 kernel is loaded at a different address, it leads
+ * to the following load failure:
+ *
+ *   Failed to allocate space for phdrs
+ *
+ * during the decompression stage.
+ *
+ * If the compressed x86 kernel is relocatable at run-time, it should be
+ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
+ * Position Independent Executable (PIE) so that linker won't optimize
+ * R_386_GOT32X relocation to its fixed symbol address.  Older
+ * linkers generate R_386_32 relocations against locally defined symbols,
+ * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
+ * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
+ * R_386_32 relocations when relocating the kernel.  To generate
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * hidden:
+ */
+	.hidden _bss
+	.hidden _ebss
+	.hidden _got
+	.hidden _egot
+
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
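
The .hidden directives have a direct C-level counterpart, shown below for illustration (the patch has to use assembly because these symbols are linker-script-defined, not C objects). Hidden visibility tells the linker the symbol cannot be preempted, so in a PIE it can emit R_386_RELATIVE relocations, which the kernel's relocator handles, instead of R_386_32, which it does not:

	/* Illustrative C equivalent of ".hidden _bss" etc. */
	extern char _bss[]  __attribute__((visibility("hidden")));
	extern char _ebss[] __attribute__((visibility("hidden")));
	extern char _got[]  __attribute__((visibility("hidden")));
	extern char _egot[] __attribute__((visibility("hidden")));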
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index b0c0d16ef58d..86558a199139 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -33,6 +33,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * Locally defined symbols should be marked hidden:
+ */
+	.hidden _bss
+	.hidden _ebss
+	.hidden _got
+	.hidden _egot
+
 	__HEAD
 	.code32
 ENTRY(startup_32)
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a8a0224fa0f8..081255cea1ee 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 86a9bec18dab..bd3e8421b57c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids
 /*
  * AMD Performance Monitor K7 and later.
  */
-static const u64 amd_perfmon_event_map[] =
+static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
 	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 68fa55b4d42e..aff79884e17d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3639,6 +3639,7 @@ __init int intel_pmu_init(void)
 
 	case 78: /* 14nm Skylake Mobile */
 	case 94: /* 14nm Skylake Desktop */
+	case 85: /* 14nm Skylake Server */
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6c3b7c1780c9..1ca5d1e7d4f2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -63,7 +63,7 @@ static enum {
 
 #define LBR_PLM (LBR_KERNEL | LBR_USER)
 
-#define LBR_SEL_MASK	0x1ff	/* valid bits in LBR_SELECT */
+#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
 #define LBR_NOT_SUPP	-1	/* LBR filter not supported */
 #define LBR_IGN		0	/* ignored */
 
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
 	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
 	 * in suppress mode. So LBR_SELECT should be set to
 	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+	 * But the 10th bit LBR_CALL_STACK does not operate
+	 * in suppress mode.
 	 */
-	reg->config = mask ^ x86_pmu.lbr_sel_mask;
+	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
 
 	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
 	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
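
To make the masking concrete: bits 0..8 of LBR_SELECT are suppress bits, so a requested bit is applied by XOR-inverting it, while bit 9 (LBR_CALL_STACK) is a plain enable bit and must be excluded from the inversion. A small illustrative sketch with the constants copied as assumptions:

	/* Illustrative only: why LBR_CALL_STACK is masked out of the XOR. */
	#define DEMO_LBR_SEL_MASK	0x3ff		/* bits 0..9 valid */
	#define DEMO_LBR_CALL_STACK	(1 << 9)	/* enable, not suppress */

	static unsigned long demo_lbr_config(unsigned long mask)
	{
		/* old form "mask ^ DEMO_LBR_SEL_MASK" would also flip
		 * bit 9, disabling call-stack mode whenever requested */
		return mask ^ (DEMO_LBR_SEL_MASK & ~DEMO_LBR_CALL_STACK);
	}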
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 6af7cf71d6b2..09a77dbc73c9 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void)
 	struct dev_ext_attribute *de_attrs;
 	struct attribute **attrs;
 	size_t size;
+	u64 reg;
 	int ret;
 	long i;
 
+	if (boot_cpu_has(X86_FEATURE_VMX)) {
+		/*
+		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
+		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
+		 * post-VMXON.
+		 */
+		rdmsrl(MSR_IA32_VMX_MISC, reg);
+		if (reg & BIT(14))
+			pt_pmu.vmx = true;
+	}
+
 	attrs = NULL;
 
 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event)
 
 	reg |= (event->attr.config & PT_CONFIG_MASK);
 
+	event->hw.config = reg;
 	wrmsrl(MSR_IA32_RTIT_CTL, reg);
 }
 
-static void pt_config_start(bool start)
+static void pt_config_stop(struct perf_event *event)
 {
-	u64 ctl;
+	u64 ctl = READ_ONCE(event->hw.config);
+
+	/* may be already stopped by a PMI */
+	if (!(ctl & RTIT_CTL_TRACEEN))
+		return;
 
-	rdmsrl(MSR_IA32_RTIT_CTL, ctl);
-	if (start)
-		ctl |= RTIT_CTL_TRACEEN;
-	else
-		ctl &= ~RTIT_CTL_TRACEEN;
+	ctl &= ~RTIT_CTL_TRACEEN;
 	wrmsrl(MSR_IA32_RTIT_CTL, ctl);
 
+	WRITE_ONCE(event->hw.config, ctl);
+
 	/*
 	 * A wrmsr that disables trace generation serializes other PT
 	 * registers and causes all data packets to be written to memory,
@@ -291,8 +306,7 @@ static void pt_config_start(bool start)
 	 * The below WMB, separating data store and aux_head store matches
 	 * the consumer's RMB that separates aux_head load and data load.
 	 */
-	if (!start)
-		wmb();
+	wmb();
 }
 
 static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void)
 	if (!ACCESS_ONCE(pt->handle_nmi))
 		return;
 
-	pt_config_start(false);
+	/*
+	 * If VMX is on and PT does not support it, don't touch anything.
+	 */
+	if (READ_ONCE(pt->vmx_on))
+		return;
 
 	if (!event)
 		return;
 
+	pt_config_stop(event);
+
 	buf = perf_get_aux(&pt->handle);
 	if (!buf)
 		return;
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void)
 	}
 }
 
+void intel_pt_handle_vmx(int on)
+{
+	struct pt *pt = this_cpu_ptr(&pt_ctx);
+	struct perf_event *event;
+	unsigned long flags;
+
+	/* PT plays nice with VMX, do nothing */
+	if (pt_pmu.vmx)
+		return;
+
+	/*
+	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
+	 * sure to not try to set it while VMX is on. Disable
+	 * interrupts to avoid racing with pmu callbacks;
+	 * concurrent PMI should be handled fine.
+	 */
+	local_irq_save(flags);
+	WRITE_ONCE(pt->vmx_on, on);
+
+	if (on) {
+		/* prevent pt_config_stop() from writing RTIT_CTL */
+		event = pt->handle.event;
+		if (event)
+			event->hw.config = 0;
+	}
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
+
 /*
  * PMU callbacks
  */
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode)
 	struct pt *pt = this_cpu_ptr(&pt_ctx);
 	struct pt_buffer *buf = perf_get_aux(&pt->handle);
 
+	if (READ_ONCE(pt->vmx_on))
+		return;
+
 	if (!buf || pt_buffer_is_full(buf, pt)) {
 		event->hw.state = PERF_HES_STOPPED;
 		return;
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode)
1014 * see comment in intel_pt_interrupt(). 1066 * see comment in intel_pt_interrupt().
1015 */ 1067 */
1016 ACCESS_ONCE(pt->handle_nmi) = 0; 1068 ACCESS_ONCE(pt->handle_nmi) = 0;
1017 pt_config_start(false); 1069
1070 pt_config_stop(event);
1018 1071
1019 if (event->hw.state == PERF_HES_STOPPED) 1072 if (event->hw.state == PERF_HES_STOPPED)
1020 return; 1073 return;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 336878a5d205..3abb5f5cccc8 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -65,6 +65,7 @@ enum pt_capabilities {
 struct pt_pmu {
 	struct pmu		pmu;
 	u32			caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+	bool			vmx;
 };
 
 /**
@@ -107,10 +108,12 @@ struct pt_buffer {
  * struct pt - per-cpu pt context
  * @handle:	perf output handle
  * @handle_nmi:	do handle PT PMI on this cpu, there's an active event
+ * @vmx_on:	1 if VMX is ON on this cpu
  */
 struct pt {
 	struct perf_output_handle handle;
 	int			handle_nmi;
+	int			vmx_on;
 };
 
 #endif /* __INTEL_PT_H__ */
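
The pt.c/pt.h hunks above stop doing read-modify-write on MSR_IA32_RTIT_CTL and instead track TraceEn in a software shadow (event->hw.config) plus a per-cpu vmx_on flag, because VMXON clears RTIT_CTL.TraceEn behind the driver's back and a read-back could re-arm tracing inside a VMX window. A minimal user-space sketch of the shadow-register pattern follows; every name here is illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

#define CTL_TRACEEN (1u << 0)

static uint32_t hw_ctl;     /* stands in for MSR_IA32_RTIT_CTL */
static uint32_t shadow_ctl; /* plays the role of event->hw.config */

static void config_stop(void)
{
	uint32_t ctl = shadow_ctl;

	if (!(ctl & CTL_TRACEEN))   /* may already be stopped, e.g. by a PMI */
		return;

	ctl &= ~CTL_TRACEEN;
	hw_ctl = ctl;               /* one blind write, never a read-back */
	shadow_ctl = ctl;
}

int main(void)
{
	shadow_ctl = hw_ctl = CTL_TRACEEN;
	hw_ctl = 0;        /* models VMXON clearing the enable bit behind our back */
	config_stop();     /* shadow still says "enabled", so we stop cleanly */
	printf("hw=%u shadow=%u\n", (unsigned)hw_ctl, (unsigned)shadow_ctl);
	return 0;
}

The point of the sketch is the single shadow-guided write: once VMX may have scribbled on the live register, nothing ever trusts a read of it again.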
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 70c93f9b03ac..1705c9d75e44 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void)
 		break;
 	case 60: /* Haswell */
 	case 69: /* Haswell-Celeron */
+	case 70: /* Haswell GT3e */
 	case 61: /* Broadwell */
 	case 71: /* Broadwell-H */
 		rapl_cntr_mask = RAPL_IDX_HSW;
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index f8a29d2c97b0..e6a8613fbfb0 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h> 5#include <asm-generic/hugetlb.h>
6 6
7#define hugepages_supported() cpu_has_pse
7 8
8static inline int is_hugepage_only_range(struct mm_struct *mm, 9static inline int is_hugepage_only_range(struct mm_struct *mm,
9 unsigned long addr, 10 unsigned long addr,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f62a9f37f79f..b7e394485a5f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,7 +43,7 @@
43 43
44#define KVM_PIO_PAGE_OFFSET 1 44#define KVM_PIO_PAGE_OFFSET 1
45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2 45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
46#define KVM_HALT_POLL_NS_DEFAULT 500000 46#define KVM_HALT_POLL_NS_DEFAULT 400000
47 47
48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
49 49
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 426e946ed0c0..5b3c9a55f51c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -167,6 +167,14 @@
167#define MSR_PKG_C9_RESIDENCY 0x00000631 167#define MSR_PKG_C9_RESIDENCY 0x00000631
168#define MSR_PKG_C10_RESIDENCY 0x00000632 168#define MSR_PKG_C10_RESIDENCY 0x00000632
169 169
170/* Interrupt Response Limit */
171#define MSR_PKGC3_IRTL 0x0000060a
172#define MSR_PKGC6_IRTL 0x0000060b
173#define MSR_PKGC7_IRTL 0x0000060c
174#define MSR_PKGC8_IRTL 0x00000633
175#define MSR_PKGC9_IRTL 0x00000634
176#define MSR_PKGC10_IRTL 0x00000635
177
170/* Run Time Average Power Limiting (RAPL) Interface */ 178/* Run Time Average Power Limiting (RAPL) Interface */
171 179
172#define MSR_RAPL_POWER_UNIT 0x00000606 180#define MSR_RAPL_POWER_UNIT 0x00000606
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5a2ed3ed2f26..f353061bba1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void) { }
 static inline void perf_check_microcode(void) { }
 #endif
 
+#ifdef CONFIG_CPU_SUP_INTEL
+ extern void intel_pt_handle_vmx(int on);
+#endif
+
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ad59d70bcb1a..ef495511f019 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
256 struct irq_desc *desc; 256 struct irq_desc *desc;
257 int cpu, vector; 257 int cpu, vector;
258 258
259 BUG_ON(!data->cfg.vector); 259 if (!data->cfg.vector)
260 return;
260 261
261 vector = data->cfg.vector; 262 vector = data->cfg.vector;
262 for_each_cpu_and(cpu, data->domain, cpu_online_mask) 263 for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 0a850100c594..2658e2af74ec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
29void mce_gen_pool_process(void) 29void mce_gen_pool_process(void)
30{ 30{
31 struct llist_node *head; 31 struct llist_node *head;
32 struct mce_evt_llist *node; 32 struct mce_evt_llist *node, *tmp;
33 struct mce *mce; 33 struct mce *mce;
34 34
35 head = llist_del_all(&mce_event_llist); 35 head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
37 return; 37 return;
38 38
39 head = llist_reverse_order(head); 39 head = llist_reverse_order(head);
40 llist_for_each_entry(node, head, llnode) { 40 llist_for_each_entry_safe(node, tmp, head, llnode) {
41 mce = &node->mce; 41 mce = &node->mce;
42 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); 42 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
43 gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node)); 43 gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
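
The switch to llist_for_each_entry_safe() matters because the loop body frees the node it is standing on with gen_pool_free(); the plain iterator would then read the next pointer out of freed memory. The _safe variant caches the next pointer before the body runs. A self-contained analog with a hand-rolled singly linked list (not the kernel's llist API):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

/* Cache 'next' before the body may free 'pos' -- the same idea as
 * llist_for_each_entry_safe(). */
#define for_each_safe(pos, tmp, head) \
	for (pos = (head); pos && ((tmp = pos->next), 1); pos = tmp)

int main(void)
{
	struct node *head = NULL, *pos, *tmp;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}
	for_each_safe(pos, tmp, head) {
		printf("%d\n", pos->val);
		free(pos);	/* safe: the loop never touches pos again */
	}
	return 0;
}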
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 4e7c6933691c..10c11b4da31d 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
152 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 152 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
153}; 153};
154 154
155static unsigned char hv_get_nmi_reason(void)
156{
157 return 0;
158}
159
155static void __init ms_hyperv_init_platform(void) 160static void __init ms_hyperv_init_platform(void)
156{ 161{
157 /* 162 /*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
191 machine_ops.crash_shutdown = hv_machine_crash_shutdown; 196 machine_ops.crash_shutdown = hv_machine_crash_shutdown;
192#endif 197#endif
193 mark_tsc_unstable("running on Hyper-V"); 198 mark_tsc_unstable("running on Hyper-V");
199
200 /*
201 * Generation 2 instances don't support reading the NMI status from
202 * 0x61 port.
203 */
204 if (efi_enabled(EFI_BOOT))
205 x86_platform.get_nmi_reason = hv_get_nmi_reason;
194} 206}
195 207
196const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 208const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 54cdbd2003fe..af1112980dd4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -389,12 +389,6 @@ default_entry:
389 /* Make changes effective */ 389 /* Make changes effective */
390 wrmsr 390 wrmsr
391 391
392 /*
393 * And make sure that all the mappings we set up have NX set from
394 * the beginning.
395 */
396 orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
397
398enable_paging: 392enable_paging:
399 393
400/* 394/*
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8efb839948e5..bbbaa802d13e 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -534,6 +534,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
534 do_cpuid_1_ent(&entry[i], function, idx); 534 do_cpuid_1_ent(&entry[i], function, idx);
535 if (idx == 1) { 535 if (idx == 1) {
536 entry[i].eax &= kvm_cpuid_D_1_eax_x86_features; 536 entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
537 cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
537 entry[i].ebx = 0; 538 entry[i].ebx = 0;
538 if (entry[i].eax & (F(XSAVES)|F(XSAVEC))) 539 if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
539 entry[i].ebx = 540 entry[i].ebx =
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5ff3485acb60..01bd7b7a6866 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1116 break; 1116 break;
1117 case HVCALL_POST_MESSAGE: 1117 case HVCALL_POST_MESSAGE:
1118 case HVCALL_SIGNAL_EVENT: 1118 case HVCALL_SIGNAL_EVENT:
1119 /* don't bother userspace if it has no way to handle it */
1120 if (!vcpu_to_synic(vcpu)->active) {
1121 res = HV_STATUS_INVALID_HYPERCALL_CODE;
1122 break;
1123 }
1119 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 1124 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1120 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; 1125 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1121 vcpu->run->hyperv.u.hcall.input = param; 1126 vcpu->run->hyperv.u.hcall.input = param;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 443d2a57ad3d..1a2da0e5a373 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1369 1369
1370 hrtimer_start(&apic->lapic_timer.timer, 1370 hrtimer_start(&apic->lapic_timer.timer,
1371 ktime_add_ns(now, apic->lapic_timer.period), 1371 ktime_add_ns(now, apic->lapic_timer.period),
1372 HRTIMER_MODE_ABS); 1372 HRTIMER_MODE_ABS_PINNED);
1373 1373
1374 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" 1374 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
1375 PRIx64 ", " 1375 PRIx64 ", "
@@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1402 expire = ktime_add_ns(now, ns); 1402 expire = ktime_add_ns(now, ns);
1403 expire = ktime_sub_ns(expire, lapic_timer_advance_ns); 1403 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1404 hrtimer_start(&apic->lapic_timer.timer, 1404 hrtimer_start(&apic->lapic_timer.timer,
1405 expire, HRTIMER_MODE_ABS); 1405 expire, HRTIMER_MODE_ABS_PINNED);
1406 } else 1406 } else
1407 apic_timer_expired(apic); 1407 apic_timer_expired(apic);
1408 1408
@@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
1868 apic->vcpu = vcpu; 1868 apic->vcpu = vcpu;
1869 1869
1870 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, 1870 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
1871 HRTIMER_MODE_ABS); 1871 HRTIMER_MODE_ABS_PINNED);
1872 apic->lapic_timer.timer.function = apic_timer_fn; 1872 apic->lapic_timer.timer.function = apic_timer_fn;
1873 1873
1874 /* 1874 /*
@@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2003 2003
2004 timer = &vcpu->arch.apic->lapic_timer.timer; 2004 timer = &vcpu->arch.apic->lapic_timer.timer;
2005 if (hrtimer_cancel(timer)) 2005 if (hrtimer_cancel(timer))
2006 hrtimer_start_expires(timer, HRTIMER_MODE_ABS); 2006 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
2007} 2007}
2008 2008
2009/* 2009/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d097ef1..1ff4dbb73fb7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
557 !is_writable_pte(new_spte)) 557 !is_writable_pte(new_spte))
558 ret = true; 558 ret = true;
559 559
560 if (!shadow_accessed_mask) 560 if (!shadow_accessed_mask) {
561 /*
562 * We don't set page dirty when dropping non-writable spte.
563 * So do it now if the new spte is becoming non-writable.
564 */
565 if (ret)
566 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
561 return ret; 567 return ret;
568 }
562 569
563 /* 570 /*
564 * Flush TLB when accessed/dirty bits are changed in the page tables, 571 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
605 612
606 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 613 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
607 kvm_set_pfn_accessed(pfn); 614 kvm_set_pfn_accessed(pfn);
608 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) 615 if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
616 PT_WRITABLE_MASK))
609 kvm_set_pfn_dirty(pfn); 617 kvm_set_pfn_dirty(pfn);
610 return 1; 618 return 1;
611} 619}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index b70df72e2b33..66b33b96a31b 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -173,10 +173,9 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
173 int index = (pfec >> 1) + 173 int index = (pfec >> 1) +
174 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); 174 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
175 bool fault = (mmu->permissions[index] >> pte_access) & 1; 175 bool fault = (mmu->permissions[index] >> pte_access) & 1;
176 u32 errcode = PFERR_PRESENT_MASK;
176 177
177 WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK)); 178 WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
178 pfec |= PFERR_PRESENT_MASK;
179
180 if (unlikely(mmu->pkru_mask)) { 179 if (unlikely(mmu->pkru_mask)) {
181 u32 pkru_bits, offset; 180 u32 pkru_bits, offset;
182 181
@@ -189,15 +188,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
189 pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3; 188 pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
190 189
191 /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */ 190 /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
192 offset = pfec - 1 + 191 offset = (pfec & ~1) +
193 ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT)); 192 ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
194 193
195 pkru_bits &= mmu->pkru_mask >> offset; 194 pkru_bits &= mmu->pkru_mask >> offset;
196 pfec |= -pkru_bits & PFERR_PK_MASK; 195 errcode |= -pkru_bits & PFERR_PK_MASK;
197 fault |= (pkru_bits != 0); 196 fault |= (pkru_bits != 0);
198 } 197 }
199 198
200 return -(uint32_t)fault & pfec; 199 return -(u32)fault & errcode;
201} 200}
202 201
203void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm); 202void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
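
The permission_fault() rewrite keeps the caller's pfec argument untouched and accumulates the result in a local errcode; the return statement then uses a branchless select: negating the bool fault yields either all-zero or all-one bits, so the AND returns 0 or the full error code without a branch. A runnable demonstration of the idiom:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* -(uint32_t)flag is 0x00000000 when flag == 0 and 0xffffffff when
 * flag == 1, so the AND selects 0 or 'code' branchlessly. */
static uint32_t select_code(bool fault, uint32_t code)
{
	return -(uint32_t)fault & code;
}

int main(void)
{
	printf("%#x\n", (unsigned)select_code(false, 0x11));  /* prints 0 */
	printf("%#x\n", (unsigned)select_code(true,  0x11));  /* prints 0x11 */
	return 0;
}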
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1d971c7553c3..bc019f70e0b6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -360,7 +360,7 @@ retry_walk:
360 goto error; 360 goto error;
361 361
362 if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { 362 if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
363 errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK; 363 errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
364 goto error; 364 goto error;
365 } 365 }
366 366
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee1c8a93871c..133679d520af 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void)
3103 3103
3104static void kvm_cpu_vmxon(u64 addr) 3104static void kvm_cpu_vmxon(u64 addr)
3105{ 3105{
3106 intel_pt_handle_vmx(1);
3107
3106 asm volatile (ASM_VMX_VMXON_RAX 3108 asm volatile (ASM_VMX_VMXON_RAX
3107 : : "a"(&addr), "m"(addr) 3109 : : "a"(&addr), "m"(addr)
3108 : "memory", "cc"); 3110 : "memory", "cc");
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void)
3172static void kvm_cpu_vmxoff(void) 3174static void kvm_cpu_vmxoff(void)
3173{ 3175{
3174 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); 3176 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
3177
3178 intel_pt_handle_vmx(0);
3175} 3179}
3176 3180
3177static void hardware_disable(void) 3181static void hardware_disable(void)
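
KVM now brackets the VMXON/VMXOFF instructions with intel_pt_handle_vmx() calls, so the PT driver knows exactly when RTIT_CTL becomes unsafe to touch. The ordering is the whole contract: announce before turning VMX on, announce only after turning it off. A sketch of the bracketing pattern with stubbed operations (the stubs are illustrative, not KVM's code):

#include <stdio.h>

static int vmx_on;	/* models the per-cpu pt->vmx_on flag */

static void pt_handle_vmx(int on)
{
	vmx_on = on;	/* the real code also zeroes the MSR shadow on entry */
}

static void do_vmxon(void)  { puts("VMXON");  }
static void do_vmxoff(void) { puts("VMXOFF"); }

static void cpu_vmxon(void)
{
	pt_handle_vmx(1);	/* announce first: PT must stop touching RTIT_CTL */
	do_vmxon();
}

static void cpu_vmxoff(void)
{
	do_vmxoff();
	pt_handle_vmx(0);	/* announce last: only now is RTIT_CTL safe again */
}

int main(void)
{
	cpu_vmxon();
	cpu_vmxoff();
	return 0;
}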
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 742d0f7d3556..9b7798c7b210 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
700 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512) 700 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
701 return 1; 701 return 1;
702 } 702 }
703 kvm_put_guest_xcr0(vcpu);
704 vcpu->arch.xcr0 = xcr0; 703 vcpu->arch.xcr0 = xcr0;
705 704
706 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND) 705 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6095,12 +6094,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
6095 } 6094 }
6096 6095
6097 /* try to inject new event if pending */ 6096 /* try to inject new event if pending */
6098 if (vcpu->arch.nmi_pending) { 6097 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
6099 if (kvm_x86_ops->nmi_allowed(vcpu)) { 6098 --vcpu->arch.nmi_pending;
6100 --vcpu->arch.nmi_pending; 6099 vcpu->arch.nmi_injected = true;
6101 vcpu->arch.nmi_injected = true; 6100 kvm_x86_ops->set_nmi(vcpu);
6102 kvm_x86_ops->set_nmi(vcpu);
6103 }
6104 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 6101 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
6105 /* 6102 /*
6106 * Because interrupts can be injected asynchronously, we are 6103 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6566,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6569 if (inject_pending_event(vcpu, req_int_win) != 0) 6566 if (inject_pending_event(vcpu, req_int_win) != 0)
6570 req_immediate_exit = true; 6567 req_immediate_exit = true;
6571 /* enable NMI/IRQ window open exits if needed */ 6568 /* enable NMI/IRQ window open exits if needed */
6572 else if (vcpu->arch.nmi_pending) 6569 else {
6573 kvm_x86_ops->enable_nmi_window(vcpu); 6570 if (vcpu->arch.nmi_pending)
6574 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) 6571 kvm_x86_ops->enable_nmi_window(vcpu);
6575 kvm_x86_ops->enable_irq_window(vcpu); 6572 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6573 kvm_x86_ops->enable_irq_window(vcpu);
6574 }
6576 6575
6577 if (kvm_lapic_enabled(vcpu)) { 6576 if (kvm_lapic_enabled(vcpu)) {
6578 update_cr8_intercept(vcpu); 6577 update_cr8_intercept(vcpu);
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6590 kvm_x86_ops->prepare_guest_switch(vcpu); 6589 kvm_x86_ops->prepare_guest_switch(vcpu);
6591 if (vcpu->fpu_active) 6590 if (vcpu->fpu_active)
6592 kvm_load_guest_fpu(vcpu); 6591 kvm_load_guest_fpu(vcpu);
6593 kvm_load_guest_xcr0(vcpu);
6594
6595 vcpu->mode = IN_GUEST_MODE; 6592 vcpu->mode = IN_GUEST_MODE;
6596 6593
6597 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 6594 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6618 goto cancel_injection; 6615 goto cancel_injection;
6619 } 6616 }
6620 6617
6618 kvm_load_guest_xcr0(vcpu);
6619
6621 if (req_immediate_exit) 6620 if (req_immediate_exit)
6622 smp_send_reschedule(vcpu->cpu); 6621 smp_send_reschedule(vcpu->cpu);
6623 6622
@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6667 vcpu->mode = OUTSIDE_GUEST_MODE; 6666 vcpu->mode = OUTSIDE_GUEST_MODE;
6668 smp_wmb(); 6667 smp_wmb();
6669 6668
6669 kvm_put_guest_xcr0(vcpu);
6670
6670 /* Interrupt is enabled by handle_external_intr() */ 6671 /* Interrupt is enabled by handle_external_intr() */
6671 kvm_x86_ops->handle_external_intr(vcpu); 6672 kvm_x86_ops->handle_external_intr(vcpu);
6672 6673
@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7314 * and assume host would use all available bits. 7315 * and assume host would use all available bits.
7315 * Guest xcr0 would be loaded later. 7316 * Guest xcr0 would be loaded later.
7316 */ 7317 */
7317 kvm_put_guest_xcr0(vcpu);
7318 vcpu->guest_fpu_loaded = 1; 7318 vcpu->guest_fpu_loaded = 1;
7319 __kernel_fpu_begin(); 7319 __kernel_fpu_begin();
7320 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); 7320 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7323 7323
7324void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 7324void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7325{ 7325{
7326 kvm_put_guest_xcr0(vcpu);
7327
7328 if (!vcpu->guest_fpu_loaded) { 7326 if (!vcpu->guest_fpu_loaded) {
7329 vcpu->fpu_counter = 0; 7327 vcpu->fpu_counter = 0;
7330 return; 7328 return;
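
The net effect of the xcr0 hunks above is a narrower window: the guest's XCR0 is loaded immediately before entering the guest and restored right after leaving guest mode, instead of riding along with FPU load/put, so host code running around the switch always sees the host XCR0. A toy model of that invariant, with invented stub names:

#include <stdio.h>

static const char *xcr0 = "host";

static void load_guest_xcr0(void) { xcr0 = "guest"; }
static void put_guest_xcr0(void)  { xcr0 = "host";  }
static void run_guest(void)       { printf("in guest, xcr0=%s\n", xcr0); }

/* Guest XCR0 is live only across the actual guest run, so any host
 * code before or after (interrupts, scheduler) sees host XCR0. */
static void vcpu_enter_guest(void)
{
	printf("pre-entry,  xcr0=%s\n", xcr0);
	load_guest_xcr0();
	run_guest();
	put_guest_xcr0();
	printf("post-exit,  xcr0=%s\n", xcr0);
}

int main(void)
{
	vcpu_enter_guest();
	return 0;
}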
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 8bea84724a7d..f65a33f505b6 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -32,8 +32,9 @@ early_param("noexec", noexec_setup);
32 32
33void x86_configure_nx(void) 33void x86_configure_nx(void)
34{ 34{
35 /* If disable_nx is set, clear NX on all new mappings going forward. */ 35 if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
36 if (disable_nx) 36 __supported_pte_mask |= _PAGE_NX;
37 else
37 __supported_pte_mask &= ~_PAGE_NX; 38 __supported_pte_mask &= ~_PAGE_NX;
38} 39}
39 40
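
The rewritten x86_configure_nx() reduces the decision to one positive predicate: NX ends up in __supported_pte_mask only when the CPU actually has the feature and noexec was not disabled on the command line; every other combination clears the bit. A table-driven sketch of the same logic:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_NX (1ull << 63)

/* Mirrors the new x86_configure_nx(): enable NX only when the hardware
 * supports it and it was not disabled at boot. */
static unsigned long long configure_nx(bool cpu_has_nx, bool disable_nx)
{
	unsigned long long mask = 0;

	if (cpu_has_nx && !disable_nx)
		mask |= PAGE_NX;
	else
		mask &= ~PAGE_NX;
	return mask;
}

int main(void)
{
	for (int hw = 0; hw < 2; hw++)
		for (int off = 0; off < 2; off++)
			printf("cpu_has_nx=%d disable_nx=%d -> NX=%d\n",
			       hw, off, configure_nx(hw, off) != 0);
	return 0;
}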
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index abf4901c917b..db52a7fafcc2 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -66,7 +66,7 @@ static u32 xen_apic_read(u32 reg)
66 66
67 ret = HYPERVISOR_platform_op(&op); 67 ret = HYPERVISOR_platform_op(&op);
68 if (ret) 68 if (ret)
69 return 0; 69 op.u.pcpu_info.apic_id = BAD_APICID;
70 70
71 return op.u.pcpu_info.apic_id << 24; 71 return op.u.pcpu_info.apic_id << 24;
72} 72}
@@ -142,6 +142,14 @@ static void xen_silent_inquire(int apicid)
142{ 142{
143} 143}
144 144
145static int xen_cpu_present_to_apicid(int cpu)
146{
147 if (cpu_present(cpu))
148 return xen_get_apic_id(xen_apic_read(APIC_ID));
149 else
150 return BAD_APICID;
151}
152
145static struct apic xen_pv_apic = { 153static struct apic xen_pv_apic = {
146 .name = "Xen PV", 154 .name = "Xen PV",
147 .probe = xen_apic_probe_pv, 155 .probe = xen_apic_probe_pv,
@@ -162,7 +170,7 @@ static struct apic xen_pv_apic = {
162 170
163 .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */ 171 .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
164 .setup_apic_routing = NULL, 172 .setup_apic_routing = NULL,
165 .cpu_present_to_apicid = default_cpu_present_to_apicid, 173 .cpu_present_to_apicid = xen_cpu_present_to_apicid,
166 .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */ 174 .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */
167 .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */ 175 .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */
168 .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */ 176 .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3c6d17fd423a..719cf291dcdf 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -545,6 +545,8 @@ static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
545 * data back is to call: 545 * data back is to call:
546 */ 546 */
547 tick_nohz_idle_enter(); 547 tick_nohz_idle_enter();
548
549 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
548} 550}
549 551
550#else /* !CONFIG_HOTPLUG_CPU */ 552#else /* !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c6e1dd..f42e78de1e10 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
27 27
28static void xen_qlock_kick(int cpu) 28static void xen_qlock_kick(int cpu)
29{ 29{
30 int irq = per_cpu(lock_kicker_irq, cpu);
31
32 /* Don't kick if the target's kicker interrupt is not initialized. */
33 if (irq == -1)
34 return;
35
30 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); 36 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
31} 37}
32 38
diff --git a/block/bio.c b/block/bio.c
index f124a0a624fc..807d25e466ec 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1339 * release the pages we didn't map into the bio, if any 1339 * release the pages we didn't map into the bio, if any
1340 */ 1340 */
1341 while (j < page_limit) 1341 while (j < page_limit)
1342 page_cache_release(pages[j++]); 1342 put_page(pages[j++]);
1343 } 1343 }
1344 1344
1345 kfree(pages); 1345 kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1365 for (j = 0; j < nr_pages; j++) { 1365 for (j = 0; j < nr_pages; j++) {
1366 if (!pages[j]) 1366 if (!pages[j])
1367 break; 1367 break;
1368 page_cache_release(pages[j]); 1368 put_page(pages[j]);
1369 } 1369 }
1370 out: 1370 out:
1371 kfree(pages); 1371 kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
1385 if (bio_data_dir(bio) == READ) 1385 if (bio_data_dir(bio) == READ)
1386 set_page_dirty_lock(bvec->bv_page); 1386 set_page_dirty_lock(bvec->bv_page);
1387 1387
1388 page_cache_release(bvec->bv_page); 1388 put_page(bvec->bv_page);
1389 } 1389 }
1390 1390
1391 bio_put(bio); 1391 bio_put(bio);
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
1615 * the BIO and the offending pages and re-dirty the pages in process context. 1615 * the BIO and the offending pages and re-dirty the pages in process context.
1616 * 1616 *
1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1618 * here on. It will run one page_cache_release() against each page and will 1618 * here on. It will run one put_page() against each page and will run one
1619 * run one bio_put() against the BIO. 1619 * bio_put() against the BIO.
1620 */ 1620 */
1621 1621
1622static void bio_dirty_fn(struct work_struct *work); 1622static void bio_dirty_fn(struct work_struct *work);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
1658 struct page *page = bvec->bv_page; 1658 struct page *page = bvec->bv_page;
1659 1659
1660 if (PageDirty(page) || PageCompound(page)) { 1660 if (PageDirty(page) || PageCompound(page)) {
1661 page_cache_release(page); 1661 put_page(page);
1662 bvec->bv_page = NULL; 1662 bvec->bv_page = NULL;
1663 } else { 1663 } else {
1664 nr_clean_pages++; 1664 nr_clean_pages++;
diff --git a/block/blk-core.c b/block/blk-core.c
index 827f8badd143..b60537b2c35b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
706 goto fail_id; 706 goto fail_id;
707 707
708 q->backing_dev_info.ra_pages = 708 q->backing_dev_info.ra_pages =
709 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 709 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
710 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; 710 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
711 q->backing_dev_info.name = "block"; 711 q->backing_dev_info.name = "block";
712 q->node = node_id; 712 q->node = node_id;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666aafd1..331e4eee0dda 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
239 struct queue_limits *limits = &q->limits; 239 struct queue_limits *limits = &q->limits;
240 unsigned int max_sectors; 240 unsigned int max_sectors;
241 241
242 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { 242 if ((max_hw_sectors << 9) < PAGE_SIZE) {
243 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 243 max_hw_sectors = 1 << (PAGE_SHIFT - 9);
244 printk(KERN_INFO "%s: set to minimum %d\n", 244 printk(KERN_INFO "%s: set to minimum %d\n",
245 __func__, max_hw_sectors); 245 __func__, max_hw_sectors);
246 } 246 }
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
329 **/ 329 **/
330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) 330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
331{ 331{
332 if (max_size < PAGE_CACHE_SIZE) { 332 if (max_size < PAGE_SIZE) {
333 max_size = PAGE_CACHE_SIZE; 333 max_size = PAGE_SIZE;
334 printk(KERN_INFO "%s: set to minimum %d\n", 334 printk(KERN_INFO "%s: set to minimum %d\n",
335 __func__, max_size); 335 __func__, max_size);
336 } 336 }
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
760 **/ 760 **/
761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) 761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
762{ 762{
763 if (mask < PAGE_CACHE_SIZE - 1) { 763 if (mask < PAGE_SIZE - 1) {
764 mask = PAGE_CACHE_SIZE - 1; 764 mask = PAGE_SIZE - 1;
765 printk(KERN_INFO "%s: set to minimum %lx\n", 765 printk(KERN_INFO "%s: set to minimum %lx\n",
766 __func__, mask); 766 __func__, mask);
767 } 767 }
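
All three blk-settings.c helpers clamp a driver-supplied limit to at least one page before accepting it; the hunks themselves only swap the PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT aliases for PAGE_SIZE/PAGE_SHIFT, which they had always been defined equal to (the aliases are removed tree-wide in this merge). The clamp pattern in isolation, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Same shape as blk_queue_max_segment_size(): refuse limits smaller
 * than one page and warn, mirroring the kernel's printk. */
static unsigned int clamp_max_segment_size(unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		fprintf(stderr, "%s: set to minimum %u\n", __func__, max_size);
	}
	return max_size;
}

int main(void)
{
	printf("%u\n", clamp_max_segment_size(512));    /* clamped to 4096 */
	printf("%u\n", clamp_max_segment_size(65536));  /* unchanged */
	return 0;
}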
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index dd93763057ce..995b58d46ed1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
 	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-					(PAGE_CACHE_SHIFT - 10);
+					(PAGE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
 }
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
 	return ret;
 }
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
-	return queue_var_show(PAGE_CACHE_SIZE, (page));
+	return queue_var_show(PAGE_SIZE, (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
 		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+			page_kb = 1 << (PAGE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
 	if (ret < 0)
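
The readahead sysfs conversions above rely on shift arithmetic: with PAGE_SHIFT of 12, PAGE_SHIFT - 10 is 2, so pages << 2 yields KiB and kb >> 2 yields pages. A quick numeric check of both directions:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages; the conversion shift is PAGE_SHIFT - 10 */

int main(void)
{
	unsigned long ra_pages = 32;
	unsigned long ra_kb = ra_pages << (PAGE_SHIFT - 10);   /* 128 KiB */
	unsigned long back  = ra_kb >> (PAGE_SHIFT - 10);      /* 32 pages */

	printf("%lu pages = %lu KiB = %lu pages\n", ra_pages, ra_kb, back);
	return 0;
}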
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e3c591dd8f19..4a349787bc62 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4075 * idle timer unplug to continue working. 4075 * idle timer unplug to continue working.
4076 */ 4076 */
4077 if (cfq_cfqq_wait_request(cfqq)) { 4077 if (cfq_cfqq_wait_request(cfqq)) {
4078 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 4078 if (blk_rq_bytes(rq) > PAGE_SIZE ||
4079 cfqd->busy_queues > 1) { 4079 cfqd->busy_queues > 1) {
4080 cfq_del_timer(cfqd, cfqq); 4080 cfq_del_timer(cfqd, cfqq);
4081 cfq_clear_cfqq_wait_request(cfqq); 4081 cfq_clear_cfqq_wait_request(cfqq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f678c733df40..556826ac7cb4 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
710 return -EINVAL; 710 return -EINVAL;
711 bdi = blk_get_backing_dev_info(bdev); 711 bdi = blk_get_backing_dev_info(bdev);
712 return compat_put_long(arg, 712 return compat_put_long(arg,
713 (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 713 (bdi->ra_pages * PAGE_SIZE) / 512);
714 case BLKROGET: /* compatible */ 714 case BLKROGET: /* compatible */
715 return compat_put_int(arg, bdev_read_only(bdev) != 0); 715 return compat_put_int(arg, bdev_read_only(bdev) != 0);
716 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ 716 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
729 if (!capable(CAP_SYS_ADMIN)) 729 if (!capable(CAP_SYS_ADMIN))
730 return -EACCES; 730 return -EACCES;
731 bdi = blk_get_backing_dev_info(bdev); 731 bdi = blk_get_backing_dev_info(bdev);
732 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 732 bdi->ra_pages = (arg * 512) / PAGE_SIZE;
733 return 0; 733 return 0;
734 case BLKGETSIZE: 734 case BLKGETSIZE:
735 size = i_size_read(bdev->bd_inode); 735 size = i_size_read(bdev->bd_inode);
diff --git a/block/ioctl.c b/block/ioctl.c
index d8996bbd7f12..4ff1f92f89ca 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
550 if (!arg) 550 if (!arg)
551 return -EINVAL; 551 return -EINVAL;
552 bdi = blk_get_backing_dev_info(bdev); 552 bdi = blk_get_backing_dev_info(bdev);
553 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 553 return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
554 case BLKROGET: 554 case BLKROGET:
555 return put_int(arg, bdev_read_only(bdev) != 0); 555 return put_int(arg, bdev_read_only(bdev) != 0);
556 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ 556 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
578 if(!capable(CAP_SYS_ADMIN)) 578 if(!capable(CAP_SYS_ADMIN))
579 return -EACCES; 579 return -EACCES;
580 bdi = blk_get_backing_dev_info(bdev); 580 bdi = blk_get_backing_dev_info(bdev);
581 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 581 bdi->ra_pages = (arg * 512) / PAGE_SIZE;
582 return 0; 582 return 0;
583 case BLKBSZSET: 583 case BLKBSZSET:
584 return blkdev_bszset(bdev, mode, argp); 584 return blkdev_bszset(bdev, mode, argp);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5d8701941054..d7eb77e1e3a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
361 goto out_del; 361 goto out_del;
362 } 362 }
363 363
364 err = hd_ref_init(p);
365 if (err) {
366 if (flags & ADDPART_FLAG_WHOLEDISK)
367 goto out_remove_file;
368 goto out_del;
369 }
370
364 /* everything is up and running, commence */ 371 /* everything is up and running, commence */
365 rcu_assign_pointer(ptbl->part[partno], p); 372 rcu_assign_pointer(ptbl->part[partno], p);
366 373
367 /* suppress uevent if the disk suppresses it */ 374 /* suppress uevent if the disk suppresses it */
368 if (!dev_get_uevent_suppress(ddev)) 375 if (!dev_get_uevent_suppress(ddev))
369 kobject_uevent(&pdev->kobj, KOBJ_ADD); 376 kobject_uevent(&pdev->kobj, KOBJ_ADD);
370 377 return p;
371 if (!hd_ref_init(p))
372 return p;
373 378
374out_free_info: 379out_free_info:
375 free_part_info(p); 380 free_part_info(p);
@@ -378,6 +383,8 @@ out_free_stats:
378out_free: 383out_free:
379 kfree(p); 384 kfree(p);
380 return ERR_PTR(err); 385 return ERR_PTR(err);
386out_remove_file:
387 device_remove_file(pdev, &dev_attr_whole_disk);
381out_del: 388out_del:
382 kobject_put(p->holder_dir); 389 kobject_put(p->holder_dir);
383 device_del(pdev); 390 device_del(pdev);
@@ -566,8 +573,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
566{ 573{
567 struct address_space *mapping = bdev->bd_inode->i_mapping; 574 struct address_space *mapping = bdev->bd_inode->i_mapping;
568 575
569 return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), 576 return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
570 NULL); 577 NULL);
571} 578}
572 579
573unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) 580unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +591,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
584 if (PageError(page)) 591 if (PageError(page))
585 goto fail; 592 goto fail;
586 p->v = page; 593 p->v = page;
587 return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9); 594 return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
588fail: 595fail:
589 page_cache_release(page); 596 put_page(page);
590 } 597 }
591 p->v = NULL; 598 p->v = NULL;
592 return NULL; 599 return NULL;
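
The add_partition() fix follows the kernel's standard goto-unwind shape: hd_ref_init() now runs before the partition is published, and the new out_remove_file label undoes the whole-disk attribute file, keeping teardown in exact reverse order of setup. The idiom in miniature, with invented step names:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name) { printf("undo %s\n", name); }

/* Each later failure jumps to a label that unwinds everything done so
 * far, newest first -- the same shape as add_partition()'s error path. */
static int setup(int fail_at)
{
	if (step("a", fail_at == 1)) goto out;
	if (step("b", fail_at == 2)) goto out_a;
	if (step("c", fail_at == 3)) goto out_b;
	return 0;

out_b:	undo("b");
out_a:	undo("a");
out:	return -1;
}

int main(void)
{
	printf("setup(3) -> %d\n", setup(3));
	return 0;
}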
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 1cea67d43e1d..ead8dc0d084e 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
387 req_ctx->child_req.src = req->src; 387 req_ctx->child_req.src = req->src;
388 req_ctx->child_req.src_len = req->src_len; 388 req_ctx->child_req.src_len = req->src_len;
389 req_ctx->child_req.dst = req_ctx->out_sg; 389 req_ctx->child_req.dst = req_ctx->out_sg;
390 req_ctx->child_req.dst_len = ctx->key_size - 1; 390 req_ctx->child_req.dst_len = ctx->key_size ;
391 391
392 req_ctx->out_buf = kmalloc(ctx->key_size - 1, 392 req_ctx->out_buf = kmalloc(ctx->key_size,
393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
394 GFP_KERNEL : GFP_ATOMIC); 394 GFP_KERNEL : GFP_ATOMIC);
395 if (!req_ctx->out_buf) 395 if (!req_ctx->out_buf)
396 return -ENOMEM; 396 return -ENOMEM;
397 397
398 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 398 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
399 ctx->key_size - 1, NULL); 399 ctx->key_size, NULL);
400 400
401 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 401 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
595 req_ctx->child_req.src = req->src; 595 req_ctx->child_req.src = req->src;
596 req_ctx->child_req.src_len = req->src_len; 596 req_ctx->child_req.src_len = req->src_len;
597 req_ctx->child_req.dst = req_ctx->out_sg; 597 req_ctx->child_req.dst = req_ctx->out_sg;
598 req_ctx->child_req.dst_len = ctx->key_size - 1; 598 req_ctx->child_req.dst_len = ctx->key_size;
599 599
600 req_ctx->out_buf = kmalloc(ctx->key_size - 1, 600 req_ctx->out_buf = kmalloc(ctx->key_size,
601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
602 GFP_KERNEL : GFP_ATOMIC); 602 GFP_KERNEL : GFP_ATOMIC);
603 if (!req_ctx->out_buf) 603 if (!req_ctx->out_buf)
604 return -ENOMEM; 604 return -ENOMEM;
605 605
606 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 606 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
607 ctx->key_size - 1, NULL); 607 ctx->key_size, NULL);
608 608
609 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 609 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
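
The pkcs1pad change grows the intermediate buffer from key_size - 1 to key_size bytes, presumably because the padding code cannot assume the RSA backend strips the leading zero byte: for a k-byte modulus the raw result m < n always fits in k bytes, but only fits in k - 1 when its top byte happens to be zero. The sizing rule as a sketch:

#include <stdio.h>

/* For a k-byte modulus, the RSA primitive's output needs up to k bytes;
 * sizing the destination at k - 1 only works when the implementation is
 * guaranteed to strip a leading zero, which it is not. */
static unsigned needed_dst_len(unsigned key_size_bytes)
{
	return key_size_bytes;	/* not key_size_bytes - 1 */
}

int main(void)
{
	unsigned k = 256;	/* 2048-bit key */
	printf("dst_len must be %u bytes\n", needed_dst_len(k));
	return 0;
}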
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9ab847a..5fb7718f256c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
246 return -EEXIST; 246 return -EEXIST;
247 } 247 }
248 dev->power.wakeup = ws; 248 dev->power.wakeup = ws;
249 if (dev->power.wakeirq)
250 device_wakeup_attach_irq(dev, dev->power.wakeirq);
249 spin_unlock_irq(&dev->power.lock); 251 spin_unlock_irq(&dev->power.lock);
250 return 0; 252 return 0;
251} 253}
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
136 return false; 136 return false;
137} 137}
138 138
139#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
140static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 139static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
141 struct bcma_device *core) 140 struct bcma_device *core)
142{ 141{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
184 struct of_phandle_args out_irq; 183 struct of_phandle_args out_irq;
185 int ret; 184 int ret;
186 185
187 if (!parent || !parent->dev.of_node) 186 if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
188 return 0; 187 return 0;
189 188
190 ret = bcma_of_irq_parse(parent, core, &out_irq, num); 189 ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
202{ 201{
203 struct device_node *node; 202 struct device_node *node;
204 203
204 if (!IS_ENABLED(CONFIG_OF_IRQ))
205 return;
206
205 node = bcma_of_find_child_device(parent, core); 207 node = bcma_of_find_child_device(parent, core);
206 if (node) 208 if (node)
207 core->dev.of_node = node; 209 core->dev.of_node = node;
208 210
209 core->irq = bcma_of_get_irq(parent, core, 0); 211 core->irq = bcma_of_get_irq(parent, core, 0);
210} 212}
211#else
212static void bcma_of_fill_device(struct platform_device *parent,
213 struct bcma_device *core)
214{
215}
216static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
217 struct bcma_device *core, int num)
218{
219 return 0;
220}
221#endif /* CONFIG_OF */
222 213
223unsigned int bcma_core_irq(struct bcma_device *core, int num) 214unsigned int bcma_core_irq(struct bcma_device *core, int num)
224{ 215{
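
Replacing the #ifdef block and its stub definitions with IS_ENABLED(CONFIG_OF_IRQ) checks leaves one implementation that compiles on every configuration: both branches are always type-checked, and the optimizer discards the disabled one because the predicate is a compile-time constant. A user-space stand-in for the idiom:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(): a constant the optimizer
 * folds, so the guarded branch is compiled but then eliminated. */
#ifndef CONFIG_OF_IRQ
#define CONFIG_OF_IRQ 0
#endif

static unsigned int get_irq(int has_node)
{
	if (!CONFIG_OF_IRQ || !has_node)	/* both arms always built */
		return 0;
	return 42;	/* hypothetical parsed-from-DT interrupt number */
}

int main(void)
{
	printf("irq = %u\n", get_irq(1));
	return 0;
}

Build with -DCONFIG_OF_IRQ=1 to model the enabled configuration; either way the source always compiles, which is the advantage over #ifdef'd-out code.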
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
397 WARN_ON(d->flags & DEVFL_UP); 397 WARN_ON(d->flags & DEVFL_UP);
398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); 398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
399 q->backing_dev_info.name = "aoe"; 399 q->backing_dev_info.name = "aoe";
400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; 400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
401 d->bufpool = mp; 401 d->bufpool = mp;
402 d->blkq = gd->queue = q; 402 d->blkq = gd->queue = q;
403 q->queuedata = d; 403 q->queuedata = d;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f7ecc287d733..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
374 struct page *page, int rw) 374 struct page *page, int rw)
375{ 375{
376 struct brd_device *brd = bdev->bd_disk->private_data; 376 struct brd_device *brd = bdev->bd_disk->private_data;
377 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); 377 int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
378 page_endio(page, rw & WRITE, err); 378 page_endio(page, rw & WRITE, err);
379 return err; 379 return err;
380} 380}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4cad75..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@ struct bm_extent {
1327#endif 1327#endif
1328#endif 1328#endif
1329 1329
1330/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 1330/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
1331 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 1331 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
1332 * Since we may live in a mixed-platform cluster, 1332 * Since we may live in a mixed-platform cluster,
1333 * we limit us to a platform agnostic constant here for now. 1333 * we limit us to a platform agnostic constant here for now.
1334 * A followup commit may allow even bigger BIO sizes, 1334 * A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 226eb0c9f0fb..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
1178 blk_queue_max_hw_sectors(q, max_hw_sectors); 1178 blk_queue_max_hw_sectors(q, max_hw_sectors);
1179 /* This is the workaround for "bio would need to, but cannot, be split" */ 1179 /* This is the workaround for "bio would need to, but cannot, be split" */
1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1181 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 1181 blk_queue_segment_boundary(q, PAGE_SIZE-1);
1182 1182
1183 if (b) { 1183 if (b) {
1184 struct drbd_connection *connection = first_peer_device(device)->connection; 1184 struct drbd_connection *connection = first_peer_device(device)->connection;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
488 bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 488 bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
489 iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, 489 iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
490 bio_segments(bio), blk_rq_bytes(cmd->rq)); 490 bio_segments(bio), blk_rq_bytes(cmd->rq));
491 /*
492 * This bio may be started from the middle of the 'bvec'
493 * because of bio splitting, so offset from the bvec must
494 * be passed to iov iterator
495 */
496 iter.iov_offset = bio->bi_iter.bi_bvec_done;
491 497
492 cmd->iocb.ki_pos = pos; 498 cmd->iocb.ki_pos = pos;
493 cmd->iocb.ki_filp = file; 499 cmd->iocb.ki_filp = file;
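
The loop.c fix seeds iter.iov_offset with bi_bvec_done so that a bio split in the middle of a bio_vec resumes from the correct byte of its first segment instead of re-reading that segment from offset zero. A model of iterating segments with an intra-element starting offset:

#include <stdio.h>

struct seg { const char *base; size_t len; };

/* Walk segments starting 'done' bytes into the first one -- the role
 * that iov_offset/bi_bvec_done plays for a split bio. */
static void walk(struct seg *segs, int nsegs, size_t done)
{
	for (int i = 0; i < nsegs; i++) {
		size_t off = (i == 0) ? done : 0;
		printf("%.*s", (int)(segs[i].len - off), segs[i].base + off);
	}
	putchar('\n');
}

int main(void)
{
	struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
	walk(segs, 2, 0);	/* full bio:  "hello world" */
	walk(segs, 2, 3);	/* split bio: "lo world" */
	return 0;
}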
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c6234428607..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
 				u8 *order, u64 *snap_size);
 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 		u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
@@ -1953,7 +1952,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
 
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
-					  GFP_ATOMIC);
+					  GFP_NOIO);
 	if (!osd_req)
 		return NULL;	/* ENOMEM */
 
@@ -2002,7 +2001,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
 	rbd_dev = img_request->rbd_dev;
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
-						false, GFP_ATOMIC);
+						false, GFP_NOIO);
 	if (!osd_req)
 		return NULL;	/* ENOMEM */
 
@@ -2504,7 +2503,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 					bio_chain_clone_range(&bio_list,
 							      &bio_offset,
 							      clone_size,
-							      GFP_ATOMIC);
+							      GFP_NOIO);
 			if (!obj_request->bio_list)
 				goto out_unwind;
 		} else if (type == OBJ_REQUEST_PAGES) {
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
 	struct rbd_device *rbd_dev = (struct rbd_device *)data;
 	int ret;
 
-	if (!rbd_dev)
-		return;
-
 	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
 		rbd_dev->header_name, (unsigned long long)notify_id,
 		(unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
 
 	ceph_osdc_cancel_event(rbd_dev->watch_event);
 	rbd_dev->watch_event = NULL;
+
+	dout("%s flushing notifies\n", __func__);
+	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 }
 
 /*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
 {
 	sector_t size;
-	bool removing;
 
 	/*
-	 * Don't hold the lock while doing disk operations,
-	 * or lock ordering will conflict with the bdev mutex via:
-	 * rbd_add() -> blkdev_get() -> rbd_open()
+	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+	 * try to update its size. If REMOVING is set, updating size
+	 * is just useless work since the device can't be opened.
 	 */
-	spin_lock_irq(&rbd_dev->lock);
-	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
-	spin_unlock_irq(&rbd_dev->lock);
-	/*
-	 * If the device is being removed, rbd_dev->disk has
-	 * been destroyed, so don't try to update its size
-	 */
-	if (!removing) {
+	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
 		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
 		dout("setting size to %llu sectors", (unsigned long long)size);
 		set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 		__le64 features;
 		__le64 incompat;
 	} __attribute__ ((packed)) features_buf = { 0 };
-	u64 incompat;
+	u64 unsup;
 	int ret;
 
 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 	if (ret < sizeof (features_buf))
 		return -ERANGE;
 
-	incompat = le64_to_cpu(features_buf.incompat);
-	if (incompat & ~RBD_FEATURES_SUPPORTED)
+	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+	if (unsup) {
+		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+			 unsup);
 		return -ENXIO;
+	}
 
 	*snap_features = le64_to_cpu(features_buf.features);
 
@@ -5187,6 +5182,10 @@ out_err:
 	return ret;
 }
 
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 {
 	int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
 	ret = rbd_dev_id_get(rbd_dev);
 	if (ret)
-		return ret;
+		goto err_out_unlock;
 
 	BUILD_BUG_ON(DEV_NAME_LEN
 			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 	/* Everything's ready. Announce the disk to the world. */
 
 	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-	add_disk(rbd_dev->disk);
+	up_write(&rbd_dev->header_rwsem);
 
+	add_disk(rbd_dev->disk);
 	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
 		(unsigned long long) rbd_dev->mapping.size);
 
@@ -5252,6 +5252,8 @@ err_out_blkdev:
 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
 err_out_id:
 	rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+	up_write(&rbd_dev->header_rwsem);
 	return ret;
 }
 
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
 	spec = NULL;		/* rbd_dev now owns this */
 	rbd_opts = NULL;	/* rbd_dev now owns this */
 
+	down_write(&rbd_dev->header_rwsem);
 	rc = rbd_dev_image_probe(rbd_dev, 0);
 	if (rc < 0)
 		goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
 	return rc;
 
 err_out_rbd_dev:
+	up_write(&rbd_dev->header_rwsem);
 	rbd_dev_destroy(rbd_dev);
 err_out_client:
 	rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
 		return ret;
 
 	rbd_dev_header_unwatch_sync(rbd_dev);
-	/*
-	 * flush remaining watch callbacks - these must be complete
-	 * before the osd_client is shutdown
-	 */
-	dout("%s: flushing notifies", __func__);
-	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 
 	/*
 	 * Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c2e52864bb03..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
972 } 972 }
973 } 973 }
974 974
975 pr_err("invalid dram address 0x%x\n", phyaddr); 975 pr_err("invalid dram address %pa\n", &phyaddr);
976 return -EINVAL; 976 return -EINVAL;
977} 977}
978EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info); 978EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
108 108
109 for (i = 0; i < ARRAY_SIZE(priv->bank); i++) { 109 for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
110 for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) { 110 for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
111 if (priv->bank[i].end > priv->bank[j].base || 111 if (priv->bank[i].end > priv->bank[j].base &&
112 priv->bank[i].base < priv->bank[j].end) { 112 priv->bank[i].base < priv->bank[j].end) {
113 dev_err(priv->dev, 113 dev_err(priv->dev,
114 "region overlap between bank%d and bank%d\n", 114 "region overlap between bank%d and bank%d\n",
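The corrected predicate is the standard half-open interval overlap test: [a.base, a.end) and [b.base, b.end) intersect only when both comparisons hold, which is why '||' had to become '&&'. A self-contained check:

#include <assert.h>
#include <stdio.h>

struct bank { unsigned long base, end; };  /* half-open: [base, end) */

static int overlaps(const struct bank *a, const struct bank *b)
{
	/* the corrected predicate: both conditions must hold */
	return a->end > b->base && a->base < b->end;
}

int main(void)
{
	struct bank a = { 0x000, 0x100 };
	struct bank b = { 0x100, 0x200 };  /* adjacent, not overlapping */
	struct bank c = { 0x080, 0x180 };  /* overlaps both */

	assert(!overlaps(&a, &b));  /* '||' would wrongly report overlap */
	assert(overlaps(&a, &c) && overlaps(&c, &b));
	printf("overlap checks pass\n");
	return 0;
}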
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ca9c40309757..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/hw_random.h> 14#include <linux/hw_random.h>
15#include <linux/of.h>
15 16
16#define RNG_CTRL 0x00 17#define RNG_CTRL 0x00
17#define RNG_EN (1 << 0) 18#define RNG_EN (1 << 0)
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecafdeaea..c407c47a3232 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
42 42
43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350, 43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
44 32, clocksource_mmio_readl_up); 44 32, clocksource_mmio_readl_up);
45 if (!ret) { 45 if (ret) {
46 pr_err("%s: registration failed\n", np->full_name); 46 pr_err("%s: registration failed\n", np->full_name);
47 return; 47 return;
48 } 48 }
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..5f8dbe640a20 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
4 * Copyright (C) 2014 Linaro. 4 * Copyright (C) 2014 Linaro.
5 * Viresh Kumar <viresh.kumar@linaro.org> 5 * Viresh Kumar <viresh.kumar@linaro.org>
6 * 6 *
7 * The OPP code in function set_target() is reused from
8 * drivers/cpufreq/omap-cpufreq.c
9 *
10 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b591b3..e93405f0eac4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1491{ 1491{
1492 unsigned int new_freq; 1492 unsigned int new_freq;
1493 1493
1494 if (cpufreq_suspended)
1495 return 0;
1496
1494 new_freq = cpufreq_driver->get(policy->cpu); 1497 new_freq = cpufreq_driver->get(policy->cpu);
1495 if (!new_freq) 1498 if (!new_freq)
1496 return 0; 1499 return 0;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; 193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
194 j_cdbs->prev_cpu_wall = cur_wall_time; 194 j_cdbs->prev_cpu_wall = cur_wall_time;
195 195
196 if (cur_idle_time <= j_cdbs->prev_cpu_idle) { 196 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
197 idle_time = 0; 197 j_cdbs->prev_cpu_idle = cur_idle_time;
198 } else {
199 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
200 j_cdbs->prev_cpu_idle = cur_idle_time;
201 }
202 198
203 if (ignore_nice) { 199 if (ignore_nice) {
204 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 200 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
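The simplified governor code computes idle_time as a plain unsigned difference and always advances prev_cpu_idle; since the counters are u64, the subtraction is modulo 2^64 and still yields the right delta across a wrap, which is presumably why the explicit clamp could go. A quick demonstration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* counter wrapped: prev near the top of the range, cur past zero */
	uint64_t prev = UINT64_MAX - 5;
	uint64_t cur  = 4;

	/* unsigned subtraction is modulo 2^64, so the delta is still 10 */
	uint64_t delta = cur - prev;

	printf("delta = %" PRIu64 "\n", delta);  /* prints 10 */
	return 0;
}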
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b644526fd59..f502d5b90c25 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
64 return ret; 64 return ret;
65} 65}
66 66
67/**
68 * struct sample - Store performance sample
69 * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual
70 * performance during last sample period
71 * @busy_scaled: Scaled busy value which is used to calculate next
72 * P state. This can be different than core_pct_busy
73 * to account for cpu idle period
74 * @aperf: Difference of actual performance frequency clock count
75 * read from APERF MSR between last and current sample
76 * @mperf: Difference of maximum performance frequency clock count
77 * read from MPERF MSR between last and current sample
78 * @tsc: Difference of time stamp counter between last and
79 * current sample
80 * @freq: Effective frequency calculated from APERF/MPERF
81 * @time: Current time from scheduler
82 *
 83 * This structure is used in the cpudata structure to store performance sample
 84 * data for choosing the next P state.
85 */
67struct sample { 86struct sample {
68 int32_t core_pct_busy; 87 int32_t core_pct_busy;
69 int32_t busy_scaled; 88 int32_t busy_scaled;
@@ -74,6 +93,20 @@ struct sample {
74 u64 time; 93 u64 time;
75}; 94};
76 95
96/**
97 * struct pstate_data - Store P state data
98 * @current_pstate: Current requested P state
99 * @min_pstate: Min P state possible for this platform
100 * @max_pstate: Max P state possible for this platform
 101 * @max_pstate_physical: This is the physical max P state for a processor
102 * This can be higher than the max_pstate which can
103 * be limited by platform thermal design power limits
104 * @scaling: Scaling factor to convert frequency to cpufreq
105 * frequency units
106 * @turbo_pstate: Max Turbo P state possible for this platform
107 *
108 * Stores the per cpu model P state limits and current P state.
109 */
77struct pstate_data { 110struct pstate_data {
78 int current_pstate; 111 int current_pstate;
79 int min_pstate; 112 int min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
83 int turbo_pstate; 116 int turbo_pstate;
84}; 117};
85 118
119/**
120 * struct vid_data - Stores voltage information data
121 * @min: VID data for this platform corresponding to
122 * the lowest P state
123 * @max: VID data corresponding to the highest P State.
124 * @turbo: VID data for turbo P state
125 * @ratio: Ratio of (vid max - vid min) /
126 * (max P state - Min P State)
127 *
 128 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 129 * This data is used on Atom platforms, where in addition to the target
 130 * P state, the voltage data needs to be specified to select the next P state.
131 */
86struct vid_data { 132struct vid_data {
87 int min; 133 int min;
88 int max; 134 int max;
@@ -90,6 +136,18 @@ struct vid_data {
90 int32_t ratio; 136 int32_t ratio;
91}; 137};
92 138
139/**
140 * struct _pid - Stores PID data
141 * @setpoint: Target set point for busyness or performance
142 * @integral: Storage for accumulated error values
143 * @p_gain: PID proportional gain
144 * @i_gain: PID integral gain
145 * @d_gain: PID derivative gain
146 * @deadband: PID deadband
147 * @last_err: Last error storage for integral part of PID calculation
148 *
149 * Stores PID coefficients and last error for PID controller.
150 */
93struct _pid { 151struct _pid {
94 int setpoint; 152 int setpoint;
95 int32_t integral; 153 int32_t integral;
@@ -100,6 +158,23 @@ struct _pid {
100 int32_t last_err; 158 int32_t last_err;
101}; 159};
102 160
161/**
162 * struct cpudata - Per CPU instance data storage
163 * @cpu: CPU number for this instance data
164 * @update_util: CPUFreq utility callback information
165 * @pstate: Stores P state limits for this CPU
166 * @vid: Stores VID limits for this CPU
167 * @pid: Stores PID parameters for this CPU
168 * @last_sample_time: Last Sample time
169 * @prev_aperf: Last APERF value read from APERF MSR
170 * @prev_mperf: Last MPERF value read from MPERF MSR
171 * @prev_tsc: Last timestamp counter (TSC) value
 172 * @prev_cummulative_iowait: IO wait time difference between the last and
 173 *			current sample
174 * @sample: Storage for storing last Sample data
175 *
176 * This structure stores per CPU instance data for all CPUs.
177 */
103struct cpudata { 178struct cpudata {
104 int cpu; 179 int cpu;
105 180
@@ -118,6 +193,19 @@ struct cpudata {
118}; 193};
119 194
120static struct cpudata **all_cpu_data; 195static struct cpudata **all_cpu_data;
196
197/**
 198 * struct pstate_adjust_policy - Stores static PID configuration data
199 * @sample_rate_ms: PID calculation sample rate in ms
200 * @sample_rate_ns: Sample rate calculation in ns
201 * @deadband: PID deadband
202 * @setpoint: PID Setpoint
203 * @p_gain_pct: PID proportional gain
204 * @i_gain_pct: PID integral gain
205 * @d_gain_pct: PID derivative gain
206 *
207 * Stores per CPU model static PID configuration data.
208 */
121struct pstate_adjust_policy { 209struct pstate_adjust_policy {
122 int sample_rate_ms; 210 int sample_rate_ms;
123 s64 sample_rate_ns; 211 s64 sample_rate_ns;
@@ -128,6 +216,20 @@ struct pstate_adjust_policy {
128 int i_gain_pct; 216 int i_gain_pct;
129}; 217};
130 218
219/**
220 * struct pstate_funcs - Per CPU model specific callbacks
221 * @get_max: Callback to get maximum non turbo effective P state
222 * @get_max_physical: Callback to get maximum non turbo physical P state
223 * @get_min: Callback to get minimum P state
224 * @get_turbo: Callback to get turbo P state
225 * @get_scaling: Callback to get frequency scaling factor
226 * @get_val: Callback to convert P state to actual MSR write value
227 * @get_vid: Callback to get VID data for Atom platforms
 228 * @get_target_pstate: Callback to calculate the next P state to use
229 *
 230 * Core and Atom CPU models have different ways to get P state limits. This
231 * structure is used to store those callbacks.
232 */
131struct pstate_funcs { 233struct pstate_funcs {
132 int (*get_max)(void); 234 int (*get_max)(void);
133 int (*get_max_physical)(void); 235 int (*get_max_physical)(void);
@@ -139,6 +241,11 @@ struct pstate_funcs {
139 int32_t (*get_target_pstate)(struct cpudata *); 241 int32_t (*get_target_pstate)(struct cpudata *);
140}; 242};
141 243
244/**
 245 * struct cpu_defaults - Per CPU model default config data
246 * @pid_policy: PID config data
247 * @funcs: Callback function data
248 */
142struct cpu_defaults { 249struct cpu_defaults {
143 struct pstate_adjust_policy pid_policy; 250 struct pstate_adjust_policy pid_policy;
144 struct pstate_funcs funcs; 251 struct pstate_funcs funcs;
@@ -151,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
151static struct pstate_funcs pstate_funcs; 258static struct pstate_funcs pstate_funcs;
152static int hwp_active; 259static int hwp_active;
153 260
261
262/**
263 * struct perf_limits - Store user and policy limits
264 * @no_turbo: User requested turbo state from intel_pstate sysfs
 265 * @turbo_disabled:	Platform turbo status, either read from the
 266 *			MSR_IA32_MISC_ENABLE MSR or set when the maximum
 267 *			available P state matches the maximum turbo P state
 268 * @max_perf_pct:	Effective maximum performance limit in percentage; this
 269 *			is the minimum of the limit enforced by the cpufreq
 270 *			policy and the user limit set via intel_pstate sysfs
 271 * @min_perf_pct:	Effective minimum performance limit in percentage; this
 272 *			is the maximum of the limit enforced by the cpufreq
 273 *			policy and the user limit set via intel_pstate sysfs
 274 * @max_perf:		A scaled value between 0 and 255 for max_perf_pct;
 275 *			this value is used to limit the max P state
 276 * @min_perf:		A scaled value between 0 and 255 for min_perf_pct;
 277 *			this value is used to limit the min P state
278 * @max_policy_pct: The maximum performance in percentage enforced by
279 * cpufreq setpolicy interface
280 * @max_sysfs_pct: The maximum performance in percentage enforced by
281 * intel pstate sysfs interface
282 * @min_policy_pct: The minimum performance in percentage enforced by
283 * cpufreq setpolicy interface
284 * @min_sysfs_pct: The minimum performance in percentage enforced by
285 * intel pstate sysfs interface
286 *
287 * Storage for user and policy defined limits.
288 */
154struct perf_limits { 289struct perf_limits {
155 int no_turbo; 290 int no_turbo;
156 int turbo_disabled; 291 int turbo_disabled;
@@ -678,6 +813,11 @@ static int core_get_max_pstate(void)
678 if (err) 813 if (err)
679 goto skip_tar; 814 goto skip_tar;
680 815
816 /* For level 1 and 2, bits[23:16] contain the ratio */
817 if (tdp_ctrl)
818 tdp_ratio >>= 16;
819
820 tdp_ratio &= 0xff; /* ratios are only 8 bits long */
681 if (tdp_ratio - 1 == tar) { 821 if (tdp_ratio - 1 == tar) {
682 max_pstate = tar; 822 max_pstate = tar;
683 pr_debug("max_pstate=TAC %x\n", max_pstate); 823 pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -910,7 +1050,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
910 cpu->prev_aperf = aperf; 1050 cpu->prev_aperf = aperf;
911 cpu->prev_mperf = mperf; 1051 cpu->prev_mperf = mperf;
912 cpu->prev_tsc = tsc; 1052 cpu->prev_tsc = tsc;
913 return true; 1053 /*
1054 * First time this function is invoked in a given cycle, all of the
1055 * previous sample data fields are equal to zero or stale and they must
1056 * be populated with meaningful numbers for things to work, so assume
1057 * that sample.time will always be reset before setting the utilization
1058 * update hook and make the caller skip the sample then.
1059 */
1060 return !!cpu->last_sample_time;
914} 1061}
915 1062
916static inline int32_t get_avg_frequency(struct cpudata *cpu) 1063static inline int32_t get_avg_frequency(struct cpudata *cpu)
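Per the comment in the hunk above, intel_pstate_sample() now reports whether a meaningful previous sample exists, and intel_pstate_set_update_util_hook() clears sample.time so the first invocation only primes the prev_* fields. A compact userspace analogy of that prime-on-first-call pattern (field wiring simplified, names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct sampler {
	unsigned long long prev, last_time;
};

/* returns false on the priming call, true once deltas are meaningful */
static bool sample(struct sampler *s, unsigned long long now,
		   unsigned long long reading, unsigned long long *delta)
{
	bool valid = s->last_time != 0;   /* cf. !!cpu->last_sample_time */

	*delta = reading - s->prev;
	s->prev = reading;
	s->last_time = now;
	return valid;
}

int main(void)
{
	struct sampler s = { 0, 0 };
	unsigned long long d;

	if (!sample(&s, 100, 5000, &d))
		printf("first sample skipped (stale delta %llu)\n", d);
	if (sample(&s, 200, 5600, &d))
		printf("second sample delta = %llu\n", d);  /* 600 */
	return 0;
}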
@@ -984,11 +1131,14 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
984 * enough period of time to adjust our busyness. 1131 * enough period of time to adjust our busyness.
985 */ 1132 */
986 duration_ns = cpu->sample.time - cpu->last_sample_time; 1133 duration_ns = cpu->sample.time - cpu->last_sample_time;
987 if ((s64)duration_ns > pid_params.sample_rate_ns * 3 1134 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
988 && cpu->last_sample_time > 0) {
989 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1135 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
990 int_tofp(duration_ns)); 1136 int_tofp(duration_ns));
991 core_busy = mul_fp(core_busy, sample_ratio); 1137 core_busy = mul_fp(core_busy, sample_ratio);
1138 } else {
1139 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
1140 if (sample_ratio < int_tofp(1))
1141 core_busy = 0;
992 } 1142 }
993 1143
994 cpu->sample.busy_scaled = core_busy; 1144 cpu->sample.busy_scaled = core_busy;
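The rescaling above uses the driver's fixed-point helpers; assuming the usual 8 fractional bits, they behave like the sketch below, which scales a busy value by sample_rate/duration the way the first branch does (FRAC_BITS = 8 and the exact helper arithmetic are assumptions here):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8  /* assumption: matches intel_pstate's fixed point */

static int32_t int_tofp(int32_t x) { return x << FRAC_BITS; }
static int32_t mul_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x * (int64_t)y) >> FRAC_BITS);
}
static int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

int main(void)
{
	int32_t core_busy = int_tofp(80);                  /* 80.0 */
	int32_t ratio = div_fp(int_tofp(1), int_tofp(3));  /* ~0.33 */

	core_busy = mul_fp(core_busy, ratio);              /* ~26.56 */
	printf("scaled busy = %d.%02d\n",
	       (int)(core_busy >> FRAC_BITS),
	       (int)(((core_busy & ((1 << FRAC_BITS) - 1)) * 100) >> FRAC_BITS));
	return 0;
}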
@@ -1100,10 +1250,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1100 intel_pstate_get_cpu_pstates(cpu); 1250 intel_pstate_get_cpu_pstates(cpu);
1101 1251
1102 intel_pstate_busy_pid_reset(cpu); 1252 intel_pstate_busy_pid_reset(cpu);
1103 intel_pstate_sample(cpu, 0);
1104 1253
1105 cpu->update_util.func = intel_pstate_update_util; 1254 cpu->update_util.func = intel_pstate_update_util;
1106 cpufreq_set_update_util_data(cpunum, &cpu->update_util);
1107 1255
1108 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); 1256 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
1109 1257
@@ -1122,22 +1270,54 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
1122 return get_avg_frequency(cpu); 1270 return get_avg_frequency(cpu);
1123} 1271}
1124 1272
1273static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
1274{
1275 struct cpudata *cpu = all_cpu_data[cpu_num];
1276
1277 /* Prevent intel_pstate_update_util() from using stale data. */
1278 cpu->sample.time = 0;
1279 cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
1280}
1281
1282static void intel_pstate_clear_update_util_hook(unsigned int cpu)
1283{
1284 cpufreq_set_update_util_data(cpu, NULL);
1285 synchronize_sched();
1286}
1287
1288static void intel_pstate_set_performance_limits(struct perf_limits *limits)
1289{
1290 limits->no_turbo = 0;
1291 limits->turbo_disabled = 0;
1292 limits->max_perf_pct = 100;
1293 limits->max_perf = int_tofp(1);
1294 limits->min_perf_pct = 100;
1295 limits->min_perf = int_tofp(1);
1296 limits->max_policy_pct = 100;
1297 limits->max_sysfs_pct = 100;
1298 limits->min_policy_pct = 0;
1299 limits->min_sysfs_pct = 0;
1300}
1301
1125static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1302static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1126{ 1303{
1127 if (!policy->cpuinfo.max_freq) 1304 if (!policy->cpuinfo.max_freq)
1128 return -ENODEV; 1305 return -ENODEV;
1129 1306
1130 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && 1307 intel_pstate_clear_update_util_hook(policy->cpu);
1131 policy->max >= policy->cpuinfo.max_freq) { 1308
1132 pr_debug("intel_pstate: set performance\n"); 1309 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
1133 limits = &performance_limits; 1310 limits = &performance_limits;
1134 if (hwp_active) 1311 if (policy->max >= policy->cpuinfo.max_freq) {
1135 intel_pstate_hwp_set(policy->cpus); 1312 pr_debug("intel_pstate: set performance\n");
1136 return 0; 1313 intel_pstate_set_performance_limits(limits);
1314 goto out;
1315 }
1316 } else {
1317 pr_debug("intel_pstate: set powersave\n");
1318 limits = &powersave_limits;
1137 } 1319 }
1138 1320
1139 pr_debug("intel_pstate: set powersave\n");
1140 limits = &powersave_limits;
1141 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1321 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1142 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); 1322 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
1143 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, 1323 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1163,6 +1343,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1163 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1343 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1164 int_tofp(100)); 1344 int_tofp(100));
1165 1345
1346 out:
1347 intel_pstate_set_update_util_hook(policy->cpu);
1348
1166 if (hwp_active) 1349 if (hwp_active)
1167 intel_pstate_hwp_set(policy->cpus); 1350 intel_pstate_hwp_set(policy->cpus);
1168 1351
@@ -1187,8 +1370,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
1187 1370
1188 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); 1371 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
1189 1372
1190 cpufreq_set_update_util_data(cpu_num, NULL); 1373 intel_pstate_clear_update_util_hook(cpu_num);
1191 synchronize_sched();
1192 1374
1193 if (hwp_active) 1375 if (hwp_active)
1194 return; 1376 return;
@@ -1455,8 +1637,7 @@ out:
1455 get_online_cpus(); 1637 get_online_cpus();
1456 for_each_online_cpu(cpu) { 1638 for_each_online_cpu(cpu) {
1457 if (all_cpu_data[cpu]) { 1639 if (all_cpu_data[cpu]) {
1458 cpufreq_set_update_util_data(cpu, NULL); 1640 intel_pstate_clear_update_util_hook(cpu);
1459 synchronize_sched();
1460 kfree(all_cpu_data[cpu]); 1641 kfree(all_cpu_data[cpu]);
1461 } 1642 }
1462 } 1643 }
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); 225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
226 struct ccp_aes_cmac_exp_ctx state; 226 struct ccp_aes_cmac_exp_ctx state;
227 227
228 /* Don't let anything leak to 'out' */
229 memset(&state, 0, sizeof(state));
230
228 state.null_msg = rctx->null_msg; 231 state.null_msg = rctx->null_msg;
229 memcpy(state.iv, rctx->iv, sizeof(state.iv)); 232 memcpy(state.iv, rctx->iv, sizeof(state.iv));
230 state.buf_count = rctx->buf_count; 233 state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
213 struct ccp_sha_exp_ctx state; 213 struct ccp_sha_exp_ctx state;
214 214
215 /* Don't let anything leak to 'out' */
216 memset(&state, 0, sizeof(state));
217
215 state.type = rctx->type; 218 state.type = rctx->type;
216 state.msg_bits = rctx->msg_bits; 219 state.msg_bits = rctx->msg_bits;
217 state.first = rctx->first; 220 state.first = rctx->first;
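Both ccp export fixes zero the on-stack state struct before filling it, so compiler-inserted padding can never leak into 'out'. The pattern in isolation (the struct layout here is made up):

#include <string.h>
#include <stdio.h>

struct exp_ctx {        /* hypothetical layout: padding after null_msg */
	char  null_msg;
	int   buf_count;
	char  buf[8];
};

static void export_state(void *out, char null_msg, int count)
{
	struct exp_ctx state;

	/* Don't let stack garbage in the padding leak to 'out' */
	memset(&state, 0, sizeof(state));

	state.null_msg = null_msg;
	state.buf_count = count;
	memcpy(out, &state, sizeof(state));
}

int main(void)
{
	unsigned char out[sizeof(struct exp_ctx)];

	export_state(out, 1, 4);
	printf("exported %zu bytes, padding zeroed\n", sizeof(out));
	return 0;
}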
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
63 ptr->eptr = upper_32_bits(dma_addr); 63 ptr->eptr = upper_32_bits(dma_addr);
64} 64}
65 65
66static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68{
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72}
73
66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, 74static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
67 bool is_sec1) 75 bool is_sec1)
68{ 76{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1083 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, 1091 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1084 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL 1092 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1085 : DMA_TO_DEVICE); 1093 : DMA_TO_DEVICE);
1086
1087 /* hmac data */ 1094 /* hmac data */
1088 desc->ptr[1].len = cpu_to_be16(areq->assoclen); 1095 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1089 if (sg_count > 1 && 1096 if (sg_count > 1 &&
1090 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, 1097 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1091 areq->assoclen, 1098 areq->assoclen,
1092 &edesc->link_tbl[tbl_off])) > 1) { 1099 &edesc->link_tbl[tbl_off])) > 1) {
1093 tbl_off += ret;
1094
1095 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * 1100 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1096 sizeof(struct talitos_ptr), 0); 1101 sizeof(struct talitos_ptr), 0);
1097 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; 1102 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1098 1103
1099 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1104 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1100 edesc->dma_len, DMA_BIDIRECTIONAL); 1105 edesc->dma_len, DMA_BIDIRECTIONAL);
1106
1107 tbl_off += ret;
1101 } else { 1108 } else {
1102 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); 1109 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1103 desc->ptr[1].j_extent = 0; 1110 desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1126 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) 1133 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1127 sg_link_tbl_len += authsize; 1134 sg_link_tbl_len += authsize;
1128 1135
1129 if (sg_count > 1 && 1136 if (sg_count == 1) {
1130 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, 1137 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
1131 sg_link_tbl_len, 1138 areq->assoclen, 0);
1132 &edesc->link_tbl[tbl_off])) > 1) { 1139 } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
1133 tbl_off += ret; 1140 areq->assoclen, sg_link_tbl_len,
1141 &edesc->link_tbl[tbl_off])) >
1142 1) {
1134 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1143 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1135 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + 1144 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1136 tbl_off * 1145 tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1147 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len, 1148 edesc->dma_len,
1140 DMA_BIDIRECTIONAL); 1149 DMA_BIDIRECTIONAL);
1141 } else 1150 tbl_off += ret;
1142 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); 1151 } else {
1152 copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
1153 }
1143 1154
1144 /* cipher out */ 1155 /* cipher out */
1145 desc->ptr[5].len = cpu_to_be16(cryptlen); 1156 desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1151 1162
1152 edesc->icv_ool = false; 1163 edesc->icv_ool = false;
1153 1164
1154 if (sg_count > 1 && 1165 if (sg_count == 1) {
1155 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, 1166 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
1167 areq->assoclen, 0);
1168 } else if ((sg_count =
1169 sg_to_link_tbl_offset(areq->dst, sg_count,
1156 areq->assoclen, cryptlen, 1170 areq->assoclen, cryptlen,
1157 &edesc->link_tbl[tbl_off])) > 1171 &edesc->link_tbl[tbl_off])) > 1) {
1158 1) {
1159 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1172 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1160 1173
1161 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + 1174 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1178 edesc->dma_len, DMA_BIDIRECTIONAL); 1191 edesc->dma_len, DMA_BIDIRECTIONAL);
1179 1192
1180 edesc->icv_ool = true; 1193 edesc->icv_ool = true;
1181 } else 1194 } else {
1182 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); 1195 copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
1196 }
1183 1197
1184 /* iv out */ 1198 /* iv out */
1185 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 1199 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
2629 struct talitos_alg_template algt; 2643 struct talitos_alg_template algt;
2630}; 2644};
2631 2645
2632static int talitos_cra_init(struct crypto_tfm *tfm) 2646static int talitos_init_common(struct talitos_ctx *ctx,
2647 struct talitos_crypto_alg *talitos_alg)
2633{ 2648{
2634 struct crypto_alg *alg = tfm->__crt_alg;
2635 struct talitos_crypto_alg *talitos_alg;
2636 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2637 struct talitos_private *priv; 2649 struct talitos_private *priv;
2638 2650
2639 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2640 talitos_alg = container_of(__crypto_ahash_alg(alg),
2641 struct talitos_crypto_alg,
2642 algt.alg.hash);
2643 else
2644 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2645 algt.alg.crypto);
2646
2647 /* update context with ptr to dev */ 2651 /* update context with ptr to dev */
2648 ctx->dev = talitos_alg->dev; 2652 ctx->dev = talitos_alg->dev;
2649 2653
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
2661 return 0; 2665 return 0;
2662} 2666}
2663 2667
2668static int talitos_cra_init(struct crypto_tfm *tfm)
2669{
2670 struct crypto_alg *alg = tfm->__crt_alg;
2671 struct talitos_crypto_alg *talitos_alg;
2672 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2675 talitos_alg = container_of(__crypto_ahash_alg(alg),
2676 struct talitos_crypto_alg,
2677 algt.alg.hash);
2678 else
2679 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2680 algt.alg.crypto);
2681
2682 return talitos_init_common(ctx, talitos_alg);
2683}
2684
2664static int talitos_cra_init_aead(struct crypto_aead *tfm) 2685static int talitos_cra_init_aead(struct crypto_aead *tfm)
2665{ 2686{
2666 talitos_cra_init(crypto_aead_tfm(tfm)); 2687 struct aead_alg *alg = crypto_aead_alg(tfm);
2667 return 0; 2688 struct talitos_crypto_alg *talitos_alg;
2689 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2690
2691 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2692 algt.alg.aead);
2693
2694 return talitos_init_common(ctx, talitos_alg);
2668} 2695}
2669 2696
2670static int talitos_cra_init_ahash(struct crypto_tfm *tfm) 2697static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
130static void dwc_initialize(struct dw_dma_chan *dwc) 130static void dwc_initialize(struct dw_dma_chan *dwc)
131{ 131{
132 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 132 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
133 struct dw_dma_slave *dws = dwc->chan.private;
134 u32 cfghi = DWC_CFGH_FIFO_MODE; 133 u32 cfghi = DWC_CFGH_FIFO_MODE;
135 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); 134 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
136 135
137 if (dwc->initialized == true) 136 if (dwc->initialized == true)
138 return; 137 return;
139 138
140 if (dws) { 139 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
141 /* 140 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
142 * We need controller-specific data to set up slave
143 * transfers.
144 */
145 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
146
147 cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
148 cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
149 } else {
150 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
151 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
152 }
153 141
154 channel_writel(dwc, CFG_LO, cfglo); 142 channel_writel(dwc, CFG_LO, cfglo);
155 channel_writel(dwc, CFG_HI, cfghi); 143 channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
941 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 929 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
942 struct dw_dma_slave *dws = param; 930 struct dw_dma_slave *dws = param;
943 931
944 if (!dws || dws->dma_dev != chan->device->dev) 932 if (dws->dma_dev != chan->device->dev)
945 return false; 933 return false;
946 934
947 /* We have to copy data since dws can be temporary storage */ 935 /* We have to copy data since dws can be temporary storage */
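dw_dma_filter() may now assume a non-NULL dw_dma_slave because dwc_alloc_chan_resources() validates chan->private up front; the same function doubles as the filter that clients pass to dma_request_channel(). A toy model of that filter-callback contract (request_channel() is a stand-in for the dmaengine API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct chan  { const void *dev; int id; };
struct slave { const void *dma_dev; int src_id, dst_id; };

typedef bool (*filter_fn)(struct chan *c, void *param);

/* mirrors dw_dma_filter(): reject channels owned by another controller */
static bool my_filter(struct chan *c, void *param)
{
	struct slave *s = param;

	return s->dma_dev == c->dev;
}

static struct chan *request_channel(struct chan *chans, size_t n,
				    filter_fn fn, void *param)
{
	for (size_t i = 0; i < n; i++)
		if (fn(&chans[i], param))
			return &chans[i];
	return NULL;
}

int main(void)
{
	static int ctrl_a, ctrl_b;
	struct chan chans[] = { { &ctrl_a, 0 }, { &ctrl_b, 1 } };
	struct slave s = { &ctrl_b, 3, 4 };
	struct chan *c = request_channel(chans, 2, my_filter, &s);

	printf("got channel %d\n", c ? c->id : -1);  /* prints 1 */
	return 0;
}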
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1165 * doesn't mean what you think it means), and status writeback. 1153 * doesn't mean what you think it means), and status writeback.
1166 */ 1154 */
1167 1155
1156 /*
1157 * We need controller-specific data to set up slave transfers.
1158 */
1159 if (chan->private && !dw_dma_filter(chan, chan->private)) {
1160 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1161 return -EINVAL;
1162 }
1163
1168 /* Enable controller here if needed */ 1164 /* Enable controller here if needed */
1169 if (!dw->in_use) 1165 if (!dw->in_use)
1170 dw_dma_on(dw); 1166 dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1226 spin_lock_irqsave(&dwc->lock, flags); 1222 spin_lock_irqsave(&dwc->lock, flags);
1227 list_splice_init(&dwc->free_list, &list); 1223 list_splice_init(&dwc->free_list, &list);
1228 dwc->descs_allocated = 0; 1224 dwc->descs_allocated = 0;
1225
1226 /* Clear custom channel configuration */
1227 dwc->src_id = 0;
1228 dwc->dst_id = 0;
1229
1230 dwc->src_master = 0;
1231 dwc->dst_master = 0;
1232
1229 dwc->initialized = false; 1233 dwc->initialized = false;
1230 1234
1231 /* Disable interrupts */ 1235 /* Disable interrupts */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1238 struct edma_desc *edesc; 1238 struct edma_desc *edesc;
1239 dma_addr_t src_addr, dst_addr; 1239 dma_addr_t src_addr, dst_addr;
1240 enum dma_slave_buswidth dev_width; 1240 enum dma_slave_buswidth dev_width;
1241 bool use_intermediate = false;
1241 u32 burst; 1242 u32 burst;
1242 int i, ret, nslots; 1243 int i, ret, nslots;
1243 1244
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1279 * but the synchronization is difficult to achieve with Cyclic and 1280 * but the synchronization is difficult to achieve with Cyclic and
1280 * cannot be guaranteed, so we error out early. 1281 * cannot be guaranteed, so we error out early.
1281 */ 1282 */
1282 if (nslots > MAX_NR_SG) 1283 if (nslots > MAX_NR_SG) {
1283 return NULL; 1284 /*
1285 * If the burst and period sizes are the same, we can put
1286 * the full buffer into a single period and activate
1287 * intermediate interrupts. This will produce interrupts
1288 * after each burst, which is also after each desired period.
1289 */
1290 if (burst == period_len) {
1291 period_len = buf_len;
1292 nslots = 2;
1293 use_intermediate = true;
1294 } else {
1295 return NULL;
1296 }
1297 }
1284 1298
1285 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1299 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1286 GFP_ATOMIC); 1300 GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1358 /* 1372 /*
1359 * Enable period interrupt only if it is requested 1373 * Enable period interrupt only if it is requested
1360 */ 1374 */
1361 if (tx_flags & DMA_PREP_INTERRUPT) 1375 if (tx_flags & DMA_PREP_INTERRUPT) {
1362 edesc->pset[i].param.opt |= TCINTEN; 1376 edesc->pset[i].param.opt |= TCINTEN;
1377
1378 /* Also enable intermediate interrupts if necessary */
1379 if (use_intermediate)
1380 edesc->pset[i].param.opt |= ITCINTEN;
1381 }
1363 } 1382 }
1364 1383
1365 /* Place the cyclic channel to highest priority queue */ 1384 /* Place the cyclic channel to highest priority queue */
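When a cyclic buffer would need more than MAX_NR_SG slots, the new code collapses it into one two-slot period provided burst == period_len, and compensates by enabling intermediate completion interrupts (ITCINTEN) so an interrupt still fires per burst. The slot decision in isolation (accounting simplified; the MAX_NR_SG value is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_SG 20  /* illustrative limit */

static int pick_nslots(unsigned buf_len, unsigned *period_len,
		       unsigned burst, bool *use_intermediate)
{
	int nslots = buf_len / *period_len + (buf_len % *period_len ? 1 : 0);

	*use_intermediate = false;
	if (nslots > MAX_NR_SG) {
		if (burst != *period_len)
			return -1;            /* error out early */
		/* one big period; interrupts come per burst instead */
		*period_len = buf_len;
		*use_intermediate = true;
		return 2;
	}
	return nslots;
}

int main(void)
{
	unsigned period = 64;
	bool inter;
	int n = pick_nslots(4096, &period, 64, &inter);

	printf("nslots=%d period=%u intermediate=%d\n", n, period, inter);
	return 0;
}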
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
1570 return IRQ_HANDLED; 1589 return IRQ_HANDLED;
1571} 1590}
1572 1591
1573static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
1574{
1575 struct platform_device *tc_pdev;
1576 int ret;
1577
1578 if (!IS_ENABLED(CONFIG_OF) || !tc)
1579 return;
1580
1581 tc_pdev = of_find_device_by_node(tc->node);
1582 if (!tc_pdev) {
1583 pr_err("%s: TPTC device is not found\n", __func__);
1584 return;
1585 }
1586 if (!pm_runtime_enabled(&tc_pdev->dev))
1587 pm_runtime_enable(&tc_pdev->dev);
1588
1589 if (enable)
1590 ret = pm_runtime_get_sync(&tc_pdev->dev);
1591 else
1592 ret = pm_runtime_put_sync(&tc_pdev->dev);
1593
1594 if (ret < 0)
1595 pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
1596 enable ? "get" : "put", dev_name(&tc_pdev->dev));
1597}
1598
1599/* Alloc channel resources */ 1592/* Alloc channel resources */
1600static int edma_alloc_chan_resources(struct dma_chan *chan) 1593static int edma_alloc_chan_resources(struct dma_chan *chan)
1601{ 1594{
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
1632 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, 1625 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1633 echan->hw_triggered ? "HW" : "SW"); 1626 echan->hw_triggered ? "HW" : "SW");
1634 1627
1635 edma_tc_set_pm_state(echan->tc, true);
1636
1637 return 0; 1628 return 0;
1638 1629
1639err_slot: 1630err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
1670 echan->alloced = false; 1661 echan->alloced = false;
1671 } 1662 }
1672 1663
1673 edma_tc_set_pm_state(echan->tc, false);
1674 echan->tc = NULL; 1664 echan->tc = NULL;
1675 echan->hw_triggered = false; 1665 echan->hw_triggered = false;
1676 1666
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
2417 int i; 2407 int i;
2418 2408
2419 for (i = 0; i < ecc->num_channels; i++) { 2409 for (i = 0; i < ecc->num_channels; i++) {
2420 if (echan[i].alloced) { 2410 if (echan[i].alloced)
2421 edma_setup_interrupt(&echan[i], false); 2411 edma_setup_interrupt(&echan[i], false);
2422 edma_tc_set_pm_state(echan[i].tc, false);
2423 }
2424 } 2412 }
2425 2413
2426 return 0; 2414 return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
2450 2438
2451 /* Set up channel -> slot mapping for the entry slot */ 2439 /* Set up channel -> slot mapping for the entry slot */
2452 edma_set_chmap(&echan[i], echan[i].slot[0]); 2440 edma_set_chmap(&echan[i], echan[i].slot[0]);
2453
2454 edma_tc_set_pm_state(echan[i].tc, true);
2455 } 2441 }
2456 } 2442 }
2457 2443
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
2475 2461
2476static int edma_tptc_probe(struct platform_device *pdev) 2462static int edma_tptc_probe(struct platform_device *pdev)
2477{ 2463{
2478 return 0; 2464 pm_runtime_enable(&pdev->dev);
2465 return pm_runtime_get_sync(&pdev->dev);
2479} 2466}
2480 2467
2481static struct platform_driver edma_tptc_driver = { 2468static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
64 64
65 if (hsuc->direction == DMA_MEM_TO_DEV) { 65 if (hsuc->direction == DMA_MEM_TO_DEV) {
66 bsr = config->dst_maxburst; 66 bsr = config->dst_maxburst;
67 mtsr = config->dst_addr_width; 67 mtsr = config->src_addr_width;
68 } else if (hsuc->direction == DMA_DEV_TO_MEM) { 68 } else if (hsuc->direction == DMA_DEV_TO_MEM) {
69 bsr = config->src_maxburst; 69 bsr = config->src_maxburst;
70 mtsr = config->src_addr_width; 70 mtsr = config->dst_addr_width;
71 } 71 }
72 72
73 hsu_chan_disable(hsuc); 73 hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
135 sr = hsu_chan_readl(hsuc, HSU_CH_SR); 135 sr = hsu_chan_readl(hsuc, HSU_CH_SR);
136 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 136 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
137 137
138 return sr; 138 return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
139} 139}
140 140
141irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) 141irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
254static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) 254static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
255{ 255{
256 struct hsu_dma_desc *desc = hsuc->desc; 256 struct hsu_dma_desc *desc = hsuc->desc;
257 size_t bytes = desc->length; 257 size_t bytes = 0;
258 int i; 258 int i;
259 259
260 i = desc->active % HSU_DMA_CHAN_NR_DESC; 260 for (i = desc->active; i < desc->nents; i++)
261 bytes += desc->sg[i].len;
262
263 i = HSU_DMA_CHAN_NR_DESC - 1;
261 do { 264 do {
262 bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)); 265 bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
263 } while (--i >= 0); 266 } while (--i >= 0);
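The reworked residue computation sums the lengths of not-yet-submitted sg entries in software and then adds the hardware byte counts of the in-flight descriptors, rather than starting from desc->length. A simplified model with the register reads faked by an array:

#include <stddef.h>
#include <stdio.h>

#define NR_DESC 4

struct sg   { size_t len; };
struct desc { struct sg sg[16]; unsigned active, nents; };

/* hw_tsr[i] models hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)) */
static size_t active_desc_size(const struct desc *d,
			       const size_t hw_tsr[NR_DESC])
{
	size_t bytes = 0;
	unsigned i;

	for (i = d->active; i < d->nents; i++)   /* not yet submitted */
		bytes += d->sg[i].len;
	for (i = 0; i < NR_DESC; i++)            /* still in the hardware */
		bytes += hw_tsr[i];
	return bytes;
}

int main(void)
{
	struct desc d = { .sg = { {64}, {64}, {64}, {64} },
			  .active = 2, .nents = 4 };
	size_t tsr[NR_DESC] = { 16, 0, 0, 0 };

	printf("residue = %zu\n", active_desc_size(&d, tsr));  /* 144 */
	return 0;
}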
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
41#define HSU_CH_SR_DESCTO(x) BIT(8 + (x)) 41#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
42#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8)) 42#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
43#define HSU_CH_SR_CHE BIT(15) 43#define HSU_CH_SR_CHE BIT(15)
44#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
45#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
46#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
44 47
45/* Bits in HSU_CH_CR */ 48/* Bits in HSU_CH_CR */
46#define HSU_CH_CR_CHA BIT(0) 49#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5aee7ffe..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
48 unsigned dma_sig; 48 unsigned dma_sig;
49 bool cyclic; 49 bool cyclic;
50 bool paused; 50 bool paused;
51 bool running;
51 52
52 int dma_ch; 53 int dma_ch;
53 struct omap_desc *desc; 54 struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
294 295
295 /* Enable channel */ 296 /* Enable channel */
296 omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE); 297 omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
298
299 c->running = true;
297} 300}
298 301
299static void omap_dma_stop(struct omap_chan *c) 302static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
355 358
356 omap_dma_chan_write(c, CLNK_CTRL, val); 359 omap_dma_chan_write(c, CLNK_CTRL, val);
357 } 360 }
361
362 c->running = false;
358} 363}
359 364
360static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, 365static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
673 struct omap_chan *c = to_omap_dma_chan(chan); 678 struct omap_chan *c = to_omap_dma_chan(chan);
674 struct virt_dma_desc *vd; 679 struct virt_dma_desc *vd;
675 enum dma_status ret; 680 enum dma_status ret;
676 uint32_t ccr;
677 unsigned long flags; 681 unsigned long flags;
678 682
679 ccr = omap_dma_chan_read(c, CCR);
680 /* The channel is no longer active, handle the completion right away */
681 if (!(ccr & CCR_ENABLE))
682 omap_dma_callback(c->dma_ch, 0, c);
683
684 ret = dma_cookie_status(chan, cookie, txstate); 683 ret = dma_cookie_status(chan, cookie, txstate);
684
685 if (!c->paused && c->running) {
686 uint32_t ccr = omap_dma_chan_read(c, CCR);
687 /*
688 * The channel is no longer active, set the return value
689 * accordingly
690 */
691 if (!(ccr & CCR_ENABLE))
692 ret = DMA_COMPLETE;
693 }
694
685 if (ret == DMA_COMPLETE || !txstate) 695 if (ret == DMA_COMPLETE || !txstate)
686 return ret; 696 return ret;
687 697
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
945 d->ccr = c->ccr; 955 d->ccr = c->ccr;
946 d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC; 956 d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
947 957
948 d->cicr = CICR_DROP_IE; 958 d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
949 if (tx_flags & DMA_PREP_INTERRUPT)
950 d->cicr |= CICR_FRAME_IE;
951 959
952 d->csdp = data_type; 960 d->csdp = data_type;
953 961
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321868d3..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1236 struct xilinx_vdma_device *xdev = ofdma->of_dma_data; 1236 struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
1237 int chan_id = dma_spec->args[0]; 1237 int chan_id = dma_spec->args[0];
1238 1238
1239 if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE) 1239 if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
1240 return NULL; 1240 return NULL;
1241 1241
1242 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 1242 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1866 1866
1867 i7_dev = get_i7core_dev(mce->socketid); 1867 i7_dev = get_i7core_dev(mce->socketid);
1868 if (!i7_dev) 1868 if (!i7_dev)
1869 return NOTIFY_BAD; 1869 return NOTIFY_DONE;
1870 1870
1871 mci = i7_dev->mci; 1871 mci = i7_dev->mci;
1872 pvt = mci->pvt_info; 1872 pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 93f0d4120289..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -362,6 +362,7 @@ struct sbridge_pvt {
362 362
363 /* Memory type detection */ 363 /* Memory type detection */
364 bool is_mirrored, is_lockstep, is_close_pg; 364 bool is_mirrored, is_lockstep, is_close_pg;
365 bool is_chan_hash;
365 366
366 /* Fifo double buffers */ 367 /* Fifo double buffers */
367 struct mce mce_entry[MCE_LOG_LEN]; 368 struct mce mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
1060 return (pkg >> 2) & 0x1; 1061 return (pkg >> 2) & 0x1;
1061} 1062}
1062 1063
1064static int haswell_chan_hash(int idx, u64 addr)
1065{
1066 int i;
1067
1068 /*
1069 * XOR even bits from 12:26 to bit0 of idx,
1070 * odd bits from 13:27 to bit1
1071 */
1072 for (i = 12; i < 28; i += 2)
1073 idx ^= (addr >> i) & 3;
1074
1075 return idx;
1076}
1077
1063/**************************************************************************** 1078/****************************************************************************
1064 Memory check routines 1079 Memory check routines
1065 ****************************************************************************/ 1080 ****************************************************************************/
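haswell_chan_hash() folds the even address bits 12..26 into bit 0 of the channel index and the odd bits 13..27 into bit 1, two bits per iteration. It compiles unchanged outside the kernel:

#include <stdint.h>
#include <stdio.h>

static int haswell_chan_hash(int idx, uint64_t addr)
{
	int i;

	/* XOR even bits 12..26 into bit 0 of idx, odd bits 13..27 into bit 1 */
	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}

int main(void)
{
	printf("%d\n", haswell_chan_hash(0, 0x1000));  /* bit 12 set -> 1 */
	printf("%d\n", haswell_chan_hash(0, 0x3000));  /* bits 12,13 -> 3 */
	return 0;
}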
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
1616 KNL_MAX_CHANNELS : NUM_CHANNELS; 1631 KNL_MAX_CHANNELS : NUM_CHANNELS;
1617 u64 knl_mc_sizes[KNL_MAX_CHANNELS]; 1632 u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1618 1633
1634 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1635 pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
1636 pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1637 }
1619 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || 1638 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1620 pvt->info.type == KNIGHTS_LANDING) 1639 pvt->info.type == KNIGHTS_LANDING)
1621 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg); 1640 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2118 } 2137 }
2119 2138
2120 ch_way = TAD_CH(reg) + 1; 2139 ch_way = TAD_CH(reg) + 1;
2121 sck_way = 1 << TAD_SOCK(reg); 2140 sck_way = TAD_SOCK(reg);
2122 2141
2123 if (ch_way == 3) 2142 if (ch_way == 3)
2124 idx = addr >> 6; 2143 idx = addr >> 6;
2125 else 2144 else {
2126 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 2145 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2146 if (pvt->is_chan_hash)
2147 idx = haswell_chan_hash(idx, addr);
2148 }
2127 idx = idx % ch_way; 2149 idx = idx % ch_way;
2128 2150
2129 /* 2151 /*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2157 switch(ch_way) { 2179 switch(ch_way) {
2158 case 2: 2180 case 2:
2159 case 4: 2181 case 4:
2160 sck_xch = 1 << sck_way * (ch_way >> 1); 2182 sck_xch = (1 << sck_way) * (ch_way >> 1);
2161 break; 2183 break;
2162 default: 2184 default:
2163 sprintf(msg, "Invalid mirror set. Can't decode addr"); 2185 sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2193 2215
2194 ch_addr = addr - offset; 2216 ch_addr = addr - offset;
2195 ch_addr >>= (6 + shiftup); 2217 ch_addr >>= (6 + shiftup);
2196 ch_addr /= ch_way * sck_way; 2218 ch_addr /= sck_xch;
2197 ch_addr <<= (6 + shiftup); 2219 ch_addr <<= (6 + shiftup);
2198 ch_addr |= addr & ((1 << (6 + shiftup)) - 1); 2220 ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
2199 2221
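The sck_xch change is an operator-precedence fix: '<<' binds looser than '*', so the old expression shifted by the product instead of multiplying the shifted value. The two forms diverge as soon as sck_way exceeds 1, e.g. for sck_way = 2 and ch_way = 4:

#include <stdio.h>

int main(void)
{
	int sck_way = 2, ch_way = 4;

	/* old: shift amount is sck_way * (ch_way >> 1), since '<<'
	 * binds looser than '*' */
	int old = 1 << sck_way * (ch_way >> 1);    /* 1 << 4 = 16 */
	int new = (1 << sck_way) * (ch_way >> 1);  /* 4 *  2 =  8 */

	printf("old=%d new=%d\n", old, new);
	return 0;
}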
@@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3146 3168
3147 mci = get_mci_for_node_id(mce->socketid); 3169 mci = get_mci_for_node_id(mce->socketid);
3148 if (!mci) 3170 if (!mci)
3149 return NOTIFY_BAD; 3171 return NOTIFY_DONE;
3150 pvt = mci->pvt_info; 3172 pvt = mci->pvt_info;
3151 3173
3152 /* 3174 /*
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 841a4b586395..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
348 palmas_vbus_irq_handler, 348 palmas_vbus_irq_handler,
349 IRQF_TRIGGER_FALLING | 349 IRQF_TRIGGER_FALLING |
350 IRQF_TRIGGER_RISING | 350 IRQF_TRIGGER_RISING |
351 IRQF_ONESHOT | 351 IRQF_ONESHOT,
352 IRQF_EARLY_RESUME,
353 "palmas_usb_vbus", 352 "palmas_usb_vbus",
354 palmas_usb); 353 palmas_usb);
355 if (status < 0) { 354 if (status < 0) {
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index aa1f743152a2..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -203,7 +203,19 @@ void __init efi_init(void)
203 203
204 reserve_regions(); 204 reserve_regions();
205 early_memunmap(memmap.map, params.mmap_size); 205 early_memunmap(memmap.map, params.mmap_size);
206 memblock_mark_nomap(params.mmap & PAGE_MASK, 206
207 PAGE_ALIGN(params.mmap_size + 207 if (IS_ENABLED(CONFIG_ARM)) {
208 (params.mmap & ~PAGE_MASK))); 208 /*
209 * ARM currently does not allow ioremap_cache() to be called on
210 * memory regions that are covered by struct page. So remove the
211 * UEFI memory map from the linear mapping.
212 */
213 memblock_mark_nomap(params.mmap & PAGE_MASK,
214 PAGE_ALIGN(params.mmap_size +
215 (params.mmap & ~PAGE_MASK)));
216 } else {
217 memblock_reserve(params.mmap & PAGE_MASK,
218 PAGE_ALIGN(params.mmap_size +
219 (params.mmap & ~PAGE_MASK)));
220 }
209} 221}
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
202 { NULL_GUID, "", NULL }, 202 { NULL_GUID, "", NULL },
203}; 203};
204 204
205/*
206 * Check if @var_name matches the pattern given in @match_name.
207 *
208 * @var_name: an array of @len non-NUL characters.
209 * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
 210 * final "*" character matches any trailing characters of @var_name,
211 * including the case when there are none left in @var_name.
212 * @match: on output, the number of non-wildcard characters in @match_name
213 * that @var_name matches, regardless of the return value.
214 * @return: whether @var_name fully matches @match_name.
215 */
205static bool 216static bool
206variable_matches(const char *var_name, size_t len, const char *match_name, 217variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match) 218 int *match)
208{ 219{
209 for (*match = 0; ; (*match)++) { 220 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match]; 221 char c = match_name[*match];
211 char u = var_name[*match];
212 222
213 /* Wildcard in the matching name means we've matched */ 223 switch (c) {
214 if (c == '*') 224 case '*':
225 /* Wildcard in @match_name means we've matched. */
215 return true; 226 return true;
216 227
217 /* Case sensitive match */ 228 case '\0':
218 if (!c && *match == len) 229 /* @match_name has ended. Has @var_name too? */
219 return true; 230 return (*match == len);
220 231
221 if (c != u) 232 default:
233 /*
234 * We've reached a non-wildcard char in @match_name.
235 * Continue only if there's an identical character in
236 * @var_name.
237 */
238 if (*match < len && c == var_name[*match])
239 continue;
222 return false; 240 return false;
223 241 }
224 if (!c)
225 return true;
226 } 242 }
227 return true;
228} 243}
229 244
230bool 245bool
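The rewritten variable_matches() is plain C and easy to exercise in userspace; note that @var_name is length-delimited rather than NUL-terminated, which is exactly what the '\0' arm checks. A quick harness (casts added only to silence sign-compare warnings):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool variable_matches(const char *var_name, size_t len,
			     const char *match_name, int *match)
{
	for (*match = 0; ; (*match)++) {
		char c = match_name[*match];

		switch (c) {
		case '*':
			/* wildcard matches the rest, including nothing */
			return true;
		case '\0':
			/* pattern ended; did @var_name end too? */
			return *match == (int)len;
		default:
			if (*match < (int)len && c == var_name[*match])
				continue;
			return false;
		}
	}
}

int main(void)
{
	int m;

	printf("%d\n", variable_matches("Boot0001", 8, "Boot*", &m));     /* 1 */
	printf("%d\n", variable_matches("BootOrder", 9, "BootOrder", &m)); /* 1 */
	printf("%d\n", variable_matches("Boot", 4, "BootOrder", &m));     /* 0 */
	return 0;
}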
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 11bfee8b79a9..b5d05807e6ec 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
360 .init = psci_dt_cpu_init_idle, 360 .init = psci_dt_cpu_init_idle,
361}; 361};
362 362
363CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); 363CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
364#endif 364#endif
365#endif 365#endif
366 366
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index fedbff55a7f3..815c4a5cae54 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,12 +77,28 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
77static inline void fw_cfg_read_blob(u16 key, 77static inline void fw_cfg_read_blob(u16 key,
78 void *buf, loff_t pos, size_t count) 78 void *buf, loff_t pos, size_t count)
79{ 79{
80 u32 glk;
81 acpi_status status;
82
83 /* If we have ACPI, ensure mutual exclusion against any potential
84 * device access by the firmware, e.g. via AML methods:
85 */
86 status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
87 if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
88 /* Should never get here */
89 WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
90 memset(buf, 0, count);
91 return;
92 }
93
80 mutex_lock(&fw_cfg_dev_lock); 94 mutex_lock(&fw_cfg_dev_lock);
81 iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl); 95 iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
82 while (pos-- > 0) 96 while (pos-- > 0)
83 ioread8(fw_cfg_reg_data); 97 ioread8(fw_cfg_reg_data);
84 ioread8_rep(fw_cfg_reg_data, buf, count); 98 ioread8_rep(fw_cfg_reg_data, buf, count);
85 mutex_unlock(&fw_cfg_dev_lock); 99 mutex_unlock(&fw_cfg_dev_lock);
100
101 acpi_release_global_lock(glk);
86} 102}
87 103
88/* clean up fw_cfg device i/o */ 104/* clean up fw_cfg device i/o */
@@ -727,12 +743,18 @@ device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
727 743
728static int __init fw_cfg_sysfs_init(void) 744static int __init fw_cfg_sysfs_init(void)
729{ 745{
746 int ret;
747
730 /* create /sys/firmware/qemu_fw_cfg/ top level directory */ 748 /* create /sys/firmware/qemu_fw_cfg/ top level directory */
731 fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj); 749 fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
732 if (!fw_cfg_top_ko) 750 if (!fw_cfg_top_ko)
733 return -ENOMEM; 751 return -ENOMEM;
734 752
735 return platform_driver_register(&fw_cfg_sysfs_driver); 753 ret = platform_driver_register(&fw_cfg_sysfs_driver);
754 if (ret)
755 fw_cfg_kobj_cleanup(fw_cfg_top_ko);
756
757 return ret;
736} 758}
737 759
738static void __exit fw_cfg_sysfs_exit(void) 760static void __exit fw_cfg_sysfs_exit(void)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d0d3065a7557..e66084c295fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/platform_data/pca953x.h> 19#include <linux/platform_data/pca953x.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <asm/unaligned.h>
21#include <linux/of_platform.h> 22#include <linux/of_platform.h>
22#include <linux/acpi.h> 23#include <linux/acpi.h>
23 24
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
159 switch (chip->chip_type) { 160 switch (chip->chip_type) {
160 case PCA953X_TYPE: 161 case PCA953X_TYPE:
161 ret = i2c_smbus_write_word_data(chip->client, 162 ret = i2c_smbus_write_word_data(chip->client,
162 reg << 1, (u16) *val); 163 reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
163 break; 164 break;
164 case PCA957X_TYPE: 165 case PCA957X_TYPE:
165 ret = i2c_smbus_write_byte_data(chip->client, reg << 1, 166 ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
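The old pca953x code passed (u16)*val, which dereferences a u8 pointer and so keeps only the first register byte; the fix reads both bytes unaligned and converts to little-endian. A portable sketch of the equivalent byte composition (analogy only; cpu_to_le16() and get_unaligned() are kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* compose a 16-bit word whose low byte is val[0], independent of host
 * endianness and alignment */
static uint16_t le16_from_bytes(const uint8_t *val)
{
	return (uint16_t)val[0] | ((uint16_t)val[1] << 8);
}

int main(void)
{
	uint8_t regs[2] = { 0x34, 0x12 };

	/* old code: (uint16_t)*regs -> 0x0034, second byte lost */
	printf("word = 0x%04x\n", le16_from_bytes(regs));  /* 0x1234 */
	return 0;
}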
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78664b8..76ac906b4d78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
283 writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET)); 283 writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
284 284
285 ret = pinctrl_gpio_direction_output(chip->base + offset); 285 ret = pinctrl_gpio_direction_output(chip->base + offset);
286 if (!ret) 286 if (ret)
287 return 0; 287 return ret;
288 288
289 spin_lock_irqsave(&gpio_lock, flags); 289 spin_lock_irqsave(&gpio_lock, flags);
290 290
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 72065532c1c7..b747c76fd2b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -68,6 +68,7 @@ LIST_HEAD(gpio_devices);
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 
+static bool gpiolib_initialized;
 
 static inline void desc_set_label(struct gpio_desc *d, const char *label)
 {
@@ -440,9 +441,63 @@ static void gpiodevice_release(struct device *dev)
 	cdev_del(&gdev->chrdev);
 	list_del(&gdev->list);
 	ida_simple_remove(&gpio_ida, gdev->id);
+	kfree(gdev->label);
+	kfree(gdev->descs);
 	kfree(gdev);
 }
 
+static int gpiochip_setup_dev(struct gpio_device *gdev)
+{
+	int status;
+
+	cdev_init(&gdev->chrdev, &gpio_fileops);
+	gdev->chrdev.owner = THIS_MODULE;
+	gdev->chrdev.kobj.parent = &gdev->dev.kobj;
+	gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
+	status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
+	if (status < 0)
+		chip_warn(gdev->chip, "failed to add char device %d:%d\n",
+			  MAJOR(gpio_devt), gdev->id);
+	else
+		chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+			 MAJOR(gpio_devt), gdev->id);
+	status = device_add(&gdev->dev);
+	if (status)
+		goto err_remove_chardev;
+
+	status = gpiochip_sysfs_register(gdev);
+	if (status)
+		goto err_remove_device;
+
+	/* From this point, the .release() function cleans up gpio_device */
+	gdev->dev.release = gpiodevice_release;
+	get_device(&gdev->dev);
+	pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
+		 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
+		 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+
+	return 0;
+
+err_remove_device:
+	device_del(&gdev->dev);
+err_remove_chardev:
+	cdev_del(&gdev->chrdev);
+	return status;
+}
+
+static void gpiochip_setup_devs(void)
+{
+	struct gpio_device *gdev;
+	int err;
+
+	list_for_each_entry(gdev, &gpio_devices, list) {
+		err = gpiochip_setup_dev(gdev);
+		if (err)
+			pr_err("%s: Failed to initialize gpio device (%d)\n",
+			       dev_name(&gdev->dev), err);
+	}
+}
+
 /**
  * gpiochip_add_data() - register a gpio_chip
  * @chip: the chip to register, with chip->base initialized
@@ -457,6 +512,9 @@ static void gpiodevice_release(struct device *dev)
  * the gpio framework's arch_initcall(). Otherwise sysfs initialization
  * for GPIOs will fail rudely.
  *
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * ie after core_initcall().
+ *
  * If chip->base is negative, this requests dynamic assignment of
  * a range of valid GPIOs.
  */
@@ -504,8 +562,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 	else
 		gdev->owner = THIS_MODULE;
 
-	gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio,
-				   sizeof(gdev->descs[0]), GFP_KERNEL);
+	gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
 	if (!gdev->descs) {
 		status = -ENOMEM;
 		goto err_free_gdev;
@@ -514,16 +571,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 	if (chip->ngpio == 0) {
 		chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
 		status = -EINVAL;
-		goto err_free_gdev;
+		goto err_free_descs;
 	}
 
 	if (chip->label)
-		gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL);
+		gdev->label = kstrdup(chip->label, GFP_KERNEL);
 	else
-		gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL);
+		gdev->label = kstrdup("unknown", GFP_KERNEL);
 	if (!gdev->label) {
 		status = -ENOMEM;
-		goto err_free_gdev;
+		goto err_free_descs;
 	}
 
 	gdev->ngpio = chip->ngpio;
@@ -543,7 +600,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 		if (base < 0) {
 			status = base;
 			spin_unlock_irqrestore(&gpio_lock, flags);
-			goto err_free_gdev;
+			goto err_free_label;
 		}
 		/*
 		 * TODO: it should not be necessary to reflect the assigned
@@ -558,7 +615,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 	status = gpiodev_add_to_list(gdev);
 	if (status) {
 		spin_unlock_irqrestore(&gpio_lock, flags);
-		goto err_free_gdev;
+		goto err_free_label;
 	}
 
 	for (i = 0; i < chip->ngpio; i++) {
@@ -596,39 +653,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 	 * we get a device node entry in sysfs under
 	 * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
 	 * coldplug of device nodes and other udev business.
+	 * We can do this only if gpiolib has been initialized.
+	 * Otherwise, defer until later.
 	 */
-	cdev_init(&gdev->chrdev, &gpio_fileops);
-	gdev->chrdev.owner = THIS_MODULE;
-	gdev->chrdev.kobj.parent = &gdev->dev.kobj;
-	gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
-	status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
-	if (status < 0)
-		chip_warn(chip, "failed to add char device %d:%d\n",
-			  MAJOR(gpio_devt), gdev->id);
-	else
-		chip_dbg(chip, "added GPIO chardev (%d:%d)\n",
-			 MAJOR(gpio_devt), gdev->id);
-	status = device_add(&gdev->dev);
-	if (status)
-		goto err_remove_chardev;
-
-	status = gpiochip_sysfs_register(gdev);
-	if (status)
-		goto err_remove_device;
-
-	/* From this point, the .release() function cleans up gpio_device */
-	gdev->dev.release = gpiodevice_release;
-	get_device(&gdev->dev);
-	pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
-		 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
-		 dev_name(&gdev->dev), chip->label ? : "generic");
-
+	if (gpiolib_initialized) {
+		status = gpiochip_setup_dev(gdev);
+		if (status)
+			goto err_remove_chip;
+	}
 	return 0;
 
-err_remove_device:
-	device_del(&gdev->dev);
-err_remove_chardev:
-	cdev_del(&gdev->chrdev);
 err_remove_chip:
 	acpi_gpiochip_remove(chip);
 	gpiochip_free_hogs(chip);
@@ -637,6 +671,10 @@ err_remove_from_list:
 	spin_lock_irqsave(&gpio_lock, flags);
 	list_del(&gdev->list);
 	spin_unlock_irqrestore(&gpio_lock, flags);
+err_free_label:
+	kfree(gdev->label);
+err_free_descs:
+	kfree(gdev->descs);
 err_free_gdev:
 	ida_simple_remove(&gpio_ida, gdev->id);
 	/* failures here can mean systems won't boot... */
@@ -2231,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 	return desc;
 }
 
-static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *acpi_find_gpio(struct device *dev,
+					const char *con_id,
 					unsigned int idx,
-					enum gpio_lookup_flags *flags)
+					enum gpiod_flags flags,
+					enum gpio_lookup_flags *lookupflags)
 {
 	struct acpi_device *adev = ACPI_COMPANION(dev);
 	struct acpi_gpio_info info;
@@ -2264,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
 		desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
 		if (IS_ERR(desc))
 			return desc;
+
+		if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+		    info.gpioint) {
+			dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+			return ERR_PTR(-ENOENT);
+		}
 	}
 
 	if (info.polarity == GPIO_ACTIVE_LOW)
-		*flags |= GPIO_ACTIVE_LOW;
+		*lookupflags |= GPIO_ACTIVE_LOW;
 
 	return desc;
 }
@@ -2530,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 			desc = of_find_gpio(dev, con_id, idx, &lookupflags);
 		} else if (ACPI_COMPANION(dev)) {
 			dev_dbg(dev, "using ACPI for GPIO lookup\n");
-			desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+			desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
 		}
 	}
 
@@ -2829,6 +2875,9 @@ static int __init gpiolib_dev_init(void)
 	if (ret < 0) {
 		pr_err("gpiolib: failed to allocate char dev region\n");
 		bus_unregister(&gpio_bus_type);
+	} else {
+		gpiolib_initialized = true;
+		gpiochip_setup_devs();
 	}
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c4a21c6428f5..1bcbade479dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,8 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		fw_version;
+	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -2033,6 +2035,7 @@ struct amdgpu_device {
 
 	/* tracking pinned memory */
 	u64 vram_pin_size;
+	u64 invisible_pin_size;
 	u64 gart_pin_size;
 
 	/* amdkfd interface */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index d6b0bff510aa..b7b583c42ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
 	struct acp_pm_domain *apd;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	/* SMU block will power on ACP irrespective of ACP runtime status.
 	 * Power off explicitly based on genpd ACP runtime status so that ACP
 	 * hw and ACP-genpd status are in sync.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
 	return amdgpu_atpx_priv.atpx_detected;
 }
 
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
-	return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	if (atpx->functions.power_cntl == false) {
+		printk("ATPX dGPU power cntl not present, forcing\n");
+		atpx->functions.power_cntl = true;
+	}
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101e10c6..6043dc7c3a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
 	struct drm_device *ddev = adev->ddev;
 	struct drm_crtc *crtc;
 	uint32_t line_time_us, vblank_lines;
+	struct cgs_mode_info *mode_info;
 
 	if (info == NULL)
 		return -EINVAL;
 
+	mode_info = info->mode_info;
+
 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc,
 				&ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
 				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
 				info->display_count++;
 			}
-			if (info->mode_info != NULL &&
+			if (mode_info != NULL &&
 				crtc->enabled && amdgpu_crtc->enabled &&
 				amdgpu_crtc->hw_mode.clock) {
 				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
 				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
 					amdgpu_crtc->hw_mode.crtc_vdisplay +
 					(amdgpu_crtc->v_border * 2);
-				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
-				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
-				info->mode_info++;
+				mode_info->vblank_time_us = vblank_lines * line_time_us;
+				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+				mode_info->ref_clock = adev->clock.spll.reference_freq;
+				mode_info = NULL;
 			}
 		}
 	}
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
 	return 0;
 }
 
+
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+	CGS_FUNC_ADEV;
+
+	adev->pm.dpm_enabled = enabled;
+
+	return 0;
+}
+
 /** \brief evaluate acpi namespace object, handle or pathname must be valid
  * \param cgs_device
  * \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_set_powergating_state,
 	amdgpu_cgs_set_clockgating_state,
 	amdgpu_cgs_get_active_displays_info,
+	amdgpu_cgs_notify_dpm_enabled,
 	amdgpu_cgs_call_acpi_method,
 	amdgpu_cgs_query_system_info,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	if (amdgpu_runtime_pm == 1)
 		runtime = true;
-	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
 	if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f0ed974bd4e0..3fb405b3a614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
 	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
 		return true;
 
-	fence_put(*f);
+	fence_put(fence);
 	return false;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4303b447efe8..d81f1f4883a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence **ptr;
+	struct fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	BUG_ON(rcu_dereference_protected(*ptr, 1));
+	old = rcu_dereference_protected(*ptr, 1);
+	if (old && !fence_is_signaled(old)) {
+		DRM_INFO("rcu slot is busy\n");
+		fence_wait(old, false);
+	}
 
 	rcu_assign_pointer(*ptr, fence_get(&fence->base));
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfaa97e5..762cfdb85147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	if (r) {
 		return r;
 	}
+	adev->ddev->vblank_disable_allowed = true;
+
 	/* enable msi */
 	adev->irq.msi_enabled = false;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7805a8706af7..b04337de65d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		fw_info.feature = adev->vce.fb_version;
 		break;
 	case AMDGPU_INFO_FW_UVD:
-		fw_info.ver = 0;
+		fw_info.ver = adev->uvd.fw_version;
 		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_GMC:
@@ -382,8 +382,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		struct drm_amdgpu_info_vram_gtt vram_gtt;
 
 		vram_gtt.vram_size = adev->mc.real_vram_size;
+		vram_gtt.vram_size -= adev->vram_pin_size;
 		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
-		vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
 		vram_gtt.gtt_size = adev->mc.gtt_size;
 		vram_gtt.gtt_size -= adev->gart_pin_size;
 		return copy_to_user(out, &vram_gtt,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8d432e6901af..81bd964d3dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
 	RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
-	struct amdgpu_crtc *crtcs[6];
-	struct amdgpu_afmt *afmt[7];
+	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
 	/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 56d1458393cc..e557fc1f17c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = amdgpu_bo_gpu_offset(bo);
-		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-		else
+			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+		} else
 			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
 	} else {
 		dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-		else
+			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+		} else
 			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
 	} else {
 		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -476,6 +480,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
+static const char *amdgpu_vram_names[] = {
+	"UNKNOWN",
+	"GDDR1",
+	"DDR2",
+	"GDDR3",
+	"GDDR4",
+	"GDDR5",
+	"HBM",
+	"DDR3"
+};
+
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
 	/* Add an MTRR for the VRAM */
@@ -484,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
 		adev->mc.mc_vram_size >> 20,
 		(unsigned long long)adev->mc.aper_size >> 20);
-	DRM_INFO("RAM width %dbits DDR\n",
-			adev->mc.vram_width);
+	DRM_INFO("RAM width %dbits %s\n",
+			adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
 	return amdgpu_ttm_init(adev);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c413c7..e9c6ae6ed2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
 					adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
+	if (adev->pp_enabled && adev->pm.dpm_enabled) {
 		amdgpu_pm_sysfs_init(adev);
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
 	}
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
 					adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
-		if (amdgpu_dpm == 0)
-			adev->pm.dpm_enabled = false;
-		else
-			adev->pm.dpm_enabled = true;
-	}
+	if (adev->pp_enabled)
+		adev->pm.dpm_enabled = true;
 #endif
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f1a55d1888cb..11af4492b4be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -622,7 +624,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 		set_page_dirty(page);
 
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c1a581044417..871018c634e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+				(family_id << 8));
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -241,32 +244,30 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
-	int i, r;
+	unsigned size;
+	void *ptr;
+	int i;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-		if (handle != 0) {
-			struct fence *fence;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&adev->uvd.handles[i]))
+			break;
 
-			amdgpu_uvd_note_usage(adev);
+	if (i == AMDGPU_MAX_UVD_HANDLES)
+		return 0;
 
-			r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
-			}
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-			fence_wait(fence, false);
-			fence_put(fence);
+	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+	ptr = adev->uvd.cpu_addr;
 
-			adev->uvd.filp[i] = NULL;
-			atomic_set(&adev->uvd.handles[i], 0);
-		}
-	}
+	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!adev->uvd.saved_bo)
+		return -ENOMEM;
+
+	memcpy(adev->uvd.saved_bo, ptr, size);
 
 	return 0;
 }
@@ -275,23 +276,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
-	const struct common_firmware_header *hdr;
-	unsigned offset;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return -EINVAL;
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-		(adev->uvd.fw->size) - offset);
-
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
 	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	memset(ptr, 0, size);
+	if (adev->uvd.saved_bo != NULL) {
+		memcpy(ptr, adev->uvd.saved_bo, size);
+		kfree(adev->uvd.saved_bo);
+		adev->uvd.saved_bo = NULL;
+	} else {
+		const struct common_firmware_header *hdr;
+		unsigned offset;
+
+		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+		memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+			(adev->uvd.fw->size) - offset);
+		size -= le32_to_cpu(hdr->ucode_size_bytes);
+		ptr += le32_to_cpu(hdr->ucode_size_bytes);
+		memset(ptr, 0, size);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 4bec0c108cea..481a64fa9b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_VCE_HANDLES)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->vce.idle_work);
 	/* TODO: suspending running encoding sessions isn't supported */
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 82ce7d943884..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
 	gmc_v7_0_set_gart_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-	} else {
-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
-		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
-	}
-
 	return 0;
 }
 
@@ -918,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
 static int gmc_v7_0_sw_init(void *handle)
@@ -927,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->flags & AMD_IS_APU) {
+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+	} else {
+		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+		tmp &= MC_SEQ_MISC0__MT__MASK;
+		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+	}
+
 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 29bd7b57dc91..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
 	gmc_v8_0_set_gart_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-	} else {
-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
-		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
-	}
-
 	return 0;
 }
 
@@ -878,15 +870,33 @@ static int gmc_v8_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
 static int gmc_v8_0_sw_init(void *handle)
 {
 	int r;
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->flags & AMD_IS_APU) {
+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+	} else {
+		u32 tmp;
+
+		if (adev->asic_type == CHIP_FIJI)
+			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+		else
+			tmp = RREG32(mmMC_SEQ_MISC0);
+		tmp &= MC_SEQ_MISC0__MT__MASK;
+		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+	}
+
 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
 
 	amdgpu_irq_fini(adev);
 	amdgpu_ih_ring_fini(adev);
-	amdgpu_irq_add_domain(adev);
+	amdgpu_irq_remove_domain(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c606ccb38d8b..cb463753115b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v4_2_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = uvd_v4_2_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3c852d9d79a..16476d80f475 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v5_0_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = uvd_v5_0_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3375e614ac67..d49379145ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	r = uvd_v6_0_hw_fini(adev);
+	if (r)
+		return r;
+
 	/* Skip this for APU for now */
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_uvd_suspend(adev);
 		if (r)
 			return r;
 	}
-	r = uvd_v6_0_hw_fini(adev);
-	if (r)
-		return r;
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc3834f..ab84d4947247 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
 	void *cgs_device,
 	struct cgs_display_info *info);
 
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
 typedef int (*cgs_call_acpi_method)(void *cgs_device,
 					uint32_t acpi_method,
 					uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
 	cgs_set_clockgating_state set_clockgating_state;
 	/* display manager */
 	cgs_get_active_displays_info get_active_displays_info;
+	/* notify dpm enabled */
+	cgs_notify_dpm_enabled notify_dpm_enabled;
 	/* ACPI */
 	cgs_call_acpi_method call_acpi_method;
 	/* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
 	CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)	\
 	CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled)	\
+	CGS_CALL(notify_dpm_enabled, dev, enabled)
+
 #define cgs_get_active_displays_info(dev, info)	\
 	CGS_CALL(get_active_displays_info, dev, info)
+
 #define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)	\
 	CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
 #define cgs_query_system_info(dev, sys_info)	\
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78cb404..56856a2864d1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
 	reset_display_configCounter_tasks,
 	update_dal_configuration_tasks,
 	vari_bright_resume_tasks,
-	block_adjust_power_state_tasks,
 	setup_asic_tasks,
 	enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
 	enable_dynamic_state_management_tasks,
 	enable_clock_power_gatings_tasks,
 	enable_disable_bapm_tasks,
 	initialize_thermal_controller_tasks,
-	reset_boot_state_tasks,
+	get_2d_performance_state_tasks,
+	set_performance_state_tasks,
 	adjust_power_state_tasks,
 	enable_disable_fps_tasks,
 	notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 51dedf84623c..89f31bc5b68b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
 
 	for(count = 0; count < table->VceLevelCount; count++) {
 		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage = 0;
 		table->VceLevel[count].MinVoltage |=
 				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
 		table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
 
 	for (count = 0; count < table->SamuLevelCount; count++) {
 		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].MinVoltage = 0;
 		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
 		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
 				VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
 	table->UvdBootLevel = 0;
 
 	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].MinVoltage = 0;
 		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
 		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
 		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
 	if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
 		fiji_populate_smc_voltage_tables(hwmgr, table);
 
+	table->SystemFlags = 0;
+
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_AutomaticDCTransition))
 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
 	table->MemoryThermThrottleEnable = 1;
 	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
 	table->PCIeGenInterval = 1;
+	table->VRConfig = 0;
 
 	result = fiji_populate_vr_config(hwmgr, table);
 	PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
 	return size;
 }
 
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+						const struct fiji_performance_level *pl2)
+{
+	return ((pl1->memory_clock == pl2->memory_clock) &&
+		(pl1->engine_clock == pl2->engine_clock) &&
+		(pl1->pcie_gen == pl2->pcie_gen) &&
+		(pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+	const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+	const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+	int i;
+
+	if (equal == NULL || psa == NULL || psb == NULL)
+		return -EINVAL;
+
+	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
+	if (psa->performance_level_count != psb->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < psa->performance_level_count; i++) {
+		if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+			/* If we have found even one performance level pair that is different the states are different. */
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+	*equal &= (psa->acp_clk == psb->acp_clk);
+
+	return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+	bool is_update_required = false;
+	struct cgs_display_info info = {0,0,NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+		if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+			is_update_required = true;
+*/
+	return is_update_required;
+}
+
+
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 	.backend_init = &fiji_hwmgr_backend_init,
 	.backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 	.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
 	.set_fan_control_mode = fiji_set_fan_control_mode,
 	.get_fan_control_mode = fiji_get_fan_control_mode,
+	.check_states_equal = fiji_check_states_equal,
+	.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
 	.get_pp_table = fiji_get_pp_table,
 	.set_pp_table = fiji_set_pp_table,
 	.force_clock_level = fiji_force_clock_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index be31bed2538a..fa208ada6892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
 
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
 
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
 	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
 		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
 		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
 
 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
+	int ret = 1;
+	bool enabled;
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_TablelessHardwareInterface)) {
 		if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
-			return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+			ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
 	} else {
-		return phm_dispatch_table(hwmgr,
+		ret = phm_dispatch_table(hwmgr,
 				&(hwmgr->enable_dynamic_state_management),
 				NULL, NULL);
 	}
-	return 0;
+
+	enabled = ret == 0 ? true : false;
+
+	cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+	return ret;
 }
 
 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 56b829f97699..3ac1ae4d8caf 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
 		DRM_ERROR("failed to map control registers area\n");
 		ret = PTR_ERR(hdlcd->mmio);
 		hdlcd->mmio = NULL;
-		goto fail;
+		return ret;
 	}
 
 	version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
 	if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
 		DRM_ERROR("unknown product id: 0x%x\n", version);
-		ret = -EINVAL;
-		goto fail;
+		return -EINVAL;
 	}
 	DRM_INFO("found ARM HDLCD version r%dp%d\n",
 		(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
 	/* Get the optional framebuffer memory resource */
 	ret = of_reserved_mem_device_init(drm->dev);
 	if (ret && ret != -ENODEV)
-		goto fail;
+		return ret;
 
 	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
 	if (ret)
@@ -101,8 +100,6 @@ irq_fail:
 	drm_crtc_cleanup(&hdlcd->crtc);
 setup_fail:
 	of_reserved_mem_device_release(drm->dev);
-fail:
-	devm_clk_put(drm->dev, hdlcd->clk);
 
 	return ret;
 }
@@ -412,7 +409,6 @@ err_unload:
 	pm_runtime_put_sync(drm->dev);
 	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
-	devm_clk_put(dev, hdlcd->clk);
 err_free:
 	drm_dev_unref(drm);
 
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
 	pm_runtime_put_sync(drm->dev);
 	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
-	if (!IS_ERR(hdlcd->clk)) {
-		devm_clk_put(drm->dev, hdlcd->clk);
-		hdlcd->clk = NULL;
-	}
 	drm_mode_config_cleanup(drm);
 	drm_dev_unregister(drm);
 	drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 
 release:
 	for_each_sg(sgt->sgl, sg, num, i)
-		page_cache_release(sg_page(sg));
+		put_page(sg_page(sg));
free_table:
 	sg_free_table(sgt);
free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
 	if (dobj->obj.filp) {
 		struct scatterlist *sg;
 		for_each_sg(sgt->sgl, sg, sgt->nents, i)
-			page_cache_release(sg_page(sg));
+			put_page(sg_page(sg));
 	}
 
 	sg_free_table(sgt);
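
[Editor's note: the page_cache_release() -> put_page() conversions in this and several later hunks are mechanical renames; page_cache_release() had long been a plain alias for put_page() (roughly "#define page_cache_release(page) put_page(page)" in <linux/pagemap.h>), and this series spells the underlying call directly. A standalone toy model of why the two names are interchangeable, with stub types; nothing below is the real kernel API:]

#include <assert.h>

struct page { int refcount; };

/* Drop one reference; a real kernel would free the page at zero. */
static void put_page(struct page *p)
{
	assert(p->refcount > 0);
	p->refcount--;
}

/* Legacy spelling, kept only as an alias for the same operation. */
#define page_cache_release(page) put_page(page)

int main(void)
{
	struct page pg = { .refcount = 2 };
	page_cache_release(&pg);	/* identical to put_page(&pg) */
	put_page(&pg);
	assert(pg.refcount == 0);
	return 0;
}
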
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
 	port_num = port->port_num;
 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
 	if (!mstb) {
 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-		if (!mstb)
+		if (!mstb) {
+			drm_dp_put_port(port);
 			return -EINVAL;
+		}
 	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	kfree(txmsg);
fail_put:
 	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_put_port(port);
 	return ret;
 }
 
@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		req_payload.start_slot = cur_slots;
 		if (mgr->proposed_vcpis[i]) {
 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+			port = drm_dp_get_validated_port_ref(mgr, port);
+			if (!port) {
+				mutex_unlock(&mgr->payload_lock);
+				return -EINVAL;
+			}
 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
 		} else {
@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 			mgr->payloads[i].payload_state = req_payload.payload_state;
 		}
 		cur_slots += req_payload.num_slots;
+
+		if (port)
+			drm_dp_put_port(port);
 	}
 
 	for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 
 	if (mgr->mst_primary) {
 		int sret;
+		u8 guid[16];
+
 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
 		if (sret != DP_RECEIVER_CAP_SIZE) {
 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 			ret = -1;
 			goto out_unlock;
 		}
+
+		/* Some hubs forget their guids after they resume */
+		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+		if (sret != 16) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
 		ret = 0;
 	} else
 		ret = -1;
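
[Editor's note: the hunks above all follow one shape — validate-and-reference an MST port before dereferencing it, and drop the reference on every exit path, since the port may be unplugged at any time. A minimal generic sketch of that shape; these names are illustrative stand-ins, not the real DRM MST helpers:]

#include <stdio.h>
#include <stdlib.h>

struct port { int refs; int valid; };

/* Take a reference only if the object is still valid (still plugged). */
static struct port *get_validated_ref(struct port *p)
{
	if (!p || !p->valid)
		return NULL;
	p->refs++;
	return p;
}

static void put_ref(struct port *p) { p->refs--; }

static int send_msg(struct port *p)
{
	int ret = 0;

	p = get_validated_ref(p);
	if (!p)
		return -1;		/* -EINVAL in the kernel code */

	if (p->refs > 100) {		/* stand-in for a real failure */
		ret = -1;
		goto out_put;		/* error path still drops the ref */
	}
	printf("sent on port\n");
out_put:
	put_ref(p);			/* every exit path pairs the get */
	return ret;
}

int main(void)
{
	struct port p = { .refs = 0, .valid = 1 };
	return send_msg(&p) ? EXIT_FAILURE : EXIT_SUCCESS;
}
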
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 414d7f61aa05..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 0x0f - 1024x768@43Hz, interlace */
 	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
-		   1208, 1264, 0, 768, 768, 772, 817, 0,
+		   1208, 1264, 0, 768, 768, 776, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
 	/* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
 		   720, 840, 0, 480, 481, 484, 500, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-		   704, 832, 0, 480, 489, 491, 520, 0,
+		   704, 832, 0, 480, 489, 492, 520, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
 		   768, 864, 0, 480, 483, 486, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
-	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
 	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
 		   1136, 1312, 0, 768, 769, 772, 800, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 {
 	int i, j, m, modes = 0;
 	struct drm_display_mode *mode;
-	u8 *est = ((u8 *)timing) + 5;
+	u8 *est = ((u8 *)timing) + 6;
 
 	for (i = 0; i < 6; i++) {
 		for (j = 7; j >= 0; j--) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
fail:
 	while (i--)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 
 	drm_free_large(pages);
 	return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 			mark_page_accessed(pages[i]);
 
 		/* Undo the reference we took when populating the table */
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 
 	drm_free_large(pages);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 		goto fail;
 	}
 
+	/*
+	 * Set the GPU linear window to be at the end of the DMA window, where
+	 * the CMA area is likely to reside. This ensures that we are able to
+	 * map the command buffers while having the linear window overlap as
+	 * much RAM as possible, so we can optimize mappings for other buffers.
+	 *
+	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+	 * to different views of the memory on the individual engines.
+	 */
+	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+		if (dma_mask < PHYS_OFFSET + SZ_2G)
+			gpu->memory_base = PHYS_OFFSET;
+		else
+			gpu->memory_base = dma_mask - SZ_2G + 1;
+	}
+
 	ret = etnaviv_hw_reset(gpu);
 	if (ret)
 		goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
-	u32 dma_mask;
 	int err = 0;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
 
-	/*
-	 * Set the GPU linear window to be at the end of the DMA window, where
-	 * the CMA area is likely to reside. This ensures that we are able to
-	 * map the command buffers while having the linear window overlap as
-	 * much RAM as possible, so we can optimize mappings for other buffers.
-	 */
-	dma_mask = (u32)dma_get_required_mask(dev);
-	if (dma_mask < PHYS_OFFSET + SZ_2G)
-		gpu->memory_base = PHYS_OFFSET;
-	else
-		gpu->memory_base = dma_mask - SZ_2G + 1;
-
 	/* Map registers: */
 	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
 	if (IS_ERR(gpu->mmio))
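
[Editor's note: a worked example of the linear-window placement computed in the hunk above — the 2 GiB window is slid up so it ends at the last DMA-addressable byte, otherwise it starts at the RAM base. The PHYS_OFFSET value below is an assumed example; it varies by platform:]

#include <stdio.h>

#define SZ_2G		0x80000000u
#define PHYS_OFFSET	0x10000000u	/* assumed RAM base for this example */

static unsigned linear_window_base(unsigned dma_mask)
{
	/* If RAM base + 2G already covers the whole DMA window, start at
	 * the RAM base; otherwise end the window at the DMA mask. */
	if (dma_mask < PHYS_OFFSET + SZ_2G)
		return PHYS_OFFSET;
	return dma_mask - SZ_2G + 1;
}

int main(void)
{
	printf("32-bit mask: base = 0x%08x\n", linear_window_base(0xffffffffu));
	printf("30-bit mask: base = 0x%08x\n", linear_window_base(0x3fffffffu));
	return 0;	/* prints 0x80000000 and 0x10000000 respectively */
}
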
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
 
config DRM_EXYNOS_G2D
 	bool "G2D"
-	depends on !VIDEO_SAMSUNG_S5P_G2D
+	depends on VIDEO_SAMSUNG_S5P_G2D=n
 	select FRAME_VECTOR
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c522b2..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
 # Makefile for the drm device driver. This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
-		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
-		exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+		exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 
err:
-	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+	list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
 		if (subdrv->close)
 			subdrv->close(dev, subdrv->dev, file);
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
 	return exynos_fb->dma_addr[index];
 }
 
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
-	struct exynos_drm_private *private = dev->dev_private;
-	struct drm_fb_helper *fb_helper = private->fb_helper;
-
-	if (fb_helper)
-		drm_fb_helper_hotplug_event(fb_helper);
-	else
-		exynos_drm_fbdev_init(dev);
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c44f1d..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
 
 	drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
 }
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_fb_helper *fb_helper = private->fb_helper;
+
+	if (fb_helper)
+		drm_fb_helper_hotplug_event(fb_helper);
+	else
+		exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
 #ifndef _EXYNOS_DRM_FBDEV_H_
 #define _EXYNOS_DRM_FBDEV_H_
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
 int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
 void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484ae9f49..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
 	 * clock. On these SoCs the bootloader may enable it but any
 	 * power domain off/on will reset it to disable state.
 	 */
-	if (ctx->driver_data != &exynos5_fimd_driver_data ||
+	if (ctx->driver_data != &exynos5_fimd_driver_data &&
 	    ctx->driver_data != &exynos5420_fimd_driver_data)
 		return;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
 	} else
 		val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
 
-	regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+	ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
 	if (ret)
 		DRM_ERROR("mic: Failed to read system register\n");
 }
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
 					"samsung,disp-syscon");
 	if (IS_ERR(mic->sysreg)) {
 		DRM_ERROR("mic: Failed to get system register.\n");
+		ret = PTR_ERR(mic->sysreg);
 		goto err;
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
 
 #include <drm/drmP.h>
 
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
 }
 
 static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
 {
 	struct drm_plane_state *state = &exynos_state->base;
-	struct drm_crtc *crtc = exynos_state->base.crtc;
-	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state =
+		drm_atomic_get_existing_crtc_state(state->state, crtc);
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	int crtc_x, crtc_y;
 	unsigned int crtc_w, crtc_h;
 	unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
  * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
  * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
  */
-static int __deprecated
+static int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
	int error;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e82008b8b6..30798cbc6fc0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(dev);
-
 	intel_dp_mst_resume(dev);
 
+	intel_display_resume(dev);
+
 	/*
 	 * ... but also need to make sure that hotplug processing
 	 * doesn't cause havoc. Like in the driver load code we don't
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+						 IS_SKL_GT3(dev) || \
+						 IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3ac589e..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 		kunmap_atomic(src);
 
-		page_cache_release(page);
+		put_page(page);
 		vaddr += PAGE_SIZE;
 	}
 
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 		set_page_dirty(page);
 		if (obj->madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 		vaddr += PAGE_SIZE;
 	}
 	obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		if (obj->madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
-		page_cache_release(page);
+		put_page(page);
 	}
 	obj->dirty = 0;
 
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
 	sg_mark_end(sg);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		page_cache_release(sg_page_iter_page(&sg_iter));
+		put_page(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
 	kfree(st);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6be40f3ba2c7..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
-		down_read(&mm->mmap_sem);
-		while (pinned < npages) {
-			ret = get_user_pages_remote(work->task, mm,
-					obj->userptr.ptr + pinned * PAGE_SIZE,
-					npages - pinned,
-					!obj->userptr.read_only, 0,
-					pvec + pinned, NULL);
-			if (ret < 0)
-				break;
-
-			pinned += ret;
+		ret = -EFAULT;
+		if (atomic_inc_not_zero(&mm->mm_users)) {
+			down_read(&mm->mmap_sem);
+			while (pinned < npages) {
+				ret = get_user_pages_remote
+					(work->task, mm,
+					 obj->userptr.ptr + pinned * PAGE_SIZE,
+					 npages - pinned,
+					 !obj->userptr.read_only, 0,
+					 pvec + pinned, NULL);
+				if (ret < 0)
+					break;
+
+				pinned += ret;
+			}
+			up_read(&mm->mmap_sem);
+			mmput(mm);
 		}
-		up_read(&mm->mmap_sem);
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -683,7 +688,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	obj->dirty = 0;
 
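
[Editor's note: the first hunk above guards the page walk with atomic_inc_not_zero(&mm->mm_users)/mmput(), so the worker only touches an address space it managed to take a live reference on, and fails with -EFAULT otherwise. A minimal userspace sketch of that "use it only if you can still take a reference" guard, using plain C11 atomics rather than the kernel API:]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm { atomic_int mm_users; };

/* Take a reference only while the count is still nonzero. */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);
	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	}
	return false;
}

static void mm_put(struct mm *mm) { atomic_fetch_sub(&mm->mm_users, 1); }

static int pin_pages(struct mm *mm)
{
	if (!inc_not_zero(&mm->mm_users))
		return -1;	/* mm already dead: bail out, -EFAULT above */
	/* ... safe to walk the address space here ... */
	mm_put(mm);
	return 0;
}

int main(void)
{
	struct mm alive = { .mm_users = 1 }, dead = { .mm_users = 0 };
	printf("alive: %d, dead: %d\n", pin_pages(&alive), pin_pages(&dead));
	return 0;	/* prints "alive: 0, dead: -1" */
}
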
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5ab3f..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	for (;;) {
+	do {
 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
 		iir = I915_READ(VLV_IIR);
 
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
 		POSTING_READ(GEN8_MASTER_IRQ);
-	}
+	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698fe2f7..937e77228466 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
 
+	intel_connector->unregister(intel_connector);
+
 	/* need to nuke the connector */
 	drm_modeset_lock_all(dev);
 	if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 
 		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
 	}
-	drm_modeset_unlock_all(dev);
 
-	intel_connector->unregister(intel_connector);
-
-	drm_modeset_lock_all(dev);
 	intel_connector_remove_from_fbdev(intel_connector);
 	drm_connector_cleanup(connector);
 	drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
-		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
+	/* We're using qword write, seqno should be aligned to 8 bytes. */
+	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
 	/* w/a for post sync ops following a GPGPU operation we
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf,
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	/* We're thrashing one dword of HWS. */
+	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403a8f4f..cd9fe609aefb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 * and as part of the cleanup in the hw state restore we also redisable
 	 * the vga plane.
 	 */
-	if (!HAS_PCH_SPLIT(dev)) {
-		drm_modeset_lock_all(dev);
+	if (!HAS_PCH_SPLIT(dev))
 		intel_display_resume(dev);
-		drm_modeset_unlock_all(dev);
-	}
 
 	dev_priv->modeset_restore = MODESET_DONE;
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 			     const struct drm_plane_state *pstate,
 			     int y)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	struct drm_framebuffer *fb = pstate->fb;
+	uint32_t width = 0, height = 0;
+
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(pstate->rotation))
+		swap(width, height);
 
 	/* for planar format */
 	if (fb->pixel_format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
-			return intel_crtc->config->pipe_src_w *
-				intel_crtc->config->pipe_src_h *
+			return width * height *
 				drm_format_plane_cpp(fb->pixel_format, 0);
 		else    /* uv-plane data rate */
-			return (intel_crtc->config->pipe_src_w/2) *
-				(intel_crtc->config->pipe_src_h/2) *
+			return (width / 2) * (height / 2) *
 				drm_format_plane_cpp(fb->pixel_format, 1);
 	}
 
 	/* for packed formats */
-	return intel_crtc->config->pipe_src_w *
-		intel_crtc->config->pipe_src_h *
-		drm_format_plane_cpp(fb->pixel_format, 0);
+	return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		struct drm_framebuffer *fb = plane->state->fb;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (fb == NULL)
+		if (!to_intel_plane_state(plane->state)->visible)
 			continue;
+
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
@@ -3000,7 +3004,7 @@
 		uint16_t plane_blocks, y_plane_blocks = 0;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (pstate->fb == NULL)
+		if (!to_intel_plane_state(pstate)->visible)
 			continue;
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
 	struct drm_plane *plane = &intel_plane->base;
 	struct drm_framebuffer *fb = plane->state->fb;
+	struct intel_plane_state *intel_pstate =
+					to_intel_plane_state(plane->state);
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint32_t method1, method2;
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t res_blocks, res_lines;
 	uint32_t selected_result;
 	uint8_t cpp;
+	uint32_t width = 0, height = 0;
 
-	if (latency == 0 || !cstate->base.active || !fb)
+	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
 		return false;
 
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(plane->state->rotation))
+		swap(width, height);
+
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
 				 cpp, latency);
 	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 cstate->pipe_src_w,
-				 cpp, fb->modifier[0],
+				 width,
+				 cpp,
+				 fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = cstate->pipe_src_w * cpp;
+	plane_bytes_per_line = width * cpp;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
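
[Editor's note: the watermark hunks above switch from the pipe source size to the plane's clipped source rectangle, which drm stores in 16.16 fixed point and whose width/height swap under 90/270-degree rotation. A small illustrative sketch of just that arithmetic, with stand-in types rather than the drm structs:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rect { int32_t x1, y1, x2, y2; };	/* 16.16 fixed point */

static void plane_dims(const struct rect *src, bool rot90,
		       uint32_t *w, uint32_t *h)
{
	*w = (uint32_t)(src->x2 - src->x1) >> 16;
	*h = (uint32_t)(src->y2 - src->y1) >> 16;
	if (rot90) {			/* scanout walks the other axis */
		uint32_t t = *w;
		*w = *h;
		*h = t;
	}
}

int main(void)
{
	/* a 1920x1080 source rectangle, expressed in 16.16 */
	struct rect src = { 0, 0, 1920 << 16, 1080 << 16 };
	uint32_t w, h, cpp = 4;

	plane_dims(&src, true, &w, &h);
	printf("rotated: %ux%u, data rate %u bytes/frame\n",
	       (unsigned)w, (unsigned)h, (unsigned)(w * h * cpp));
	return 0;
}
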
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 			return -ENOMEM;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+					    flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
-		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
 	} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 2a95d10e9d92..a24631fdf4ad 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
 	if (!iores)
 		return -ENXIO;
 
-	platform_set_drvdata(pdev, hdmi);
-
 	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
 	/*
 	 * If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
 	drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
 			 DRM_MODE_ENCODER_TMDS, NULL);
 
-	return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+	ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+	/*
+	 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+	 * which would have called the encoder cleanup. Do it manually.
+	 */
+	if (ret)
+		drm_encoder_cleanup(encoder);
+
+	return ret;
 }
 
 static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9876e0f0c3e1..e26dcdec2aba 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 {
 	struct imx_drm_device *imxdrm = drm->dev_private;
 	struct imx_drm_crtc *imx_drm_crtc;
-	int ret;
 
 	/*
 	 * The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 
 	*new_crtc = imx_drm_crtc;
 
-	ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
-	if (ret)
-		goto err_register;
-
 	drm_crtc_helper_add(crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
 
 	return 0;
-
-err_register:
-	imxdrm->crtc[--imxdrm->pipes] = NULL;
-	kfree(imx_drm_crtc);
-	return ret;
 }
EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
 
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 588827844f30..681ec6eb77d9 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
 		       int x, int y)
 {
-	struct drm_gem_cma_object *cma_obj;
-	unsigned long eba;
-	int active;
+	struct drm_gem_cma_object *cma_obj[3];
+	unsigned long eba, ubo, vbo;
+	int active, i;
 
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
-		DRM_DEBUG_KMS("entry is null.\n");
-		return -EFAULT;
+	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+		cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+		if (!cma_obj[i]) {
+			DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+			return -EFAULT;
+		}
 	}
 
-	dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-		&cma_obj->paddr, x, y);
-
-	eba = cma_obj->paddr + fb->offsets[0] +
+	eba = cma_obj[0]->paddr + fb->offsets[0] +
 	      fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
 
+	if (eba & 0x7) {
+		DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+		return -EINVAL;
+	}
+
+	if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+		DRM_DEBUG_KMS("pitches out of range.\n");
+		return -EINVAL;
+	}
+
+	if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+		DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+		return -EINVAL;
+	}
+
+	ipu_plane->stride[0] = fb->pitches[0];
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		/*
+		 * Multiplanar formats have to meet the following restrictions:
+		 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+		 * - EBA, UBO and VBO are a multiple of 8
+		 * - UBO and VBO are unsigned and not larger than 0xfffff8
+		 * - Only EBA may be changed while scanout is active
+		 * - The strides of U and V planes must be identical.
+		 */
+		ubo = cma_obj[1]->paddr + fb->offsets[1] +
+		      fb->pitches[1] * y / 2 + x / 2 - eba;
+		vbo = cma_obj[2]->paddr + fb->offsets[2] +
+		      fb->pitches[2] * y / 2 + x / 2 - eba;
+
+		if ((ubo & 0x7) || (vbo & 0x7)) {
+			DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+			return -EINVAL;
+		}
+
+		if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+			DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+			return -EINVAL;
+		}
+
+		if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+					   (ipu_plane->v_offset != vbo))) {
+			DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+			return -EINVAL;
+		}
+
+		if (fb->pitches[1] != fb->pitches[2]) {
+			DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+			return -EINVAL;
+		}
+
+		if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+			DRM_DEBUG_KMS("U/V pitches out of range.\n");
+			return -EINVAL;
+		}
+
+		if (ipu_plane->enabled &&
+		    (ipu_plane->stride[1] != fb->pitches[1])) {
+			DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+			return -EINVAL;
+		}
+
+		ipu_plane->u_offset = ubo;
+		ipu_plane->v_offset = vbo;
+		ipu_plane->stride[1] = fb->pitches[1];
+
+		dev_dbg(ipu_plane->base.dev->dev,
+			"phys = %pad %pad %pad, x = %d, y = %d",
+			&cma_obj[0]->paddr, &cma_obj[1]->paddr,
+			&cma_obj[2]->paddr, x, y);
+		break;
+	default:
+		dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+			&cma_obj[0]->paddr, x, y);
+		break;
+	}
+
 	if (ipu_plane->enabled) {
 		active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
 		ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
 		}
 	}
 
-	ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
-	if (ret) {
-		dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
-		return ret;
-	}
-
 	ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
 			calc_bandwidth(crtc_w, crtc_h,
 				       calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
 		return ret;
 	}
 
+	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
 	ipu_cpmem_zero(ipu_plane->ipu_ch);
 	ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
 	ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
 	if (interlaced)
 		ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
 
+	if (fb->pixel_format == DRM_FORMAT_YUV420) {
+		ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+					      ipu_plane->stride[1],
+					      ipu_plane->u_offset,
+					      ipu_plane->v_offset);
+	} else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+		ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+					      ipu_plane->stride[1],
+					      ipu_plane->v_offset,
+					      ipu_plane->u_offset);
+	}
+
 	ipu_plane->w = src_w;
 	ipu_plane->h = src_h;
 
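
[Editor's note: a worked example of the YUV420 U/V offset arithmetic introduced above, with the same alignment and range checks. The addresses and pitches are made up; for YUV420 the Y plane is one byte per pixel and the chroma coordinates are halved:]

#include <stdio.h>

int main(void)
{
	unsigned paddr[3]   = { 0x20000000, 0x20200000, 0x20280000 };
	unsigned offsets[3] = { 0, 0, 0 };
	unsigned pitches[3] = { 1920, 960, 960 };	/* U/V pitch = luma/2 */
	int x = 16, y = 8;

	/* EBA points at pixel (x,y) in the Y plane (1 byte/pixel)... */
	unsigned eba = paddr[0] + offsets[0] + pitches[0] * y + x;
	/* ...and UBO/VBO are the chroma planes' offsets relative to it. */
	unsigned ubo = paddr[1] + offsets[1] + pitches[1] * y / 2 + x / 2 - eba;
	unsigned vbo = paddr[2] + offsets[2] + pitches[2] * y / 2 + x / 2 - eba;

	printf("eba=0x%08x ubo=0x%06x vbo=0x%06x\n", eba, ubo, vbo);
	printf("multiple of 8: %s, within 0xfffff8: %s\n",
	       (!(ubo & 7) && !(vbo & 7)) ? "yes" : "no",
	       (ubo <= 0xfffff8 && vbo <= 0xfffff8) ? "yes" : "no");
	return 0;
}
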
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b413c60..4448fd4ad4eb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@ struct ipu_plane {
 	int			w;
 	int			h;
 
+	unsigned int		u_offset;
+	unsigned int		v_offset;
+	unsigned int		stride[2];
+
 	bool			enabled;
 };
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 16641cec18a2..b5370cb56e3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
 
 	struct reset_control *rst;
 	struct clk *clk;
+	struct clk *clk_ref;
 	struct clk *clk_pwr;
 
 	struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
 	 * bypassed). A value of 0 means an IOMMU is never used.
 	 */
 	u8 iommu_bit;
+	/*
+	 * Whether the chip requires a reference clock
+	 */
+	bool require_ref_clk;
 };
 
 int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ae96ebc490fb..e81aefe5ffa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	default:
 		if (disp->dithering_mode) {
+			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_mode,
 						   nv_connector->
 						   dithering_mode);
-			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 		}
 		if (disp->dithering_depth) {
+			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_depth,
 						   nv_connector->
 						   dithering_depth);
-			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 		}
 		break;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 2dfe58af12e4..4c4cc2260257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
 	.iommu_bit = 34,
 };
 
+static const struct nvkm_device_tegra_func gm20b_platform_data = {
+	.iommu_bit = 34,
+	.require_ref_clk = true,
+};
+
 static const struct of_device_id nouveau_platform_match[] = {
 	{
 		.compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
 	},
 	{
 		.compatible = "nvidia,gm20b",
-		.data = &gk20a_platform_data,
+		.data = &gm20b_platform_data,
 	},
 	{ }
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9afa5f3e3c1c..ec12efb4689a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
35 ret = clk_prepare_enable(tdev->clk); 35 ret = clk_prepare_enable(tdev->clk);
36 if (ret) 36 if (ret)
37 goto err_clk; 37 goto err_clk;
38 if (tdev->clk_ref) {
39 ret = clk_prepare_enable(tdev->clk_ref);
40 if (ret)
41 goto err_clk_ref;
42 }
38 ret = clk_prepare_enable(tdev->clk_pwr); 43 ret = clk_prepare_enable(tdev->clk_pwr);
39 if (ret) 44 if (ret)
40 goto err_clk_pwr; 45 goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
57err_clamp: 62err_clamp:
58 clk_disable_unprepare(tdev->clk_pwr); 63 clk_disable_unprepare(tdev->clk_pwr);
59err_clk_pwr: 64err_clk_pwr:
65 if (tdev->clk_ref)
66 clk_disable_unprepare(tdev->clk_ref);
67err_clk_ref:
60 clk_disable_unprepare(tdev->clk); 68 clk_disable_unprepare(tdev->clk);
61err_clk: 69err_clk:
62 regulator_disable(tdev->vdd); 70 regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
71 udelay(10); 79 udelay(10);
72 80
73 clk_disable_unprepare(tdev->clk_pwr); 81 clk_disable_unprepare(tdev->clk_pwr);
82 if (tdev->clk_ref)
83 clk_disable_unprepare(tdev->clk_ref);
74 clk_disable_unprepare(tdev->clk); 84 clk_disable_unprepare(tdev->clk);
75 udelay(10); 85 udelay(10);
76 86
@@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
274 goto free; 284 goto free;
275 } 285 }
276 286
287 if (func->require_ref_clk)
288 tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
289 if (IS_ERR(tdev->clk_ref)) {
290 ret = PTR_ERR(tdev->clk_ref);
291 goto free;
292 }
293
277 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 294 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
278 if (IS_ERR(tdev->clk_pwr)) { 295 if (IS_ERR(tdev->clk_pwr)) {
279 ret = PTR_ERR(tdev->clk_pwr); 296 ret = PTR_ERR(tdev->clk_pwr);
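
GM20B needs a reference clock that GK20A does not, so clk_ref is only looked up when require_ref_clk is set and stays NULL otherwise, which is what makes the if (tdev->clk_ref) guards in the power paths safe (and why the IS_ERR() check after the conditional devm_clk_get() is harmless for chips without the clock). The error labels unwind strictly in reverse acquisition order. A self-contained sketch of the same enable/unwind shape, with toy stand-ins for clk_prepare_enable()/clk_disable_unprepare():

#include <stdio.h>

/* Toy stand-ins: a NULL "clock" is simply skipped, mirroring the
 * optional clk_ref. */
static int enable(const char *clk) { if (clk) printf("enable %s\n", clk); return 0; }
static void disable(const char *clk) { if (clk) printf("disable %s\n", clk); }

static int power_up(const char *clk, const char *clk_ref, const char *clk_pwr)
{
        int ret;

        if ((ret = enable(clk)))
                goto err_clk;
        if ((ret = enable(clk_ref)))        /* optional: NULL is a no-op */
                goto err_clk_ref;
        if ((ret = enable(clk_pwr)))
                goto err_clk_pwr;
        return 0;

err_clk_pwr:
        disable(clk_ref);                   /* unwind in reverse order */
err_clk_ref:
        disable(clk);
err_clk:
        return ret;
}

int main(void)
{
        return power_up("clk", NULL, "clk_pwr");  /* chip without ref clock */
}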
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c56a886229f1..b2de290da16f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
1832 1832
1833 gf100_gr_mmio(gr, gr->func->mmio); 1833 gf100_gr_mmio(gr, gr->func->mmio);
1834 1834
1835 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
1836
1835 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1837 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1836 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1838 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1837 do { 1839 do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f503d1c5..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
375 375
376 qxl_bo_kunmap(user_bo); 376 qxl_bo_kunmap(user_bo);
377 377
378 qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
379 qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
380 qcrtc->hot_spot_x = hot_x;
381 qcrtc->hot_spot_y = hot_y;
382
378 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 383 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
379 cmd->type = QXL_CURSOR_SET; 384 cmd->type = QXL_CURSOR_SET;
380 cmd->u.set.position.x = qcrtc->cur_x; 385 cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
381 cmd->u.set.position.y = qcrtc->cur_y; 386 cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
382 387
383 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); 388 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
384 389
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
441 446
442 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 447 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
443 cmd->type = QXL_CURSOR_MOVE; 448 cmd->type = QXL_CURSOR_MOVE;
444 cmd->u.position.x = qcrtc->cur_x; 449 cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
445 cmd->u.position.y = qcrtc->cur_y; 450 cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
446 qxl_release_unmap(qdev, release, &cmd->release_info); 451 qxl_release_unmap(qdev, release, &cmd->release_info);
447 452
448 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 453 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
135 int index; 135 int index;
136 int cur_x; 136 int cur_x;
137 int cur_y; 137 int cur_y;
138 int hot_spot_x;
139 int hot_spot_y;
138}; 140};
139 141
140struct qxl_output { 142struct qxl_output {
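
QXL wants cursor commands positioned at the hotspot (the visible tip), while the DRM cursor calls track the buffer origin; the new hot_spot_x/y fields let both SET and MOVE report cur + hot_spot, and the cur += old_hot - new_hot adjustment on a shape change keeps the tip stationary when the hotspot moves within the cursor image. A small runnable model of that arithmetic:

#include <assert.h>

struct cursor { int cur_x, cur_y, hot_x, hot_y; };

/* Device-visible position = origin + hotspot (what the cursor ring sees). */
static int dev_x(const struct cursor *c) { return c->cur_x + c->hot_x; }

/* On a shape change, shift the origin by the hotspot delta so the
 * on-screen tip stays put. */
static void set_shape(struct cursor *c, int hot_x, int hot_y)
{
        c->cur_x += c->hot_x - hot_x;
        c->cur_y += c->hot_y - hot_y;
        c->hot_x = hot_x;
        c->hot_y = hot_y;
}

int main(void)
{
        struct cursor c = { .cur_x = 100, .cur_y = 50, .hot_x = 4, .hot_y = 4 };
        int before = dev_x(&c);

        set_shape(&c, 12, 12);          /* new shape, bigger hotspot */
        assert(dev_x(&c) == before);    /* tip did not move */
        return 0;
}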
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cf61e0856f4a..b80b08f71cb4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
275 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 275 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
277 atombios_blank_crtc(crtc, ATOM_DISABLE); 277 atombios_blank_crtc(crtc, ATOM_DISABLE);
278 drm_vblank_on(dev, radeon_crtc->crtc_id); 278 if (dev->num_crtcs > radeon_crtc->crtc_id)
279 drm_vblank_on(dev, radeon_crtc->crtc_id);
279 radeon_crtc_load_lut(crtc); 280 radeon_crtc_load_lut(crtc);
280 break; 281 break;
281 case DRM_MODE_DPMS_STANDBY: 282 case DRM_MODE_DPMS_STANDBY:
282 case DRM_MODE_DPMS_SUSPEND: 283 case DRM_MODE_DPMS_SUSPEND:
283 case DRM_MODE_DPMS_OFF: 284 case DRM_MODE_DPMS_OFF:
284 drm_vblank_off(dev, radeon_crtc->crtc_id); 285 if (dev->num_crtcs > radeon_crtc->crtc_id)
286 drm_vblank_off(dev, radeon_crtc->crtc_id);
285 if (radeon_crtc->enabled) 287 if (radeon_crtc->enabled)
286 atombios_blank_crtc(crtc, ATOM_ENABLE); 288 atombios_blank_crtc(crtc, ATOM_ENABLE);
287 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
2608 WREG32(VM_CONTEXT1_CNTL, 0); 2608 WREG32(VM_CONTEXT1_CNTL, 0);
2609} 2609}
2610 2610
2611static const unsigned ni_dig_offsets[] =
2612{
2613 NI_DIG0_REGISTER_OFFSET,
2614 NI_DIG1_REGISTER_OFFSET,
2615 NI_DIG2_REGISTER_OFFSET,
2616 NI_DIG3_REGISTER_OFFSET,
2617 NI_DIG4_REGISTER_OFFSET,
2618 NI_DIG5_REGISTER_OFFSET
2619};
2620
2621static const unsigned ni_tx_offsets[] =
2622{
2623 NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2624 NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2625 NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2626 NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2627 NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2628 NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2629};
2630
2631static const unsigned evergreen_dp_offsets[] =
2632{
2633 EVERGREEN_DP0_REGISTER_OFFSET,
2634 EVERGREEN_DP1_REGISTER_OFFSET,
2635 EVERGREEN_DP2_REGISTER_OFFSET,
2636 EVERGREEN_DP3_REGISTER_OFFSET,
2637 EVERGREEN_DP4_REGISTER_OFFSET,
2638 EVERGREEN_DP5_REGISTER_OFFSET
2639};
2640
2641
2642/*
2643 * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
2644 * We go from crtc to connector, which is not reliable since the mapping
2645 * should really be walked in the opposite direction. If the crtc is enabled,
2646 * find the dig_fe which selects this crtc and ensure that it is enabled.
2647 * If such a dig_fe is found, find the dig_be which selects the found dig_fe
2648 * and ensure that it is enabled and in DP_SST mode.
2649 * If UNIPHY_PLL_CONTROL1.enable is set, we should disconnect the timing
2650 * from the dp symbol clocks.
2651 */
2652static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2653 unsigned crtc_id, unsigned *ret_dig_fe)
2654{
2655 unsigned i;
2656 unsigned dig_fe;
2657 unsigned dig_be;
2658 unsigned dig_en_be;
2659 unsigned uniphy_pll;
2660 unsigned digs_fe_selected;
2661 unsigned dig_be_mode;
2662 unsigned dig_fe_mask;
2663 bool is_enabled = false;
2664 bool found_crtc = false;
2665
2666 /* loop through all running dig_fe to find selected crtc */
2667 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2668 dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2669 if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2670 crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2671 /* found running pipe */
2672 found_crtc = true;
2673 dig_fe_mask = 1 << i;
2674 dig_fe = i;
2675 break;
2676 }
2677 }
2678
2679 if (found_crtc) {
2680 /* loop through all running dig_be to find selected dig_fe */
2681 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2682 dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2683 /* is this dig_fe selected by this dig_be? */
2684 digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2685 dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2686 if (dig_fe_mask & digs_fe_selected &&
2687 /* is dig_be in SST mode? */
2688 dig_be_mode == NI_DIG_BE_DPSST) {
2689 dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2690 ni_dig_offsets[i]);
2691 uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2692 ni_tx_offsets[i]);
2693 /* dig_be enabled and tx running */
2694 if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2695 dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2696 uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2697 is_enabled = true;
2698 *ret_dig_fe = dig_fe;
2699 break;
2700 }
2701 }
2702 }
2703 }
2704
2705 return is_enabled;
2706}
2707
2708/*
2709 * Blank dig when in dp sst mode
2710 * Dig ignores crtc timing
2711 */
2712static void evergreen_blank_dp_output(struct radeon_device *rdev,
2713 unsigned dig_fe)
2714{
2715 unsigned stream_ctrl;
2716 unsigned fifo_ctrl;
2717 unsigned counter = 0;
2718
2719 if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2720 DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2721 return;
2722 }
2723
2724 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2725 evergreen_dp_offsets[dig_fe]);
2726 if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2727 DRM_ERROR("dig %d should be enabled\n", dig_fe);
2728 return;
2729 }
2730
2731 stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2732 WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2733 evergreen_dp_offsets[dig_fe], stream_ctrl);
2734
2735 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2736 evergreen_dp_offsets[dig_fe]);
2737 while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2738 msleep(1);
2739 counter++;
2740 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2741 evergreen_dp_offsets[dig_fe]);
2742 }
2743 if (counter >= 32)
2744 DRM_ERROR("counter exceeds %d\n", counter);
2745
2746 fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2747 fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2748 WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2749
2750}
2751
2611void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 2752void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2612{ 2753{
2613 u32 crtc_enabled, tmp, frame_count, blackout; 2754 u32 crtc_enabled, tmp, frame_count, blackout;
2614 int i, j; 2755 int i, j;
2756 unsigned dig_fe;
2615 2757
2616 if (!ASIC_IS_NODCE(rdev)) { 2758 if (!ASIC_IS_NODCE(rdev)) {
2617 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 2759 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2651 break; 2793 break;
2652 udelay(1); 2794 udelay(1);
2653 } 2795 }
2654 2796 /* We should disable the dig if it drives a DP SST stream, but we
2797 * are in radeon_device_init and the display topology is unknown;
2798 * it only becomes available after radeon_modeset_init.
2799 * radeon_atom_encoder_dpms_dig would do the job if we could
2800 * initialize it properly, so for now we do it manually here.
2801 */
2802
2803 if (ASIC_IS_DCE5(rdev) &&
2804 evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
2805 evergreen_blank_dp_output(rdev, dig_fe);
2806 /* we could probably remove the 6 lines below */
2655 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 2807 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2656 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2808 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2657 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2809 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
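
Condensing the detection logic above: a crtc is considered to drive an enabled DP SST stream only if some dig_fe has its symbol clock on and source-selects that crtc, and some dig_be both selects that dig_fe in its source mask and is enabled in SST mode with its UNIPHY PLL running. A compilable restatement with toy register arrays standing in for RREG32 and the NI_DIG_* offsets; the field layouts are copied from the evergreen_reg.h additions further down:

#include <stdbool.h>
#include <stdint.h>

#define NDIG 6
/* Toy register file; stand-ins for the real MMIO reads. */
static uint32_t fe_cntl[NDIG], be_cntl[NDIG], be_en[NDIG], pll[NDIG];

#define SYMCLK_FE_ON   (1u << 24)
#define SRC_SEL(v)     ((v) & 0x3)
#define FE_SEL(v)      (((v) >> 8) & 0x3f)
#define BE_MODE(v)     (((v) >> 16) & 0x7)
#define DP_SST         0
#define BE_ENABLE      (1u << 0)
#define SYMBCLK_ON     (1u << 8)
#define PLL_ENABLE     (1u << 0)

static bool dp_sst_stream_enabled(unsigned crtc, unsigned *fe_out)
{
        for (unsigned fe = 0; fe < NDIG; fe++) {
                uint32_t v = fe_cntl[fe];
                if (!(v & SYMCLK_FE_ON) || SRC_SEL(v) != crtc)
                        continue;
                for (unsigned be = 0; be < NDIG; be++) {
                        uint32_t b = be_cntl[be];
                        if (!(FE_SEL(b) & (1u << fe)) || BE_MODE(b) != DP_SST)
                                continue;
                        if ((be_en[be] & BE_ENABLE) && (be_en[be] & SYMBCLK_ON) &&
                            (pll[be] & PLL_ENABLE)) {
                                *fe_out = fe;
                                return true;
                        }
                }
                return false;   /* crtc's fe found, but no enabled SST be */
        }
        return false;
}

int main(void)
{
        unsigned fe;

        fe_cntl[1] = SYMCLK_FE_ON | 0;          /* dig_fe1 drives crtc 0 */
        be_cntl[3] = 1u << (8 + 1);             /* dig_be3 selects fe1, SST */
        be_en[3]   = BE_ENABLE | SYMBCLK_ON;
        pll[3]     = PLL_ENABLE;
        return dp_sst_stream_enabled(0, &fe) ? 0 : 1;
}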
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
250 250
251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ 251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
252#define EVERGREEN_HDMI_BASE 0x7030 252#define EVERGREEN_HDMI_BASE 0x7030
253/* DIG block */
254#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
255#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
256#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
257#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
258#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
259#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
260
261
262#define NI_DIG_FE_CNTL 0x7000
263# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
264# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
265
266
267#define NI_DIG_BE_CNTL 0x7140
268# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
269# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
270
271#define NI_DIG_BE_EN_CNTL 0x7144
272# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
273# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
274# define NI_DIG_BE_DPSST 0
253 275
254/* Display Port block */ 276/* Display Port block */
277#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
278#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
279#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
280#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
281#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
282#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
283
284
285#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
286# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
287# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
288#define EVERGREEN_DP_STEER_FIFO 0x7310
289# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
255#define EVERGREEN_DP_SEC_CNTL 0x7280 290#define EVERGREEN_DP_SEC_CNTL 0x7280
256# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) 291# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
257# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) 292# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
266# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) 301# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
267# define EVERGREEN_DP_SEC_SS_EN (1 << 28) 302# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
268 303
304/* DCIO_UNIPHY block */
305#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
306#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
307#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
308#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
309#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
310#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
311
312#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
313# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
314
269#endif 315#endif
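
The new offsets are intentionally written as subtractions from the block's instance-0 address, so the absolute MMIO addresses stay readable while instance i is reached as register + offset[i]. A quick runnable check of that arithmetic:

#include <assert.h>

/* The header encodes per-instance offsets relative to instance 0, e.g.
 * NI_DIG1_REGISTER_OFFSET = 0x7C00 - 0x7000; a register in instance i
 * is then addressed as reg + offset[i]. */
#define DIG_FE_CNTL 0x7000
static const unsigned dig_offsets[] = {
        0x7000 - 0x7000, 0x7C00 - 0x7000, 0x10800 - 0x7000,
        0x11400 - 0x7000, 0x12000 - 0x7000, 0x12C00 - 0x7000,
};

int main(void)
{
        /* DIG2's FE_CNTL lands at the absolute block address 0x10800. */
        assert(DIG_FE_CNTL + dig_offsets[2] == 0x10800);
        return 0;
}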
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
109#define NI_DP_MSE_SAT2 0x7398 109#define NI_DP_MSE_SAT2 0x7398
110 110
111#define NI_DP_MSE_SAT_UPDATE 0x739c 111#define NI_DP_MSE_SAT_UPDATE 0x739c
112# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
113# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
112 114
113#define NI_DIG_BE_CNTL 0x7140 115#define NI_DIG_BE_CNTL 0x7140
114# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8) 116# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fd8c4d317e60..95f4fea89302 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
62 return radeon_atpx_priv.atpx_detected; 62 return radeon_atpx_priv.atpx_detected;
63} 63}
64 64
65bool radeon_has_atpx_dgpu_power_cntl(void) {
66 return radeon_atpx_priv.atpx.functions.power_cntl;
67}
68
69/** 65/**
70 * radeon_atpx_call - call an ATPX method 66 * radeon_atpx_call - call an ATPX method
71 * 67 *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
145 */ 141 */
146static int radeon_atpx_validate(struct radeon_atpx *atpx) 142static int radeon_atpx_validate(struct radeon_atpx *atpx)
147{ 143{
144 /* make sure required functions are enabled */
145 /* dGPU power control is required */
146 if (atpx->functions.power_cntl == false) {
147 printk("ATPX dGPU power cntl not present, forcing\n");
148 atpx->functions.power_cntl = true;
149 }
150
148 if (atpx->functions.px_params) { 151 if (atpx->functions.px_params) {
149 union acpi_object *info; 152 union acpi_object *info;
150 struct atpx_px_params output; 153 struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfcc099c537d..81a63d7f5cd9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2002 rdev->mode_info.dither_property, 2002 rdev->mode_info.dither_property,
2003 RADEON_FMT_DITHER_DISABLE); 2003 RADEON_FMT_DITHER_DISABLE);
2004 2004
2005 if (radeon_audio != 0) 2005 if (radeon_audio != 0) {
2006 drm_object_attach_property(&radeon_connector->base.base, 2006 drm_object_attach_property(&radeon_connector->base.base,
2007 rdev->mode_info.audio_property, 2007 rdev->mode_info.audio_property,
2008 RADEON_AUDIO_AUTO); 2008 RADEON_AUDIO_AUTO);
2009 radeon_connector->audio = RADEON_AUDIO_AUTO;
2010 }
2009 if (ASIC_IS_DCE5(rdev)) 2011 if (ASIC_IS_DCE5(rdev))
2010 drm_object_attach_property(&radeon_connector->base.base, 2012 drm_object_attach_property(&radeon_connector->base.base,
2011 rdev->mode_info.output_csc_property, 2013 rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2130 drm_object_attach_property(&radeon_connector->base.base, 2132 drm_object_attach_property(&radeon_connector->base.base,
2131 rdev->mode_info.audio_property, 2133 rdev->mode_info.audio_property,
2132 RADEON_AUDIO_AUTO); 2134 RADEON_AUDIO_AUTO);
2135 radeon_connector->audio = RADEON_AUDIO_AUTO;
2133 } 2136 }
2134 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 2137 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2135 radeon_connector->dac_load_detect = true; 2138 radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2185 drm_object_attach_property(&radeon_connector->base.base, 2188 drm_object_attach_property(&radeon_connector->base.base,
2186 rdev->mode_info.audio_property, 2189 rdev->mode_info.audio_property,
2187 RADEON_AUDIO_AUTO); 2190 RADEON_AUDIO_AUTO);
2191 radeon_connector->audio = RADEON_AUDIO_AUTO;
2188 } 2192 }
2189 if (ASIC_IS_DCE5(rdev)) 2193 if (ASIC_IS_DCE5(rdev))
2190 drm_object_attach_property(&radeon_connector->base.base, 2194 drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2237 drm_object_attach_property(&radeon_connector->base.base, 2241 drm_object_attach_property(&radeon_connector->base.base,
2238 rdev->mode_info.audio_property, 2242 rdev->mode_info.audio_property,
2239 RADEON_AUDIO_AUTO); 2243 RADEON_AUDIO_AUTO);
2244 radeon_connector->audio = RADEON_AUDIO_AUTO;
2240 } 2245 }
2241 if (ASIC_IS_DCE5(rdev)) 2246 if (ASIC_IS_DCE5(rdev))
2242 drm_object_attach_property(&radeon_connector->base.base, 2247 drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4fd1a961012d..d0826fb0434c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
103 "LAST", 103 "LAST",
104}; 104};
105 105
106#if defined(CONFIG_VGA_SWITCHEROO)
107bool radeon_has_atpx_dgpu_power_cntl(void);
108#else
109static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
110#endif
111
112#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 106#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
113#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 107#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
114 108
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
1305 } 1299 }
1306 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); 1300 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1307 1301
1308 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1302 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1309 radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1303 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1310 pdev->subsystem_vendor, pdev->subsystem_device); 1304 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1311 1305
1312 /* mutex initialization are all done here so we 1306 /* mutex initialization are all done here so we
1313 * can recall function without having locking issues */ 1307 * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
1439 * ignore it */ 1433 * ignore it */
1440 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1434 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1441 1435
1442 if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl()) 1436 if (rdev->flags & RADEON_IS_PX)
1443 runtime = true; 1437 runtime = true;
1444 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); 1438 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
1445 if (runtime) 1439 if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 43cffb526b0c..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
89 WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1); 89 WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
90 90
91 do { 91 do {
92 unsigned value1, value2;
93 udelay(10);
92 temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset); 94 temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
93 } while ((temp & 0x1) && retries++ < 10000); 95
96 value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
97 value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
98
99 if (!value1 && !value2)
100 break;
101 } while (retries++ < 50);
94 102
95 if (retries == 10000) 103 if (retries == 10000)
96 DRM_ERROR("timed out waiting for SAT update %d\n", primary->offset); 104 DRM_ERROR("timed out waiting for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
150 return 0; 158 return 0;
151} 159}
152 160
153static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y) 161static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
154{ 162{
155 struct drm_device *dev = mst->base.dev; 163 struct drm_device *dev = mst->base.dev;
156 struct radeon_device *rdev = dev->dev_private; 164 struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
158 uint32_t val, temp; 166 uint32_t val, temp;
159 uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe); 167 uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
160 int retries = 0; 168 int retries = 0;
169 uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
170 uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
161 171
162 val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y); 172 val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
163 173
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
165 175
166 do { 176 do {
167 temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset); 177 temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
178 udelay(10);
168 } while ((temp & 0x1) && (retries++ < 10000)); 179 } while ((temp & 0x1) && (retries++ < 10000));
169 180
170 if (retries >= 10000) 181 if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
246 kfree(radeon_connector); 257 kfree(radeon_connector);
247} 258}
248 259
249static int radeon_connector_dpms(struct drm_connector *connector, int mode)
250{
251 DRM_DEBUG_KMS("\n");
252 return 0;
253}
254
255static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { 260static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
256 .dpms = radeon_connector_dpms, 261 .dpms = drm_helper_connector_dpms,
257 .detect = radeon_dp_mst_detect, 262 .detect = radeon_dp_mst_detect,
258 .fill_modes = drm_helper_probe_single_connector_modes, 263 .fill_modes = drm_helper_probe_single_connector_modes,
259 .destroy = radeon_dp_mst_connector_destroy, 264 .destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
394 struct drm_crtc *crtc; 399 struct drm_crtc *crtc;
395 struct radeon_crtc *radeon_crtc; 400 struct radeon_crtc *radeon_crtc;
396 int ret, slots; 401 int ret, slots;
397 402 s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
398 if (!ASIC_IS_DCE5(rdev)) { 403 if (!ASIC_IS_DCE5(rdev)) {
399 DRM_ERROR("got mst dpms on non-DCE5\n"); 404 DRM_ERROR("got mst dpms on non-DCE5\n");
400 return; 405 return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
456 461
457 mst_enc->enc_active = true; 462 mst_enc->enc_active = true;
458 radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary); 463 radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
459 radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0); 464
465 fixed_pbn = drm_int2fixp(mst_enc->pbn);
466 fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
467 avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
468 radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
460 469
461 atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0, 470 atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
462 mst_enc->fe); 471 mst_enc->fe);
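
The VCP size register now gets its X and Y fields from a fixed-point average of time slots per MTP: avg = pbn / pbn_div in s64 fixed point, X is the integer part, and Y is the fractional part scaled to 26 bits and rounded up. A self-contained model of that conversion; the helpers here are simplified stand-ins for the drm_fixed.h ones, assuming their 32.32 format, not the kernel implementations:

#include <assert.h>
#include <stdint.h>

static int64_t int2fixp(int a) { return (int64_t)a << 32; }
static int64_t fixp_div(int64_t a, int64_t b)
{
        /* Widen to avoid overflow; the kernel helper handles this
         * differently. */
        return (int64_t)(((__int128)a << 32) / b);
}
static int fixp2int(int64_t a) { return (int)(a >> 32); }
static int fixp2int_ceil(int64_t a)
{
        return fixp2int(a) + ((a & 0xffffffffLL) ? 1 : 0);
}

int main(void)
{
        int pbn = 2520, pbn_div = 64;   /* example: 2520 PBN, 64 PBN per slot */

        int64_t avg = fixp_div(int2fixp(pbn), int2fixp(pbn_div));
        int x = fixp2int(avg);                            /* integer slots */
        int y = fixp2int_ceil((avg - int2fixp(x)) << 26); /* 26-bit fraction */

        assert(x == 39);                /* 2520 / 64 = 39.375 */
        assert(y == 25165824);          /* 0.375 * 2^26 */
        return 0;
}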
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf65f2c..1e9304d1c88f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
291 if (r) { 291 if (r) {
292 return r; 292 return r;
293 } 293 }
294 rdev->ddev->vblank_disable_allowed = true;
295
294 /* enable msi */ 296 /* enable msi */
295 rdev->msi_enabled = 0; 297 rdev->msi_enabled = 0;
296 298
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 24152dfef199..478d4099b0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
331 RADEON_CRTC_DISP_REQ_EN_B)); 331 RADEON_CRTC_DISP_REQ_EN_B));
332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
333 } 333 }
334 drm_vblank_on(dev, radeon_crtc->crtc_id); 334 if (dev->num_crtcs > radeon_crtc->crtc_id)
335 drm_vblank_on(dev, radeon_crtc->crtc_id);
335 radeon_crtc_load_lut(crtc); 336 radeon_crtc_load_lut(crtc);
336 break; 337 break;
337 case DRM_MODE_DPMS_STANDBY: 338 case DRM_MODE_DPMS_STANDBY:
338 case DRM_MODE_DPMS_SUSPEND: 339 case DRM_MODE_DPMS_SUSPEND:
339 case DRM_MODE_DPMS_OFF: 340 case DRM_MODE_DPMS_OFF:
340 drm_vblank_off(dev, radeon_crtc->crtc_id); 341 if (dev->num_crtcs > radeon_crtc->crtc_id)
342 drm_vblank_off(dev, radeon_crtc->crtc_id);
341 if (radeon_crtc->crtc_id) 343 if (radeon_crtc->crtc_id)
342 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); 344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
343 else { 345 else {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c008312e1bcd..90f739478a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
235{ 235{
236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
237 237
238 if (radeon_ttm_tt_has_userptr(bo->ttm))
239 return -EPERM;
238 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 240 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
239} 241}
240 242
@@ -615,7 +617,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
615 set_page_dirty(page); 617 set_page_dirty(page);
616 618
617 mark_page_accessed(page); 619 mark_page_accessed(page);
618 page_cache_release(page); 620 put_page(page);
619 } 621 }
620 622
621 sg_free_table(ttm->sg); 623 sg_free_table(ttm->sg);
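
page_cache_release() was an alias for put_page(), and this release retires the alias, so the conversions here and in ttm_tt.c and via_dmablit.c below are mechanical. The surrounding per-page bookkeeping is the usual pattern for unpinning pages taken with get_user_pages(); a kernel-context sketch, not runnable standalone:

/* Per-page teardown for a userptr range. */
static void release_user_page(struct page *page, bool wrote_to_page)
{
        if (wrote_to_page)
                set_page_dirty(page);   /* page contents changed */
        mark_page_accessed(page);       /* keep it warm on the LRU */
        put_page(page);                 /* drop the get_user_pages() ref */
}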
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index af4df81c4e0c..e6abc09b67e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, 2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2934 { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2934 { 0, 0, 0, 0 }, 2935 { 0, 0, 0, 0 },
2935}; 2936};
2936 2937
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
230 230
231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) 231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
232{ 232{
233 struct ttm_bo_device *bdev = bo->bdev; 233 int put_count = 0;
234 struct ttm_mem_type_manager *man;
235 234
236 lockdep_assert_held(&bo->resv->lock.base); 235 lockdep_assert_held(&bo->resv->lock.base);
237 236
238 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { 237 put_count = ttm_bo_del_from_lru(bo);
239 list_del_init(&bo->swap); 238 ttm_bo_list_ref_sub(bo, put_count, true);
240 list_del_init(&bo->lru); 239 ttm_bo_add_to_lru(bo);
241
242 } else {
243 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
244 list_move_tail(&bo->swap, &bo->glob->swap_lru);
245
246 man = &bdev->man[bo->mem.mem_type];
247 list_move_tail(&bo->lru, &man->lru);
248 }
249} 240}
250EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); 241EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
251 242
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
311 goto out_err; 311 goto out_err;
312 312
313 copy_highpage(to_page, from_page); 313 copy_highpage(to_page, from_page);
314 page_cache_release(from_page); 314 put_page(from_page);
315 } 315 }
316 316
317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) 317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
361 copy_highpage(to_page, from_page); 361 copy_highpage(to_page, from_page);
362 set_page_dirty(to_page); 362 set_page_dirty(to_page);
363 mark_page_accessed(to_page); 363 mark_page_accessed(to_page);
364 page_cache_release(to_page); 364 put_page(to_page);
365 } 365 }
366 366
367 ttm_tt_unpopulate(ttm); 367 ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index e797dfc07ae3..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
188 if (NULL != (page = vsg->pages[i])) { 188 if (NULL != (page = vsg->pages[i])) {
189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
190 SetPageDirty(page); 190 SetPageDirty(page);
191 page_cache_release(page); 191 put_page(page);
192 } 192 }
193 } 193 }
194 case dr_via_pages_alloc: 194 case dr_via_pages_alloc:
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
267 return 0; 267 return 0;
268} 268}
269 269
270static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
271 struct drm_crtc_state *old_state)
272{
273 unsigned long flags;
274
275 spin_lock_irqsave(&crtc->dev->event_lock, flags);
276 if (crtc->state->event)
277 drm_crtc_send_vblank_event(crtc, crtc->state->event);
278 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
279}
280
270static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { 281static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
271 .enable = virtio_gpu_crtc_enable, 282 .enable = virtio_gpu_crtc_enable,
272 .disable = virtio_gpu_crtc_disable, 283 .disable = virtio_gpu_crtc_disable,
273 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, 284 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
274 .atomic_check = virtio_gpu_crtc_atomic_check, 285 .atomic_check = virtio_gpu_crtc_atomic_check,
286 .atomic_flush = virtio_gpu_crtc_atomic_flush,
275}; 287};
276 288
277static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, 289static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
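
virtio-gpu has no hardware vblank, so without an atomic_flush hook a pending pageflip event would never be completed and userspace would stall waiting for it; DRM requires dev->event_lock to be held around drm_crtc_send_vblank_event(). A kernel-context sketch of the hook's shape; clearing state->event after sending is an assumption of this sketch, not something the hunk above does:

static void crtc_flush_event(struct drm_crtc *crtc)
{
        unsigned long flags;

        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        if (crtc->state->event) {
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                crtc->state->event = NULL;      /* assumed consumption */
        }
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}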
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3293 &vmw_cmd_dx_cid_check, true, false, true), 3293 &vmw_cmd_dx_cid_check, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295 true, false, true), 3295 true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, 3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297 true, false, true), 3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299 true, false, true), 3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301 &vmw_cmd_ok, true, false, true), 3301 &vmw_cmd_dx_cid_check, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, 3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303 true, false, true), 3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, 3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305 true, false, true), 3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307 true, false, true), 3307 true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, 3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309 true, false, true), 3309 true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311 true, false, true), 3311 true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
573 mode = old_mode; 573 mode = old_mode;
574 old_mode = NULL; 574 old_mode = NULL;
575 } else if (!vmw_kms_validate_mode_vram(vmw_priv, 575 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
576 mode->hdisplay * 576 mode->hdisplay *
577 (var->bits_per_pixel + 7) / 8, 577 DIV_ROUND_UP(var->bits_per_pixel, 8),
578 mode->vdisplay)) { 578 mode->vdisplay)) {
579 drm_mode_destroy(vmw_priv->dev, mode); 579 drm_mode_destroy(vmw_priv->dev, mode);
580 return -EINVAL; 580 return -EINVAL;
581 } 581 }
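
The vmwgfx change is purely cosmetic: for non-negative bpp, (bpp + 7) / 8 and DIV_ROUND_UP(bpp, 8) are the same value, since DIV_ROUND_UP(n, d) expands to (n + d - 1) / d; the macro just states the bytes-per-pixel-rounded-up intent. A quick exhaustive check over plausible depths:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* (bpp + 7) / 8 and DIV_ROUND_UP(bpp, 8) agree for all depths. */
        for (int bpp = 1; bpp <= 64; bpp++)
                assert((bpp + 7) / 8 == DIV_ROUND_UP(bpp, 8));
        return 0;
}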
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314cd83a..6494a4d28171 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
395EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved); 395EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
396 396
397void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, 397void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
398 u32 pixel_format, int stride, 398 unsigned int uv_stride,
399 int u_offset, int v_offset) 399 unsigned int u_offset, unsigned int v_offset)
400{ 400{
401 switch (pixel_format) { 401 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
402 case V4L2_PIX_FMT_YUV420: 402 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
403 case V4L2_PIX_FMT_YUV422P: 403 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
404 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
405 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
406 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
407 break;
408 case V4L2_PIX_FMT_YVU420:
409 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
410 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
411 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
412 break;
413 case V4L2_PIX_FMT_NV12:
414 case V4L2_PIX_FMT_NV16:
415 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
416 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
417 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
418 break;
419 }
420} 404}
421EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full); 405EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
422 406
423void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, 407void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
424 u32 pixel_format, int stride, int height) 408 u32 pixel_format, int stride, int height)
425{ 409{
426 int u_offset, v_offset; 410 int fourcc, u_offset, v_offset;
427 int uv_stride = 0; 411 int uv_stride = 0;
428 412
429 switch (pixel_format) { 413 fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
430 case V4L2_PIX_FMT_YUV420: 414 switch (fourcc) {
431 case V4L2_PIX_FMT_YVU420: 415 case DRM_FORMAT_YUV420:
432 uv_stride = stride / 2; 416 uv_stride = stride / 2;
433 u_offset = stride * height; 417 u_offset = stride * height;
434 v_offset = u_offset + (uv_stride * height / 2); 418 v_offset = u_offset + (uv_stride * height / 2);
435 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
436 u_offset, v_offset);
437 break; 419 break;
438 case V4L2_PIX_FMT_YUV422P: 420 case DRM_FORMAT_YVU420:
421 uv_stride = stride / 2;
422 v_offset = stride * height;
423 u_offset = v_offset + (uv_stride * height / 2);
424 break;
425 case DRM_FORMAT_YUV422:
439 uv_stride = stride / 2; 426 uv_stride = stride / 2;
440 u_offset = stride * height; 427 u_offset = stride * height;
441 v_offset = u_offset + (uv_stride * height); 428 v_offset = u_offset + (uv_stride * height);
442 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
443 u_offset, v_offset);
444 break; 429 break;
445 case V4L2_PIX_FMT_NV12: 430 case DRM_FORMAT_NV12:
446 case V4L2_PIX_FMT_NV16: 431 case DRM_FORMAT_NV16:
432 uv_stride = stride;
447 u_offset = stride * height; 433 u_offset = stride * height;
448 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, 434 v_offset = 0;
449 u_offset, 0);
450 break; 435 break;
436 default:
437 return;
451 } 438 }
439 ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
452} 440}
453EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar); 441EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
454 442
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
684 672
685 switch (pix->pixelformat) { 673 switch (pix->pixelformat) {
686 case V4L2_PIX_FMT_YUV420: 674 case V4L2_PIX_FMT_YUV420:
687 case V4L2_PIX_FMT_YVU420:
688 offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 675 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
689 u_offset = U_OFFSET(pix, image->rect.left, 676 u_offset = U_OFFSET(pix, image->rect.left,
690 image->rect.top) - offset; 677 image->rect.top) - offset;
691 v_offset = V_OFFSET(pix, image->rect.left, 678 v_offset = V_OFFSET(pix, image->rect.left,
692 image->rect.top) - offset; 679 image->rect.top) - offset;
693 680
694 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 681 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
695 pix->bytesperline,
696 u_offset, v_offset); 682 u_offset, v_offset);
697 break; 683 break;
684 case V4L2_PIX_FMT_YVU420:
685 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
686 u_offset = U_OFFSET(pix, image->rect.left,
687 image->rect.top) - offset;
688 v_offset = V_OFFSET(pix, image->rect.left,
689 image->rect.top) - offset;
690
691 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
692 v_offset, u_offset);
693 break;
698 case V4L2_PIX_FMT_YUV422P: 694 case V4L2_PIX_FMT_YUV422P:
699 offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 695 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
700 u_offset = U2_OFFSET(pix, image->rect.left, 696 u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
702 v_offset = V2_OFFSET(pix, image->rect.left, 698 v_offset = V2_OFFSET(pix, image->rect.left,
703 image->rect.top) - offset; 699 image->rect.top) - offset;
704 700
705 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 701 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
706 pix->bytesperline,
707 u_offset, v_offset); 702 u_offset, v_offset);
708 break; 703 break;
709 case V4L2_PIX_FMT_NV12: 704 case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
712 image->rect.top) - offset; 707 image->rect.top) - offset;
713 v_offset = 0; 708 v_offset = 0;
714 709
715 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 710 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
716 pix->bytesperline,
717 u_offset, v_offset); 711 u_offset, v_offset);
718 break; 712 break;
719 case V4L2_PIX_FMT_NV16: 713 case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
722 image->rect.top) - offset; 716 image->rect.top) - offset;
723 v_offset = 0; 717 v_offset = 0;
724 718
725 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 719 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
726 pix->bytesperline,
727 u_offset, v_offset); 720 u_offset, v_offset);
728 break; 721 break;
729 case V4L2_PIX_FMT_UYVY: 722 case V4L2_PIX_FMT_UYVY:
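
After the refactor, ipu_cpmem_set_yuv_planar_full() takes only the UV stride and the two chroma base offsets, and each caller computes them per format; note how the YVU420 case passes (v_offset, u_offset) swapped because the hardware fields are still U-then-V. For a luma stride s and height h the arithmetic works out as in this runnable check:

#include <assert.h>

int main(void)
{
        int s = 640, h = 480;               /* luma stride and height */

        /* YUV420: full-size Y plane, then quarter-size U and V planes. */
        int uv_stride = s / 2;
        int u_offset  = s * h;                          /* 307200 */
        int v_offset  = u_offset + uv_stride * h / 2;   /* + 76800 */
        assert(uv_stride == 320 && v_offset == 384000);

        /* YVU420 is the same layout with U and V swapped, which is why
         * ipu_cpmem_set_image() passes (v_offset, u_offset) for it. */

        /* NV12: one interleaved UV plane, full stride, right after Y. */
        uv_stride = s;
        u_offset  = s * h;
        v_offset  = 0;
        assert(u_offset == 307200 && v_offset == 0);
        return 0;
}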
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c3958e2a0..837b1ec22800 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@ out:
350} 350}
351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth); 351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
352 352
353int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width) 353void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
354{ 354{
355 struct ipu_dmfc_priv *priv = dmfc->priv; 355 struct ipu_dmfc_priv *priv = dmfc->priv;
356 u32 dmfc_gen1; 356 u32 dmfc_gen1;
357 357
358 mutex_lock(&priv->mutex);
359
358 dmfc_gen1 = readl(priv->base + DMFC_GENERAL1); 360 dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
359 361
360 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines) 362 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
364 366
365 writel(dmfc_gen1, priv->base + DMFC_GENERAL1); 367 writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
366 368
367 return 0; 369 mutex_unlock(&priv->mutex);
368} 370}
369EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel); 371EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
370 372
371struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel) 373struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
372{ 374{
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bdb8cc89cacc..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1979 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) }, 1979 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
1980 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) }, 1980 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
1981 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) }, 1981 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
1982 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
1983 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
1984 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
1982 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, 1985 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
1983 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1986 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1984 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 1987 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5c0e43ed5c53..c6eaff5f8845 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -676,6 +676,7 @@
676#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b 676#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
677#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048 677#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
678#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d 678#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
679#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
679#define USB_DEVICE_ID_MS_NE4K 0x00db 680#define USB_DEVICE_ID_MS_NE4K 0x00db
680#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc 681#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
681#define USB_DEVICE_ID_MS_LK6K 0x00f9 682#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -683,6 +684,8 @@
683#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 684#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
684#define USB_DEVICE_ID_MS_NE7K 0x071d 685#define USB_DEVICE_ID_MS_NE7K 0x071d
685#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 686#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
687#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
688#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
686#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 689#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
687#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3 690#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
688#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 691#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
184 unsigned char byte2, unsigned char byte3) 184 unsigned char byte2, unsigned char byte3)
185{ 185{
186 int ret; 186 int ret;
187 unsigned char buf[] = {0x18, byte2, byte3}; 187 unsigned char *buf;
188
189 buf = kzalloc(3, GFP_KERNEL);
190 if (!buf)
191 return -ENOMEM;
192
193 buf[0] = 0x18;
194 buf[1] = byte2;
195 buf[2] = byte3;
188 196
189 switch (hdev->product) { 197 switch (hdev->product) {
190 case USB_DEVICE_ID_LENOVO_CUSBKBD: 198 case USB_DEVICE_ID_LENOVO_CUSBKBD:
191 ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf), 199 ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
192 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 200 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
193 break; 201 break;
194 case USB_DEVICE_ID_LENOVO_CBTKBD: 202 case USB_DEVICE_ID_LENOVO_CBTKBD:
195 ret = hid_hw_output_report(hdev, buf, sizeof(buf)); 203 ret = hid_hw_output_report(hdev, buf, 3);
196 break; 204 break;
197 default: 205 default:
198 ret = -EINVAL; 206 ret = -EINVAL;
199 break; 207 break;
200 } 208 }
201 209
210 kfree(buf);
211
202 return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */ 212 return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
203} 213}
204 214
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 75cd3bc59c54..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
272 .driver_data = MS_PRESENTER }, 272 .driver_data = MS_PRESENTER },
273 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K), 273 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
274 .driver_data = MS_ERGONOMY | MS_RDESC_3K }, 274 .driver_data = MS_ERGONOMY | MS_RDESC_3K },
275 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
276 .driver_data = MS_ERGONOMY },
277 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
278 .driver_data = MS_ERGONOMY },
279 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
280 .driver_data = MS_ERGONOMY },
275 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0), 281 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
276 .driver_data = MS_NOGET }, 282 .driver_data = MS_NOGET },
277 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 283 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 25d3c4330bf6..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
1169 MT_TOOL_FINGER, 1169 MT_TOOL_FINGER,
1170 false); 1170 false);
1171 } 1171 }
1172 input_mt_sync_frame(input_dev);
1172 input_sync(input_dev); 1173 input_sync(input_dev);
1173 } 1174 }
1174 } 1175 }
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
2049 * -----+------------------------------+-----+-----+ 2049 * -----+------------------------------+-----+-----+
2050 * The single bits Yaw, Roll, Pitch in the lower right corner specify 2050 * The single bits Yaw, Roll, Pitch in the lower right corner specify
2051 * whether the wiimote is rotating fast (0) or slow (1). Speed for slow 2051 * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
2052 * rotation is 440 deg/s and for fast rotation 2000 deg/s. To get a 2052 * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
2053 * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast 2053 * units / deg/s. To get a linear scale for fast rotation we multiply
2054 * and 9 for slow. 2054 * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
2055 * previous scale reported by this driver.
2056 * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
2055 * If the wiimote is not rotating the sensor reports 2^13 = 8192. 2057 * If the wiimote is not rotating the sensor reports 2^13 = 8192.
2056 * Ext specifies whether an extension is connected to the motionp. 2058 * Ext specifies whether an extension is connected to the motionp.
2057 * which is parsed by wiimote-core. 2059 * which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
2070 z -= 8192; 2072 z -= 8192;
2071 2073
2072 if (!(ext[3] & 0x02)) 2074 if (!(ext[3] & 0x02))
2073 x *= 18; 2075 x = (x * 2000 * 9) / 440;
2074 else 2076 else
2075 x *= 9; 2077 x *= 9;
2076 if (!(ext[4] & 0x02)) 2078 if (!(ext[4] & 0x02))
2077 y *= 18; 2079 y = (y * 2000 * 9) / 440;
2078 else 2080 else
2079 y *= 9; 2081 y *= 9;
2080 if (!(ext[3] & 0x01)) 2082 if (!(ext[3] & 0x01))
2081 z *= 18; 2083 z = (z * 2000 * 9) / 440;
2082 else 2084 else
2083 z *= 9; 2085 z *= 9;
2084 2086
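
In raw motion-plus counts the scale is 8192/2000 units per deg/s in fast mode and 8192/440 in slow mode. The old code multiplied fast samples by 18 and slow by 9, approximating the true 2000/440 = ~4.545 ratio as 2 and so understating fast rotation; the new x * 2000 * 9 / 440 puts both modes on the common 8192 * 9 / 440 (~167.56) units per deg/s scale. A runnable check that the two paths now agree:

#include <assert.h>
#include <stdlib.h>

/* Map raw motion-plus counts onto the common 8192 * 9 / 440 units per
 * deg/s scale, mirroring the arithmetic in wiimod_mp_in_mp(). */
static int scale(int raw, int fast)
{
        return fast ? raw * 2000 * 9 / 440 : raw * 9;
}

int main(void)
{
        int dps = 400;                       /* one physical rate, in deg/s */
        int slow_raw = dps * 8192 / 440;     /* 7447 counts in slow mode */
        int fast_raw = dps * 8192 / 2000;    /* 1638 counts in fast mode */

        /* Both paths land near dps * 8192 * 9 / 440 = ~67025. */
        int want = dps * 8192 * 9 / 440;
        assert(abs(scale(slow_raw, 0) - want) < 100);
        assert(abs(scale(fast_raw, 1) - want) < 100);
        return 0;
}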
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
951 return ret; 951 return ret;
952} 952}
953 953
954static void usbhid_restart_queues(struct usbhid_device *usbhid)
955{
956 if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
957 usbhid_restart_out_queue(usbhid);
958 if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
959 usbhid_restart_ctrl_queue(usbhid);
960}
961
962static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid) 954static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
963{ 955{
964 struct usbhid_device *usbhid = hid->driver_data; 956 struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
1404 usb_kill_urb(usbhid->urbout); 1396 usb_kill_urb(usbhid->urbout);
1405} 1397}
1406 1398
1399static void hid_restart_io(struct hid_device *hid)
1400{
1401 struct usbhid_device *usbhid = hid->driver_data;
1402 int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
1403 int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
1404
1405 spin_lock_irq(&usbhid->lock);
1406 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1407 usbhid_mark_busy(usbhid);
1408
1409 if (clear_halt || reset_pending)
1410 schedule_work(&usbhid->reset_work);
1411 usbhid->retry_delay = 0;
1412 spin_unlock_irq(&usbhid->lock);
1413
1414 if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
1415 return;
1416
1417 if (!clear_halt) {
1418 if (hid_start_in(hid) < 0)
1419 hid_io_error(hid);
1420 }
1421
1422 spin_lock_irq(&usbhid->lock);
1423 if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
1424 usbhid_restart_out_queue(usbhid);
1425 if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
1426 usbhid_restart_ctrl_queue(usbhid);
1427 spin_unlock_irq(&usbhid->lock);
1428}
1429
1407/* Treat USB reset pretty much the same as suspend/resume */ 1430/* Treat USB reset pretty much the same as suspend/resume */
1408static int hid_pre_reset(struct usb_interface *intf) 1431static int hid_pre_reset(struct usb_interface *intf)
1409{ 1432{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
1453 return 1; 1476 return 1;
1454 } 1477 }
1455 1478
1479 /* No need to do another reset or clear a halted endpoint */
1456 spin_lock_irq(&usbhid->lock); 1480 spin_lock_irq(&usbhid->lock);
1457 clear_bit(HID_RESET_PENDING, &usbhid->iofl); 1481 clear_bit(HID_RESET_PENDING, &usbhid->iofl);
1482 clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
1458 spin_unlock_irq(&usbhid->lock); 1483 spin_unlock_irq(&usbhid->lock);
1459 hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0); 1484 hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
1460 status = hid_start_in(hid);
1461 if (status < 0)
1462 hid_io_error(hid);
1463 usbhid_restart_queues(usbhid);
1485
1486 hid_restart_io(hid);
1464 1487
1465 return 0; 1488 return 0;
1466} 1489}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
1483#ifdef CONFIG_PM 1506#ifdef CONFIG_PM
1484static int hid_resume_common(struct hid_device *hid, bool driver_suspended) 1507static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
1485{ 1508{
1486 struct usbhid_device *usbhid = hid->driver_data;
1509 int status = 0;
1487 int status;
1488
1489 spin_lock_irq(&usbhid->lock);
1490 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1491 usbhid_mark_busy(usbhid);
1492
1493 if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
1494 test_bit(HID_RESET_PENDING, &usbhid->iofl))
1495 schedule_work(&usbhid->reset_work);
1496 usbhid->retry_delay = 0;
1497
1498 usbhid_restart_queues(usbhid);
1499 spin_unlock_irq(&usbhid->lock);
1500
1501 status = hid_start_in(hid);
1502 if (status < 0)
1503 hid_io_error(hid);
1504 1510
1511 hid_restart_io(hid);
1505 if (driver_suspended && hid->driver && hid->driver->resume) 1512 if (driver_suspended && hid->driver && hid->driver->resume)
1506 status = hid->driver->resume(hid); 1513 status = hid->driver->resume(hid);
1507 return status; 1514 return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
1570static int hid_resume(struct usb_interface *intf) 1577static int hid_resume(struct usb_interface *intf)
1571{ 1578{
1572 struct hid_device *hid = usb_get_intfdata (intf); 1579 struct hid_device *hid = usb_get_intfdata (intf);
1573 struct usbhid_device *usbhid = hid->driver_data;
1574 int status; 1580 int status;
1575 1581
1576 if (!test_bit(HID_STARTED, &usbhid->iofl))
1577 return 0;
1578
1579 status = hid_resume_common(hid, true); 1582 status = hid_resume_common(hid, true);
1580 dev_dbg(&intf->dev, "resume status %d\n", status); 1583 dev_dbg(&intf->dev, "resume status %d\n", status);
1581 return 0; 1584 return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
1584static int hid_reset_resume(struct usb_interface *intf) 1587static int hid_reset_resume(struct usb_interface *intf)
1585{ 1588{
1586 struct hid_device *hid = usb_get_intfdata(intf); 1589 struct hid_device *hid = usb_get_intfdata(intf);
1587 struct usbhid_device *usbhid = hid->driver_data;
1588 int status; 1590 int status;
1589 1591
1590 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1591 status = hid_post_reset(intf); 1592 status = hid_post_reset(intf);
1592 if (status >= 0 && hid->driver && hid->driver->reset_resume) { 1593 if (status >= 0 && hid->driver && hid->driver->reset_resume) {
1593 int ret = hid->driver->reset_resume(hid); 1594 int ret = hid->driver->reset_resume(hid);
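
The point of the new hid_restart_io() is that resume, reset-resume, and post-reset now share one restart path instead of three slightly different copies. Reduced to a sketch with stand-in names (my_dev, the MY_* flags, and start_in_urb() are placeholders, not the usbhid code itself):

static void restart_io(struct my_dev *d)
{
	spin_lock_irq(&d->lock);
	clear_bit(MY_SUSPENDED, &d->flags);
	if (test_bit(MY_RESET_PENDING, &d->flags))
		schedule_work(&d->reset_work);	/* reset path restarts I/O */
	spin_unlock_irq(&d->lock);

	if (test_bit(MY_RESET_PENDING, &d->flags) ||
	    !test_bit(MY_STARTED, &d->flags))
		return;			/* nothing safe to restart yet */

	start_in_urb(d);	/* then requeue out/ctrl work under the lock */
}
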
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 68a560957871..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
152 hid_data->inputmode = field->report->id; 152 hid_data->inputmode = field->report->id;
153 hid_data->inputmode_index = usage->usage_index; 153 hid_data->inputmode_index = usage->usage_index;
154 break; 154 break;
155
156 case HID_UP_DIGITIZER:
157 if (field->report->id == 0x0B &&
158 (field->application == WACOM_G9_DIGITIZER ||
159 field->application == WACOM_G11_DIGITIZER)) {
160 wacom->wacom_wac.mode_report = field->report->id;
161 wacom->wacom_wac.mode_value = 0;
162 }
163 break;
164
165 case WACOM_G9_PAGE:
166 case WACOM_G11_PAGE:
167 if (field->report->id == 0x03 &&
168 (field->application == WACOM_G9_TOUCHSCREEN ||
169 field->application == WACOM_G11_TOUCHSCREEN)) {
170 wacom->wacom_wac.mode_report = field->report->id;
171 wacom->wacom_wac.mode_value = 0;
172 }
173 break;
155 } 174 }
156} 175}
157 176
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
322 return 0; 341 return 0;
323} 342}
324 343
325static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
326 int length, int mode)
344static int wacom_set_device_mode(struct hid_device *hdev,
345 struct wacom_wac *wacom_wac)
327{ 346{
328 unsigned char *rep_data;
347 u8 *rep_data;
348 struct hid_report *r;
349 struct hid_report_enum *re;
350 int length;
329 int error = -ENOMEM, limit = 0; 351 int error = -ENOMEM, limit = 0;
330 352
331 rep_data = kzalloc(length, GFP_KERNEL);
353 if (wacom_wac->mode_report < 0)
354 return 0;
355
356 re = &(hdev->report_enum[HID_FEATURE_REPORT]);
357 r = re->report_id_hash[wacom_wac->mode_report];
358 if (!r)
359 return -EINVAL;
360
361 rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
332 if (!rep_data) 362 if (!rep_data)
333 return error;
363 return -ENOMEM;
364
365 length = hid_report_len(r);
334 366
335 do { 367 do {
336 rep_data[0] = report_id;
337 rep_data[1] = mode;
368 rep_data[0] = wacom_wac->mode_report;
369 rep_data[1] = wacom_wac->mode_value;
338 370
339 error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 371 error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
340 length, 1); 372 length, 1);
341 if (error >= 0) 373 if (error >= 0)
342 error = wacom_get_report(hdev, HID_FEATURE_REPORT, 374 error = wacom_get_report(hdev, HID_FEATURE_REPORT,
343 rep_data, length, 1); 375 rep_data, length, 1);
344 } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
376 } while (error >= 0 &&
377 rep_data[1] != wacom_wac->mode_value &&
378 limit++ < WAC_MSG_RETRIES);
345 379
346 kfree(rep_data); 380 kfree(rep_data);
347 381
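
The loop above is a set-then-verify retry idiom: write the feature report, read it back, and retry until the device acknowledges the requested mode. The control flow in stand-alone form (set_mode()/get_mode() are hypothetical stand-ins for wacom_set_report()/wacom_get_report()):

#include <stdio.h>

#define MSG_RETRIES 5

static unsigned char device_mode;	/* pretend hardware register */

static int set_mode(unsigned char m) { device_mode = m; return 0; }
static int get_mode(unsigned char *m) { *m = device_mode; return 0; }

int main(void)
{
	unsigned char mode_value = 2, readback = 0;
	int error, limit = 0;

	do {
		error = set_mode(mode_value);
		if (error >= 0)
			error = get_mode(&readback);
	} while (error >= 0 && readback != mode_value &&
		 limit++ < MSG_RETRIES);

	printf("mode %u, %d retries\n", readback, limit);
	return error < 0;
}
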
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
411static int wacom_query_tablet_data(struct hid_device *hdev, 445static int wacom_query_tablet_data(struct hid_device *hdev,
412 struct wacom_features *features) 446 struct wacom_features *features)
413{ 447{
448 struct wacom *wacom = hid_get_drvdata(hdev);
449 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
450
414 if (hdev->bus == BUS_BLUETOOTH) 451 if (hdev->bus == BUS_BLUETOOTH)
415 return wacom_bt_query_tablet_data(hdev, 1, features); 452 return wacom_bt_query_tablet_data(hdev, 1, features);
416 453
417 if (features->type == HID_GENERIC)
418 return wacom_hid_set_device_mode(hdev);
419
420 if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
421 if (features->type > TABLETPC) {
422 /* MT Tablet PC touch */
423 return wacom_set_device_mode(hdev, 3, 4, 4);
424 }
425 else if (features->type == WACOM_24HDT) {
426 return wacom_set_device_mode(hdev, 18, 3, 2);
427 }
428 else if (features->type == WACOM_27QHDT) {
429 return wacom_set_device_mode(hdev, 131, 3, 2);
430 }
431 else if (features->type == BAMBOO_PAD) {
432 return wacom_set_device_mode(hdev, 2, 2, 2);
433 }
434 } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
435 if (features->type <= BAMBOO_PT) {
436 return wacom_set_device_mode(hdev, 2, 2, 2);
437 }
438 }
454 if (features->type != HID_GENERIC) {
455 if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
456 if (features->type > TABLETPC) {
457 /* MT Tablet PC touch */
458 wacom_wac->mode_report = 3;
459 wacom_wac->mode_value = 4;
460 } else if (features->type == WACOM_24HDT) {
461 wacom_wac->mode_report = 18;
462 wacom_wac->mode_value = 2;
463 } else if (features->type == WACOM_27QHDT) {
464 wacom_wac->mode_report = 131;
465 wacom_wac->mode_value = 2;
466 } else if (features->type == BAMBOO_PAD) {
467 wacom_wac->mode_report = 2;
468 wacom_wac->mode_value = 2;
469 }
470 } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
471 if (features->type <= BAMBOO_PT) {
472 wacom_wac->mode_report = 2;
473 wacom_wac->mode_value = 2;
474 }
475 }
476 }
439 477
478 wacom_set_device_mode(hdev, wacom_wac);
479
480 if (features->type == HID_GENERIC)
481 return wacom_hid_set_device_mode(hdev);
482
440 return 0; 483 return 0;
441} 484}
442 485
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
1817 goto fail_type; 1860 goto fail_type;
1818 } 1861 }
1819 1862
1863 wacom_wac->hid_data.inputmode = -1;
1864 wacom_wac->mode_report = -1;
1865
1820 wacom->usbdev = dev; 1866 wacom->usbdev = dev;
1821 wacom->intf = intf; 1867 wacom->intf = intf;
1822 mutex_init(&wacom->lock); 1868 mutex_init(&wacom->lock);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bd198bbd4df0..02c4efea241c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2426,6 +2426,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2426 } 2426 }
2427 2427
2428 /* 2428 /*
2429 * Hack for the Bamboo One:
2430 * the device presents a PAD/Touch interface as most Bamboos do, and even
2431 * sends ghost PAD data on it. However, later we must disable this
2432 * ghost interface, and we cannot detect it unless we set it here
2433 * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
2434 */
2435 if (features->type == BAMBOO_PEN &&
2436 features->pktlen == WACOM_PKGLEN_BBTOUCH3)
2437 features->device_type |= WACOM_DEVICETYPE_PAD;
2438
2439 /*
2429 * Raw Wacom-mode pen and touch events both come from interface 2440 * Raw Wacom-mode pen and touch events both come from interface
2430 * 0, whose HID descriptor has an application usage of 0xFF0D 2441 * 0, whose HID descriptor has an application usage of 0xFF0D
2431 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back 2442 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
84#define WACOM_DEVICETYPE_WL_MONITOR 0x0008 84#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
85 85
86#define WACOM_VENDORDEFINED_PEN 0xff0d0001 86#define WACOM_VENDORDEFINED_PEN 0xff0d0001
87#define WACOM_G9_PAGE 0xff090000
88#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
89#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
90#define WACOM_G11_PAGE 0xff110000
91#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
92#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
87 93
88#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \ 94#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
89 ((f)->physical == HID_DG_STYLUS) || \ 95 ((f)->physical == HID_DG_STYLUS) || \
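
These application values pack a 16-bit vendor usage page into the high half and the usage ID into the low half, which is why the constants are built with a bitwise OR. A minimal check:

#include <stdio.h>

#define WACOM_G9_PAGE        0xff090000
#define WACOM_G9_DIGITIZER   (WACOM_G9_PAGE | 0x02)
#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)

int main(void)
{
	printf("digitizer:   page 0x%04x usage 0x%04x\n",
	       WACOM_G9_DIGITIZER >> 16, WACOM_G9_DIGITIZER & 0xffff);
	printf("touchscreen: page 0x%04x usage 0x%04x\n",
	       WACOM_G9_TOUCHSCREEN >> 16, WACOM_G9_TOUCHSCREEN & 0xffff);
	return 0;
}
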
@@ -238,6 +244,8 @@ struct wacom_wac {
238 int ps_connected; 244 int ps_connected;
239 u8 bt_features; 245 u8 bt_features;
240 u8 bt_high_speed; 246 u8 bt_high_speed;
247 int mode_report;
248 int mode_value;
241 struct hid_data hid_data; 249 struct hid_data hid_data;
242}; 250};
243 251
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
975 975
976config I2C_XLP9XX 976config I2C_XLP9XX
977 tristate "XLP9XX I2C support" 977 tristate "XLP9XX I2C support"
978 depends on CPU_XLP || COMPILE_TEST
978 depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
979 help 979 help
980 This driver enables support for the on-chip I2C interface of 980 This driver enables support for the on-chip I2C interface of
981 the Broadcom XLP9xx/XLP5xx MIPS processors.
981 the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
982 982
983 This driver can also be built as a module. If so, the module will 983 This driver can also be built as a module. If so, the module will
984 be called i2c-xlp9xx. 984 be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
116 cbd_t __iomem *rbase; 116 cbd_t __iomem *rbase;
117 u_char *txbuf[CPM_MAXBD]; 117 u_char *txbuf[CPM_MAXBD];
118 u_char *rxbuf[CPM_MAXBD]; 118 u_char *rxbuf[CPM_MAXBD];
119 u32 txdma[CPM_MAXBD];
120 u32 rxdma[CPM_MAXBD];
119 dma_addr_t txdma[CPM_MAXBD];
120 dma_addr_t rxdma[CPM_MAXBD];
121}; 121};
122 122
123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) 123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
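
The type change matters on configurations where DMA addresses are wider than 32 bits: dma_map_single() returns a dma_addr_t, and storing it in a u32 silently drops the high bits. A stand-alone illustration (uint64_t standing in for a 64-bit dma_addr_t):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t handle = 0x200000000ULL;	/* mapping above 4 GiB */
	uint32_t truncated = (uint32_t)handle;	/* the old u32 field */

	printf("real %#llx, stored %#x\n",
	       (unsigned long long)handle, truncated);	/* stored 0 */
	return 0;
}
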
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
671 return -EIO; 671 return -EIO;
672 } 672 }
673 673
674 clk_prepare_enable(i2c->clk);
674 ret = clk_enable(i2c->clk);
675 if (ret)
676 return ret;
675 677
676 for (i = 0; i < num; i++, msgs++) { 678 for (i = 0; i < num; i++, msgs++) {
677 stop = (i == num - 1); 679 stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
695 } 697 }
696 698
697 out: 699 out:
698 clk_disable_unprepare(i2c->clk);
700 clk_disable(i2c->clk);
699 return ret; 701 return ret;
700} 702}
701 703
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
747 return -ENOENT; 749 return -ENOENT;
748 } 750 }
749 751
750 clk_prepare_enable(i2c->clk);
752 ret = clk_prepare_enable(i2c->clk);
753 if (ret)
754 return ret;
751 755
752 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 756 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
753 i2c->regs = devm_ioremap_resource(&pdev->dev, mem); 757 i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
799 803
800 platform_set_drvdata(pdev, i2c); 804 platform_set_drvdata(pdev, i2c);
801 805
806 clk_disable(i2c->clk);
807
808 return 0;
809
802 err_clk: 810 err_clk:
803 clk_disable_unprepare(i2c->clk); 811 clk_disable_unprepare(i2c->clk);
804 return ret; 812 return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
810 818
811 i2c_del_adapter(&i2c->adap); 819 i2c_del_adapter(&i2c->adap);
812 820
821 clk_unprepare(i2c->clk);
822
813 return 0; 823 return 0;
814} 824}
815 825
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
821 831
822 i2c->suspended = 1; 832 i2c->suspended = 1;
823 833
834 clk_unprepare(i2c->clk);
835
824 return 0; 836 return 0;
825} 837}
826 838
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
830 struct exynos5_i2c *i2c = platform_get_drvdata(pdev); 842 struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
831 int ret = 0; 843 int ret = 0;
832 844
833 clk_prepare_enable(i2c->clk);
845 ret = clk_prepare_enable(i2c->clk);
846 if (ret)
847 return ret;
834 848
835 ret = exynos5_hsi2c_clock_setup(i2c); 849 ret = exynos5_hsi2c_clock_setup(i2c);
836 if (ret) { 850 if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
839 } 853 }
840 854
841 exynos5_i2c_init(i2c); 855 exynos5_i2c_init(i2c);
842 clk_disable_unprepare(i2c->clk);
856 clk_disable(i2c->clk);
843 i2c->suspended = 0; 857 i2c->suspended = 0;
844 858
845 return 0; 859 return 0;
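
The shape the driver moves to is the usual split between the sleeping clk_prepare() step and the cheap clk_enable()/clk_disable() pair, sketched here with a made-up foo driver (not the exynos5 code itself):

static int foo_probe(struct foo *d)
{
	int ret = clk_prepare_enable(d->clk);	/* prepare once, may sleep */
	if (ret)
		return ret;
	/* ... initialize the hardware ... */
	clk_disable(d->clk);	/* stay prepared, just gate the clock */
	return 0;
}

static int foo_xfer(struct foo *d)
{
	int ret = clk_enable(d->clk);	/* fast, per-transfer */
	if (ret)
		return ret;
	/* ... run the transfer ... */
	clk_disable(d->clk);
	return 0;
}

Suspend then only needs clk_unprepare(), and resume re-runs clk_prepare_enable() with its return value actually checked, as in the hunks above.
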
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ 75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a 77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
78#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
78#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 79#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
79 80
80#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ 81#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
180static const struct pci_device_id ismt_ids[] = { 181static const struct pci_device_id ismt_ids[] = {
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, 182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, 183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, 185 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
184 { 0, } 186 { 0, }
185}; 187};
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..ba14a863b451 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", 771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
772 &clk_freq); 772 &clk_freq);
773 if (ret) { 773 if (ret) {
774 dev_err(&pdev->dev, "clock-frequency not specified in DT");
774 dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
775 goto err; 775 goto err;
776 } 776 }
777 777
778 i2c->speed = clk_freq / 1000; 778 i2c->speed = clk_freq / 1000;
779 if (i2c->speed == 0) {
780 ret = -EINVAL;
781 dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
782 goto err;
783 }
779 jz4780_i2c_set_speed(i2c); 784 jz4780_i2c_set_speed(i2c);
780 785
781 dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed); 786 dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
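
The new check exists because the division floors: any clock-frequency below 1000 Hz makes i2c->speed zero, and a zero bus speed poisons the timing math that follows. Stand-alone:

#include <stdio.h>

int main(void)
{
	unsigned int clk_freq = 400;		/* bogus DT value, in Hz */
	unsigned int speed = clk_freq / 1000;	/* kHz; floors to 0 */

	if (speed == 0)
		printf("reject: clock-frequency %u Hz is below 1000\n",
		       clk_freq);
	return 0;
}
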
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
855static const struct of_device_id rk3x_i2c_match[] = { 855static const struct of_device_id rk3x_i2c_match[] = {
856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, 856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, 857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
858 { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
858 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, 859 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
859 {}, 860 {},
860}; 861};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0f2f8484e8ec..e584d88ee337 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -525,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
525 return 0; 525 return 0;
526} 526}
527 527
528
529/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
530static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env) 528static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
531{ 529{
532 struct i2c_client *client = to_i2c_client(dev); 530 struct i2c_client *client = to_i2c_client(dev);
533 int rc; 531 int rc;
534 532
535 rc = acpi_device_uevent_modalias(dev, env); 533 rc = acpi_device_uevent_modalias(dev, env);
536 if (rc != -ENODEV) 534 if (rc != -ENODEV)
537 return rc; 535 return rc;
538 536
539 if (add_uevent_var(env, "MODALIAS=%s%s",
540 I2C_MODULE_PREFIX, client->name))
541 return -ENOMEM;
542 dev_dbg(dev, "uevent\n");
543 return 0;
537 return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
544} 538}
545 539
546/* i2c bus recovery routines */ 540/* i2c bus recovery routines */
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 7748a0a5ddb9..8de073aed001 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -140,22 +140,34 @@ static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_
140 return i2c_demux_activate_master(priv, new_chan); 140 return i2c_demux_activate_master(priv, new_chan);
141} 141}
142 142
143static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr,
144 char *buf)
143static ssize_t available_masters_show(struct device *dev,
144 struct device_attribute *attr,
145 char *buf)
145{ 146{
146 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); 147 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
147 int count = 0, i; 148 int count = 0, i;
148 149
149 for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++) 150 for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
150 count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n",
151 i == priv->cur_chan ? '*' : ' ', i,
152 priv->chan[i].parent_np->full_name);
151 count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
152 i, priv->chan[i].parent_np->full_name,
153 i == priv->num_chan - 1 ? '\n' : ' ');
153 154
154 return count; 155 return count;
155} 156}
157static DEVICE_ATTR_RO(available_masters);
156 158
157static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr,
158 const char *buf, size_t count)
159static ssize_t current_master_show(struct device *dev,
160 struct device_attribute *attr,
161 char *buf)
162{
163 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
164
165 return sprintf(buf, "%d\n", priv->cur_chan);
166}
167
168static ssize_t current_master_store(struct device *dev,
169 struct device_attribute *attr,
170 const char *buf, size_t count)
159{ 171{
160 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); 172 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
161 unsigned int val; 173 unsigned int val;
@@ -172,7 +184,7 @@ static ssize_t cur_master_store(struct device *dev, struct device_attribute *att
172 184
173 return ret < 0 ? ret : count; 185 return ret < 0 ? ret : count;
174} 186}
175static DEVICE_ATTR_RW(cur_master);
187static DEVICE_ATTR_RW(current_master);
176 188
177static int i2c_demux_pinctrl_probe(struct platform_device *pdev) 189static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
178{ 190{
@@ -218,12 +230,18 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
218 /* switch to first parent as active master */ 230 /* switch to first parent as active master */
219 i2c_demux_activate_master(priv, 0); 231 i2c_demux_activate_master(priv, 0);
220 232
221 err = device_create_file(&pdev->dev, &dev_attr_cur_master);
233 err = device_create_file(&pdev->dev, &dev_attr_available_masters);
222 if (err) 234 if (err)
223 goto err_rollback; 235 goto err_rollback;
224 236
237 err = device_create_file(&pdev->dev, &dev_attr_current_master);
238 if (err)
239 goto err_rollback_available;
240
225 return 0; 241 return 0;
226 242
243err_rollback_available:
244 device_remove_file(&pdev->dev, &dev_attr_available_masters);
227err_rollback: 245err_rollback:
228 for (j = 0; j < i; j++) { 246 for (j = 0; j < i; j++) {
229 of_node_put(priv->chan[j].parent_np); 247 of_node_put(priv->chan[j].parent_np);
@@ -238,7 +256,8 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
238 struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev); 256 struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
239 int i; 257 int i;
240 258
241 device_remove_file(&pdev->dev, &dev_attr_cur_master);
259 device_remove_file(&pdev->dev, &dev_attr_current_master);
260 device_remove_file(&pdev->dev, &dev_attr_available_masters);
242 261
243 i2c_demux_deactivate_master(priv); 262 i2c_demux_deactivate_master(priv);
244 263
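
After this change the demuxer exposes a read-only list plus a read-write selector, and the probe path unwinds in reverse order on failure. The attribute plumbing, sketched (the show/store callbacks are the ones from the hunks above and are omitted here):

static DEVICE_ATTR_RO(available_masters);	/* "0:<node> 1:<node>\n" */
static DEVICE_ATTR_RW(current_master);		/* read/write an index */

static int demux_create_attrs(struct platform_device *pdev)
{
	int err;

	err = device_create_file(&pdev->dev, &dev_attr_available_masters);
	if (err)
		return err;
	err = device_create_file(&pdev->dev, &dev_attr_current_master);
	if (err)	/* undo the earlier attribute, newest first */
		device_remove_file(&pdev->dev, &dev_attr_available_masters);
	return err;
}

From user space, reading available_masters lists the candidate bus nodes and writing an index to current_master switches between them.
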
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ba947df5a8c7..c6935de425fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
660 .enter = NULL } 660 .enter = NULL }
661}; 661};
662 662
663static struct cpuidle_state skx_cstates[] = {
664 {
665 .name = "C1-SKX",
666 .desc = "MWAIT 0x00",
667 .flags = MWAIT2flg(0x00),
668 .exit_latency = 2,
669 .target_residency = 2,
670 .enter = &intel_idle,
671 .enter_freeze = intel_idle_freeze, },
672 {
673 .name = "C1E-SKX",
674 .desc = "MWAIT 0x01",
675 .flags = MWAIT2flg(0x01),
676 .exit_latency = 10,
677 .target_residency = 20,
678 .enter = &intel_idle,
679 .enter_freeze = intel_idle_freeze, },
680 {
681 .name = "C6-SKX",
682 .desc = "MWAIT 0x20",
683 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
684 .exit_latency = 133,
685 .target_residency = 600,
686 .enter = &intel_idle,
687 .enter_freeze = intel_idle_freeze, },
688 {
689 .enter = NULL }
690};
691
663static struct cpuidle_state atom_cstates[] = { 692static struct cpuidle_state atom_cstates[] = {
664 { 693 {
665 .name = "C1E-ATM", 694 .name = "C1E-ATM",
@@ -818,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
818 * driver in this case 847 * driver in this case
819 */ 848 */
820 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); 849 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
821 if (!dev->registered)
822 intel_idle_cpu_init(hotcpu);
850 if (dev->registered)
851 break;
852
853 if (intel_idle_cpu_init(hotcpu))
854 return NOTIFY_BAD;
823 855
824 break; 856 break;
825 } 857 }
@@ -904,6 +936,10 @@ static const struct idle_cpu idle_cpu_skl = {
904 .disable_promotion_to_c1e = true, 936 .disable_promotion_to_c1e = true,
905}; 937};
906 938
939static const struct idle_cpu idle_cpu_skx = {
940 .state_table = skx_cstates,
941 .disable_promotion_to_c1e = true,
942};
907 943
908static const struct idle_cpu idle_cpu_avn = { 944static const struct idle_cpu idle_cpu_avn = {
909 .state_table = avn_cstates, 945 .state_table = avn_cstates,
@@ -945,6 +981,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
945 ICPU(0x56, idle_cpu_bdw), 981 ICPU(0x56, idle_cpu_bdw),
946 ICPU(0x4e, idle_cpu_skl), 982 ICPU(0x4e, idle_cpu_skl),
947 ICPU(0x5e, idle_cpu_skl), 983 ICPU(0x5e, idle_cpu_skl),
984 ICPU(0x8e, idle_cpu_skl),
985 ICPU(0x9e, idle_cpu_skl),
986 ICPU(0x55, idle_cpu_skx),
948 ICPU(0x57, idle_cpu_knl), 987 ICPU(0x57, idle_cpu_knl),
949 {} 988 {}
950}; 989};
@@ -987,22 +1026,15 @@ static int __init intel_idle_probe(void)
987 icpu = (const struct idle_cpu *)id->driver_data; 1026 icpu = (const struct idle_cpu *)id->driver_data;
988 cpuidle_state_table = icpu->state_table; 1027 cpuidle_state_table = icpu->state_table;
989 1028
990 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
991 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
992 else
993 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
994
995 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 1029 pr_debug(PREFIX "v" INTEL_IDLE_VERSION
996 " model 0x%X\n", boot_cpu_data.x86_model); 1030 " model 0x%X\n", boot_cpu_data.x86_model);
997 1031
998 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
999 lapic_timer_reliable_states);
1000 return 0; 1032 return 0;
1001} 1033}
1002 1034
1003/* 1035/*
1004 * intel_idle_cpuidle_devices_uninit() 1036 * intel_idle_cpuidle_devices_uninit()
1005 * unregister, free cpuidle_devices
1037 * Unregisters the cpuidle devices.
1006 */ 1038 */
1007static void intel_idle_cpuidle_devices_uninit(void) 1039static void intel_idle_cpuidle_devices_uninit(void)
1008{ 1040{
@@ -1013,9 +1045,6 @@ static void intel_idle_cpuidle_devices_uninit(void)
1013 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); 1045 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1014 cpuidle_unregister_device(dev); 1046 cpuidle_unregister_device(dev);
1015 } 1047 }
1016
1017 free_percpu(intel_idle_cpuidle_devices);
1018 return;
1019} 1048}
1020 1049
1021/* 1050/*
@@ -1111,7 +1140,7 @@ static void intel_idle_state_table_update(void)
1111 * intel_idle_cpuidle_driver_init() 1140 * intel_idle_cpuidle_driver_init()
1112 * allocate, initialize cpuidle_states 1141 * allocate, initialize cpuidle_states
1113 */ 1142 */
1114static int __init intel_idle_cpuidle_driver_init(void)
1143static void __init intel_idle_cpuidle_driver_init(void)
1115{ 1144{
1116 int cstate; 1145 int cstate;
1117 struct cpuidle_driver *drv = &intel_idle_driver; 1146 struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1163,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
1163 drv->state_count += 1; 1192 drv->state_count += 1;
1164 } 1193 }
1165 1194
1166 if (icpu->auto_demotion_disable_flags)
1167 on_each_cpu(auto_demotion_disable, NULL, 1);
1168
1169 if (icpu->byt_auto_demotion_disable_flag) { 1195 if (icpu->byt_auto_demotion_disable_flag) {
1170 wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); 1196 wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
1171 wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); 1197 wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
1172 } 1198 }
1173
1174 if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
1175 on_each_cpu(c1e_promotion_disable, NULL, 1);
1176
1177 return 0;
1178} 1199}
1179 1200
1180 1201
@@ -1193,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
1193 1214
1194 if (cpuidle_register_device(dev)) { 1215 if (cpuidle_register_device(dev)) {
1195 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); 1216 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
1196 intel_idle_cpuidle_devices_uninit();
1197 return -EIO; 1217 return -EIO;
1198 } 1218 }
1199 1219
@@ -1218,40 +1238,51 @@ static int __init intel_idle_init(void)
1218 if (retval) 1238 if (retval)
1219 return retval; 1239 return retval;
1220 1240
1241 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1242 if (intel_idle_cpuidle_devices == NULL)
1243 return -ENOMEM;
1244
1221 intel_idle_cpuidle_driver_init(); 1245 intel_idle_cpuidle_driver_init();
1222 retval = cpuidle_register_driver(&intel_idle_driver); 1246 retval = cpuidle_register_driver(&intel_idle_driver);
1223 if (retval) { 1247 if (retval) {
1224 struct cpuidle_driver *drv = cpuidle_get_driver(); 1248 struct cpuidle_driver *drv = cpuidle_get_driver();
1225 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", 1249 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
1226 drv ? drv->name : "none"); 1250 drv ? drv->name : "none");
1251 free_percpu(intel_idle_cpuidle_devices);
1227 return retval; 1252 return retval;
1228 } 1253 }
1229 1254
1230 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1231 if (intel_idle_cpuidle_devices == NULL)
1232 return -ENOMEM;
1233
1234 cpu_notifier_register_begin(); 1255 cpu_notifier_register_begin();
1235 1256
1236 for_each_online_cpu(i) { 1257 for_each_online_cpu(i) {
1237 retval = intel_idle_cpu_init(i); 1258 retval = intel_idle_cpu_init(i);
1238 if (retval) { 1259 if (retval) {
1260 intel_idle_cpuidle_devices_uninit();
1239 cpu_notifier_register_done(); 1261 cpu_notifier_register_done();
1240 cpuidle_unregister_driver(&intel_idle_driver); 1262 cpuidle_unregister_driver(&intel_idle_driver);
1263 free_percpu(intel_idle_cpuidle_devices);
1241 return retval; 1264 return retval;
1242 } 1265 }
1243 } 1266 }
1244 __register_cpu_notifier(&cpu_hotplug_notifier); 1267 __register_cpu_notifier(&cpu_hotplug_notifier);
1245 1268
1269 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1270 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1271 else
1272 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1273
1246 cpu_notifier_register_done(); 1274 cpu_notifier_register_done();
1247 1275
1276 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
1277 lapic_timer_reliable_states);
1278
1248 return 0; 1279 return 0;
1249} 1280}
1250 1281
1251static void __exit intel_idle_exit(void) 1282static void __exit intel_idle_exit(void)
1252{ 1283{
1253 intel_idle_cpuidle_devices_uninit();
1254 cpuidle_unregister_driver(&intel_idle_driver);
1284 struct cpuidle_device *dev;
1285 int i;
1255 1286
1256 cpu_notifier_register_begin(); 1287 cpu_notifier_register_begin();
1257 1288
@@ -1259,9 +1290,15 @@ static void __exit intel_idle_exit(void)
1259 on_each_cpu(__setup_broadcast_timer, (void *)false, 1); 1290 on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1260 __unregister_cpu_notifier(&cpu_hotplug_notifier); 1291 __unregister_cpu_notifier(&cpu_hotplug_notifier);
1261 1292
1293 for_each_possible_cpu(i) {
1294 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1295 cpuidle_unregister_device(dev);
1296 }
1297
1262 cpu_notifier_register_done(); 1298 cpu_notifier_register_done();
1263 1299
1264 return;
1300 cpuidle_unregister_driver(&intel_idle_driver);
1301 free_percpu(intel_idle_cpuidle_devices);
1265} 1302}
1266 1303
1267module_init(intel_idle_init); 1304module_init(intel_idle_init);
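
The reordering in init/exit above follows one rule: allocate before anything is registered, and tear down in exactly the reverse order. In outline (a sketch, not the intel_idle code verbatim):

static int __init my_init(void)
{
	devices = alloc_percpu(struct cpuidle_device);
	if (!devices)
		return -ENOMEM;				/* nothing to undo */

	if (cpuidle_register_driver(&my_driver)) {
		free_percpu(devices);			/* undo the alloc */
		return -EBUSY;
	}
	/* ... register per-CPU devices, unwinding both on failure ... */
	return 0;
}

static void __exit my_exit(void)
{
	/* ... unregister per-CPU devices ... */
	cpuidle_unregister_driver(&my_driver);	/* last in, first out */
	free_percpu(devices);
}
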
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c73331f7782b..2072a31e813b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
547{ 547{
548 int ret; 548 int ret;
549 int axis = chan->scan_index; 549 int axis = chan->scan_index;
550 unsigned int raw_val;
550 __le16 raw_val;
551 551
552 mutex_lock(&data->mutex); 552 mutex_lock(&data->mutex);
553 ret = bmc150_accel_set_power_state(data, true); 553 ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
557 } 557 }
558 558
559 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis), 559 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
560 &raw_val, 2);
560 &raw_val, sizeof(raw_val));
561 if (ret < 0) { 561 if (ret < 0) {
562 dev_err(data->dev, "Error reading axis %d\n", axis); 562 dev_err(data->dev, "Error reading axis %d\n", axis);
563 bmc150_accel_set_power_state(data, false); 563 bmc150_accel_set_power_state(data, false);
564 mutex_unlock(&data->mutex); 564 mutex_unlock(&data->mutex);
565 return ret; 565 return ret;
566 } 566 }
567 *val = sign_extend32(raw_val >> chan->scan_type.shift,
567 *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
568 chan->scan_type.realbits - 1); 568 chan->scan_type.realbits - 1);
569 ret = bmc150_accel_set_power_state(data, false); 569 ret = bmc150_accel_set_power_state(data, false);
570 mutex_unlock(&data->mutex); 570 mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
988 .realbits = (bits), \ 988 .realbits = (bits), \
989 .storagebits = 16, \ 989 .storagebits = 16, \
990 .shift = 16 - (bits), \ 990 .shift = 16 - (bits), \
991 .endianness = IIO_LE, \
991 }, \ 992 }, \
992 .event_spec = &bmc150_accel_event, \ 993 .event_spec = &bmc150_accel_event, \
993 .num_event_specs = 1 \ 994 .num_event_specs = 1 \
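
The accelerometer returns little-endian 16-bit samples, so the fix types the raw buffer __le16, converts with le16_to_cpu(), and tells the IIO core about the wire format via .endianness. The conversion in stand-alone form (le16toh() playing the role of le16_to_cpu(), memcpy the role of regmap_bulk_read()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;		/* as in the kernel helper */
	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint8_t wire[2] = { 0xf0, 0xff };	/* -16, little-endian */
	uint16_t raw;

	memcpy(&raw, wire, sizeof(raw));
	printf("%d\n", sign_extend32(le16toh(raw), 15));	/* -16 everywhere */
	return 0;
}

On a little-endian host the old code happened to work; on big-endian the unconverted value came back byte-swapped and the sign extension produced garbage.
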
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index af4aea7b20f9..82c718c515a0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -134,6 +134,7 @@ config AT91_ADC
134config AT91_SAMA5D2_ADC 134config AT91_SAMA5D2_ADC
135 tristate "Atmel AT91 SAMA5D2 ADC" 135 tristate "Atmel AT91 SAMA5D2 ADC"
136 depends on ARCH_AT91 || COMPILE_TEST 136 depends on ARCH_AT91 || COMPILE_TEST
137 depends on HAS_IOMEM
137 help 138 help
138 Say yes here to build support for Atmel SAMA5D2 ADC which is 139 Say yes here to build support for Atmel SAMA5D2 ADC which is
139 available on SAMA5D2 SoC family. 140 available on SAMA5D2 SoC family.
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e5266c..998dc3caad4c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1386 }, 1386 },
1387 [max11644] = { 1387 [max11644] = {
1388 .bits = 12, 1388 .bits = 12,
1389 .int_vref_mv = 2048,
1389 .int_vref_mv = 4096,
1390 .mode_list = max11644_mode_list, 1390 .mode_list = max11644_mode_list,
1391 .num_modes = ARRAY_SIZE(max11644_mode_list), 1391 .num_modes = ARRAY_SIZE(max11644_mode_list),
1392 .default_mode = s0to1, 1392 .default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1396 }, 1396 },
1397 [max11645] = { 1397 [max11645] = {
1398 .bits = 12, 1398 .bits = 12,
1399 .int_vref_mv = 4096,
1399 .int_vref_mv = 2048,
1400 .mode_list = max11644_mode_list, 1400 .mode_list = max11644_mode_list,
1401 .num_modes = ARRAY_SIZE(max11644_mode_list), 1401 .num_modes = ARRAY_SIZE(max11644_mode_list),
1402 .default_mode = s0to1, 1402 .default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1406 }, 1406 },
1407 [max11646] = { 1407 [max11646] = {
1408 .bits = 10, 1408 .bits = 10,
1409 .int_vref_mv = 2048,
1409 .int_vref_mv = 4096,
1410 .mode_list = max11644_mode_list, 1410 .mode_list = max11644_mode_list,
1411 .num_modes = ARRAY_SIZE(max11644_mode_list), 1411 .num_modes = ARRAY_SIZE(max11644_mode_list),
1412 .default_mode = s0to1, 1412 .default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1416 }, 1416 },
1417 [max11647] = { 1417 [max11647] = {
1418 .bits = 10, 1418 .bits = 10,
1419 .int_vref_mv = 4096,
1419 .int_vref_mv = 2048,
1420 .mode_list = max11644_mode_list, 1420 .mode_list = max11644_mode_list,
1421 .num_modes = ARRAY_SIZE(max11644_mode_list), 1421 .num_modes = ARRAY_SIZE(max11644_mode_list),
1422 .default_mode = s0to1, 1422 .default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
1680 { "max11615", max11615 }, 1680 { "max11615", max11615 },
1681 { "max11616", max11616 }, 1681 { "max11616", max11616 },
1682 { "max11617", max11617 }, 1682 { "max11617", max11617 },
1683 { "max11644", max11644 },
1684 { "max11645", max11645 },
1685 { "max11646", max11646 },
1686 { "max11647", max11647 },
1683 {} 1687 {}
1684}; 1688};
1685 1689
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index bbce3b09ac45..4dac567e75b4 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
452static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val) 452static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
453{ 453{
454 int ret; 454 int ret;
455 unsigned int raw_val;
455 __le16 raw_val;
456 456
457 mutex_lock(&data->mutex); 457 mutex_lock(&data->mutex);
458 ret = bmg160_set_power_state(data, true); 458 ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
462 } 462 }
463 463
464 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val, 464 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
465 2);
465 sizeof(raw_val));
466 if (ret < 0) { 466 if (ret < 0) {
467 dev_err(data->dev, "Error reading axis %d\n", axis); 467 dev_err(data->dev, "Error reading axis %d\n", axis);
468 bmg160_set_power_state(data, false); 468 bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
470 return ret; 470 return ret;
471 } 471 }
472 472
473 *val = sign_extend32(raw_val, 15);
473 *val = sign_extend32(le16_to_cpu(raw_val), 15);
474 ret = bmg160_set_power_state(data, false); 474 ret = bmg160_set_power_state(data, false);
475 mutex_unlock(&data->mutex); 475 mutex_unlock(&data->mutex);
476 if (ret < 0) 476 if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
733 .sign = 's', \ 733 .sign = 's', \
734 .realbits = 16, \ 734 .realbits = 16, \
735 .storagebits = 16, \ 735 .storagebits = 16, \
736 .endianness = IIO_LE, \
736 }, \ 737 }, \
737 .event_spec = &bmg160_event, \ 738 .event_spec = &bmg160_event, \
738 .num_event_specs = 1 \ 739 .num_event_specs = 1 \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
780 mutex_unlock(&data->mutex); 781 mutex_unlock(&data->mutex);
781 goto err; 782 goto err;
782 } 783 }
783 data->buffer[i++] = ret;
784 data->buffer[i++] = val;
784 } 785 }
785 mutex_unlock(&data->mutex); 786 mutex_unlock(&data->mutex);
786 787
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 09db89359544..90ab8a2d2846 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
238 238
239 mutex_lock(&data->lock); 239 mutex_lock(&data->lock);
240 240
241 while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) {
241 while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
242 ret = max30100_read_measurement(data); 242 ret = max30100_read_measurement(data);
243 if (ret) 243 if (ret)
244 break; 244 break;
245 245
246 iio_push_to_buffers(data->indio_dev, data->buffer); 246 iio_push_to_buffers(data->indio_dev, data->buffer);
247 cnt--;
247 } 248 }
248 249
249 mutex_unlock(&data->lock); 250 mutex_unlock(&data->lock);
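
The while condition is the subtle part: in the old "while (cnt-- || ...)" form, the cnt-- test itself grants one extra pass after every refill, so the handler could read one sample more than the FIFO held, and cnt was decremented even when the refill branch was taken. Moving the decrement into the body makes one read cost exactly one count. Stand-alone model of the fixed loop:

#include <stdio.h>

static int fifo = 3;			/* samples queued in hardware */

static int fifo_count(void) { return fifo; }
static void read_sample(void) { fifo--; }

int main(void)
{
	int cnt = 0, reads = 0;

	while (cnt || (cnt = fifo_count() > 0)) {
		read_sample();
		reads++;
		cnt--;			/* pay for the read just done */
	}
	printf("%d reads\n", reads);	/* 3; the old form did 4 */
	return 0;
}
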
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index a7f557af4389..847455a2d6bb 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -9,9 +9,8 @@ config INV_MPU6050_IIO
9 9
10config INV_MPU6050_I2C 10config INV_MPU6050_I2C
11 tristate "Invensense MPU6050 devices (I2C)" 11 tristate "Invensense MPU6050 devices (I2C)"
12 depends on I2C
12 depends on I2C_MUX
13 select INV_MPU6050_IIO 13 select INV_MPU6050_IIO
14 select I2C_MUX
15 select REGMAP_I2C 14 select REGMAP_I2C
16 help 15 help
17 This driver supports the Invensense MPU6050 devices. 16 This driver supports the Invensense MPU6050 devices.
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b976332d45d3..90462fcf5436 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -653,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
653 unsigned int modes; 653 unsigned int modes;
654 654
655 memset(config, 0, sizeof(*config)); 655 memset(config, 0, sizeof(*config));
656 config->watermark = ~0;
656 657
657 /* 658 /*
658 * If there is just one buffer and we are removing it there is nothing 659 * If there is just one buffer and we are removing it there is nothing
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index f6a07dc32ae4..a6af56ad10e1 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
769 mutex_lock(&data->lock); 769 mutex_lock(&data->lock);
770 data->gesture_mode_running = 1; 770 data->gesture_mode_running = 1;
771 771
772 while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) {
772 while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
773 ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE, 773 ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
774 &data->buffer, 4); 774 &data->buffer, 4);
775 775
@@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
777 goto err_read; 777 goto err_read;
778 778
779 iio_push_to_buffers(data->indio_dev, data->buffer); 779 iio_push_to_buffers(data->indio_dev, data->buffer);
780 cnt--;
780 } 781 }
781 782
782err_read: 783err_read:
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
44static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev) 44static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
45{ 45{
46} 46}
47#define ST_MAGN_TRIGGER_SET_STATE NULL
47#endif /* CONFIG_IIO_BUFFER */ 48#endif /* CONFIG_IIO_BUFFER */
48 49
49#endif /* ST_MAGN_H */ 50#endif /* ST_MAGN_H */
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
691 NULL); 691 NULL);
692 692
693 /* Couldn't find default GID location */ 693
694 WARN_ON(ix < 0);
694 if (WARN_ON(ix < 0))
695 goto release;
695 696
696 zattr_type.gid_type = gid_type; 697 zattr_type.gid_type = gid_type;
697 698
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
51#include <rdma/ib_cm.h> 52#include <rdma/ib_cm.h>
52#include <rdma/ib_user_cm.h> 53#include <rdma/ib_user_cm.h>
53#include <rdma/ib_marshall.h> 54#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1103 struct ib_ucm_cmd_hdr hdr; 1104 struct ib_ucm_cmd_hdr hdr;
1104 ssize_t result; 1105 ssize_t result;
1105 1106
1107 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1108 return -EACCES;
1109
1106 if (len < sizeof(hdr)) 1110 if (len < sizeof(hdr))
1107 return -EINVAL; 1111 return -EINVAL;
1108 1112
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
1574 struct rdma_ucm_cmd_hdr hdr; 1574 struct rdma_ucm_cmd_hdr hdr;
1575 ssize_t ret; 1575 ssize_t ret;
1576 1576
1577 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1578 return -EACCES;
1579
1577 if (len < sizeof(hdr)) 1580 if (len < sizeof(hdr))
1578 return -EINVAL; 1581 return -EINVAL;
1579 1582
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
52
51#include "uverbs.h" 53#include "uverbs.h"
52 54
53MODULE_AUTHOR("Roland Dreier"); 55MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
709 int srcu_key; 711 int srcu_key;
710 ssize_t ret; 712 ssize_t ret;
711 713
714 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
715 return -EACCES;
716
712 if (count < sizeof hdr) 717 if (count < sizeof hdr)
713 return -EINVAL; 718 return -EINVAL;
714 719
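
All three write() entry points now refuse callers whose credentials differ from the file's opener or whose address limit is not the normal user-space one, closing the splice()-style abuse of these command parsers. At the time of this series the helper added via the new <rdma/ib.h> include looks roughly like the following (quoted from memory; treat it as a sketch, not the authoritative source):

static inline bool ib_safe_file_access(struct file *filp)
{
	return filp->f_cred == current_cred() &&
	       segment_eq(get_fs(), USER_DS);
}
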
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
1860void ib_drain_qp(struct ib_qp *qp) 1860void ib_drain_qp(struct ib_qp *qp)
1861{ 1861{
1862 ib_drain_sq(qp); 1862 ib_drain_sq(qp);
1863 ib_drain_rq(qp);
1863 if (!qp->srq)
1864 ib_drain_rq(qp);
1864} 1865}
1865EXPORT_SYMBOL(ib_drain_qp); 1866EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; 1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; 1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1392 dev->ibdev.iwcm->get_qp = iwch_get_qp; 1392 dev->ibdev.iwcm->get_qp = iwch_get_qp;
1393 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1394 sizeof(dev->ibdev.iwcm->ifname));
1393 1395
1394 ret = ib_register_device(&dev->ibdev, NULL); 1396 ret = ib_register_device(&dev->ibdev, NULL);
1395 if (ret) 1397 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, 162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
163 &cq->bar2_qid, 163 &cq->bar2_qid,
164 user ? &cq->bar2_pa : NULL); 164 user ? &cq->bar2_pa : NULL);
165 if (user && !cq->bar2_va) {
165 if (user && !cq->bar2_pa) {
166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", 166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
167 pci_name(rdev->lldi.pdev), cq->cqid); 167 pci_name(rdev->lldi.pdev), cq->cqid);
168 ret = -EINVAL; 168 ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; 580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; 581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
582 dev->ibdev.iwcm->get_qp = c4iw_get_qp; 582 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
583 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
584 sizeof(dev->ibdev.iwcm->ifname));
583 585
584 ret = ib_register_device(&dev->ibdev, NULL); 586 ret = ib_register_device(&dev->ibdev, NULL);
585 if (ret) 587 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
185 185
186 if (pbar2_pa) 186 if (pbar2_pa)
187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; 187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
188
189 if (is_t4(rdev->lldi.adapter_type))
190 return NULL;
191
188 return rdev->bar2_kva + bar2_qoffset; 192 return rdev->bar2_kva + bar2_qoffset;
189} 193}
190 194
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
270 /* 274 /*
271 * User mode must have bar2 access. 275 * User mode must have bar2 access.
272 */ 276 */
273 if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
277 if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
274 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", 278 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
275 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); 279 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
276 goto free_dma; 280 goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1895void c4iw_drain_sq(struct ib_qp *ibqp) 1899void c4iw_drain_sq(struct ib_qp *ibqp)
1896{ 1900{
1897 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1901 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1902 unsigned long flag;
1903 bool need_to_wait;
1898 1904
1899 wait_for_completion(&qp->sq_drained);
1905 spin_lock_irqsave(&qp->lock, flag);
1906 need_to_wait = !t4_sq_empty(&qp->wq);
1907 spin_unlock_irqrestore(&qp->lock, flag);
1908
1909 if (need_to_wait)
1910 wait_for_completion(&qp->sq_drained);
1900} 1911}
1901 1912
1902void c4iw_drain_rq(struct ib_qp *ibqp) 1913void c4iw_drain_rq(struct ib_qp *ibqp)
1903{ 1914{
1904 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1915 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1916 unsigned long flag;
1917 bool need_to_wait;
1918
1919 spin_lock_irqsave(&qp->lock, flag);
1920 need_to_wait = !t4_rq_empty(&qp->wq);
1921 spin_unlock_irqrestore(&qp->lock, flag);
1905 1922
1906 wait_for_completion(&qp->rq_drained);
1923 if (need_to_wait)
1924 wait_for_completion(&qp->rq_drained);
1907} 1925}
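
The drain fix follows a general pattern: sample the queue state under the lock, and only then decide whether to block, because waiting on a completion for an already-empty queue would sleep forever. Sketched with stand-in names (my_qp and queue_empty() are placeholders):

static void drain_queue(struct my_qp *qp)
{
	unsigned long flags;
	bool need_to_wait;

	spin_lock_irqsave(&qp->lock, flags);
	need_to_wait = !queue_empty(&qp->wq);	/* decide under the lock */
	spin_unlock_irqrestore(&qp->lock, flags);

	if (need_to_wait)
		wait_for_completion(&qp->drained);
}
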
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 92745d755272..38f917a6c778 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1992/** 1992/**
1993 * i40iw_get_dst_ipv6 1993 * i40iw_get_dst_ipv6
1994 */ 1994 */
1995#if IS_ENABLED(CONFIG_IPV6)
1996static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, 1995static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
1997 struct sockaddr_in6 *dst_addr) 1996 struct sockaddr_in6 *dst_addr)
1998{ 1997{
@@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2008 dst = ip6_route_output(&init_net, NULL, &fl6); 2007 dst = ip6_route_output(&init_net, NULL, &fl6);
2009 return dst; 2008 return dst;
2010} 2009}
2011#endif
2012 2010
2013/** 2011/**
2014 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address 2012 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2016 * @dst_ip: remote ip address 2014 * @dst_ip: remote ip address
2017 * @arpindex: if there is an arp entry 2015 * @arpindex: if there is an arp entry
2018 */ 2016 */
2019#if IS_ENABLED(CONFIG_IPV6)
2020static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, 2017static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2021 u32 *src, 2018 u32 *src,
2022 u32 *dest, 2019 u32 *dest,
@@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2089 dst_release(dst); 2086 dst_release(dst);
2090 return rc; 2087 return rc;
2091} 2088}
2092#endif
2093 2089
2094/** 2090/**
2095 * i40iw_ipv4_is_loopback - check if loopback 2091 * i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
2190 cm_info->loc_addr[0], 2186 cm_info->loc_addr[0],
2191 cm_info->rem_addr[0], 2187 cm_info->rem_addr[0],
2192 oldarpindex); 2188 oldarpindex);
2193#if IS_ENABLED(CONFIG_IPV6)
2189 else if (IS_ENABLED(CONFIG_IPV6))
2194 else
2195 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, 2190 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2196 cm_info->loc_addr, 2191 cm_info->loc_addr,
2197 cm_info->rem_addr, 2192 cm_info->rem_addr,
2198 oldarpindex); 2193 oldarpindex);
2199#endif
2194 else
2195 arpindex = -EINVAL;
2200 } 2196 }
2201 if (arpindex < 0) { 2197 if (arpindex < 0) {
2202 i40iw_pr_err("cm_node arpindex\n"); 2198 i40iw_pr_err("cm_node arpindex\n");
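
Replacing the #if/#endif pairs with IS_ENABLED() keeps the IPv6 branch visible to the compiler in every configuration, so it is always parsed and type-checked and simply discarded as dead code when CONFIG_IPV6 is off. A minimal stand-alone model (the real kernel macro inspects generated config symbols; the one-liner here is a simplification):

#include <stdio.h>

#define IS_ENABLED(option) (option)
#define CONFIG_IPV6 0			/* pretend IPv6 is compiled out */

static int resolve_ipv6(void) { return 7; }

int main(void)
{
	int arpindex;

	if (IS_ENABLED(CONFIG_IPV6))
		arpindex = resolve_ipv6();	/* still compiled, then elided */
	else
		arpindex = -1;
	printf("%d\n", arpindex);
	return 0;
}
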
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5acf346e048e..6ad0489cb3c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
530 sizeof(struct mlx5_wqe_ctrl_seg)) / 530 sizeof(struct mlx5_wqe_ctrl_seg)) /
531 sizeof(struct mlx5_wqe_data_seg); 531 sizeof(struct mlx5_wqe_data_seg);
532 props->max_sge = min(max_rq_sg, max_sq_sg); 532 props->max_sge = min(max_rq_sg, max_sq_sg);
533 props->max_sge_rd = props->max_sge;
533 props->max_sge_rd = MLX5_MAX_SGE_RD;
534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
671 struct mlx5_ib_dev *dev = to_mdev(ibdev); 671 struct mlx5_ib_dev *dev = to_mdev(ibdev);
672 struct mlx5_core_dev *mdev = dev->mdev; 672 struct mlx5_core_dev *mdev = dev->mdev;
673 struct mlx5_hca_vport_context *rep; 673 struct mlx5_hca_vport_context *rep;
674 int max_mtu; 674 u16 max_mtu;
675 int oper_mtu; 675 u16 oper_mtu;
676 int err; 676 int err;
677 u8 ib_link_width_oper; 677 u8 ib_link_width_oper;
678 u8 vl_hw_cap; 678 u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f16c818ad2e6..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); 776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, 777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
778 unsigned long end); 778 unsigned long end);
779int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
780 u8 port, struct ifla_vf_info *info);
781int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
782 u8 port, int state);
783int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
784 u8 port, struct ifla_vf_stats *stats);
785int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
786 u64 guid, int type);
787
788#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 779#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
789static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) 780static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
790{ 781{
@@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
801 792
802#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 793#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
803 794
795int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
796 u8 port, struct ifla_vf_info *info);
797int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
798 u8 port, int state);
799int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
800 u8 port, struct ifla_vf_stats *stats);
801int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
802 u64 guid, int type);
803
804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, 804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
805 int index); 805 int index);
806 806
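The mlx5_ib.h hunk moves the SR-IOV prototypes out of the on-demand-paging #ifdef. A hedged standalone sketch of the underlying header rule (invented names, not the real mlx5 header): only symbols whose definition is conditionally compiled belong inside the guard; helpers that are always built need unconditionally visible prototypes, or configs with the option off lose them.

#include <stdio.h>

/* #define CONFIG_FEATURE_DEMO 1 */

#ifdef CONFIG_FEATURE_DEMO
void feature_init(void);			/* real definition built only then */
#else
static inline void feature_init(void) {}	/* stub keeps callers unchanged */
#endif

int always_built_helper(int arg);	/* must stay outside the #ifdef */

int always_built_helper(int arg)
{
	return arg + 1;
}

int main(void)
{
	feature_init();
	printf("%d\n", always_built_helper(41));
	return 0;
}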
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3ea9e055fdd3..92914539edc7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); 500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
501 */ 501 */
502 502
503 if (!netif_carrier_ok(netdev))
504 return NETDEV_TX_OK;
505
506 if (netif_queue_stopped(netdev)) 503 if (netif_queue_stopped(netdev))
507 return NETDEV_TX_BUSY; 504 return NETDEV_TX_BUSY;
508 505
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
45#include <linux/export.h> 45#include <linux/export.h>
46#include <linux/uio.h> 46#include <linux/uio.h>
47 47
48#include <rdma/ib.h>
49
48#include "qib.h" 50#include "qib.h"
49#include "qib_common.h" 51#include "qib_common.h"
50#include "qib_user_sdma.h" 52#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
2067 ssize_t ret = 0; 2069 ssize_t ret = 0;
2068 void *dest; 2070 void *dest;
2069 2071
2072 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2073 return -EACCES;
2074
2070 if (count < sizeof(cmd.type)) { 2075 if (count < sizeof(cmd.type)) {
2071 ret = -EINVAL; 2076 ret = -EINVAL;
2072 goto bail; 2077 goto bail;
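The qib_write() hunk adds a guard at the very top of the handler. Roughly, ib_safe_file_access() checks that the caller's credentials match the file opener's, which blocks splice()/sendfile() paths from reaching a command-parsing write() handler indirectly. A kernel-style sketch of the pattern (fragment, not runnable standalone; demo_write is an invented name):

static ssize_t demo_write(struct file *fp, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
		return -EACCES;		/* reject unsafe callers before parsing */
	/* ... normal command decoding continues here ... */
	return count;
}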
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
1637 spin_unlock_irqrestore(&qp->s_hlock, flags); 1637 spin_unlock_irqrestore(&qp->s_hlock, flags);
1638 if (nreq) { 1638 if (nreq) {
1639 if (call_send) 1639 if (call_send)
1640 rdi->driver_f.schedule_send_no_lock(qp);
1641 else
1642 rdi->driver_f.do_send(qp); 1640 rdi->driver_f.do_send(qp);
1641 else
1642 rdi->driver_f.schedule_send_no_lock(qp);
1643 } 1643 }
1644 return err; 1644 return err;
1645} 1645}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0bd3cb2f3c67..8b42401d4795 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,26 +1264,40 @@ free_mem:
1264 */ 1264 */
1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) 1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1266{ 1266{
1267 struct se_session *se_sess;
1268 struct srpt_send_ioctx *ioctx; 1267 struct srpt_send_ioctx *ioctx;
1269 int tag; 1268 unsigned long flags;
1270 1269
1271 BUG_ON(!ch); 1270 BUG_ON(!ch);
1272 se_sess = ch->sess;
1273 1271
1274 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 1272 ioctx = NULL;
1275 if (tag < 0) { 1273 spin_lock_irqsave(&ch->spinlock, flags);
1276 pr_err("Unable to obtain tag for srpt_send_ioctx\n"); 1274 if (!list_empty(&ch->free_list)) {
1277 return NULL; 1275 ioctx = list_first_entry(&ch->free_list,
1276 struct srpt_send_ioctx, free_list);
1277 list_del(&ioctx->free_list);
1278 } 1278 }
1279 ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag]; 1279 spin_unlock_irqrestore(&ch->spinlock, flags);
1280 memset(ioctx, 0, sizeof(struct srpt_send_ioctx)); 1280
1281 ioctx->ch = ch; 1281 if (!ioctx)
1282 return ioctx;
1283
1284 BUG_ON(ioctx->ch != ch);
1282 spin_lock_init(&ioctx->spinlock); 1285 spin_lock_init(&ioctx->spinlock);
1283 ioctx->state = SRPT_STATE_NEW; 1286 ioctx->state = SRPT_STATE_NEW;
1287 ioctx->n_rbuf = 0;
1288 ioctx->rbufs = NULL;
1289 ioctx->n_rdma = 0;
1290 ioctx->n_rdma_wrs = 0;
1291 ioctx->rdma_wrs = NULL;
1292 ioctx->mapped_sg_count = 0;
1284 init_completion(&ioctx->tx_done); 1293 init_completion(&ioctx->tx_done);
1285 1294 ioctx->queue_status_only = false;
1286 ioctx->cmd.map_tag = tag; 1295 /*
1296 * transport_init_se_cmd() does not initialize all fields, so do it
1297 * here.
1298 */
1299 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1300 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1287 1301
1288 return ioctx; 1302 return ioctx;
1289} 1303}
@@ -2021,7 +2035,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2021 struct ib_cm_rep_param *rep_param; 2035 struct ib_cm_rep_param *rep_param;
2022 struct srpt_rdma_ch *ch, *tmp_ch; 2036 struct srpt_rdma_ch *ch, *tmp_ch;
2023 u32 it_iu_len; 2037 u32 it_iu_len;
2024 int ret = 0; 2038 int i, ret = 0;
2025 unsigned char *p; 2039 unsigned char *p;
2026 2040
2027 WARN_ON_ONCE(irqs_disabled()); 2041 WARN_ON_ONCE(irqs_disabled());
@@ -2143,6 +2157,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2143 if (!ch->ioctx_ring) 2157 if (!ch->ioctx_ring)
2144 goto free_ch; 2158 goto free_ch;
2145 2159
2160 INIT_LIST_HEAD(&ch->free_list);
2161 for (i = 0; i < ch->rq_size; i++) {
2162 ch->ioctx_ring[i]->ch = ch;
2163 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2164 }
2165
2146 ret = srpt_create_ch_ib(ch); 2166 ret = srpt_create_ch_ib(ch);
2147 if (ret) { 2167 if (ret) {
2148 rej->reason = cpu_to_be32( 2168 rej->reason = cpu_to_be32(
@@ -2173,8 +2193,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2173 p = &ch->sess_name[0]; 2193 p = &ch->sess_name[0];
2174 2194
2175try_again: 2195try_again:
2176 ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size, 2196 ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
2177 sizeof(struct srpt_send_ioctx),
2178 TARGET_PROT_NORMAL, p, ch, NULL); 2197 TARGET_PROT_NORMAL, p, ch, NULL);
2179 if (IS_ERR(ch->sess)) { 2198 if (IS_ERR(ch->sess)) {
2180 pr_info("Rejected login because no ACL has been" 2199 pr_info("Rejected login because no ACL has been"
@@ -2881,7 +2900,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2881 struct srpt_send_ioctx *ioctx = container_of(se_cmd, 2900 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
2882 struct srpt_send_ioctx, cmd); 2901 struct srpt_send_ioctx, cmd);
2883 struct srpt_rdma_ch *ch = ioctx->ch; 2902 struct srpt_rdma_ch *ch = ioctx->ch;
2884 struct se_session *se_sess = ch->sess; 2903 unsigned long flags;
2885 2904
2886 WARN_ON(ioctx->state != SRPT_STATE_DONE); 2905 WARN_ON(ioctx->state != SRPT_STATE_DONE);
2887 WARN_ON(ioctx->mapped_sg_count != 0); 2906 WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2892,7 +2911,9 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2892 ioctx->n_rbuf = 0; 2911 ioctx->n_rbuf = 0;
2893 } 2912 }
2894 2913
2895 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 2914 spin_lock_irqsave(&ch->spinlock, flags);
2915 list_add(&ioctx->free_list, &ch->free_list);
2916 spin_unlock_irqrestore(&ch->spinlock, flags);
2896} 2917}
2897 2918
2898/** 2919/**
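The srpt hunks above replace percpu_ida tag allocation with a spinlock-protected per-channel free list: contexts are pre-linked at channel setup, srpt_get_send_ioctx() pops one under the lock, and srpt_release_cmd() pushes it back. A runnable userspace analog of that scheme (a sketch with invented names, using a singly linked list and a pthread spinlock in place of list_head and the channel lock):

#include <pthread.h>
#include <stdio.h>

#define NCTX 4

struct ioctx {
	struct ioctx *next;	/* stands in for struct list_head free_list */
	int id;
};

static struct ioctx pool[NCTX];
static struct ioctx *free_list;
static pthread_spinlock_t lock;

static struct ioctx *get_ioctx(void)
{
	struct ioctx *c;

	pthread_spin_lock(&lock);
	c = free_list;			/* pop, like list_first_entry() */
	if (c)
		free_list = c->next;
	pthread_spin_unlock(&lock);
	return c;			/* NULL when the pool is exhausted */
}

static void put_ioctx(struct ioctx *c)
{
	pthread_spin_lock(&lock);
	c->next = free_list;		/* push back, like list_add() */
	free_list = c;
	pthread_spin_unlock(&lock);
}

int main(void)
{
	pthread_spin_init(&lock, 0);
	for (int i = 0; i < NCTX; i++) {  /* mirrors the INIT_LIST_HEAD loop */
		pool[i].id = i;
		put_ioctx(&pool[i]);
	}
	struct ioctx *c = get_ioctx();
	printf("got ioctx %d\n", c ? c->id : -1);
	put_ioctx(c);
	return 0;
}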
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ca288f019315..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,6 +179,7 @@ struct srpt_recv_ioctx {
179 * struct srpt_send_ioctx - SRPT send I/O context. 179 * struct srpt_send_ioctx - SRPT send I/O context.
180 * @ioctx: See above. 180 * @ioctx: See above.
181 * @ch: Channel pointer. 181 * @ch: Channel pointer.
182 * @free_list: Node in srpt_rdma_ch.free_list.
182 * @n_rbuf: Number of data buffers in the received SRP command. 183 * @n_rbuf: Number of data buffers in the received SRP command.
183 * @rbufs: Pointer to SRP data buffer array. 184 * @rbufs: Pointer to SRP data buffer array.
184 * @single_rbuf: SRP data buffer if the command has only a single buffer. 185 * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -201,6 +202,7 @@ struct srpt_send_ioctx {
201 struct srp_direct_buf *rbufs; 202 struct srp_direct_buf *rbufs;
202 struct srp_direct_buf single_rbuf; 203 struct srp_direct_buf single_rbuf;
203 struct scatterlist *sg; 204 struct scatterlist *sg;
205 struct list_head free_list;
204 spinlock_t spinlock; 206 spinlock_t spinlock;
205 enum srpt_command_state state; 207 enum srpt_command_state state;
206 struct se_cmd cmd; 208 struct se_cmd cmd;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d12b7ff..1142a93dd90b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@ static const struct xpad_device {
153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, 155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
156 { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
156 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 157 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
157 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, 158 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
158 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, 159 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
304 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ 305 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
305 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 306 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
306 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ 307 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
308 XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
307 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 309 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
308 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 310 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
309 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 311 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a745ffa..982936334537 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
178 input_set_drvdata(haptics->input_dev, haptics); 178 input_set_drvdata(haptics->input_dev, haptics);
179 179
180 haptics->input_dev->name = "arizona:haptics"; 180 haptics->input_dev->name = "arizona:haptics";
181 haptics->input_dev->dev.parent = pdev->dev.parent;
182 haptics->input_dev->close = arizona_haptics_close; 181 haptics->input_dev->close = arizona_haptics_close;
183 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit); 182 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
184 183
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay)) 353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
354 kpd_delay = 15625; 354 kpd_delay = 15625;
355 355
356 if (kpd_delay > 62500 || kpd_delay == 0) { 356 /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
357 if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
357 dev_err(&pdev->dev, "invalid power key trigger delay\n"); 358 dev_err(&pdev->dev, "invalid power key trigger delay\n");
358 return -EINVAL; 359 return -EINVAL;
359 } 360 }
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
385 pwr->name = "pmic8xxx_pwrkey"; 386 pwr->name = "pmic8xxx_pwrkey";
386 pwr->phys = "pmic8xxx_pwrkey/input0"; 387 pwr->phys = "pmic8xxx_pwrkey/input0";
387 388
388 delay = (kpd_delay << 10) / USEC_PER_SEC; 389 delay = (kpd_delay << 6) / USEC_PER_SEC;
389 delay = 1 + ilog2(delay); 390 delay = ilog2(delay);
390 391
391 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl); 392 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
392 if (err < 0) { 393 if (err < 0) {
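The pmic8xxx-pwrkey hunk rewrites the delay-to-register math. The register apparently encodes the trigger delay as (1 << reg)/64 seconds, so microseconds map to reg = ilog2(usec * 64 / USEC_PER_SEC); the old "<< 10 ... 1 + ilog2" arithmetic was off. A standalone worked check of the corrected formula at the two range endpoints:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned int ilog2u(unsigned long v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long usecs[] = { USEC_PER_SEC / 64, USEC_PER_SEC * 2 };

	for (int i = 0; i < 2; i++) {
		unsigned long delay = (usecs[i] << 6) / USEC_PER_SEC;
		printf("%lu us -> reg %u\n", usecs[i], ilog2u(delay));
		/* prints reg 0 for 1/64 s and reg 7 for 2 s */
	}
	return 0;
}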
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d462f1..caa5a62c42fb 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
222 222
223 info->input_dev->name = "twl4030:vibrator"; 223 info->input_dev->name = "twl4030:vibrator";
224 info->input_dev->id.version = 1; 224 info->input_dev->id.version = 1;
225 info->input_dev->dev.parent = pdev->dev.parent;
226 info->input_dev->close = twl4030_vibra_close; 225 info->input_dev->close = twl4030_vibra_close;
227 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 226 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
228 227
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad48de6..53e33fab3f7a 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,7 +45,6 @@
45struct vibra_info { 45struct vibra_info {
46 struct device *dev; 46 struct device *dev;
47 struct input_dev *input_dev; 47 struct input_dev *input_dev;
48 struct workqueue_struct *workqueue;
49 struct work_struct play_work; 48 struct work_struct play_work;
50 struct mutex mutex; 49 struct mutex mutex;
51 int irq; 50 int irq;
@@ -213,11 +212,7 @@ static int vibra_play(struct input_dev *input, void *data,
213 info->strong_speed = effect->u.rumble.strong_magnitude; 212 info->strong_speed = effect->u.rumble.strong_magnitude;
214 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1; 213 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
215 214
216 ret = queue_work(info->workqueue, &info->play_work); 215 schedule_work(&info->play_work);
217 if (!ret) {
218 dev_info(&input->dev, "work is already on queue\n");
219 return ret;
220 }
221 216
222 return 0; 217 return 0;
223} 218}
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
362 357
363 info->input_dev->name = "twl6040:vibrator"; 358 info->input_dev->name = "twl6040:vibrator";
364 info->input_dev->id.version = 1; 359 info->input_dev->id.version = 1;
365 info->input_dev->dev.parent = pdev->dev.parent;
366 info->input_dev->close = twl6040_vibra_close; 360 info->input_dev->close = twl6040_vibra_close;
367 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 361 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
368 362
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
858 goto err_free_buf; 858 goto err_free_buf;
859 } 859 }
860 860
861 /* Sanity check that a device has an endpoint */
862 if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
863 dev_err(&usbinterface->dev,
864 "Invalid number of endpoints\n");
865 error = -EINVAL;
866 goto err_free_urb;
867 }
868
861 /* 869 /*
862 * The endpoint is always altsetting 0, we know this since we know 870 * The endpoint is always altsetting 0, we know this since we know
863 * this device only has one interrupt endpoint 871 * this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
879 * HID report descriptor 887 * HID report descriptor
880 */ 888 */
881 if (usb_get_extra_descriptor(usbinterface->cur_altsetting, 889 if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
882 HID_DEVICE_TYPE, &hid_desc) != 0){ 890 HID_DEVICE_TYPE, &hid_desc) != 0) {
883 dev_err(&usbinterface->dev, 891 dev_err(&usbinterface->dev,
884 "Can't retrieve exta USB descriptor to get hid report descriptor length\n"); 892 "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
885 error = -EIO; 893 error = -EIO;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129219ef..5efadad4615b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -92,6 +92,7 @@ struct iommu_dev_data {
92 struct list_head dev_data_list; /* For global dev_data_list */ 92 struct list_head dev_data_list; /* For global dev_data_list */
93 struct protection_domain *domain; /* Domain the device is bound to */ 93 struct protection_domain *domain; /* Domain the device is bound to */
94 u16 devid; /* PCI Device ID */ 94 u16 devid; /* PCI Device ID */
95 u16 alias; /* Alias Device ID */
95 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
96 bool passthrough; /* Device is identity mapped */ 97 bool passthrough; /* Device is identity mapped */
97 struct { 98 struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
166 return container_of(dom, struct protection_domain, domain); 167 return container_of(dom, struct protection_domain, domain);
167} 168}
168 169
170static inline u16 get_device_id(struct device *dev)
171{
172 struct pci_dev *pdev = to_pci_dev(dev);
173
174 return PCI_DEVID(pdev->bus->number, pdev->devfn);
175}
176
169static struct iommu_dev_data *alloc_dev_data(u16 devid) 177static struct iommu_dev_data *alloc_dev_data(u16 devid)
170{ 178{
171 struct iommu_dev_data *dev_data; 179 struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
203 return dev_data; 211 return dev_data;
204} 212}
205 213
214static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
215{
216 *(u16 *)data = alias;
217 return 0;
218}
219
220static u16 get_alias(struct device *dev)
221{
222 struct pci_dev *pdev = to_pci_dev(dev);
223 u16 devid, ivrs_alias, pci_alias;
224
225 devid = get_device_id(dev);
226 ivrs_alias = amd_iommu_alias_table[devid];
227 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
228
229 if (ivrs_alias == pci_alias)
230 return ivrs_alias;
231
232 /*
233 * DMA alias showdown
234 *
235 * The IVRS is fairly reliable in telling us about aliases, but it
236 * can't know about every screwy device. If we don't have an IVRS
237 * reported alias, use the PCI reported alias. In that case we may
238 * still need to initialize the rlookup and dev_table entries if the
239 * alias is to a non-existent device.
240 */
241 if (ivrs_alias == devid) {
242 if (!amd_iommu_rlookup_table[pci_alias]) {
243 amd_iommu_rlookup_table[pci_alias] =
244 amd_iommu_rlookup_table[devid];
245 memcpy(amd_iommu_dev_table[pci_alias].data,
246 amd_iommu_dev_table[devid].data,
247 sizeof(amd_iommu_dev_table[pci_alias].data));
248 }
249
250 return pci_alias;
251 }
252
253 pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
254 "for device %s[%04x:%04x], kernel reported alias "
255 "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
256 PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
257 PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
258 PCI_FUNC(pci_alias));
259
260 /*
261 * If we don't have a PCI DMA alias and the IVRS alias is on the same
262 * bus, then the IVRS table may know about a quirk that we don't.
263 */
264 if (pci_alias == devid &&
265 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
266 pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
267 pdev->dma_alias_devfn = ivrs_alias & 0xff;
268 pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
269 PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
270 dev_name(dev));
271 }
272
273 return ivrs_alias;
274}
275
206static struct iommu_dev_data *find_dev_data(u16 devid) 276static struct iommu_dev_data *find_dev_data(u16 devid)
207{ 277{
208 struct iommu_dev_data *dev_data; 278 struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
215 return dev_data; 285 return dev_data;
216} 286}
217 287
218static inline u16 get_device_id(struct device *dev)
219{
220 struct pci_dev *pdev = to_pci_dev(dev);
221
222 return PCI_DEVID(pdev->bus->number, pdev->devfn);
223}
224
225static struct iommu_dev_data *get_dev_data(struct device *dev) 288static struct iommu_dev_data *get_dev_data(struct device *dev)
226{ 289{
227 return dev->archdata.iommu; 290 return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
349 if (!dev_data) 412 if (!dev_data)
350 return -ENOMEM; 413 return -ENOMEM;
351 414
415 dev_data->alias = get_alias(dev);
416
352 if (pci_iommuv2_capable(pdev)) { 417 if (pci_iommuv2_capable(pdev)) {
353 struct amd_iommu *iommu; 418 struct amd_iommu *iommu;
354 419
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
369 u16 devid, alias; 434 u16 devid, alias;
370 435
371 devid = get_device_id(dev); 436 devid = get_device_id(dev);
372 alias = amd_iommu_alias_table[devid]; 437 alias = get_alias(dev);
373 438
374 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); 439 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
375 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); 440 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
1061 int ret; 1126 int ret;
1062 1127
1063 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1128 iommu = amd_iommu_rlookup_table[dev_data->devid];
1064 alias = amd_iommu_alias_table[dev_data->devid]; 1129 alias = dev_data->alias;
1065 1130
1066 ret = iommu_flush_dte(iommu, dev_data->devid); 1131 ret = iommu_flush_dte(iommu, dev_data->devid);
1067 if (!ret && alias != dev_data->devid) 1132 if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
2039 bool ats; 2104 bool ats;
2040 2105
2041 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2106 iommu = amd_iommu_rlookup_table[dev_data->devid];
2042 alias = amd_iommu_alias_table[dev_data->devid]; 2107 alias = dev_data->alias;
2043 ats = dev_data->ats.enabled; 2108 ats = dev_data->ats.enabled;
2044 2109
2045 /* Update data structures */ 2110 /* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
2073 return; 2138 return;
2074 2139
2075 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2140 iommu = amd_iommu_rlookup_table[dev_data->devid];
2076 alias = amd_iommu_alias_table[dev_data->devid]; 2141 alias = dev_data->alias;
2077 2142
2078 /* decrease reference counters */ 2143 /* decrease reference counters */
2079 dev_data->domain->dev_iommu[iommu->index] -= 1; 2144 dev_data->domain->dev_iommu[iommu->index] -= 1;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2409e3bd3df2..7c39ac4b9c53 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
826 if (smmu_domain->smmu) 826 if (smmu_domain->smmu)
827 goto out_unlock; 827 goto out_unlock;
828 828
829 /* We're bypassing these SIDs, so don't allocate an actual context */
830 if (domain->type == IOMMU_DOMAIN_DMA) {
831 smmu_domain->smmu = smmu;
832 goto out_unlock;
833 }
834
829 /* 835 /*
830 * Mapping the requested stage onto what we support is surprisingly 836 * Mapping the requested stage onto what we support is surprisingly
831 * complicated, mainly because the spec allows S1+S2 SMMUs without 837 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
948 void __iomem *cb_base; 954 void __iomem *cb_base;
949 int irq; 955 int irq;
950 956
951 if (!smmu) 957 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
952 return; 958 return;
953 959
954 /* 960 /*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1089 struct arm_smmu_device *smmu = smmu_domain->smmu; 1095 struct arm_smmu_device *smmu = smmu_domain->smmu;
1090 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 1096 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1091 1097
1092 /* Devices in an IOMMU group may already be configured */
1093 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1094 if (ret)
1095 return ret == -EEXIST ? 0 : ret;
1096
1097 /* 1098 /*
1098 * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1099 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1099 * for all devices behind the SMMU. 1100 * for all devices behind the SMMU. Note that we need to take
 1101 * care configuring SMRs for devices that are both a platform_device
 1102 * and a PCI device (i.e. a PCI host controller)
1100 */ 1103 */
1101 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1104 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1102 return 0; 1105 return 0;
1103 1106
1107 /* Devices in an IOMMU group may already be configured */
1108 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1109 if (ret)
1110 return ret == -EEXIST ? 0 : ret;
1111
1104 for (i = 0; i < cfg->num_streamids; ++i) { 1112 for (i = 0; i < cfg->num_streamids; ++i) {
1105 u32 idx, s2cr; 1113 u32 idx, s2cr;
1106 1114
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
403 unsigned int s_length = sg_dma_len(s); 403 unsigned int s_length = sg_dma_len(s);
404 unsigned int s_dma_len = s->length; 404 unsigned int s_dma_len = s->length;
405 405
406 s->offset = s_offset; 406 s->offset += s_offset;
407 s->length = s_length; 407 s->length = s_length;
408 sg_dma_address(s) = dma_addr + s_offset; 408 sg_dma_address(s) = dma_addr + s_offset;
409 dma_addr += s_dma_len; 409 dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
422 422
423 for_each_sg(sg, s, nents, i) { 423 for_each_sg(sg, s, nents, i) {
424 if (sg_dma_address(s) != DMA_ERROR_CODE) 424 if (sg_dma_address(s) != DMA_ERROR_CODE)
425 s->offset = sg_dma_address(s); 425 s->offset += sg_dma_address(s);
426 if (sg_dma_len(s)) 426 if (sg_dma_len(s))
427 s->length = sg_dma_len(s); 427 s->length = sg_dma_len(s);
428 sg_dma_address(s) = DMA_ERROR_CODE; 428 sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f14df2..e1852e845d21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2458 } 2458 }
2459 2459
2460 /* register PCI DMA alias device */ 2460 /* register PCI DMA alias device */
2461 if (req_id != dma_alias && dev_is_pci(dev)) { 2461 if (dev_is_pci(dev) && req_id != dma_alias) {
2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), 2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2463 dma_alias & 0xff, NULL, domain); 2463 dma_alias & 0xff, NULL, domain);
2464 2464
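The intel-iommu hunk only reorders the && operands, but the order matters: C evaluates && left to right and stops early, so the cheap type check dev_is_pci() must run before the alias comparison, whose operands are only meaningful for PCI devices. A tiny standalone illustration (values invented for the demo):

#include <stdio.h>
#include <stdbool.h>

static bool is_pci;		/* false: dma_alias below is never consulted */
static int req_id = 7, dma_alias = 42;

int main(void)
{
	if (is_pci && req_id != dma_alias)	/* alias compared only if PCI */
		printf("register PCI DMA alias device\n");
	else
		printf("skip\n");
	return 0;
}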
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index bfd4f7c3b1d8..b9df1411c894 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
848 if (!group->default_domain) { 848 if (!group->default_domain) {
849 group->default_domain = __iommu_domain_alloc(dev->bus, 849 group->default_domain = __iommu_domain_alloc(dev->bus,
850 IOMMU_DOMAIN_DMA); 850 IOMMU_DOMAIN_DMA);
851 group->domain = group->default_domain; 851 if (!group->domain)
852 group->domain = group->default_domain;
852 } 853 }
853 854
854 ret = iommu_group_add_device(group, dev); 855 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a6f593a0a29e..5710a06c3049 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -315,8 +315,8 @@ static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
315 int i; 315 int i;
316 316
317 for (i = 0; i < iommu->num_mmu; i++) 317 for (i = 0; i < iommu->num_mmu; i++)
318 active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & 318 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
319 RK_MMU_STATUS_STALL_ACTIVE; 319 RK_MMU_STATUS_STALL_ACTIVE);
320 320
321 return active; 321 return active;
322} 322}
@@ -327,8 +327,8 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
327 int i; 327 int i;
328 328
329 for (i = 0; i < iommu->num_mmu; i++) 329 for (i = 0; i < iommu->num_mmu; i++)
330 enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & 330 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
331 RK_MMU_STATUS_PAGING_ENABLED; 331 RK_MMU_STATUS_PAGING_ENABLED);
332 332
333 return enable; 333 return enable;
334} 334}
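The rockchip-iommu hunk fixes a classic truncation bug: the status flags are not bit 0, so "active &= reg & FLAG" ANDs a previous value of 1 with e.g. 0x4 and yields 0 even when the flag is set. Folding the masked value to 0/1 with !! first gives the intended all-MMUs-agree answer. A standalone check (register values invented for the demo):

#include <stdio.h>

#define STALL_ACTIVE 0x4	/* not bit 0, like RK_MMU_STATUS_STALL_ACTIVE */

int main(void)
{
	unsigned int status[2] = { 0x4, 0x4 };	/* both MMUs report stalled */
	int buggy = 1, fixed = 1;

	for (int i = 0; i < 2; i++) {
		buggy &= status[i] & STALL_ACTIVE;	/* 1 & 4 -> 0: wrong */
		fixed &= !!(status[i] & STALL_ACTIVE);	/* 1 & 1 -> 1: right */
	}
	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* buggy=0 fixed=1 */
	return 0;
}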
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 94a30da0cfac..4dffccf532a2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
468 468
469 /* Update the pcpu_masks */ 469 /* Update the pcpu_masks */
470 for (i = 0; i < gic_vpes; i++) 470 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
471 clear_bit(irq, pcpu_masks[i].pcpu_mask); 471 clear_bit(irq, pcpu_masks[i].pcpu_mask);
472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
473 473
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
707 spin_lock_irqsave(&gic_lock, flags); 707 spin_lock_irqsave(&gic_lock, flags);
708 gic_map_to_pin(intr, gic_cpu_pin); 708 gic_map_to_pin(intr, gic_cpu_pin);
709 gic_map_to_vpe(intr, vpe); 709 gic_map_to_vpe(intr, vpe);
710 for (i = 0; i < gic_vpes; i++) 710 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
711 clear_bit(intr, pcpu_masks[i].pcpu_mask); 711 clear_bit(intr, pcpu_masks[i].pcpu_mask);
712 set_bit(intr, pcpu_masks[vpe].pcpu_mask); 712 set_bit(intr, pcpu_masks[vpe].pcpu_mask);
713 spin_unlock_irqrestore(&gic_lock, flags); 713 spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
715 if (!maddr || maddr->family != AF_ISDN) 715 if (!maddr || maddr->family != AF_ISDN)
716 return -EINVAL; 716 return -EINVAL;
717 717
718 if (addr_len < sizeof(struct sockaddr_mISDN))
719 return -EINVAL;
720
718 lock_sock(sk); 721 lock_sock(sk);
719 722
720 if (_pms(sk)->dev) { 723 if (_pms(sk)->dev) {
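The mISDN hunk hardens bind(): addr_len is caller-controlled, so reading struct sockaddr_mISDN fields is only safe once the length check covers the whole structure; a short buffer would otherwise let the handler read adjacent memory. A kernel-style sketch of the safe ordering (fragment; note the length check ideally precedes any field access):

struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *)addr;

if (addr_len < sizeof(struct sockaddr_mISDN))
	return -EINVAL;			/* reject truncated addresses first */
if (!maddr || maddr->family != AF_ISDN)
	return -EINVAL;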
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
331 * Actually now I think of it, it's possible that Ron *is* half the Plan 9 331 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
332 * userbase. Oh well. 332 * userbase. Oh well.
333 */ 333 */
334static bool could_be_syscall(unsigned int num) 334bool could_be_syscall(unsigned int num)
335{ 335{
336 /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */ 336 /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
337 return num == IA32_SYSCALL_VECTOR || num == syscall_vector; 337 return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
416 * 416 *
417 * This routine indicates if a particular trap number could be delivered 417 * This routine indicates if a particular trap number could be delivered
418 * directly. 418 * directly.
419 *
420 * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
421 * trap gate for syscalls, so this trick is ineffective. See Mastery for
422 * how we could do this anyway...
419 */ 423 */
420static bool direct_trap(unsigned int num) 424static bool direct_trap(unsigned int num)
421{ 425{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
167bool send_notify_to_eventfd(struct lg_cpu *cpu); 167bool send_notify_to_eventfd(struct lg_cpu *cpu);
168void init_clockdev(struct lg_cpu *cpu); 168void init_clockdev(struct lg_cpu *cpu);
169bool check_syscall_vector(struct lguest *lg); 169bool check_syscall_vector(struct lguest *lg);
170bool could_be_syscall(unsigned int num);
170int init_interrupts(void); 171int init_interrupts(void);
171void free_interrupts(void); 172void free_interrupts(void);
172 173
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
429 return; 429 return;
430 break; 430 break;
431 case 32 ... 255: 431 case 32 ... 255:
432 /* This might be a syscall. */
433 if (could_be_syscall(cpu->regs->trapnum))
434 break;
435
432 /* 436 /*
433 * These values mean a real interrupt occurred, in which case 437 * Other values mean a real interrupt occurred, in which case
434 * the Host handler has already been run. We just do a 438 * the Host handler has already been run. We just do a
435 * friendly check if another process should now be run, then 439 * friendly check if another process should now be run, then
436 * return to run the Guest again. 440 * return to run the Guest again.
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index dc11bbf27274..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
46 size_t count, loff_t *ppos) 46 size_t count, loff_t *ppos)
47{ 47{
48 struct mbox_test_device *tdev = filp->private_data; 48 struct mbox_test_device *tdev = filp->private_data;
49 int ret;
50 49
51 if (!tdev->tx_channel) { 50 if (!tdev->tx_channel) {
52 dev_err(tdev->dev, "Channel cannot do Tx\n"); 51 dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
60 return -EINVAL; 59 return -EINVAL;
61 } 60 }
62 61
63 tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL); 62 /* Only allocate memory if we need to */
64 if (!tdev->signal) 63 if (!tdev->signal) {
65 return -ENOMEM; 64 tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
65 if (!tdev->signal)
66 return -ENOMEM;
67 }
66 68
67 ret = copy_from_user(tdev->signal, userbuf, count); 69 if (copy_from_user(tdev->signal, userbuf, count)) {
68 if (ret) {
69 kfree(tdev->signal); 70 kfree(tdev->signal);
71 tdev->signal = NULL;
70 return -EFAULT; 72 return -EFAULT;
71 } 73 }
72 74
73 return ret < 0 ? ret : count; 75 return count;
74} 76}
75 77
76static const struct file_operations mbox_test_signal_ops = { 78static const struct file_operations mbox_test_signal_ops = {
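A note on the return-value fix in the mbox_test_signal_write() hunk above: copy_from_user() returns the number of bytes it could NOT copy (0 on success), never a negative errno, so the old "ret < 0 ? ret : count" could never take the error branch and a partial fault leaked through as a bogus byte count. The corrected shape, as a kernel-style fragment:

if (copy_from_user(tdev->signal, userbuf, count)) {	/* nonzero = fault */
	kfree(tdev->signal);
	tdev->signal = NULL;	/* don't leave a dangling pointer behind */
	return -EFAULT;
}
return count;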
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index bd07f39f0692..dd2afbca51c9 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
189 int i; 189 int i;
190 190
191 ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL); 191 ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
192 if (IS_ERR(ctx)) 192 if (!ctx)
193 return PTR_ERR(ctx); 193 return -ENOMEM;
194 194
195 platform_set_drvdata(pdev, ctx); 195 platform_set_drvdata(pdev, ctx);
196 196
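The one-line slimpro fix above is about matching the check to the allocator's contract: devm_kzalloc() signals failure with NULL, while IS_ERR() only recognizes ERR_PTR() encodings (addresses in the top -MAX_ERRNO range), so the old test waved a NULL straight through. A kernel-style fragment contrasting the two API styles (ctx/chan as in the surrounding drivers):

ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)			/* NOT IS_ERR(ctx): NULL is not an ERR_PTR */
	return -ENOMEM;

chan = mbox_request_channel(cl, 0);	/* this API does return ERR_PTR */
if (IS_ERR(chan))
	return PTR_ERR(chan);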
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
375 375
376 if (!np) { 376 if (!np) {
377 dev_err(cl->dev, "%s() currently only supports DT\n", __func__); 377 dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
378 return ERR_PTR(-ENOSYS); 378 return ERR_PTR(-EINVAL);
379 } 379 }
380 380
381 if (!of_get_property(np, "mbox-names", NULL)) { 381 if (!of_get_property(np, "mbox-names", NULL)) {
382 dev_err(cl->dev, 382 dev_err(cl->dev,
383 "%s() requires an \"mbox-names\" property\n", __func__); 383 "%s() requires an \"mbox-names\" property\n", __func__);
384 return ERR_PTR(-ENOSYS); 384 return ERR_PTR(-EINVAL);
385 } 385 }
386 386
387 of_property_for_each_string(np, "mbox-names", prop, mbox_name) { 387 of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0ddf638d60f3..043828d541f7 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -361,8 +361,6 @@ static int __init acpi_pcc_probe(void)
361 struct acpi_generic_address *db_reg; 361 struct acpi_generic_address *db_reg;
362 struct acpi_pcct_hw_reduced *pcct_ss; 362 struct acpi_pcct_hw_reduced *pcct_ss;
363 pcc_mbox_channels[i].con_priv = pcct_entry; 363 pcc_mbox_channels[i].con_priv = pcct_entry;
364 pcct_entry = (struct acpi_subtable_header *)
365 ((unsigned long) pcct_entry + pcct_entry->length);
366 364
367 /* If doorbell is in system memory cache the virt address */ 365 /* If doorbell is in system memory cache the virt address */
368 pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry; 366 pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
@@ -370,6 +368,8 @@ static int __init acpi_pcc_probe(void)
370 if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 368 if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
371 pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address, 369 pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
372 db_reg->bit_width/8); 370 db_reg->bit_width/8);
371 pcct_entry = (struct acpi_subtable_header *)
372 ((unsigned long) pcct_entry + pcct_entry->length);
373 } 373 }
374 374
375 pcc_mbox_ctrl.num_chans = count; 375 pcc_mbox_ctrl.num_chans = count;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7df6b4f1548a..3fe86b54d50b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
322{ 322{
323 ClearPagePrivate(page); 323 ClearPagePrivate(page);
324 set_page_private(page, 0); 324 set_page_private(page, 0);
325 page_cache_release(page); 325 put_page(page);
326} 326}
327static void free_buffers(struct page *page) 327static void free_buffers(struct page *page)
328{ 328{
@@ -1673,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
1673 if (!bitmap) /* there was no bitmap */ 1673 if (!bitmap) /* there was no bitmap */
1674 return; 1674 return;
1675 1675
1676 if (bitmap->sysfs_can_clear)
1677 sysfs_put(bitmap->sysfs_can_clear);
1678
1676 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && 1679 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1677 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev)) 1680 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1678 md_cluster_stop(bitmap->mddev); 1681 md_cluster_stop(bitmap->mddev);
@@ -1712,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
1712 if (mddev->thread) 1715 if (mddev->thread)
1713 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; 1716 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1714 1717
1715 if (bitmap->sysfs_can_clear)
1716 sysfs_put(bitmap->sysfs_can_clear);
1717
1718 bitmap_free(bitmap); 1718 bitmap_free(bitmap);
1719} 1719}
1720 1720
1721/* 1721/*
1722 * initialize the bitmap structure 1722 * initialize the bitmap structure
1723 * if this returns an error, bitmap_destroy must be called to do clean up 1723 * if this returns an error, bitmap_destroy must be called to do clean up
1724 * once mddev->bitmap is set
1724 */ 1725 */
1725struct bitmap *bitmap_create(struct mddev *mddev, int slot) 1726struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1726{ 1727{
@@ -1865,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1865 struct bitmap_counts *counts; 1866 struct bitmap_counts *counts;
1866 struct bitmap *bitmap = bitmap_create(mddev, slot); 1867 struct bitmap *bitmap = bitmap_create(mddev, slot);
1867 1868
1868 if (IS_ERR(bitmap)) 1869 if (IS_ERR(bitmap)) {
1870 bitmap_free(bitmap);
1869 return PTR_ERR(bitmap); 1871 return PTR_ERR(bitmap);
1872 }
1870 1873
1871 rv = bitmap_init_from_disk(bitmap, 0); 1874 rv = bitmap_init_from_disk(bitmap, 0);
1872 if (rv) 1875 if (rv)
@@ -2170,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
2170 else { 2173 else {
2171 mddev->bitmap = bitmap; 2174 mddev->bitmap = bitmap;
2172 rv = bitmap_load(mddev); 2175 rv = bitmap_load(mddev);
2173 if (rv) { 2176 if (rv)
2174 bitmap_destroy(mddev);
2175 mddev->bitmap_info.offset = 0; 2177 mddev->bitmap_info.offset = 0;
2176 }
2177 } 2178 }
2178 mddev->pers->quiesce(mddev, 0); 2179 mddev->pers->quiesce(mddev, 0);
2179 if (rv) 2180 if (rv) {
2181 bitmap_destroy(mddev);
2180 return rv; 2182 return rv;
2183 }
2181 } 2184 }
2182 } 2185 }
2183 } 2186 }
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef300f8b..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
867 return 0; 867 return 0;
868} 868}
869 869
870#define WRITE_LOCK(cmd) \ 870static bool cmd_write_lock(struct dm_cache_metadata *cmd)
871 down_write(&cmd->root_lock); \ 871{
872 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 872 down_write(&cmd->root_lock);
873 up_write(&cmd->root_lock); \ 873 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
874 return -EINVAL; \ 874 up_write(&cmd->root_lock);
875 return false;
875 } 876 }
877 return true;
878}
876 879
877#define WRITE_LOCK_VOID(cmd) \ 880#define WRITE_LOCK(cmd) \
878 down_write(&cmd->root_lock); \ 881 do { \
879 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 882 if (!cmd_write_lock((cmd))) \
880 up_write(&cmd->root_lock); \ 883 return -EINVAL; \
881 return; \ 884 } while(0)
882 } 885
886#define WRITE_LOCK_VOID(cmd) \
887 do { \
888 if (!cmd_write_lock((cmd))) \
889 return; \
890 } while(0)
883 891
884#define WRITE_UNLOCK(cmd) \ 892#define WRITE_UNLOCK(cmd) \
885 up_write(&cmd->root_lock) 893 up_write(&(cmd)->root_lock)
886 894
887#define READ_LOCK(cmd) \ 895static bool cmd_read_lock(struct dm_cache_metadata *cmd)
888 down_read(&cmd->root_lock); \ 896{
889 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 897 down_read(&cmd->root_lock);
890 up_read(&cmd->root_lock); \ 898 if (cmd->fail_io) {
891 return -EINVAL; \ 899 up_read(&cmd->root_lock);
900 return false;
892 } 901 }
902 return true;
903}
893 904
894#define READ_LOCK_VOID(cmd) \ 905#define READ_LOCK(cmd) \
895 down_read(&cmd->root_lock); \ 906 do { \
896 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 907 if (!cmd_read_lock((cmd))) \
897 up_read(&cmd->root_lock); \ 908 return -EINVAL; \
898 return; \ 909 } while(0)
899 } 910
911#define READ_LOCK_VOID(cmd) \
912 do { \
913 if (!cmd_read_lock((cmd))) \
914 return; \
915 } while(0)
900 916
901#define READ_UNLOCK(cmd) \ 917#define READ_UNLOCK(cmd) \
902 up_read(&cmd->root_lock) 918 up_read(&(cmd)->root_lock)
903 919
904int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size) 920int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
905{ 921{
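The dm-cache-metadata hunk converts bare multi-statement macros into a helper function plus do { } while (0) wrappers. The old WRITE_LOCK expanded to "down_write(); if (...) { ...; return -EINVAL; }" with no wrapper, which breaks under "if (x) WRITE_LOCK(cmd); else ..." and buries a function return inside what looks like one statement. A runnable distillation of the new shape (names invented for the demo):

#include <stdbool.h>
#include <stdio.h>

static bool fail_io = true;

static bool demo_write_lock(void)
{
	/* stands in for down_write() plus the fail_io/read-only check */
	if (fail_io)
		return false;	/* caller must not enter the critical section */
	return true;
}

#define DEMO_WRITE_LOCK()			\
	do {					\
		if (!demo_write_lock())		\
			return -1;		\
	} while (0)

static int do_update(void)
{
	DEMO_WRITE_LOCK();	/* now safe in any statement position */
	return 0;		/* ... critical section would go here ... */
}

int main(void)
{
	printf("do_update() = %d\n", do_update());
	return 0;
}

Keeping only the return inside a hygienic macro, with all the logic in a real function, is the standard way to preserve the early-return convenience without the macro pitfalls.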
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be4905769a45..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1662 tio = alloc_tio(ci, ti, target_bio_nr); 1662 tio = alloc_tio(ci, ti, target_bio_nr);
1663 tio->len_ptr = len; 1663 tio->len_ptr = len;
1664 r = clone_bio(tio, bio, sector, *len); 1664 r = clone_bio(tio, bio, sector, *len);
1665 if (r < 0) 1665 if (r < 0) {
1666 free_tio(ci->md, tio);
1666 break; 1667 break;
1668 }
1667 __map_bio(tio); 1669 __map_bio(tio);
1668 } 1670 }
1669 1671
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c068f171b4eb..194580fba7fd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -718,6 +718,7 @@ static void super_written(struct bio *bio)
718 718
719 if (atomic_dec_and_test(&mddev->pending_writes)) 719 if (atomic_dec_and_test(&mddev->pending_writes))
720 wake_up(&mddev->sb_wait); 720 wake_up(&mddev->sb_wait);
721 rdev_dec_pending(rdev, mddev);
721 bio_put(bio); 722 bio_put(bio);
722} 723}
723 724
@@ -732,6 +733,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
732 */ 733 */
733 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 734 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
734 735
736 atomic_inc(&rdev->nr_pending);
737
735 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 738 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
736 bio->bi_iter.bi_sector = sector; 739 bio->bi_iter.bi_sector = sector;
737 bio_add_page(bio, page, size, 0); 740 bio_add_page(bio, page, size, 0);
@@ -6883,7 +6886,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6883 6886
6884 case ADD_NEW_DISK: 6887 case ADD_NEW_DISK:
6885 /* We can support ADD_NEW_DISK on read-only arrays 6888 /* We can support ADD_NEW_DISK on read-only arrays
6886 * on if we are re-adding a preexisting device. 6889 * only if we are re-adding a preexisting device.
6887 * So require mddev->pers and MD_DISK_SYNC. 6890 * So require mddev->pers and MD_DISK_SYNC.
6888 */ 6891 */
6889 if (mddev->pers) { 6892 if (mddev->pers) {
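The md.c hunk pairs an atomic_inc() before submitting the superblock write with an rdev_dec_pending() in the completion handler. The pattern: any object handed to asynchronous I/O must be pinned before submission and released in the callback, or the rdev could be torn down while its write is still in flight. A kernel-style sketch (fragments from the two paths, as in the hunk):

atomic_inc(&rdev->nr_pending);		/* pin rdev across the async write */
submit_bio(WRITE_FLUSH_FUA, bio);	/* completion runs super_written() */

/* ... and in the bio end_io handler: */
rdev_dec_pending(rdev, mddev);		/* unpin once the write completed */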
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 39fb21e048e6..a7f2b9c9f8a0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
570 if (best_dist_disk < 0) { 570 if (best_dist_disk < 0) {
571 if (is_badblock(rdev, this_sector, sectors, 571 if (is_badblock(rdev, this_sector, sectors,
572 &first_bad, &bad_sectors)) { 572 &first_bad, &bad_sectors)) {
573 if (first_bad < this_sector) 573 if (first_bad <= this_sector)
574 /* Cannot use this */ 574 /* Cannot use this */
575 continue; 575 continue;
576 best_good_sectors = first_bad - this_sector; 576 best_good_sectors = first_bad - this_sector;
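The raid1 read_balance() fix is a one-character boundary correction: when the first bad block sits exactly at this_sector, "first_bad - this_sector" is 0 good sectors, so the disk must be skipped; the old strict "<" let it through. A standalone worked check:

#include <stdio.h>

int main(void)
{
	long this_sector = 1000, first_bad = 1000;

	/* old test: only skipped when the bad block was strictly before */
	int old_skip = first_bad < this_sector;
	/* fixed test: also skips the zero-good-sectors case */
	int new_skip = first_bad <= this_sector;
	long good = first_bad - this_sector;

	printf("good=%ld old_skip=%d new_skip=%d\n", good, old_skip, new_skip);
	return 0;
}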
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
228 "au8522", 0x8e >> 1, NULL); 228 "au8522", 0x8e >> 1, NULL);
229 if (sd == NULL) 229 if (sd == NULL)
230 pr_err("analog subdev registration failed\n"); 230 pr_err("analog subdev registration failed\n");
231#ifdef CONFIG_MEDIA_CONTROLLER
232 if (sd)
233 dev->decoder = &sd->entity;
234#endif
235 } 231 }
236 232
237 /* Setup tuners */ 233 /* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 5dc82e8c8670..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
137#ifdef CONFIG_MEDIA_CONTROLLER 137#ifdef CONFIG_MEDIA_CONTROLLER
138 if (dev->media_dev && 138 if (dev->media_dev &&
139 media_devnode_is_registered(&dev->media_dev->devnode)) { 139 media_devnode_is_registered(&dev->media_dev->devnode)) {
140 /* clear enable_source, disable_source */
141 dev->media_dev->source_priv = NULL;
142 dev->media_dev->enable_source = NULL;
143 dev->media_dev->disable_source = NULL;
144
140 media_device_unregister(dev->media_dev); 145 media_device_unregister(dev->media_dev);
141 media_device_cleanup(dev->media_dev); 146 media_device_cleanup(dev->media_dev);
147 kfree(dev->media_dev);
142 dev->media_dev = NULL; 148 dev->media_dev = NULL;
143 } 149 }
144#endif 150#endif
@@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
166 Set the status so poll routines can check and avoid 172 Set the status so poll routines can check and avoid
167 access after disconnect. 173 access after disconnect.
168 */ 174 */
169 dev->dev_state = DEV_DISCONNECTED; 175 set_bit(DEV_DISCONNECTED, &dev->dev_state);
170 176
171 au0828_rc_unregister(dev); 177 au0828_rc_unregister(dev);
172 /* Digital TV */ 178 /* Digital TV */
@@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev,
192#ifdef CONFIG_MEDIA_CONTROLLER 198#ifdef CONFIG_MEDIA_CONTROLLER
193 struct media_device *mdev; 199 struct media_device *mdev;
194 200
195 mdev = media_device_get_devres(&udev->dev); 201 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
196 if (!mdev) 202 if (!mdev)
197 return -ENOMEM; 203 return -ENOMEM;
198 204
@@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev,
456{ 462{
457#ifdef CONFIG_MEDIA_CONTROLLER 463#ifdef CONFIG_MEDIA_CONTROLLER
458 int ret; 464 int ret;
459 struct media_entity *entity, *demod = NULL, *tuner = NULL; 465 struct media_entity *entity, *demod = NULL;
466 struct media_link *link;
460 467
461 if (!dev->media_dev) 468 if (!dev->media_dev)
462 return 0; 469 return 0;
@@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev,
482 } 489 }
483 490
484 /* 491 /*
485 * Find tuner and demod to disable the link between 492 * Find tuner, decoder and demod.
486 * the two to avoid disable step when tuner is requested 493 *
487 * by video or audio. Note that this step can't be done 494 * The tuner and decoder should be cached, as they'll be used by
488 * until dvb graph is created during dvb register. 495 * au0828_enable_source.
496 *
497 * It also needs to disable the link between tuner and
498 * decoder/demod, to avoid disable step when tuner is requested
499 * by video or audio. Note that this step can't be done until dvb
500 * graph is created during dvb register.
489 */ 501 */
490 media_device_for_each_entity(entity, dev->media_dev) { 502 media_device_for_each_entity(entity, dev->media_dev) {
491 if (entity->function == MEDIA_ENT_F_DTV_DEMOD) 503 switch (entity->function) {
504 case MEDIA_ENT_F_TUNER:
505 dev->tuner = entity;
506 break;
507 case MEDIA_ENT_F_ATV_DECODER:
508 dev->decoder = entity;
509 break;
510 case MEDIA_ENT_F_DTV_DEMOD:
492 demod = entity; 511 demod = entity;
493 else if (entity->function == MEDIA_ENT_F_TUNER) 512 break;
494 tuner = entity; 513 }
495 } 514 }
496 /* Disable link between tuner and demod */
497 if (tuner && demod) {
498 struct media_link *link;
499 515
500 list_for_each_entry(link, &demod->links, list) { 516 /* Disable link between tuner->demod and/or tuner->decoder */
501 if (link->sink->entity == demod && 517 if (dev->tuner) {
502 link->source->entity == tuner) { 518 list_for_each_entry(link, &dev->tuner->links, list) {
519 if (demod && link->sink->entity == demod)
520 media_entity_setup_link(link, 0);
521 if (dev->decoder && link->sink->entity == dev->decoder)
503 media_entity_setup_link(link, 0); 522 media_entity_setup_link(link, 0);
504 }
505 } 523 }
506 } 524 }
507 525
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
130 bool first = true; 130 bool first = true;
131 131
132 /* do nothing if device is disconnected */ 132 /* do nothing if device is disconnected */
133 if (ir->dev->dev_state == DEV_DISCONNECTED) 133 if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
134 return 0; 134 return 0;
135 135
136 /* Check IR int */ 136 /* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
260 cancel_delayed_work_sync(&ir->work); 260 cancel_delayed_work_sync(&ir->work);
261 261
262 /* do nothing if device is disconnected */ 262 /* do nothing if device is disconnected */
263 if (ir->dev->dev_state != DEV_DISCONNECTED) { 263 if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
264 /* Disable IR */ 264 /* Disable IR */
265 au8522_rc_clear(ir, 0xe0, 1 << 4); 265 au8522_rc_clear(ir, 0xe0, 1 << 4);
266 } 266 }
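The au0828 hunks here and in au0828-video.c below convert dev_state from plain enum assignment to set_bit()/test_bit() on a bitmap. The old code mixed "state = DEV_DISCONNECTED" with bitmask tests, so setting one flag wiped the others, and concurrent writers raced. A runnable userspace sketch of the bitmap approach (GCC atomics stand in for the kernel's bitops; bit numbers invented to mirror the driver's flags):

#include <stdio.h>

enum { DEV_INITIALIZED, DEV_DISCONNECTED, DEV_MISCONFIGURED };

static unsigned long dev_state;

static void demo_set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static int demo_test_bit(int nr, const unsigned long *addr)
{
	return !!(__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr));
}

int main(void)
{
	demo_set_bit(DEV_INITIALIZED, &dev_state);
	demo_set_bit(DEV_DISCONNECTED, &dev_state);	/* keeps other bits */
	printf("initialized=%d disconnected=%d\n",
	       demo_test_bit(DEV_INITIALIZED, &dev_state),
	       demo_test_bit(DEV_DISCONNECTED, &dev_state));
	return 0;
}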
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13f6dab9ccc2..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
106 106
107static int check_dev(struct au0828_dev *dev) 107static int check_dev(struct au0828_dev *dev)
108{ 108{
109 if (dev->dev_state & DEV_DISCONNECTED) { 109 if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
110 pr_info("v4l2 ioctl: device not present\n"); 110 pr_info("v4l2 ioctl: device not present\n");
111 return -ENODEV; 111 return -ENODEV;
112 } 112 }
113 113
114 if (dev->dev_state & DEV_MISCONFIGURED) { 114 if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
115 pr_info("v4l2 ioctl: device is misconfigured; " 115 pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
116 "close and open it again\n");
117 return -EIO; 116 return -EIO;
118 } 117 }
119 return 0; 118 return 0;
@@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
521 if (!dev) 520 if (!dev)
522 return 0; 521 return 0;
523 522
524 if ((dev->dev_state & DEV_DISCONNECTED) || 523 if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
525 (dev->dev_state & DEV_MISCONFIGURED)) 524 test_bit(DEV_MISCONFIGURED, &dev->dev_state))
526 return 0; 525 return 0;
527 526
528 if (urb->status < 0) { 527 if (urb->status < 0) {
@@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
824 int ret = 0; 823 int ret = 0;
825 824
826 dev->stream_state = STREAM_INTERRUPT; 825 dev->stream_state = STREAM_INTERRUPT;
827 if (dev->dev_state == DEV_DISCONNECTED) 826 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
828 return -ENODEV; 827 return -ENODEV;
829 else if (ret) { 828 else if (ret) {
830 dev->dev_state = DEV_MISCONFIGURED; 829 set_bit(DEV_MISCONFIGURED, &dev->dev_state);
831 dprintk(1, "%s device is misconfigured!\n", __func__); 830 dprintk(1, "%s device is misconfigured!\n", __func__);
832 return ret; 831 return ret;
833 } 832 }
@@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
1026 int ret; 1025 int ret;
1027 1026
1028 dprintk(1, 1027 dprintk(1,
1029 "%s called std_set %d dev_state %d stream users %d users %d\n", 1028 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1030 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1029 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1031 dev->streaming_users, dev->users); 1030 dev->streaming_users, dev->users);
1032 1031
@@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
1045 au0828_analog_stream_enable(dev); 1044 au0828_analog_stream_enable(dev);
1046 au0828_analog_stream_reset(dev); 1045 au0828_analog_stream_reset(dev);
1047 dev->stream_state = STREAM_OFF; 1046 dev->stream_state = STREAM_OFF;
1048 dev->dev_state |= DEV_INITIALIZED; 1047 set_bit(DEV_INITIALIZED, &dev->dev_state);
1049 } 1048 }
1050 dev->users++; 1049 dev->users++;
1051 mutex_unlock(&dev->lock); 1050 mutex_unlock(&dev->lock);
@@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
1059 struct video_device *vdev = video_devdata(filp); 1058 struct video_device *vdev = video_devdata(filp);
1060 1059
1061 dprintk(1, 1060 dprintk(1,
1062 "%s called std_set %d dev_state %d stream users %d users %d\n", 1061 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1063 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1062 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1064 dev->streaming_users, dev->users); 1063 dev->streaming_users, dev->users);
1065 1064
@@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp)
1075 del_timer_sync(&dev->vbi_timeout); 1074 del_timer_sync(&dev->vbi_timeout);
1076 } 1075 }
1077 1076
1078 if (dev->dev_state == DEV_DISCONNECTED) 1077 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
1079 goto end; 1078 goto end;
1080 1079
1081 if (dev->users == 1) { 1080 if (dev->users == 1) {
@@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
1135 .type = V4L2_TUNER_ANALOG_TV, 1134 .type = V4L2_TUNER_ANALOG_TV,
1136 }; 1135 };
1137 1136
1138 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1137 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1139 dev->std_set_in_tuner_core, dev->dev_state); 1138 dev->std_set_in_tuner_core, dev->dev_state);
1140 1139
1141 if (dev->std_set_in_tuner_core) 1140 if (dev->std_set_in_tuner_core)
@@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
1207 struct video_device *vdev = video_devdata(file); 1206 struct video_device *vdev = video_devdata(file);
1208 struct au0828_dev *dev = video_drvdata(file); 1207 struct au0828_dev *dev = video_drvdata(file);
1209 1208
1210 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1209 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1211 dev->std_set_in_tuner_core, dev->dev_state); 1210 dev->std_set_in_tuner_core, dev->dev_state);
1212 1211
1213 strlcpy(cap->driver, "au0828", sizeof(cap->driver)); 1212 strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1250{ 1249{
1251 struct au0828_dev *dev = video_drvdata(file); 1250 struct au0828_dev *dev = video_drvdata(file);
1252 1251
1253 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1252 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1254 dev->std_set_in_tuner_core, dev->dev_state); 1253 dev->std_set_in_tuner_core, dev->dev_state);
1255 1254
1256 f->fmt.pix.width = dev->width; 1255 f->fmt.pix.width = dev->width;
@@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1269{ 1268{
1270 struct au0828_dev *dev = video_drvdata(file); 1269 struct au0828_dev *dev = video_drvdata(file);
1271 1270
1272 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1271 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1273 dev->std_set_in_tuner_core, dev->dev_state); 1272 dev->std_set_in_tuner_core, dev->dev_state);
1274 1273
1275 return au0828_set_format(dev, VIDIOC_TRY_FMT, f); 1274 return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1281 struct au0828_dev *dev = video_drvdata(file); 1280 struct au0828_dev *dev = video_drvdata(file);
1282 int rc; 1281 int rc;
1283 1282
1284 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1283 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1285 dev->std_set_in_tuner_core, dev->dev_state); 1284 dev->std_set_in_tuner_core, dev->dev_state);
1286 1285
1287 rc = check_dev(dev); 1286 rc = check_dev(dev);
@@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
1303{ 1302{
1304 struct au0828_dev *dev = video_drvdata(file); 1303 struct au0828_dev *dev = video_drvdata(file);
1305 1304
1306 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1305 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1307 dev->std_set_in_tuner_core, dev->dev_state); 1306 dev->std_set_in_tuner_core, dev->dev_state);
1308 1307
1309 if (norm == dev->std) 1308 if (norm == dev->std)
@@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
1335{ 1334{
1336 struct au0828_dev *dev = video_drvdata(file); 1335 struct au0828_dev *dev = video_drvdata(file);
1337 1336
1338 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1337 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1339 dev->std_set_in_tuner_core, dev->dev_state); 1338 dev->std_set_in_tuner_core, dev->dev_state);
1340 1339
1341 *norm = dev->std; 1340 *norm = dev->std;
@@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
1357 [AU0828_VMUX_DVB] = "DVB", 1356 [AU0828_VMUX_DVB] = "DVB",
1358 }; 1357 };
1359 1358
1360 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1359 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1361 dev->std_set_in_tuner_core, dev->dev_state); 1360 dev->std_set_in_tuner_core, dev->dev_state);
1362 1361
1363 tmp = input->index; 1362 tmp = input->index;
@@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
1387{ 1386{
1388 struct au0828_dev *dev = video_drvdata(file); 1387 struct au0828_dev *dev = video_drvdata(file);
1389 1388
1390 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1389 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1391 dev->std_set_in_tuner_core, dev->dev_state); 1390 dev->std_set_in_tuner_core, dev->dev_state);
1392 1391
1393 *i = dev->ctrl_input; 1392 *i = dev->ctrl_input;
@@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
1398{ 1397{
1399 int i; 1398 int i;
1400 1399
1401 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1400 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1402 dev->std_set_in_tuner_core, dev->dev_state); 1401 dev->std_set_in_tuner_core, dev->dev_state);
1403 1402
1404 switch (AUVI_INPUT(index).type) { 1403 switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
1496{ 1495{
1497 struct au0828_dev *dev = video_drvdata(file); 1496 struct au0828_dev *dev = video_drvdata(file);
1498 1497
1499 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1498 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1500 dev->std_set_in_tuner_core, dev->dev_state); 1499 dev->std_set_in_tuner_core, dev->dev_state);
1501 1500
1502 a->index = dev->ctrl_ainput; 1501 a->index = dev->ctrl_ainput;
@@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
1516 if (a->index != dev->ctrl_ainput) 1515 if (a->index != dev->ctrl_ainput)
1517 return -EINVAL; 1516 return -EINVAL;
1518 1517
1519 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1518 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1520 dev->std_set_in_tuner_core, dev->dev_state); 1519 dev->std_set_in_tuner_core, dev->dev_state);
1521 return 0; 1520 return 0;
1522} 1521}
@@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
1534 if (ret) 1533 if (ret)
1535 return ret; 1534 return ret;
1536 1535
1537 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1536 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1538 dev->std_set_in_tuner_core, dev->dev_state); 1537 dev->std_set_in_tuner_core, dev->dev_state);
1539 1538
1540 strcpy(t->name, "Auvitek tuner"); 1539 strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
1554 if (t->index != 0) 1553 if (t->index != 0)
1555 return -EINVAL; 1554 return -EINVAL;
1556 1555
1557 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1556 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1558 dev->std_set_in_tuner_core, dev->dev_state); 1557 dev->std_set_in_tuner_core, dev->dev_state);
1559 1558
1560 au0828_init_tuner(dev); 1559 au0828_init_tuner(dev);
@@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
1576 1575
1577 if (freq->tuner != 0) 1576 if (freq->tuner != 0)
1578 return -EINVAL; 1577 return -EINVAL;
1579 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1578 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1580 dev->std_set_in_tuner_core, dev->dev_state); 1579 dev->std_set_in_tuner_core, dev->dev_state);
1581 freq->frequency = dev->ctrl_freq; 1580 freq->frequency = dev->ctrl_freq;
1582 return 0; 1581 return 0;
@@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1591 if (freq->tuner != 0) 1590 if (freq->tuner != 0)
1592 return -EINVAL; 1591 return -EINVAL;
1593 1592
1594 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1593 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1595 dev->std_set_in_tuner_core, dev->dev_state); 1594 dev->std_set_in_tuner_core, dev->dev_state);
1596 1595
1597 au0828_init_tuner(dev); 1596 au0828_init_tuner(dev);
@@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
1617{ 1616{
1618 struct au0828_dev *dev = video_drvdata(file); 1617 struct au0828_dev *dev = video_drvdata(file);
1619 1618
1620 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1619 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1621 dev->std_set_in_tuner_core, dev->dev_state); 1620 dev->std_set_in_tuner_core, dev->dev_state);
1622 1621
1623 format->fmt.vbi.samples_per_line = dev->vbi_width; 1622 format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
1643 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1642 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1644 return -EINVAL; 1643 return -EINVAL;
1645 1644
1646 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1645 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1647 dev->std_set_in_tuner_core, dev->dev_state); 1646 dev->std_set_in_tuner_core, dev->dev_state);
1648 1647
1649 cc->bounds.left = 0; 1648 cc->bounds.left = 0;
@@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
1665{ 1664{
1666 struct au0828_dev *dev = video_drvdata(file); 1665 struct au0828_dev *dev = video_drvdata(file);
1667 1666
1668 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1667 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1669 dev->std_set_in_tuner_core, dev->dev_state); 1668 dev->std_set_in_tuner_core, dev->dev_state);
1670 1669
1671 reg->val = au0828_read(dev, reg->reg); 1670 reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
1678{ 1677{
1679 struct au0828_dev *dev = video_drvdata(file); 1678 struct au0828_dev *dev = video_drvdata(file);
1680 1679
1681 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1680 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1682 dev->std_set_in_tuner_core, dev->dev_state); 1681 dev->std_set_in_tuner_core, dev->dev_state);
1683 1682
1684 return au0828_writereg(dev, reg->reg, reg->val); 1683 return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ff7f8510fb77..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 23
24#include <linux/bitops.h>
24#include <linux/usb.h> 25#include <linux/usb.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
26#include <linux/i2c-algo-bit.h> 27#include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
121 122
122/* device state */ 123/* device state */
123enum au0828_dev_state { 124enum au0828_dev_state {
124 DEV_INITIALIZED = 0x01, 125 DEV_INITIALIZED = 0,
125 DEV_DISCONNECTED = 0x02, 126 DEV_DISCONNECTED = 1,
126 DEV_MISCONFIGURED = 0x04 127 DEV_MISCONFIGURED = 2
127}; 128};
128 129
129struct au0828_dev; 130struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
247 int input_type; 248 int input_type;
248 int std_set_in_tuner_core; 249 int std_set_in_tuner_core;
249 unsigned int ctrl_input; 250 unsigned int ctrl_input;
 250	enum au0828_dev_state dev_state;	 251	unsigned long dev_state;	/* defined at enum au0828_dev_state */
251 enum au0828_stream_state stream_state; 252 enum au0828_stream_state stream_state;
252 wait_queue_head_t open; 253 wait_queue_head_t open;
253 254
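
The three au0828 hunks above are one logical change: dev_state stops being an enum abused as a bitmask (0x01/0x02/0x04, tested sometimes with == and sometimes with &) and becomes a set of bit numbers in an unsigned long manipulated with the atomic set_bit()/test_bit() helpers. That makes the disconnect flag safe against concurrent updates from the USB disconnect path, and fixes the sites that compared the whole word with ==, which stop matching as soon as a second flag is set; the %d to %ld printk changes follow from the wider type. The pattern in isolation, with made-up names:

	#include <linux/bitops.h>

	enum my_dev_state {		/* bit numbers, not masks */
		MYDEV_INITIALIZED = 0,
		MYDEV_DISCONNECTED = 1,
		MYDEV_MISCONFIGURED = 2,
	};

	struct my_dev {
		unsigned long state;	/* atomic bitops operate on unsigned long */
	};

	static void my_disconnect(struct my_dev *d)
	{
		set_bit(MYDEV_DISCONNECTED, &d->state);	/* atomic read-modify-write */
	}

	static int my_check(struct my_dev *d)
	{
		if (test_bit(MYDEV_DISCONNECTED, &d->state))
			return -ENODEV;
		if (test_bit(MYDEV_MISCONFIGURED, &d->state))
			return -EIO;
		return 0;
	}
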
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
1452 printk(KERN_INFO "%s: %s found\n", __func__, 1452 printk(KERN_INFO "%s: %s found\n", __func__,
1453 usbvision_device_data[model].model_string); 1453 usbvision_device_data[model].model_string);
1454 1454
1455 /*
1456 * this is a security check.
1457 * an exploit using an incorrect bInterfaceNumber is known
1458 */
1459 if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
1460 return -ENODEV;
1461
1462 if (usbvision_device_data[model].interface >= 0) 1455 if (usbvision_device_data[model].interface >= 0)
1463 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; 1456 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
1464 else if (ifnum < dev->actconfig->desc.bNumInterfaces) 1457 else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index df4c052c6bd6..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
349 349
350 if (dma->pages) { 350 if (dma->pages) {
351 for (i = 0; i < dma->nr_pages; i++) 351 for (i = 0; i < dma->nr_pages; i++)
352 page_cache_release(dma->pages[i]); 352 put_page(dma->pages[i]);
353 kfree(dma->pages); 353 kfree(dma->pages);
354 dma->pages = NULL; 354 dma->pages = NULL;
355 } 355 }
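
This is the first of several page_cache_release() to put_page() conversions in this series (the vmci, block2mtd and nandsim hunks below get the same treatment). The rename is mechanical: by this point page_cache_release() had been reduced to a pure alias, roughly (assumption, reconstructing the old pagemap.h definition):

	/* effectively what include/linux/pagemap.h used to provide: */
	#define page_cache_release(page)	put_page(page)
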
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1645 * Will sleep if required for nonblocking == false. 1645 * Will sleep if required for nonblocking == false.
1646 */ 1646 */
1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1648 int nonblocking) 1648 void *pb, int nonblocking)
1649{ 1649{
1650 unsigned long flags; 1650 unsigned long flags;
1651 int ret; 1651 int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1666 /* 1666 /*
1667 * Only remove the buffer from done_list if v4l2_buffer can handle all 1667 * Only remove the buffer from done_list if v4l2_buffer can handle all
1668 * the planes. 1668 * the planes.
1669 * Verifying planes is NOT necessary since it already has been checked
1670 * before the buffer is queued/prepared. So it can never fail.
1671 */ 1669 */
1672 list_del(&(*vb)->done_entry); 1670 ret = call_bufop(q, verify_planes_array, *vb, pb);
1671 if (!ret)
1672 list_del(&(*vb)->done_entry);
1673 spin_unlock_irqrestore(&q->done_lock, flags); 1673 spin_unlock_irqrestore(&q->done_lock, flags);
1674 1674
1675 return ret; 1675 return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
1748 struct vb2_buffer *vb = NULL; 1748 struct vb2_buffer *vb = NULL;
1749 int ret; 1749 int ret;
1750 1750
1751 ret = __vb2_get_done_vb(q, &vb, nonblocking); 1751 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
1752 if (ret < 0) 1752 if (ret < 0)
1753 return ret; 1753 return ret;
1754 1754
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
2298 return POLLERR; 2298 return POLLERR;
2299 2299
2300 /* 2300 /*
2301 * If this quirk is set and QBUF hasn't been called yet then
2302 * return POLLERR as well. This only affects capture queues, output
2303 * queues will always initialize waiting_for_buffers to false.
2304 * This quirk is set by V4L2 for backwards compatibility reasons.
2305 */
2306 if (q->quirk_poll_must_check_waiting_for_buffers &&
2307 q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
2308 return POLLERR;
2309
2310 /*
2301 * For output streams you can call write() as long as there are fewer 2311 * For output streams you can call write() as long as there are fewer
2302 * buffers queued than there are buffers available. 2312 * buffers queued than there are buffers available.
2303 */ 2313 */
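
The __vb2_get_done_vb() change closes a DQBUF hole: the buffer is now only removed from done_list if the caller-supplied buffer structure can actually describe all of its planes, and the verification happens under done_lock, so a failing dequeue leaves the buffer on the list instead of dropping it. call_bufop() dispatches to an optional per-backend op; a simplified expansion (assumption: the real macro also carries debug accounting):

	static int call_verify_planes(struct vb2_queue *q,
				      struct vb2_buffer *vb, const void *pb)
	{
		if (q->buf_ops && q->buf_ops->verify_planes_array)
			return q->buf_ops->verify_planes_array(vb, pb);
		return 0;	/* backends without the op always accept */
	}
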
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
49 vec = frame_vector_create(nr); 49 vec = frame_vector_create(nr);
50 if (!vec) 50 if (!vec)
51 return ERR_PTR(-ENOMEM); 51 return ERR_PTR(-ENOMEM);
52 ret = get_vaddr_frames(start, nr, write, 1, vec); 52 ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
53 if (ret < 0) 53 if (ret < 0)
54 goto out_destroy; 54 goto out_destroy;
55 /* We accept only complete set of PFNs */ 55 /* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
74 return 0; 74 return 0;
75} 75}
76 76
77static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
78{
79 return __verify_planes_array(vb, pb);
80}
81
77/** 82/**
78 * __verify_length() - Verify that the bytesused value for each plane fits in 83 * __verify_length() - Verify that the bytesused value for each plane fits in
79 * the plane length and that the data offset doesn't exceed the bytesused value. 84 * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
437} 442}
438 443
439static const struct vb2_buf_ops v4l2_buf_ops = { 444static const struct vb2_buf_ops v4l2_buf_ops = {
445 .verify_planes_array = __verify_planes_array_core,
440 .fill_user_buffer = __fill_v4l2_buffer, 446 .fill_user_buffer = __fill_v4l2_buffer,
441 .fill_vb2_buffer = __fill_vb2_buffer, 447 .fill_vb2_buffer = __fill_vb2_buffer,
442 .copy_timestamp = __copy_timestamp, 448 .copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
765 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); 771 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
766 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) 772 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
767 == V4L2_BUF_FLAG_TIMESTAMP_COPY; 773 == V4L2_BUF_FLAG_TIMESTAMP_COPY;
774 /*
775 * For compatibility with vb1: if QBUF hasn't been called yet, then
776 * return POLLERR as well. This only affects capture queues, output
777 * queues will always initialize waiting_for_buffers to false.
778 */
779 q->quirk_poll_must_check_waiting_for_buffers = true;
768 780
769 return vb2_core_queue_init(q); 781 return vb2_core_queue_init(q);
770} 782}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
818 poll_wait(file, &fh->wait, wait); 830 poll_wait(file, &fh->wait, wait);
819 } 831 }
820 832
821 /*
822 * For compatibility with vb1: if QBUF hasn't been called yet, then
823 * return POLLERR as well. This only affects capture queues, output
824 * queues will always initialize waiting_for_buffers to false.
825 */
826 if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
827 return POLLERR;
828
829 return res | vb2_core_poll(q, file, wait); 833 return res | vb2_core_poll(q, file, wait);
830} 834}
831EXPORT_SYMBOL_GPL(vb2_poll); 835EXPORT_SYMBOL_GPL(vb2_poll);
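
Together with the vb2_core_poll() hunk above, this moves the videobuf1-era "POLLERR until the first QBUF" behaviour out of the V4L2 wrapper and behind a core quirk flag. vb2_queue_init() sets the quirk so V4L2 users keep the old semantics; code that initialises the core queue directly gets standard poll behaviour. Sketch of the two entry points (setup details elided; use one or the other):

	struct vb2_queue q;
	/* ... fill in type, ops, mem_ops ... */

	/* Core users (e.g. DVB) initialise the queue directly: */
	vb2_core_queue_init(&q);	/* quirk stays false; poll() just waits */

	/* V4L2 drivers go through the wrapper instead: */
	vb2_queue_init(&q);		/* sets the quirk; poll() before the
					 * first QBUF reports POLLERR */
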
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); 223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
225 225
226 /*
227 * Wait until no further interrupts are presented by the PSL
228 * for this context.
229 */
230 if (cxl_ops->irq_wait)
231 cxl_ops->irq_wait(ctx);
232
226 /* release the reference to the group leader and mm handling pid */ 233 /* release the reference to the group leader and mm handling pid */
227 put_pid(ctx->pid); 234 put_pid(ctx->pid);
228 put_pid(ctx->glpid); 235 put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ 274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ 275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ 276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
277#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
277/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ 278/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
278#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ 279#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
279#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ 280#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
855 u64 dsisr, u64 errstat); 856 u64 dsisr, u64 errstat);
856 irqreturn_t (*psl_interrupt)(int irq, void *data); 857 irqreturn_t (*psl_interrupt)(int irq, void *data);
857 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); 858 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
859 void (*irq_wait)(struct cxl_context *ctx);
858 int (*attach_process)(struct cxl_context *ctx, bool kernel, 860 int (*attach_process)(struct cxl_context *ctx, bool kernel,
859 u64 wed, u64 amr); 861 u64 wed, u64 amr);
860 int (*detach_process)(struct cxl_context *ctx); 862 int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
203void cxl_unmap_irq(unsigned int virq, void *cookie) 203void cxl_unmap_irq(unsigned int virq, void *cookie)
204{ 204{
205 free_irq(virq, cookie); 205 free_irq(virq, cookie);
206 irq_dispose_mapping(virq);
207} 206}
208 207
209int cxl_register_one_irq(struct cxl *adapter, 208int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/delay.h>
17#include <asm/synch.h> 18#include <asm/synch.h>
18#include <misc/cxl-base.h> 19#include <misc/cxl-base.h>
19 20
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
797 return fail_psl_irq(afu, &irq_info); 798 return fail_psl_irq(afu, &irq_info);
798} 799}
799 800
801void native_irq_wait(struct cxl_context *ctx)
802{
803 u64 dsisr;
804 int timeout = 1000;
805 int ph;
806
807 /*
808 * Wait until no further interrupts are presented by the PSL
809 * for this context.
810 */
811 while (timeout--) {
812 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
813 if (ph != ctx->pe)
814 return;
815 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
816 if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
817 return;
818 /*
819 * We are waiting for the workqueue to process our
820 * irq, so need to let that run here.
821 */
822 msleep(1);
823 }
824
825 dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
826 " DSISR %016llx!\n", ph, dsisr);
827 return;
828}
829
800static irqreturn_t native_slice_irq_err(int irq, void *data) 830static irqreturn_t native_slice_irq_err(int irq, void *data)
801{ 831{
802 struct cxl_afu *afu = data; 832 struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
1076 .handle_psl_slice_error = native_handle_psl_slice_error, 1106 .handle_psl_slice_error = native_handle_psl_slice_error,
1077 .psl_interrupt = NULL, 1107 .psl_interrupt = NULL,
1078 .ack_irq = native_ack_irq, 1108 .ack_irq = native_ack_irq,
1109 .irq_wait = native_irq_wait,
1079 .attach_process = native_attach_process, 1110 .attach_process = native_attach_process,
1080 .detach_process = native_detach_process, 1111 .detach_process = native_detach_process,
1081 .support_attributes = native_support_attributes, 1112 .support_attributes = native_support_attributes,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
116{ 116{
117 struct inode *root; 117 struct inode *root;
118 118
119 sb->s_blocksize = PAGE_CACHE_SIZE; 119 sb->s_blocksize = PAGE_SIZE;
120 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 120 sb->s_blocksize_bits = PAGE_SHIFT;
121 sb->s_magic = IBMASMFS_MAGIC; 121 sb->s_magic = IBMASMFS_MAGIC;
122 sb->s_op = &ibmasmfs_s_ops; 122 sb->s_op = &ibmasmfs_s_ops;
123 sb->s_time_gran = 1; 123 sb->s_time_gran = 1;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b8fbb0..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
458 break; 458 break;
459 459
460 val = kmalloc(len, GFP_KERNEL); 460 val = kmalloc(len, GFP_KERNEL);
461 if (!val) 461 if (!val) {
462 kfree(base);
462 break; 463 break;
464 }
463 465
464 *val = 0x12345678; 466 *val = 0x12345678;
465 base[offset] = *val; 467 base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
498 } 500 }
499 case CT_READ_BUDDY_AFTER_FREE: { 501 case CT_READ_BUDDY_AFTER_FREE: {
500 unsigned long p = __get_free_page(GFP_KERNEL); 502 unsigned long p = __get_free_page(GFP_KERNEL);
501 int saw, *val = kmalloc(1024, GFP_KERNEL); 503 int saw, *val;
502 int *base; 504 int *base;
503 505
504 if (!p) 506 if (!p)
505 break; 507 break;
506 508
507 if (!val) 509 val = kmalloc(1024, GFP_KERNEL);
510 if (!val) {
511 free_page(p);
508 break; 512 break;
513 }
509 514
510 base = (int *)p; 515 base = (int *)p;
511 516
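
Both lkdtm hunks fix the same leak pattern: an early break after a failed second allocation abandoned the first one. The fix releases the earlier resource on the error path, and the second hunk additionally moves the kmalloc() after the page-allocation check, so at most one live resource ever needs unwinding. The canonical two-resource shape (sizes are placeholders):

	void *a, *b;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		return;

	b = kmalloc(128, GFP_KERNEL);
	if (!b) {
		kfree(a);	/* unwind the first allocation */
		return;
	}

	/* ... use a and b ... */
	kfree(b);
	kfree(a);
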
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
728 if (dirty) 728 if (dirty)
729 set_page_dirty(pages[i]); 729 set_page_dirty(pages[i]);
730 730
731 page_cache_release(pages[i]); 731 put_page(pages[i]);
732 pages[i] = NULL; 732 pages[i] = NULL;
733 } 733 }
734} 734}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3bdbe50a363f..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
86 86
87/* TODO: Replace these with struct ida */ 87/* TODO: Replace these with struct ida */
88static DECLARE_BITMAP(dev_use, MAX_DEVICES); 88static DECLARE_BITMAP(dev_use, MAX_DEVICES);
89static DECLARE_BITMAP(name_use, MAX_DEVICES);
90 89
91/* 90/*
92 * There is one mmc_blk_data per slot. 91 * There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
105 unsigned int usage; 104 unsigned int usage;
106 unsigned int read_only; 105 unsigned int read_only;
107 unsigned int part_type; 106 unsigned int part_type;
108 unsigned int name_idx;
109 unsigned int reset_done; 107 unsigned int reset_done;
110#define MMC_BLK_READ BIT(0) 108#define MMC_BLK_READ BIT(0)
111#define MMC_BLK_WRITE BIT(1) 109#define MMC_BLK_WRITE BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2202 goto out; 2200 goto out;
2203 } 2201 }
2204 2202
2205 /*
2206 * !subname implies we are creating main mmc_blk_data that will be
2207 * associated with mmc_card with dev_set_drvdata. Due to device
2208 * partitions, devidx will not coincide with a per-physical card
2209 * index anymore so we keep track of a name index.
2210 */
2211 if (!subname) {
2212 md->name_idx = find_first_zero_bit(name_use, max_devices);
2213 __set_bit(md->name_idx, name_use);
2214 } else
2215 md->name_idx = ((struct mmc_blk_data *)
2216 dev_to_disk(parent)->private_data)->name_idx;
2217
2218 md->area_type = area_type; 2203 md->area_type = area_type;
2219 2204
2220 /* 2205 /*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2264 */ 2249 */
2265 2250
2266 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 2251 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2267 "mmcblk%u%s", md->name_idx, subname ? subname : ""); 2252 "mmcblk%u%s", card->host->index, subname ? subname : "");
2268 2253
2269 if (mmc_card_mmc(card)) 2254 if (mmc_card_mmc(card))
2270 blk_queue_logical_block_size(md->queue.queue, 2255 blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
2418 struct list_head *pos, *q; 2403 struct list_head *pos, *q;
2419 struct mmc_blk_data *part_md; 2404 struct mmc_blk_data *part_md;
2420 2405
2421 __clear_bit(md->name_idx, name_use);
2422 list_for_each_safe(pos, q, &md->part) { 2406 list_for_each_safe(pos, q, &md->part) {
2423 part_md = list_entry(pos, struct mmc_blk_data, part); 2407 part_md = list_entry(pos, struct mmc_blk_data, part);
2424 list_del(pos); 2408 list_del(pos);
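
With the name_use bitmap and name_idx bookkeeping gone, the mmcblk device name is derived directly from the host controller index, which is stable for the lifetime of the card and is shared by all of a card's partitions; the subname suffix keeps the partitions distinct. Illustrative values only:

	/* card->host->index == 2, boot partition subname "boot0": */
	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");
	/* yields "mmcblk2boot0"; the main area (subname == NULL) is "mmcblk2" */
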
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d94607611d8..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
356 * They have to set these according to their abilities. 356 * They have to set these according to their abilities.
357 */ 357 */
358 host->max_segs = 1; 358 host->max_segs = 1;
359 host->max_seg_size = PAGE_CACHE_SIZE; 359 host->max_seg_size = PAGE_SIZE;
360 360
361 host->max_req_size = PAGE_CACHE_SIZE; 361 host->max_req_size = PAGE_SIZE;
362 host->max_blk_size = 512; 362 host->max_blk_size = 512;
363 host->max_blk_count = PAGE_CACHE_SIZE / 512; 363 host->max_blk_count = PAGE_SIZE / 512;
364 364
365 return host; 365 return host;
366} 366}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
97config MMC_SDHCI_ACPI 97config MMC_SDHCI_ACPI
98 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 98 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
99 depends on MMC_SDHCI && ACPI 99 depends on MMC_SDHCI && ACPI
100 select IOSF_MBI if X86
100 help 101 help
101 This selects support for ACPI enumerated SDHCI controllers, 102 This selects support for ACPI enumerated SDHCI controllers,
102 identified by ACPI Compatibility ID PNP0D40 or specific 103 identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
41#include <linux/mmc/pm.h> 41#include <linux/mmc/pm.h>
42#include <linux/mmc/slot-gpio.h> 42#include <linux/mmc/slot-gpio.h>
43 43
44#ifdef CONFIG_X86
45#include <asm/cpu_device_id.h>
46#include <asm/iosf_mbi.h>
47#endif
48
44#include "sdhci.h" 49#include "sdhci.h"
45 50
46enum { 51enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
116 .ops = &sdhci_acpi_ops_int, 121 .ops = &sdhci_acpi_ops_int,
117}; 122};
118 123
124#ifdef CONFIG_X86
125
126static bool sdhci_acpi_byt(void)
127{
128 static const struct x86_cpu_id byt[] = {
129 { X86_VENDOR_INTEL, 6, 0x37 },
130 {}
131 };
132
133 return x86_match_cpu(byt);
134}
135
136#define BYT_IOSF_SCCEP 0x63
137#define BYT_IOSF_OCP_NETCTRL0 0x1078
138#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
139
140static void sdhci_acpi_byt_setting(struct device *dev)
141{
142 u32 val = 0;
143
144 if (!sdhci_acpi_byt())
145 return;
146
147 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
148 &val)) {
149 dev_err(dev, "%s read error\n", __func__);
150 return;
151 }
152
153 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
154 return;
155
156 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
157
158 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
159 val)) {
160 dev_err(dev, "%s write error\n", __func__);
161 return;
162 }
163
164 dev_dbg(dev, "%s completed\n", __func__);
165}
166
167static bool sdhci_acpi_byt_defer(struct device *dev)
168{
169 if (!sdhci_acpi_byt())
170 return false;
171
172 if (!iosf_mbi_available())
173 return true;
174
175 sdhci_acpi_byt_setting(dev);
176
177 return false;
178}
179
180#else
181
182static inline void sdhci_acpi_byt_setting(struct device *dev)
183{
184}
185
186static inline bool sdhci_acpi_byt_defer(struct device *dev)
187{
188 return false;
189}
190
191#endif
192
119static int bxt_get_cd(struct mmc_host *mmc) 193static int bxt_get_cd(struct mmc_host *mmc)
120{ 194{
121 int gpio_cd = mmc_gpio_get_cd(mmc); 195 int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
322 if (acpi_bus_get_status(device) || !device->status.present) 396 if (acpi_bus_get_status(device) || !device->status.present)
323 return -ENODEV; 397 return -ENODEV;
324 398
399 if (sdhci_acpi_byt_defer(dev))
400 return -EPROBE_DEFER;
401
325 hid = acpi_device_hid(device); 402 hid = acpi_device_hid(device);
326 uid = device->pnp.unique_id; 403 uid = device->pnp.unique_id;
327 404
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
447{ 524{
448 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 525 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
449 526
527 sdhci_acpi_byt_setting(&c->pdev->dev);
528
450 return sdhci_resume_host(c->host); 529 return sdhci_resume_host(c->host);
451} 530}
452 531
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
470{ 549{
471 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 550 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
472 551
552 sdhci_acpi_byt_setting(&c->pdev->dev);
553
473 return sdhci_runtime_resume_host(c->host); 554 return sdhci_runtime_resume_host(c->host);
474} 555}
475 556
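
The sdhci-acpi additions are a probe-ordering idiom as much as a quirk: on Baytrail the OCP timeout field in an IOSF sideband register must be cleared before the controller behaves correctly, so if the IOSF MBI driver has not bound yet the probe bails out with -EPROBE_DEFER and the driver core retries later; the setting is reapplied on resume and runtime resume because it does not survive power transitions. Reduced to the idiom, with hypothetical helper names:

	static int my_probe(struct platform_device *pdev)
	{
		/* Hard dependency not ready yet: ask the core to retry later. */
		if (my_needs_sideband_fixup() && !my_sideband_available())
			return -EPROBE_DEFER;

		my_apply_sideband_fixup(&pdev->dev);

		/* ... rest of probe ... */
		return 0;
	}
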
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 62aa5d0efcee..79e19017343e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
390 slot->cd_idx = 0; 390 slot->cd_idx = 0;
391 slot->cd_override_level = true; 391 slot->cd_override_level = true;
392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || 392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) 394 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
394 slot->host->mmc_host_ops.get_cd = bxt_get_cd; 395 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
395 396
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
1173 1174
1174 { 1175 {
1175 .vendor = PCI_VENDOR_ID_INTEL, 1176 .vendor = PCI_VENDOR_ID_INTEL,
1177 .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
1178 .subvendor = PCI_ANY_ID,
1179 .subdevice = PCI_ANY_ID,
1180 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1181 },
1182
1183 {
1184 .vendor = PCI_VENDOR_ID_INTEL,
1185 .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
1186 .subvendor = PCI_ANY_ID,
1187 .subdevice = PCI_ANY_ID,
1188 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
1189 },
1190
1191 {
1192 .vendor = PCI_VENDOR_ID_INTEL,
1193 .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
1194 .subvendor = PCI_ANY_ID,
1195 .subdevice = PCI_ANY_ID,
1196 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
1197 },
1198
1199 {
1200 .vendor = PCI_VENDOR_ID_INTEL,
1176 .device = PCI_DEVICE_ID_INTEL_APL_EMMC, 1201 .device = PCI_DEVICE_ID_INTEL_APL_EMMC,
1177 .subvendor = PCI_ANY_ID, 1202 .subvendor = PCI_ANY_ID,
1178 .subdevice = PCI_ANY_ID, 1203 .subdevice = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca 28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc 29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
31#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
32#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
33#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
31#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca 34#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
32#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc 35#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
33#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0 36#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index aca439d3ca83..30132500aa1c 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
309 __func__, uhs, ctrl_2); 309 __func__, uhs, ctrl_2);
310} 310}
311 311
312static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
313 unsigned short vdd)
314{
315 struct mmc_host *mmc = host->mmc;
316 u8 pwr = host->pwr;
317
318 sdhci_set_power(host, mode, vdd);
319
320 if (host->pwr == pwr)
321 return;
322
323 if (host->pwr == 0)
324 vdd = 0;
325
326 if (!IS_ERR(mmc->supply.vmmc)) {
327 spin_unlock_irq(&host->lock);
328 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
329 spin_lock_irq(&host->lock);
330 }
331}
332
312static const struct sdhci_ops pxav3_sdhci_ops = { 333static const struct sdhci_ops pxav3_sdhci_ops = {
313 .set_clock = sdhci_set_clock, 334 .set_clock = sdhci_set_clock,
335 .set_power = pxav3_set_power,
314 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, 336 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
315 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 337 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
316 .set_bus_width = sdhci_set_bus_width, 338 .set_bus_width = sdhci_set_bus_width,
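
pxav3_set_power() is the first user of the .set_power hook introduced in the sdhci.c hunk below: it reuses the exported sdhci_set_power() for the register programming, then drives the VMMC regulator itself, forcing vdd to 0 whenever the power register was just turned off. The unlock/relock bracket matters: host->lock is a spinlock taken with interrupts disabled, while regulator calls may sleep, so the lock must be dropped around mmc_regulator_set_ocr():

	if (!IS_ERR(mmc->supply.vmmc)) {
		spin_unlock_irq(&host->lock);	/* regulator ops may sleep */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		spin_lock_irq(&host->lock);
	}
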
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f8c4762bb48d..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
382 .pdata = &sdhci_tegra114_pdata, 382 .pdata = &sdhci_tegra114_pdata,
383}; 383};
384 384
385static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
386 .pdata = &sdhci_tegra114_pdata,
387 .nvquirks = NVQUIRK_ENABLE_SDR50 |
388 NVQUIRK_ENABLE_DDR50 |
389 NVQUIRK_ENABLE_SDR104 |
390 NVQUIRK_HAS_PADCALIB,
391};
392
393static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { 385static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
394 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 386 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
395 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 387 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
407 399
408static const struct of_device_id sdhci_tegra_dt_match[] = { 400static const struct of_device_id sdhci_tegra_dt_match[] = {
409 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, 401 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
410 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 }, 402 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
411 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, 403 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
412 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, 404 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
413 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, 405 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8670f162dec7..6bd3d1794966 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1210,10 +1210,24 @@ clock_set:
1210} 1210}
1211EXPORT_SYMBOL_GPL(sdhci_set_clock); 1211EXPORT_SYMBOL_GPL(sdhci_set_clock);
1212 1212
1213static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1213static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1214 unsigned short vdd) 1214 unsigned short vdd)
1215{ 1215{
1216 struct mmc_host *mmc = host->mmc; 1216 struct mmc_host *mmc = host->mmc;
1217
1218 spin_unlock_irq(&host->lock);
1219 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1220 spin_lock_irq(&host->lock);
1221
1222 if (mode != MMC_POWER_OFF)
1223 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1224 else
1225 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1226}
1227
1228void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1229 unsigned short vdd)
1230{
1217 u8 pwr = 0; 1231 u8 pwr = 0;
1218 1232
1219 if (mode != MMC_POWER_OFF) { 1233 if (mode != MMC_POWER_OFF) {
@@ -1245,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1245 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1259 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1246 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1260 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1247 sdhci_runtime_pm_bus_off(host); 1261 sdhci_runtime_pm_bus_off(host);
1248 vdd = 0;
1249 } else { 1262 } else {
1250 /* 1263 /*
1251 * Spec says that we should clear the power reg before setting 1264 * Spec says that we should clear the power reg before setting
@@ -1276,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1276 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1289 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1277 mdelay(10); 1290 mdelay(10);
1278 } 1291 }
1292}
1293EXPORT_SYMBOL_GPL(sdhci_set_power);
1279 1294
1280 if (!IS_ERR(mmc->supply.vmmc)) { 1295static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1281 spin_unlock_irq(&host->lock); 1296 unsigned short vdd)
1282 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1297{
1283 spin_lock_irq(&host->lock); 1298 struct mmc_host *mmc = host->mmc;
1284 } 1299
1300 if (host->ops->set_power)
1301 host->ops->set_power(host, mode, vdd);
1302 else if (!IS_ERR(mmc->supply.vmmc))
1303 sdhci_set_power_reg(host, mode, vdd);
1304 else
1305 sdhci_set_power(host, mode, vdd);
1285} 1306}
1286 1307
1287/*****************************************************************************\ 1308/*****************************************************************************\
@@ -1431,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1431 } 1452 }
1432 } 1453 }
1433 1454
1434 sdhci_set_power(host, ios->power_mode, ios->vdd); 1455 __sdhci_set_power(host, ios->power_mode, ios->vdd);
1435 1456
1436 if (host->ops->platform_send_init_74_clocks) 1457 if (host->ops->platform_send_init_74_clocks)
1437 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1458 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
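
The sdhci.c refactor splits power handling into three layers: sdhci_set_power() keeps the bare register programming and is exported so drivers can build on it, sdhci_set_power_reg() wraps it for hosts whose power rail is regulator-backed, and __sdhci_set_power() selects one per host. A driver-supplied .set_power takes precedence, then a VMMC regulator, then plain registers; condensed from the hunk above:

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);	/* full driver override */
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);	/* regulator-backed */
	else
		sdhci_set_power(host, mode, vdd);	/* register-only default */
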
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 3bd28033dbd9..0f39f4f84d10 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -529,6 +529,8 @@ struct sdhci_ops {
529#endif 529#endif
530 530
531 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 531 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
532 void (*set_power)(struct sdhci_host *host, unsigned char mode,
533 unsigned short vdd);
532 534
533 int (*enable_dma)(struct sdhci_host *host); 535 int (*enable_dma)(struct sdhci_host *host);
534 unsigned int (*get_max_clock)(struct sdhci_host *host); 536 unsigned int (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
660} 662}
661 663
662void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); 664void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
665void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
666 unsigned short vdd);
663void sdhci_set_bus_width(struct sdhci_host *host, int width); 667void sdhci_set_bus_width(struct sdhci_host *host, int width);
664void sdhci_reset(struct sdhci_host *host, u8 mask); 668void sdhci_reset(struct sdhci_host *host, u8 mask);
665void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 669void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8d870ce9f944..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1513 mmc->caps |= pd->caps; 1513 mmc->caps |= pd->caps;
1514 mmc->max_segs = 32; 1514 mmc->max_segs = 32;
1515 mmc->max_blk_size = 512; 1515 mmc->max_blk_size = 512;
1516 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1516 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1518 mmc->max_seg_size = mmc->max_req_size; 1518 mmc->max_seg_size = mmc->max_req_size;
1519 1519
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1129 MMC_CAP_1_8V_DDR | 1129 MMC_CAP_1_8V_DDR |
1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1131 1131
1132 /* TODO MMC DDR is not working on A80 */
1133 if (of_device_is_compatible(pdev->dev.of_node,
1134 "allwinner,sun9i-a80-mmc"))
1135 mmc->caps &= ~MMC_CAP_1_8V_DDR;
1136
1132 ret = mmc_of_parse(mmc); 1137 ret = mmc_of_parse(mmc);
1133 if (ret) 1138 if (ret)
1134 goto error_free_dma; 1139 goto error_free_dma;
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 675435873823..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
63 } 63 }
64 } 64 }
65 65
66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
67 (align & PAGE_MASK))) || !multiple) { 67 (align & PAGE_MASK))) || !multiple) {
68 ret = -EINVAL; 68 ret = -EINVAL;
69 goto pio; 69 goto pio;
@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
133 } 133 }
134 } 134 }
135 135
136 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 136 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
137 (align & PAGE_MASK))) || !multiple) { 137 (align & PAGE_MASK))) || !multiple) {
138 ret = -EINVAL; 138 ret = -EINVAL;
139 goto pio; 139 goto pio;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 03f6e74c1906..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1125 mmc->caps2 |= pdata->capabilities2; 1125 mmc->caps2 |= pdata->capabilities2;
1126 mmc->max_segs = 32; 1126 mmc->max_segs = 32;
1127 mmc->max_blk_size = 512; 1127 mmc->max_blk_size = 512;
1128 mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 1128 mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
1129 mmc->max_segs; 1129 mmc->max_segs;
1130 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1130 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1131 mmc->max_seg_size = mmc->max_req_size; 1131 mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b2752fe711f2..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
1789 /* Set .max_segs to some random number. Feel free to adjust. */ 1789 /* Set .max_segs to some random number. Feel free to adjust. */
1790 mmc->max_segs = 32; 1790 mmc->max_segs = 32;
1791 mmc->max_blk_size = 512; 1791 mmc->max_blk_size = 512;
1792 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1792 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1793 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1793 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1794 /* 1794 /*
1795 * Setting .max_seg_size to 1 page would simplify our page-mapping code, 1795 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
75 break; 75 break;
76 } 76 }
77 77
78 page_cache_release(page); 78 put_page(page);
79 pages--; 79 pages--;
80 index++; 80 index++;
81 } 81 }
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
124 return PTR_ERR(page); 124 return PTR_ERR(page);
125 125
126 memcpy(buf, page_address(page) + offset, cpylen); 126 memcpy(buf, page_address(page) + offset, cpylen);
127 page_cache_release(page); 127 put_page(page);
128 128
129 if (retlen) 129 if (retlen)
130 *retlen += cpylen; 130 *retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
164 unlock_page(page); 164 unlock_page(page);
165 balance_dirty_pages_ratelimited(mapping); 165 balance_dirty_pages_ratelimited(mapping);
166 } 166 }
167 page_cache_release(page); 167 put_page(page);
168 168
169 if (retlen) 169 if (retlen)
170 *retlen += cpylen; 170 *retlen += cpylen;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b6facac54fc0..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
4009 * This is the first phase of the normal nand_scan() function. It reads the 4009 * This is the first phase of the normal nand_scan() function. It reads the
4010 * flash ID and sets up MTD fields accordingly. 4010 * flash ID and sets up MTD fields accordingly.
4011 * 4011 *
4012 * The mtd->owner field must be set to the module of the caller.
4013 */ 4012 */
4014int nand_scan_ident(struct mtd_info *mtd, int maxchips, 4013int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4015 struct nand_flash_dev *table) 4014 struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
4429 * 4428 *
4430 * This fills out all the uninitialized function pointers with the defaults. 4429 * This fills out all the uninitialized function pointers with the defaults.
4431 * The flash ID is read and the mtd/chip structures are filled with the 4430 * The flash ID is read and the mtd/chip structures are filled with the
4432 * appropriate values. The mtd->owner field must be set to the module of the 4431 * appropriate values.
4433 * caller.
4434 */ 4432 */
4435int nand_scan(struct mtd_info *mtd, int maxchips) 4433int nand_scan(struct mtd_info *mtd, int maxchips)
4436{ 4434{
4437 int ret; 4435 int ret;
4438 4436
4439 /* Many callers got this wrong, so check for it for a while... */
4440 if (!mtd->owner && caller_is_module()) {
4441 pr_crit("%s called with NULL mtd->owner!\n", __func__);
4442 BUG();
4443 }
4444
4445 ret = nand_scan_ident(mtd, maxchips, NULL); 4437 ret = nand_scan_ident(mtd, maxchips, NULL);
4446 if (!ret) 4438 if (!ret)
4447 ret = nand_scan_tail(mtd); 4439 ret = nand_scan_tail(mtd);
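
With the caller_is_module() BUG() removed, nand_scan() callers no longer have to assign mtd->owner first; the comment updates in nand_scan_ident() and nand_scan() reflect the same relaxation. A minimal call site now looks like this (sketch; controller callbacks and chip wiring elided, my_controller is hypothetical):

	struct nand_chip *chip = &my_controller->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int err;

	err = nand_scan(mtd, 1);	/* no mtd->owner assignment required */
	if (err)
		return err;
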
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
1339 int i; 1339 int i;
1340 1340
1341 for (i = 0; i < ns->held_cnt; i++) 1341 for (i = 0; i < ns->held_cnt; i++)
1342 page_cache_release(ns->held_pages[i]); 1342 put_page(ns->held_pages[i]);
1343} 1343}
1344 1344
1345/* Get page cache pages in advance to provide NOFS memory allocation */ 1345/* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
1349 struct page *page; 1349 struct page *page;
1350 struct address_space *mapping = file->f_mapping; 1350 struct address_space *mapping = file->f_mapping;
1351 1351
1352 start_index = pos >> PAGE_CACHE_SHIFT; 1352 start_index = pos >> PAGE_SHIFT;
1353 end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; 1353 end_index = (pos + count - 1) >> PAGE_SHIFT;
1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) 1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1355 return -EINVAL; 1355 return -EINVAL;
1356 ns->held_cnt = 0; 1356 ns->held_cnt = 0;
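The page_cache_release()/PAGE_CACHE_SHIFT conversions in the block2mtd and nandsim hunks are part of the tree-wide removal of the PAGE_CACHE_* aliases: page_cache_release() becomes put_page() and PAGE_CACHE_SHIFT becomes PAGE_SHIFT, with no behavioural change since the aliases were equal to their PAGE_* counterparts. A standalone sketch of the index math, assuming 4 KiB pages (PAGE_SHIFT is per-architecture in the kernel):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	long long pos = 5000, count = 10000;
	long long start_index = pos >> PAGE_SHIFT;
	long long end_index = (pos + count - 1) >> PAGE_SHIFT;

	/* bytes 5000..14999 live in pages 1..3 */
	printf("pages %lld..%lld (%lld held)\n",
	       start_index, end_index, end_index - start_index + 1);
	return 0;
}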
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..befd67df08e1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@ config DUMMY
62 this device is consigned into oblivion) with a configurable IP 62 this device is consigned into oblivion) with a configurable IP
63 address. It is most commonly used in order to make your currently 63 address. It is most commonly used in order to make your currently
64 inactive SLIP address seem like a real address for local programs. 64 inactive SLIP address seem like a real address for local programs.
65 If you use SLIP or PPP, you might want to say Y here. Since this 65 If you use SLIP or PPP, you might want to say Y here. It won't
66 thing often comes in handy, the default is Y. It won't enlarge your 66 enlarge your kernel. What a deal. Read about it in the Network
67 kernel either. What a deal. Read about it in the Network
68 Administrator's Guide, available from 67 Administrator's Guide, available from
69 <http://www.tldp.org/docs.html#guide>. 68 <http://www.tldp.org/docs.html#guide>.
70 69
@@ -195,6 +194,7 @@ config GENEVE
195 194
196config MACSEC 195config MACSEC
197 tristate "IEEE 802.1AE MAC-level encryption (MACsec)" 196 tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
197 select CRYPTO
198 select CRYPTO_AES 198 select CRYPTO_AES
199 select CRYPTO_GCM 199 select CRYPTO_GCM
200 ---help--- 200 ---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be86570..a2904029cccc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2181 struct net_device *bridge) 2181 struct net_device *bridge)
2182{ 2182{
2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2184 u16 fid;
2185 int i, err; 2184 int i, err;
2186 2185
2187 mutex_lock(&ps->smi_mutex); 2186 mutex_lock(&ps->smi_mutex);
2188 2187
2189 /* Get or create the bridge FID and assign it to the port */
2190 for (i = 0; i < ps->num_ports; ++i)
2191 if (ps->ports[i].bridge_dev == bridge)
2192 break;
2193
2194 if (i < ps->num_ports)
2195 err = _mv88e6xxx_port_fid_get(ds, i, &fid);
2196 else
2197 err = _mv88e6xxx_fid_new(ds, &fid);
2198 if (err)
2199 goto unlock;
2200
2201 err = _mv88e6xxx_port_fid_set(ds, port, fid);
2202 if (err)
2203 goto unlock;
2204
2205 /* Assign the bridge and remap each port's VLANTable */ 2188 /* Assign the bridge and remap each port's VLANTable */
2206 ps->ports[port].bridge_dev = bridge; 2189 ps->ports[port].bridge_dev = bridge;
2207 2190
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2213 } 2196 }
2214 } 2197 }
2215 2198
2216unlock:
2217 mutex_unlock(&ps->smi_mutex); 2199 mutex_unlock(&ps->smi_mutex);
2218 2200
2219 return err; 2201 return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2223{ 2205{
2224 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2206 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2225 struct net_device *bridge = ps->ports[port].bridge_dev; 2207 struct net_device *bridge = ps->ports[port].bridge_dev;
2226 u16 fid;
2227 int i; 2208 int i;
2228 2209
2229 mutex_lock(&ps->smi_mutex); 2210 mutex_lock(&ps->smi_mutex);
2230 2211
2231 /* Give the port a fresh Filtering Information Database */
2232 if (_mv88e6xxx_fid_new(ds, &fid) ||
2233 _mv88e6xxx_port_fid_set(ds, port, fid))
2234 netdev_warn(ds->ports[port], "failed to assign a new FID\n");
2235
2236 /* Unassign the bridge and remap each port's VLANTable */ 2212 /* Unassign the bridge and remap each port's VLANTable */
2237 ps->ports[port].bridge_dev = NULL; 2213 ps->ports[port].bridge_dev = NULL;
2238 2214
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2476 * the other bits clear. 2452 * the other bits clear.
2477 */ 2453 */
2478 reg = 1 << port; 2454 reg = 1 << port;
2479 /* Disable learning for DSA and CPU ports */ 2455 /* Disable learning for CPU port */
2480 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) 2456 if (dsa_is_cpu_port(ds, port))
2481 reg = PORT_ASSOC_VECTOR_LOCKED_PORT; 2457 reg = 0;
2482 2458
2483 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); 2459 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2484 if (ret) 2460 if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2558 if (ret) 2534 if (ret)
2559 goto abort; 2535 goto abort;
2560 2536
2561 /* Port based VLAN map: give each port its own address 2537 /* Port based VLAN map: give each port the same default address
2562 * database, and allow bidirectional communication between the 2538 * database, and allow bidirectional communication between the
2563 * CPU and DSA port(s), and the other ports. 2539 * CPU and DSA port(s), and the other ports.
2564 */ 2540 */
2565 ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); 2541 ret = _mv88e6xxx_port_fid_set(ds, port, 0);
2566 if (ret) 2542 if (ret)
2567 goto abort; 2543 goto abort;
2568 2544
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1412 1412
1413 err = -EIO; 1413 err = -EIO;
1414 1414
1415 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; 1415 netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1417 1417
1418 /* Init PHY as early as possible due to power saving issue */ 1418 /* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac); 1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1573 } 1573 }
1574 1574
 1575 /* This (reset &) enable is not present in specs or reference driver but
1576 * Broadcom does it in arch PCI code when enabling fake PCI device.
1577 */
1578 bcma_core_enable(core, 0);
1579
1575 /* Allocation and references */ 1580 /* Allocation and references */
1576 net_dev = alloc_etherdev(sizeof(*bgmac)); 1581 net_dev = alloc_etherdev(sizeof(*bgmac));
1577 if (!net_dev) 1582 if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
199#define BGMAC_CMDCFG_TAI 0x00000200 199#define BGMAC_CMDCFG_TAI 0x00000200
200#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ 200#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
201#define BGMAC_CMDCFG_HD_SHIFT 10 201#define BGMAC_CMDCFG_HD_SHIFT 10
202#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ 202#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
203#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ 203#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
204#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 204#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
205#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ 205#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
206#define BGMAC_CMDCFG_AE 0x00400000 206#define BGMAC_CMDCFG_AE 0x00400000
207#define BGMAC_CMDCFG_CFE 0x00800000 207#define BGMAC_CMDCFG_CFE 0x00800000
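Together with the bcma_core_enable() call added in bgmac.c, the header change above corrects the reset-bit selection for newer MAC cores: rev 5 and up previously matched the rev-0 branch of BGMAC_CMDCFG_SR(). A standalone before/after check using the same constants:

#include <stdio.h>

#define BGMAC_CMDCFG_SR_REV0 0x00000800	/* core rev 0-3 */
#define BGMAC_CMDCFG_SR_REV4 0x00002000	/* core rev >= 4 */
#define SR_OLD(rev) (((rev) == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define SR_NEW(rev) (((rev) >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)

int main(void)
{
	/* rev 5 used to be driven with the rev-0 bit */
	printf("rev 5: old 0x%08x, new 0x%08x\n", SR_OLD(5), SR_NEW(5));
	return 0;
}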
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d148ca..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
878 else 878 else
879 p = (char *)priv; 879 p = (char *)priv;
880 p += s->stat_offset; 880 p += s->stat_offset;
881 data[i] = *(u32 *)p; 881 if (sizeof(unsigned long) != sizeof(u32) &&
882 s->stat_sizeof == sizeof(unsigned long))
883 data[i] = *(unsigned long *)p;
884 else
885 data[i] = *(u32 *)p;
882 } 886 }
883} 887}
884 888
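The bcmgenet hunk fixes ethtool stats on 64-bit kernels, where stats declared as unsigned long are 8 bytes: reading them through a u32 pointer returns only half of the value (the low half on little-endian, the wrong half on big-endian). A standalone sketch of the failure mode, assuming a 64-bit little-endian host and made-up values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long stat = 0x100000001UL;	/* needs 64-bit unsigned long */
	unsigned int narrow;			/* u32-sized */

	memcpy(&narrow, &stat, sizeof(narrow));	/* old: data[i] = *(u32 *)p */
	printf("u32 read: 0x%x\n", narrow);	/* 0x1: high bits lost */
	printf("ulong read: 0x%lx\n", stat);	/* new: full value */
	return 0;
}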
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
1011 } 1011 }
1012 1012
1013 lmac++; 1013 lmac++;
1014 if (lmac == MAX_LMAC_PER_BGX) 1014 if (lmac == MAX_LMAC_PER_BGX) {
1015 of_node_put(node);
1015 break; 1016 break;
1017 }
1016 } 1018 }
1017 of_node_put(node);
1018 return 0; 1019 return 0;
1019 1020
1020defer: 1021defer:
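The thunder_bgx hunk is a device-node refcount fix: for_each_child_of_node()-style iterators drop each child's reference as they advance, so a reference only has to be dropped by hand when breaking out early, and the unconditional of_node_put() after the loop underflowed the count on normal exit. A minimal sketch of the balancing rule, with a plain counter standing in for the OF refcount:

#include <stdio.h>

static int refs;
static void node_get(void) { refs++; }	/* stand-in for of_node_get() */
static void node_put(void) { refs--; }	/* stand-in for of_node_put() */

int main(void)
{
	int i, max = 3;

	for (i = 0; i < 5; i++) {
		node_get();		/* iterator takes a reference */
		if (i == max) {
			node_put();	/* early break: drop it ourselves */
			break;
		}
		node_put();		/* normal step: iterator drops it */
	}
	printf("leaked refs: %d\n", refs);	/* 0: balanced */
	return 0;
}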
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1451 unsigned int mmd, unsigned int reg, u16 *valp); 1451 unsigned int mmd, unsigned int reg, u16 *valp);
1452int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1452int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1453 unsigned int mmd, unsigned int reg, u16 val); 1453 unsigned int mmd, unsigned int reg, u16 val);
1454int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
1455 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1456 unsigned int fl0id, unsigned int fl1id);
1454int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1457int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1455 unsigned int vf, unsigned int iqtype, unsigned int iqid, 1458 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1456 unsigned int fl0id, unsigned int fl1id); 1459 unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2981void t4_free_sge_resources(struct adapter *adap) 2981void t4_free_sge_resources(struct adapter *adap)
2982{ 2982{
2983 int i; 2983 int i;
2984 struct sge_eth_rxq *eq = adap->sge.ethrxq; 2984 struct sge_eth_rxq *eq;
2985 struct sge_eth_txq *etq = adap->sge.ethtxq; 2985 struct sge_eth_txq *etq;
2986
2987 /* stop all Rx queues in order to start them draining */
2988 for (i = 0; i < adap->sge.ethqsets; i++) {
2989 eq = &adap->sge.ethrxq[i];
2990 if (eq->rspq.desc)
2991 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
2992 FW_IQ_TYPE_FL_INT_CAP,
2993 eq->rspq.cntxt_id,
2994 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
2995 0xffff);
2996 }
2986 2997
2987 /* clean up Ethernet Tx/Rx queues */ 2998 /* clean up Ethernet Tx/Rx queues */
2988 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { 2999 for (i = 0; i < adap->sge.ethqsets; i++) {
3000 eq = &adap->sge.ethrxq[i];
2989 if (eq->rspq.desc) 3001 if (eq->rspq.desc)
2990 free_rspq_fl(adap, &eq->rspq, 3002 free_rspq_fl(adap, &eq->rspq,
2991 eq->fl.size ? &eq->fl : NULL); 3003 eq->fl.size ? &eq->fl : NULL);
3004
3005 etq = &adap->sge.ethtxq[i];
2992 if (etq->q.desc) { 3006 if (etq->q.desc) {
2993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 3007 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
2994 etq->q.cntxt_id); 3008 etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736bece0f..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
2557} 2557}
2558 2558
2559#define EEPROM_STAT_ADDR 0x7bfc 2559#define EEPROM_STAT_ADDR 0x7bfc
2560#define VPD_SIZE 0x800
2560#define VPD_BASE 0x400 2561#define VPD_BASE 0x400
2561#define VPD_BASE_OLD 0 2562#define VPD_BASE_OLD 0
2562#define VPD_LEN 1024 2563#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2594 if (!vpd) 2595 if (!vpd)
2595 return -ENOMEM; 2596 return -ENOMEM;
2596 2597
2598 /* We have two VPD data structures stored in the adapter VPD area.
2599 * By default, Linux calculates the size of the VPD area by traversing
2600 * the first VPD area at offset 0x0, so we need to tell the OS what
2601 * our real VPD size is.
2602 */
2603 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2604 if (ret < 0)
2605 goto out;
2606
2597 /* Card information normally starts at VPD_BASE but early cards had 2607 /* Card information normally starts at VPD_BASE but early cards had
2598 * it at 0. 2608 * it at 0.
2599 */ 2609 */
@@ -6940,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
6940} 6950}
6941 6951
6942/** 6952/**
6953 * t4_iq_stop - stop an ingress queue and its FLs
6954 * @adap: the adapter
6955 * @mbox: mailbox to use for the FW command
6956 * @pf: the PF owning the queues
6957 * @vf: the VF owning the queues
6958 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
6959 * @iqid: ingress queue id
6960 * @fl0id: FL0 queue id or 0xffff if no attached FL0
6961 * @fl1id: FL1 queue id or 0xffff if no attached FL1
6962 *
6963 * Stops an ingress queue and its associated FLs, if any. This causes
6964 * any current or future data/messages destined for these queues to be
6965 * tossed.
6966 */
6967int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
6968 unsigned int vf, unsigned int iqtype, unsigned int iqid,
6969 unsigned int fl0id, unsigned int fl1id)
6970{
6971 struct fw_iq_cmd c;
6972
6973 memset(&c, 0, sizeof(c));
6974 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
6975 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
6976 FW_IQ_CMD_VFN_V(vf));
6977 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
6978 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
6979 c.iqid = cpu_to_be16(iqid);
6980 c.fl0id = cpu_to_be16(fl0id);
6981 c.fl1id = cpu_to_be16(fl1id);
6982 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6983}
6984
6985/**
6943 * t4_iq_free - free an ingress queue and its FLs 6986 * t4_iq_free - free an ingress queue and its FLs
6944 * @adap: the adapter 6987 * @adap: the adapter
6945 * @mbox: mailbox to use for the FW command 6988 * @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ 168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
169 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
169 170
170 /* T6 adapters: 171 /* T6 adapters:
171 */ 172 */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde699ba..ae90d4f12b70 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
3106 return __e1000_maybe_stop_tx(netdev, size); 3106 return __e1000_maybe_stop_tx(netdev, size);
3107} 3107}
3108 3108
3109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 3109#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev) 3111 struct net_device *netdev)
3112{ 3112{
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3256 nr_frags, mss); 3256 nr_frags, mss);
3257 3257
3258 if (count) { 3258 if (count) {
3259 /* The descriptors needed is higher than other Intel drivers
3260 * due to a number of workarounds. The breakdown is below:
3261 * Data descriptors: MAX_SKB_FRAGS + 1
3262 * Context Descriptor: 1
3263 * Keep head from touching tail: 2
3264 * Workarounds: 3
3265 */
3266 int desc_needed = MAX_SKB_FRAGS + 7;
3267
3259 netdev_sent_queue(netdev, skb->len); 3268 netdev_sent_queue(netdev, skb->len);
3260 skb_tx_timestamp(skb); 3269 skb_tx_timestamp(skb);
3261 3270
3262 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3271 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3272
3273 /* 82544 potentially requires twice as many data descriptors
3274 * in order to guarantee buffers don't end on evenly-aligned
3275 * dwords
3276 */
3277 if (adapter->pcix_82544)
3278 desc_needed += MAX_SKB_FRAGS + 1;
3279
3263 /* Make sure there is space in the ring for the next send. */ 3280 /* Make sure there is space in the ring for the next send. */
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3281 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3265 3282
3266 if (!skb->xmit_more || 3283 if (!skb->xmit_more ||
3267 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3284 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
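The new TXD_USE_COUNT() is standard round-up division; the old form unconditionally added one descriptor, over-counting whenever the buffer size was an exact multiple of 2^X. A standalone check, taking X = 12 (4096-byte chunks) purely for illustration:

#include <stdio.h>

#define TXD_USE_COUNT_OLD(S, X) (((S) >> (X)) + 1)
#define TXD_USE_COUNT_NEW(S, X) (((S) + ((1 << (X)) - 1)) >> (X))

int main(void)
{
	printf("4096: old %d, new %d\n",
	       TXD_USE_COUNT_OLD(4096, 12),	/* 2: one descriptor wasted */
	       TXD_USE_COUNT_NEW(4096, 12));	/* 1: exact fit */
	printf("4097: old %d, new %d\n",
	       TXD_USE_COUNT_OLD(4097, 12),	/* 2 */
	       TXD_USE_COUNT_NEW(4097, 12));	/* 2 */
	return 0;
}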
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1223 if (err) 1223 if (err)
1224 return err; 1224 return err;
1225 1225
1226 /* verify upper 16 bits are zero */
1227 if (vid >> 16)
1228 return FM10K_ERR_PARAM;
1229
1230 set = !(vid & FM10K_VLAN_CLEAR); 1226 set = !(vid & FM10K_VLAN_CLEAR);
1231 vid &= ~FM10K_VLAN_CLEAR; 1227 vid &= ~FM10K_VLAN_CLEAR;
1232 1228
1233 err = fm10k_iov_select_vid(vf_info, (u16)vid); 1229 /* if the length field has been set, this is a multi-bit
1234 if (err < 0) 1230 * update request. For multi-bit requests, simply disallow
1235 return err; 1231 * them when the pf_vid has been set. In this case, the PF
1232 * should have already cleared the VLAN_TABLE, and if we
1233 * allowed them, it could allow a rogue VF to receive traffic
1234 * on a VLAN it was not assigned. In the single-bit case, we
1235 * need to modify requests for VLAN 0 to use the default PF or
1236 * SW vid when assigned.
1237 */
1236 1238
1237 vid = err; 1239 if (vid >> 16) {
1240 /* prevent multi-bit requests when PF has
1241 * administratively set the VLAN for this VF
1242 */
1243 if (vf_info->pf_vid)
1244 return FM10K_ERR_PARAM;
1245 } else {
1246 err = fm10k_iov_select_vid(vf_info, (u16)vid);
1247 if (err < 0)
1248 return err;
1249
1250 vid = err;
1251 }
1238 1252
1239 /* update VSI info for VF in regards to VLAN table */ 1253 /* update VSI info for VF in regards to VLAN table */
1240 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); 1254 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
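The fm10k hunk stops rejecting every request with a non-zero length field and instead denies multi-bit updates only when the PF has administratively pinned a VLAN. The sketch below mirrors just that decision flow; the value of FM10K_VLAN_CLEAR and the mapping of VLAN 0 to the PF default (performed by fm10k_iov_select_vid() in the driver) are assumptions for illustration:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_CLEAR (1u << 15)	/* assumed stand-in for FM10K_VLAN_CLEAR */

/* Returns the resolved vid, or -1 for a rejected request. */
static int resolve_vid(unsigned int vid, unsigned int pf_vid, bool *set)
{
	*set = !(vid & VLAN_CLEAR);
	vid &= ~VLAN_CLEAR;

	if (vid >> 16) {
		if (pf_vid)
			return -1;	/* deny multi-bit when PF pinned a VLAN */
		return (int)vid;
	}
	if (!vid && pf_vid)
		vid = pf_vid;		/* VLAN 0 -> PF/SW default */
	return (int)vid;
}

int main(void)
{
	bool set;

	printf("%d\n", resolve_vid(0x20000, 100, &set));	/* -1: denied */
	printf("%d\n", resolve_vid(0, 100, &set));		/* 100 */
	printf("%d\n", resolve_vid(42, 0, &set));		/* 42 */
	return 0;
}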
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 67006431726a..344912957cab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8559,6 +8559,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8560 I40E_FLAG_WB_ON_ITR_CAPABLE | 8560 I40E_FLAG_WB_ON_ITR_CAPABLE |
8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8562 I40E_FLAG_NO_PCI_LINK_CHECK |
8562 I40E_FLAG_100M_SGMII_CAPABLE | 8563 I40E_FLAG_100M_SGMII_CAPABLE |
8563 I40E_FLAG_USE_SET_LLDP_MIB | 8564 I40E_FLAG_USE_SET_LLDP_MIB |
8564 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8565 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2594} 2594}
2595 2595
2596/** 2596/**
2597 * __i40e_chk_linearize - Check if there are more than 8 fragments per packet 2597 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2598 * @skb: send buffer 2598 * @skb: send buffer
2599 * 2599 *
2600 * Note: Our HW can't scatter-gather more than 8 fragments to build 2600 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2601 * a packet on the wire and so we need to figure out the cases where we 2601 * and so we need to figure out the cases where we need to linearize the skb.
2602 * need to linearize the skb. 2602 *
2603 * For TSO we need to count the TSO header and segment payload separately.
2604 * As such we need to check cases where we have 7 fragments or more as we
2605 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2606 * the segment payload in the first descriptor, and another 7 for the
2607 * fragments.
2603 **/ 2608 **/
2604bool __i40e_chk_linearize(struct sk_buff *skb) 2609bool __i40e_chk_linearize(struct sk_buff *skb)
2605{ 2610{
2606 const struct skb_frag_struct *frag, *stale; 2611 const struct skb_frag_struct *frag, *stale;
2607 int gso_size, nr_frags, sum; 2612 int nr_frags, sum;
2608
 2609 /* check to see if TSO is enabled, if so we may get a reprieve */
2610 gso_size = skb_shinfo(skb)->gso_size;
2611 if (unlikely(!gso_size))
2612 return true;
2613 2613
2614 /* no need to check if number of frags is less than 8 */ 2614 /* no need to check if number of frags is less than 7 */
2615 nr_frags = skb_shinfo(skb)->nr_frags; 2615 nr_frags = skb_shinfo(skb)->nr_frags;
2616 if (nr_frags < I40E_MAX_BUFFER_TXD) 2616 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
2617 return false; 2617 return false;
2618 2618
2619 /* We need to walk through the list and validate that each group 2619 /* We need to walk through the list and validate that each group
2620 * of 6 fragments totals at least gso_size. However we don't need 2620 * of 6 fragments totals at least gso_size. However we don't need
2621 * to perform such validation on the first or last 6 since the first 2621 * to perform such validation on the last 6 since the last 6 cannot
2622 * 6 cannot inherit any data from a descriptor before them, and the 2622 * inherit any data from a descriptor after them.
2623 * last 6 cannot inherit any data from a descriptor after them.
2624 */ 2623 */
2625 nr_frags -= I40E_MAX_BUFFER_TXD - 1; 2624 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
2626 frag = &skb_shinfo(skb)->frags[0]; 2625 frag = &skb_shinfo(skb)->frags[0];
2627 2626
2628 /* Initialize size to the negative value of gso_size minus 1. We 2627 /* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
2631 * descriptors for a single transmit as the header and previous 2630 * descriptors for a single transmit as the header and previous
2632 * fragment are already consuming 2 descriptors. 2631 * fragment are already consuming 2 descriptors.
2633 */ 2632 */
2634 sum = 1 - gso_size; 2633 sum = 1 - skb_shinfo(skb)->gso_size;
2635 2634
2636 /* Add size of frags 1 through 5 to create our initial sum */ 2635 /* Add size of frags 0 through 4 to create our initial sum */
2637 sum += skb_frag_size(++frag); 2636 sum += skb_frag_size(frag++);
2638 sum += skb_frag_size(++frag); 2637 sum += skb_frag_size(frag++);
2639 sum += skb_frag_size(++frag); 2638 sum += skb_frag_size(frag++);
2640 sum += skb_frag_size(++frag); 2639 sum += skb_frag_size(frag++);
2641 sum += skb_frag_size(++frag); 2640 sum += skb_frag_size(frag++);
2642 2641
2643 /* Walk through fragments adding latest fragment, testing it, and 2642 /* Walk through fragments adding latest fragment, testing it, and
2644 * then removing stale fragments from the sum. 2643 * then removing stale fragments from the sum.
2645 */ 2644 */
2646 stale = &skb_shinfo(skb)->frags[0]; 2645 stale = &skb_shinfo(skb)->frags[0];
2647 for (;;) { 2646 for (;;) {
2648 sum += skb_frag_size(++frag); 2647 sum += skb_frag_size(frag++);
2649 2648
2650 /* if sum is negative we failed to make sufficient progress */ 2649 /* if sum is negative we failed to make sufficient progress */
2651 if (sum < 0) 2650 if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
2655 if (!--nr_frags) 2654 if (!--nr_frags)
2656 break; 2655 break;
2657 2656
2658 sum -= skb_frag_size(++stale); 2657 sum -= skb_frag_size(stale++);
2659 } 2658 }
2660 2659
2661 return false; 2660 return false;
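The rewritten check keeps a sliding sum over windows of 6 fragment sizes, seeded with 1 - gso_size, so the sum going negative means some 6-fragment run cannot cover a full TSO segment and the skb has to be linearized. A standalone sketch of the same walk over plain int sizes; the fragment sizes, gso_size values and the 8-buffer limit are illustrative:

#include <stdio.h>

#define MAX_BUFFER_TXD 8	/* stand-in for I40E_MAX_BUFFER_TXD */

static int needs_linearize(const int *frag, int nr_frags, int gso_size)
{
	const int *stale = frag;
	int sum;

	/* 6 or fewer fragments always fit (header + 7 buffers max) */
	if (nr_frags < MAX_BUFFER_TXD - 1)
		return 0;

	nr_frags -= MAX_BUFFER_TXD - 2;

	/* seed so a window of 6 fragments must total >= gso_size */
	sum = 1 - gso_size;
	sum += *frag++;
	sum += *frag++;
	sum += *frag++;
	sum += *frag++;
	sum += *frag++;

	for (;;) {
		sum += *frag++;		/* admit the newest fragment */
		if (sum < 0)
			return 1;	/* window fell short of gso_size */
		if (!--nr_frags)
			break;
		sum -= *stale++;	/* retire the oldest fragment */
	}
	return 0;
}

int main(void)
{
	int frags[9] = { 100, 100, 100, 100, 100, 100, 100, 100, 100 };

	printf("%d\n", needs_linearize(frags, 9, 1400));	/* 1 */
	printf("%d\n", needs_linearize(frags, 9, 500));		/* 0 */
	return 0;
}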
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
413 **/ 413 **/
414static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 414static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
415{ 415{
416 /* we can only support up to 8 data buffers for a single send */ 416 /* Both TSO and single send will work if count is less than 8 */
417 if (likely(count <= I40E_MAX_BUFFER_TXD)) 417 if (likely(count < I40E_MAX_BUFFER_TXD))
418 return false; 418 return false;
419 419
420 return __i40e_chk_linearize(skb); 420 if (skb_is_gso(skb))
421 return __i40e_chk_linearize(skb);
422
423 /* we can support up to 8 data buffers for a single send */
424 return count != I40E_MAX_BUFFER_TXD;
421} 425}
422#endif /* _I40E_TXRX_H_ */ 426#endif /* _I40E_TXRX_H_ */
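The inline gate now separates TSO from plain sends: fewer than 8 buffers always fits, a GSO skb falls through to the deep window check above, and a non-GSO skb is allowed exactly 8 buffers but no more. A standalone sketch of just the gate, with the deep check stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD 8	/* stand-in for I40E_MAX_BUFFER_TXD */

static bool deep_check_stub(void)
{
	return false;	/* would be __i40e_chk_linearize(skb) */
}

static bool chk_linearize(bool is_gso, int count)
{
	if (count < MAX_BUFFER_TXD)
		return false;			/* small sends always fit */
	if (is_gso)
		return deep_check_stub();	/* TSO: windowed check */
	return count != MAX_BUFFER_TXD;		/* non-TSO: exactly 8 is OK */
}

int main(void)
{
	printf("%d\n", chk_linearize(false, 8));	/* 0: fits */
	printf("%d\n", chk_linearize(false, 9));	/* 1: linearize */
	return 0;
}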
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1796} 1796}
1797 1797
1798/** 1798/**
1799 * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet 1799 * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
1800 * @skb: send buffer 1800 * @skb: send buffer
1801 * 1801 *
1802 * Note: Our HW can't scatter-gather more than 8 fragments to build 1802 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
1803 * a packet on the wire and so we need to figure out the cases where we 1803 * and so we need to figure out the cases where we need to linearize the skb.
1804 * need to linearize the skb. 1804 *
1805 * For TSO we need to count the TSO header and segment payload separately.
1806 * As such we need to check cases where we have 7 fragments or more as we
1807 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1808 * the segment payload in the first descriptor, and another 7 for the
1809 * fragments.
1805 **/ 1810 **/
1806bool __i40evf_chk_linearize(struct sk_buff *skb) 1811bool __i40evf_chk_linearize(struct sk_buff *skb)
1807{ 1812{
1808 const struct skb_frag_struct *frag, *stale; 1813 const struct skb_frag_struct *frag, *stale;
1809 int gso_size, nr_frags, sum; 1814 int nr_frags, sum;
1810
 1811 /* check to see if TSO is enabled, if so we may get a reprieve */
1812 gso_size = skb_shinfo(skb)->gso_size;
1813 if (unlikely(!gso_size))
1814 return true;
1815 1815
1816 /* no need to check if number of frags is less than 8 */ 1816 /* no need to check if number of frags is less than 7 */
1817 nr_frags = skb_shinfo(skb)->nr_frags; 1817 nr_frags = skb_shinfo(skb)->nr_frags;
1818 if (nr_frags < I40E_MAX_BUFFER_TXD) 1818 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
1819 return false; 1819 return false;
1820 1820
1821 /* We need to walk through the list and validate that each group 1821 /* We need to walk through the list and validate that each group
1822 * of 6 fragments totals at least gso_size. However we don't need 1822 * of 6 fragments totals at least gso_size. However we don't need
1823 * to perform such validation on the first or last 6 since the first 1823 * to perform such validation on the last 6 since the last 6 cannot
1824 * 6 cannot inherit any data from a descriptor before them, and the 1824 * inherit any data from a descriptor after them.
1825 * last 6 cannot inherit any data from a descriptor after them.
1826 */ 1825 */
1827 nr_frags -= I40E_MAX_BUFFER_TXD - 1; 1826 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
1828 frag = &skb_shinfo(skb)->frags[0]; 1827 frag = &skb_shinfo(skb)->frags[0];
1829 1828
1830 /* Initialize size to the negative value of gso_size minus 1. We 1829 /* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
1833 * descriptors for a single transmit as the header and previous 1832 * descriptors for a single transmit as the header and previous
1834 * fragment are already consuming 2 descriptors. 1833 * fragment are already consuming 2 descriptors.
1835 */ 1834 */
1836 sum = 1 - gso_size; 1835 sum = 1 - skb_shinfo(skb)->gso_size;
1837 1836
1838 /* Add size of frags 1 through 5 to create our initial sum */ 1837 /* Add size of frags 0 through 4 to create our initial sum */
1839 sum += skb_frag_size(++frag); 1838 sum += skb_frag_size(frag++);
1840 sum += skb_frag_size(++frag); 1839 sum += skb_frag_size(frag++);
1841 sum += skb_frag_size(++frag); 1840 sum += skb_frag_size(frag++);
1842 sum += skb_frag_size(++frag); 1841 sum += skb_frag_size(frag++);
1843 sum += skb_frag_size(++frag); 1842 sum += skb_frag_size(frag++);
1844 1843
1845 /* Walk through fragments adding latest fragment, testing it, and 1844 /* Walk through fragments adding latest fragment, testing it, and
1846 * then removing stale fragments from the sum. 1845 * then removing stale fragments from the sum.
1847 */ 1846 */
1848 stale = &skb_shinfo(skb)->frags[0]; 1847 stale = &skb_shinfo(skb)->frags[0];
1849 for (;;) { 1848 for (;;) {
1850 sum += skb_frag_size(++frag); 1849 sum += skb_frag_size(frag++);
1851 1850
1852 /* if sum is negative we failed to make sufficient progress */ 1851 /* if sum is negative we failed to make sufficient progress */
1853 if (sum < 0) 1852 if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
1857 if (!--nr_frags) 1856 if (!--nr_frags)
1858 break; 1857 break;
1859 1858
1860 sum -= skb_frag_size(++stale); 1859 sum -= skb_frag_size(stale++);
1861 } 1860 }
1862 1861
1863 return false; 1862 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
395 **/ 395 **/
396static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 396static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
397{ 397{
398 /* we can only support up to 8 data buffers for a single send */ 398 /* Both TSO and single send will work if count is less than 8 */
399 if (likely(count <= I40E_MAX_BUFFER_TXD)) 399 if (likely(count < I40E_MAX_BUFFER_TXD))
400 return false; 400 return false;
401 401
402 return __i40evf_chk_linearize(skb); 402 if (skb_is_gso(skb))
403 return __i40evf_chk_linearize(skb);
404
405 /* we can support up to 8 data buffers for a single send */
406 return count != I40E_MAX_BUFFER_TXD;
403} 407}
404#endif /* _I40E_TXRX_H_ */ 408#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
337 case ETH_SS_STATS: 337 case ETH_SS_STATS:
338 return bitmap_iterator_count(&it) + 338 return bitmap_iterator_count(&it) +
339 (priv->tx_ring_num * 2) + 339 (priv->tx_ring_num * 2) +
340 (priv->rx_ring_num * 2); 340 (priv->rx_ring_num * 3);
341 case ETH_SS_TEST: 341 case ETH_SS_TEST:
342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; 343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
404 for (i = 0; i < priv->rx_ring_num; i++) { 404 for (i = 0; i < priv->rx_ring_num; i++) {
405 data[index++] = priv->rx_ring[i]->packets; 405 data[index++] = priv->rx_ring[i]->packets;
406 data[index++] = priv->rx_ring[i]->bytes; 406 data[index++] = priv->rx_ring[i]->bytes;
407 data[index++] = priv->rx_ring[i]->dropped;
407 } 408 }
408 spin_unlock_bh(&priv->stats_lock); 409 spin_unlock_bh(&priv->stats_lock);
409 410
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
477 "rx%d_packets", i); 478 "rx%d_packets", i);
478 sprintf(data + (index++) * ETH_GSTRING_LEN, 479 sprintf(data + (index++) * ETH_GSTRING_LEN,
479 "rx%d_bytes", i); 480 "rx%d_bytes", i);
481 sprintf(data + (index++) * ETH_GSTRING_LEN,
482 "rx%d_dropped", i);
480 } 483 }
481 break; 484 break;
482 case ETH_SS_PRIV_FLAGS: 485 case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
158 u64 in_mod = reset << 8 | port; 158 u64 in_mod = reset << 8 | port;
159 int err; 159 int err;
160 int i, counter_index; 160 int i, counter_index;
161 unsigned long sw_rx_dropped = 0;
161 162
162 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 163 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
163 if (IS_ERR(mailbox)) 164 if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
180 for (i = 0; i < priv->rx_ring_num; i++) { 181 for (i = 0; i < priv->rx_ring_num; i++) {
181 stats->rx_packets += priv->rx_ring[i]->packets; 182 stats->rx_packets += priv->rx_ring[i]->packets;
182 stats->rx_bytes += priv->rx_ring[i]->bytes; 183 stats->rx_bytes += priv->rx_ring[i]->bytes;
184 sw_rx_dropped += priv->rx_ring[i]->dropped;
183 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; 185 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
184 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; 186 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
185 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; 187 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
236 &mlx4_en_stats->MCAST_prio_1, 238 &mlx4_en_stats->MCAST_prio_1,
237 NUM_PRIORITIES); 239 NUM_PRIORITIES);
238 stats->collisions = 0; 240 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 241 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
242 sw_rx_dropped;
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 243 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = 0; 244 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 245 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
61 gfp_t gfp = _gfp; 61 gfp_t gfp = _gfp;
62 62
63 if (order) 63 if (order)
64 gfp |= __GFP_COMP | __GFP_NOWARN; 64 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
65 page = alloc_pages(gfp, order); 65 page = alloc_pages(gfp, order);
66 if (likely(page)) 66 if (likely(page))
67 break; 67 break;
@@ -126,7 +126,9 @@ out:
126 dma_unmap_page(priv->ddev, page_alloc[i].dma, 126 dma_unmap_page(priv->ddev, page_alloc[i].dma,
127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE); 127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
128 page = page_alloc[i].page; 128 page = page_alloc[i].page;
129 set_page_count(page, 1); 129 /* Revert changes done by mlx4_alloc_pages */
130 page_ref_sub(page, page_alloc[i].page_size /
131 priv->frag_info[i].frag_stride - 1);
130 put_page(page); 132 put_page(page);
131 } 133 }
132 } 134 }
@@ -176,7 +178,9 @@ out:
176 dma_unmap_page(priv->ddev, page_alloc->dma, 178 dma_unmap_page(priv->ddev, page_alloc->dma,
177 page_alloc->page_size, PCI_DMA_FROMDEVICE); 179 page_alloc->page_size, PCI_DMA_FROMDEVICE);
178 page = page_alloc->page; 180 page = page_alloc->page;
179 set_page_count(page, 1); 181 /* Revert changes done by mlx4_alloc_pages */
182 page_ref_sub(page, page_alloc->page_size /
183 priv->frag_info[i].frag_stride - 1);
180 put_page(page); 184 put_page(page);
181 page_alloc->page = NULL; 185 page_alloc->page = NULL;
182 } 186 }
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
939 /* GRO not possible, complete processing here */ 943 /* GRO not possible, complete processing here */
940 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); 944 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
941 if (!skb) { 945 if (!skb) {
942 priv->stats.rx_dropped++; 946 ring->dropped++;
943 goto next; 947 goto next;
944 } 948 }
945 949
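The en_rx.c error paths stop forcing the refcount with set_page_count(page, 1), which would clobber references held elsewhere (e.g. by the stack); instead they subtract exactly the extra references mlx4_alloc_pages() took, one per fragment slice, and let put_page() release the last one. The arithmetic, with made-up sizes:

#include <stdio.h>

int main(void)
{
	/* hypothetical: a 16 KiB page carved into 2 KiB fragments */
	int page_size = 16384, frag_stride = 2048;
	int refs = page_size / frag_stride;	/* 8 refs after allocation */

	refs -= page_size / frag_stride - 1;	/* page_ref_sub(page, n - 1) */
	refs -= 1;				/* put_page(page) */
	printf("refs after unwind: %d\n", refs);	/* 0: page freed */
	return 0;
}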
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c0d7b7296236..a386f047c1af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
405 u32 packets = 0; 405 u32 packets = 0;
406 u32 bytes = 0; 406 u32 bytes = 0;
407 int factor = priv->cqe_factor; 407 int factor = priv->cqe_factor;
408 u64 timestamp = 0;
409 int done = 0; 408 int done = 0;
410 int budget = priv->tx_work_limit; 409 int budget = priv->tx_work_limit;
411 u32 last_nr_txbb; 410 u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
445 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 444 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
446 445
447 do { 446 do {
447 u64 timestamp = 0;
448
448 txbbs_skipped += last_nr_txbb; 449 txbbs_skipped += last_nr_txbb;
449 ring_index = (ring_index + last_nr_txbb) & size_mask; 450 ring_index = (ring_index + last_nr_txbb) & size_mask;
450 if (ring->tx_info[ring_index].ts_requested) 451
452 if (unlikely(ring->tx_info[ring_index].ts_requested))
451 timestamp = mlx4_en_get_cqe_ts(cqe); 453 timestamp = mlx4_en_get_cqe_ts(cqe);
452 454
453 /* free next descriptor */ 455 /* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
3172 return 0; 3172 return 0;
3173} 3173}
3174 3174
3175static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3176{
3177 struct pci_dev *pdev = dev->persist->pdev;
3178 int err = 0;
3179
3180 mutex_lock(&dev->persist->pci_status_mutex);
3181 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3182 err = pci_enable_device(pdev);
3183 if (!err)
3184 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3185 }
3186 mutex_unlock(&dev->persist->pci_status_mutex);
3187
3188 return err;
3189}
3190
3191static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3192{
3193 struct pci_dev *pdev = dev->persist->pdev;
3194
3195 mutex_lock(&dev->persist->pci_status_mutex);
3196 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3197 pci_disable_device(pdev);
3198 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3199 }
3200 mutex_unlock(&dev->persist->pci_status_mutex);
3201}
3202
3175static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3203static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3176 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3204 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3177 int reset_flow) 3205 int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3582 3610
3583 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3611 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3584 3612
3585 err = pci_enable_device(pdev); 3613 err = mlx4_pci_enable_device(&priv->dev);
3586 if (err) { 3614 if (err) {
3587 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3615 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3588 return err; 3616 return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
3715 pci_release_regions(pdev); 3743 pci_release_regions(pdev);
3716 3744
3717err_disable_pdev: 3745err_disable_pdev:
3718 pci_disable_device(pdev); 3746 mlx4_pci_disable_device(&priv->dev);
3719 pci_set_drvdata(pdev, NULL); 3747 pci_set_drvdata(pdev, NULL);
3720 return err; 3748 return err;
3721} 3749}
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3775 priv->pci_dev_data = id->driver_data; 3803 priv->pci_dev_data = id->driver_data;
3776 mutex_init(&dev->persist->device_state_mutex); 3804 mutex_init(&dev->persist->device_state_mutex);
3777 mutex_init(&dev->persist->interface_state_mutex); 3805 mutex_init(&dev->persist->interface_state_mutex);
3806 mutex_init(&dev->persist->pci_status_mutex);
3778 3807
3779 ret = devlink_register(devlink, &pdev->dev); 3808 ret = devlink_register(devlink, &pdev->dev);
3780 if (ret) 3809 if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
3923 } 3952 }
3924 3953
3925 pci_release_regions(pdev); 3954 pci_release_regions(pdev);
3926 pci_disable_device(pdev); 3955 mlx4_pci_disable_device(dev);
3927 devlink_unregister(devlink); 3956 devlink_unregister(devlink);
3928 kfree(dev->persist); 3957 kfree(dev->persist);
3929 devlink_free(devlink); 3958 devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4042 if (state == pci_channel_io_perm_failure) 4071 if (state == pci_channel_io_perm_failure)
4043 return PCI_ERS_RESULT_DISCONNECT; 4072 return PCI_ERS_RESULT_DISCONNECT;
4044 4073
4045 pci_disable_device(pdev); 4074 mlx4_pci_disable_device(persist->dev);
4046 return PCI_ERS_RESULT_NEED_RESET; 4075 return PCI_ERS_RESULT_NEED_RESET;
4047} 4076}
4048 4077
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4050{ 4079{
4051 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4080 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4052 struct mlx4_dev *dev = persist->dev; 4081 struct mlx4_dev *dev = persist->dev;
4053 struct mlx4_priv *priv = mlx4_priv(dev); 4082 int err;
4054 int ret;
4055 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4056 int total_vfs;
4057 4083
4058 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4084 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4059 ret = pci_enable_device(pdev); 4085 err = mlx4_pci_enable_device(dev);
4060 if (ret) { 4086 if (err) {
4061 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 4087 mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4062 return PCI_ERS_RESULT_DISCONNECT; 4088 return PCI_ERS_RESULT_DISCONNECT;
4063 } 4089 }
4064 4090
4065 pci_set_master(pdev); 4091 pci_set_master(pdev);
4066 pci_restore_state(pdev); 4092 pci_restore_state(pdev);
4067 pci_save_state(pdev); 4093 pci_save_state(pdev);
4094 return PCI_ERS_RESULT_RECOVERED;
4095}
4068 4096
4097static void mlx4_pci_resume(struct pci_dev *pdev)
4098{
4099 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4100 struct mlx4_dev *dev = persist->dev;
4101 struct mlx4_priv *priv = mlx4_priv(dev);
4102 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4103 int total_vfs;
4104 int err;
4105
4106 mlx4_err(dev, "%s was called\n", __func__);
4069 total_vfs = dev->persist->num_vfs; 4107 total_vfs = dev->persist->num_vfs;
4070 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4108 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4071 4109
4072 mutex_lock(&persist->interface_state_mutex); 4110 mutex_lock(&persist->interface_state_mutex);
4073 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4111 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4074 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4112 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4075 priv, 1); 4113 priv, 1);
4076 if (ret) { 4114 if (err) {
4077 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 4115 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4078 __func__, ret); 4116 __func__, err);
4079 goto end; 4117 goto end;
4080 } 4118 }
4081 4119
4082 ret = restore_current_port_types(dev, dev->persist-> 4120 err = restore_current_port_types(dev, dev->persist->
4083 curr_port_type, dev->persist-> 4121 curr_port_type, dev->persist->
4084 curr_port_poss_type); 4122 curr_port_poss_type);
4085 if (ret) 4123 if (err)
4086 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 4124 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4087 } 4125 }
4088end: 4126end:
4089 mutex_unlock(&persist->interface_state_mutex); 4127 mutex_unlock(&persist->interface_state_mutex);
4090 4128
4091 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
4092} 4129}
4093 4130
4094static void mlx4_shutdown(struct pci_dev *pdev) 4131static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
4105static const struct pci_error_handlers mlx4_err_handler = { 4142static const struct pci_error_handlers mlx4_err_handler = {
4106 .error_detected = mlx4_pci_err_detected, 4143 .error_detected = mlx4_pci_err_detected,
4107 .slot_reset = mlx4_pci_slot_reset, 4144 .slot_reset = mlx4_pci_slot_reset,
4145 .resume = mlx4_pci_resume,
4108}; 4146};
4109 4147
4110static struct pci_driver mlx4_driver = { 4148static struct pci_driver mlx4_driver = {
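The new mlx4_pci_enable_device()/mlx4_pci_disable_device() helpers track the enable state under a dedicated mutex, making the calls idempotent so the AER callbacks (error_detected, slot_reset, the new resume) and normal probe/remove teardown cannot double-enable or double-disable the function. A userspace sketch of the guard, with a pthread mutex standing in for the kernel's pci_status_mutex:

#include <pthread.h>
#include <stdio.h>

enum pci_status { PCI_STATUS_DISABLED, PCI_STATUS_ENABLED };

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static enum pci_status pci_status = PCI_STATUS_DISABLED;

static void enable_device(void)
{
	pthread_mutex_lock(&status_lock);
	if (pci_status == PCI_STATUS_DISABLED) {
		printf("pci_enable_device()\n");	/* reached once */
		pci_status = PCI_STATUS_ENABLED;
	}
	pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	enable_device();
	enable_device();	/* second call is a no-op */
	return 0;
}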
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; 586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
587 int init_port_ref[MLX4_MAX_PORTS + 1]; 587 int init_port_ref[MLX4_MAX_PORTS + 1];
588 u16 max_mtu[MLX4_MAX_PORTS + 1]; 588 u16 max_mtu[MLX4_MAX_PORTS + 1];
589 u8 pptx;
590 u8 pprx;
589 int disable_mcast_ref[MLX4_MAX_PORTS + 1]; 591 int disable_mcast_ref[MLX4_MAX_PORTS + 1];
590 struct mlx4_resource_tracker res_tracker; 592 struct mlx4_resource_tracker res_tracker;
591 struct workqueue_struct *comm_wq; 593 struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
323 unsigned long csum_ok; 323 unsigned long csum_ok;
324 unsigned long csum_none; 324 unsigned long csum_none;
325 unsigned long csum_complete; 325 unsigned long csum_complete;
326 unsigned long dropped;
326 int hwtstamp_rx_filter; 327 int hwtstamp_rx_filter;
327 cpumask_var_t affinity_mask; 328 cpumask_var_t affinity_mask;
328}; 329};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
1317 } 1317 }
1318 1318
1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
1320 /* Slave cannot change Global Pause configuration */
1321 if (slave != mlx4_master_func_num(dev) &&
1322 ((gen_context->pptx != master->pptx) ||
1323 (gen_context->pprx != master->pprx))) {
1324 gen_context->pptx = master->pptx;
1325 gen_context->pprx = master->pprx;
1326 mlx4_warn(dev,
1327 "denying Global Pause change for slave:%d\n",
1328 slave);
1329 } else {
1330 master->pptx = gen_context->pptx;
1331 master->pprx = gen_context->pprx;
1332 }
1320 break; 1333 break;
1321 case MLX4_SET_PORT_GID_TABLE: 1334 case MLX4_SET_PORT_GID_TABLE:
1322 /* change to MULTIPLE entries: number of guest's gids 1335 /* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 879e6276c473..e80ce94b5dcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -609,7 +609,7 @@ enum mlx5e_link_mode {
609 MLX5E_100GBASE_KR4 = 22, 609 MLX5E_100GBASE_KR4 = 22,
610 MLX5E_100GBASE_LR4 = 23, 610 MLX5E_100GBASE_LR4 = 23,
611 MLX5E_100BASE_TX = 24, 611 MLX5E_100BASE_TX = 24,
612 MLX5E_100BASE_T = 25, 612 MLX5E_1000BASE_T = 25,
613 MLX5E_10GBASE_T = 26, 613 MLX5E_10GBASE_T = 26,
614 MLX5E_25GBASE_CR = 27, 614 MLX5E_25GBASE_CR = 27,
615 MLX5E_25GBASE_KR = 28, 615 MLX5E_25GBASE_KR = 28,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68834b715f6c..3476ab844634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@ static const struct {
138 [MLX5E_100BASE_TX] = { 138 [MLX5E_100BASE_TX] = {
139 .speed = 100, 139 .speed = 100,
140 }, 140 },
141 [MLX5E_100BASE_T] = { 141 [MLX5E_1000BASE_T] = {
142 .supported = SUPPORTED_100baseT_Full, 142 .supported = SUPPORTED_1000baseT_Full,
143 .advertised = ADVERTISED_100baseT_Full, 143 .advertised = ADVERTISED_1000baseT_Full,
144 .speed = 100, 144 .speed = 1000,
145 }, 145 },
146 [MLX5E_10GBASE_T] = { 146 [MLX5E_10GBASE_T] = {
147 .supported = SUPPORTED_10000baseT_Full, 147 .supported = SUPPORTED_10000baseT_Full,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e0adb604f461..67d548b70e14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1404 return 0; 1404 return 0;
1405} 1405}
1406 1406
1407static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1407static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
1408{ 1408{
1409 struct mlx5e_priv *priv = netdev_priv(netdev);
1410 struct mlx5_core_dev *mdev = priv->mdev; 1409 struct mlx5_core_dev *mdev = priv->mdev;
1411 int hw_mtu; 1410 u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
1412 int err; 1411 int err;
1413 1412
1414 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); 1413 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
1415 if (err) 1414 if (err)
1416 return err; 1415 return err;
1417 1416
1418 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1417 /* Update vport context MTU */
1418 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
1419 return 0;
1420}
1419 1421
1420 if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) 1422static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
1421 netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", 1423{
1422 __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); 1424 struct mlx5_core_dev *mdev = priv->mdev;
1425 u16 hw_mtu = 0;
1426 int err;
1423 1427
1424 netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); 1428 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
1429 if (err || !hw_mtu) /* fallback to port oper mtu */
1430 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1431
1432 *mtu = MLX5E_HW2SW_MTU(hw_mtu);
1433}
1434
1435static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1436{
1437 struct mlx5e_priv *priv = netdev_priv(netdev);
1438 u16 mtu;
1439 int err;
1440
1441 err = mlx5e_set_mtu(priv, netdev->mtu);
1442 if (err)
1443 return err;
1444
1445 mlx5e_query_mtu(priv, &mtu);
1446 if (mtu != netdev->mtu)
1447 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
1448 __func__, mtu, netdev->mtu);
1449
1450 netdev->mtu = mtu;
1425 return 0; 1451 return 0;
1426} 1452}
1427 1453
@@ -1999,22 +2025,27 @@ static int mlx5e_set_features(struct net_device *netdev,
1999 return err; 2025 return err;
2000} 2026}
2001 2027
2028#define MLX5_HW_MIN_MTU 64
2029#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
2030
2002static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) 2031static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2003{ 2032{
2004 struct mlx5e_priv *priv = netdev_priv(netdev); 2033 struct mlx5e_priv *priv = netdev_priv(netdev);
2005 struct mlx5_core_dev *mdev = priv->mdev; 2034 struct mlx5_core_dev *mdev = priv->mdev;
2006 bool was_opened; 2035 bool was_opened;
2007 int max_mtu; 2036 u16 max_mtu;
2037 u16 min_mtu;
2008 int err = 0; 2038 int err = 0;
2009 2039
2010 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 2040 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2011 2041
2012 max_mtu = MLX5E_HW2SW_MTU(max_mtu); 2042 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
 2043 min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);
2013 2044
2014 if (new_mtu > max_mtu) { 2045 if (new_mtu > max_mtu || new_mtu < min_mtu) {
2015 netdev_err(netdev, 2046 netdev_err(netdev,
2016 "%s: Bad MTU (%d) > (%d) Max\n", 2047 "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
2017 __func__, new_mtu, max_mtu); 2048 __func__, new_mtu, min_mtu, max_mtu);
2018 return -EINVAL; 2049 return -EINVAL;
2019 } 2050 }
2020 2051
@@ -2602,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2602 schedule_work(&priv->set_rx_mode_work); 2633 schedule_work(&priv->set_rx_mode_work);
2603 mlx5e_disable_async_events(priv); 2634 mlx5e_disable_async_events(priv);
2604 flush_scheduled_work(); 2635 flush_scheduled_work();
2605 unregister_netdev(netdev); 2636 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
2637 netif_device_detach(netdev);
2638 mutex_lock(&priv->state_lock);
2639 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
2640 mlx5e_close_locked(netdev);
2641 mutex_unlock(&priv->state_lock);
2642 } else {
2643 unregister_netdev(netdev);
2644 }
2645
2606 mlx5e_tc_cleanup(priv); 2646 mlx5e_tc_cleanup(priv);
2607 mlx5e_vxlan_cleanup(priv); 2647 mlx5e_vxlan_cleanup(priv);
2608 mlx5e_destroy_flow_tables(priv); 2648 mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2615 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 2655 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
2616 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 2656 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
2617 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 2657 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
2618 free_netdev(netdev); 2658
2659 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
2660 free_netdev(netdev);
2619} 2661}
2620 2662
2621static void *mlx5e_get_netdev(void *vpriv) 2663static void *mlx5e_get_netdev(void *vpriv)
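The teardown hunks above key everything on a SHUTDOWN flag: while the system is halting the netdev is only detached and closed, and the unregister/free pair is reserved for normal removal. A compact userspace model of that branching, assuming stub types; struct dev_ctx and the helpers stand in for the mlx5e private data and the netdev calls named in the diff:

#include <stdbool.h>
#include <stdlib.h>

struct dev_ctx {
	bool shutdown;	/* MLX5_INTERFACE_STATE_SHUTDOWN was set */
	bool opened;	/* MLX5E_STATE_OPENED */
	void *netdev;
};

static void destroy_netdev(struct dev_ctx *ctx)
{
	if (ctx->shutdown) {
		/* kernel is halting: detach and close, nothing more */
		ctx->opened = false;	/* mlx5e_close_locked() under lock */
	} else {
		/* normal removal: unregister_netdev() would run here */
	}

	/* ... release flow tables, PD, UAR, etc. on both paths ... */

	if (!ctx->shutdown)
		free(ctx->netdev);	/* free_netdev() */
}

int main(void)
{
	struct dev_ctx ctx = { .shutdown = true, .opened = true,
			       .netdev = malloc(1) };

	destroy_netdev(&ctx);
	free(ctx.netdev);	/* the shutdown path left it allocated */
	return 0;
}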
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5121be4675d1..89cce97d46c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1065,33 +1065,6 @@ unlock_fg:
1065 return rule; 1065 return rule;
1066} 1066}
1067 1067
1068static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
1069 u8 match_criteria_enable,
1070 u32 *match_criteria,
1071 u32 *match_value,
1072 u8 action,
1073 u32 flow_tag,
1074 struct mlx5_flow_destination *dest)
1075{
1076 struct mlx5_flow_rule *rule;
1077 struct mlx5_flow_group *g;
1078
1079 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1080 if (IS_ERR(g))
1081 return (void *)g;
1082
1083 rule = add_rule_fg(g, match_value,
1084 action, flow_tag, dest);
1085 if (IS_ERR(rule)) {
1086 /* Remove assumes refcount > 0 and autogroup creates a group
1087 * with a refcount = 0.
1088 */
1089 tree_get_node(&g->node);
1090 tree_remove_node(&g->node);
1091 }
1092 return rule;
1093}
1094
1095static struct mlx5_flow_rule * 1068static struct mlx5_flow_rule *
1096_mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1069_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1097 u8 match_criteria_enable, 1070 u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1119 goto unlock; 1092 goto unlock;
1120 } 1093 }
1121 1094
1122 rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, 1095 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1123 match_value, action, flow_tag, dest); 1096 if (IS_ERR(g)) {
1097 rule = (void *)g;
1098 goto unlock;
1099 }
1100
1101 rule = add_rule_fg(g, match_value,
1102 action, flow_tag, dest);
1103 if (IS_ERR(rule)) {
1104 /* Remove assumes refcount > 0 and autogroup creates a group
1105 * with a refcount = 0.
1106 */
1107 unlock_ref_node(&ft->node);
1108 tree_get_node(&g->node);
1109 tree_remove_node(&g->node);
1110 return rule;
1111 }
1124unlock: 1112unlock:
1125 unlock_ref_node(&ft->node); 1113 unlock_ref_node(&ft->node);
1126 return rule; 1114 return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1288{ 1276{
1289 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; 1277 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
1290 int prio; 1278 int prio;
1291 static struct fs_prio *fs_prio; 1279 struct fs_prio *fs_prio;
1292 struct mlx5_flow_namespace *ns; 1280 struct mlx5_flow_namespace *ns;
1293 1281
1294 if (!root_ns) 1282 if (!root_ns)
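The last fs_core.c hunk is a one-word fix: fs_prio was declared static inside mlx5_get_flow_namespace, making it a single slot shared by every caller, so concurrent lookups could observe each other's value. A self-contained illustration of why a function-scope static pointer is a hazard (toy code, not driver code):

static int *lookup_racy(int *table, int idx)
{
	static int *slot;	/* one shared slot for every caller */

	slot = &table[idx];	/* a concurrent call can overwrite this */
	return slot;
}

static int *lookup_fixed(int *table, int idx)
{
	int *slot = &table[idx];	/* per-call storage, as in the fix */

	return slot;
}

int main(void)
{
	int t[2] = { 1, 2 };

	return *lookup_racy(t, 0) + *lookup_fixed(t, 1) == 3 ? 0 : 1;
}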
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f3b2fae4991..6892746fd10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
966 int err; 966 int err;
967 967
968 mutex_lock(&dev->intf_state_mutex); 968 mutex_lock(&dev->intf_state_mutex);
969 if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { 969 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", 970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
971 __func__); 971 __func__);
972 goto out; 972 goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1133 if (err) 1133 if (err)
1134 pr_info("failed request module on %s\n", MLX5_IB_MOD); 1134 pr_info("failed request module on %s\n", MLX5_IB_MOD);
1135 1135
1136 dev->interface_state = MLX5_INTERFACE_STATE_UP; 1136 clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1137 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1137out: 1138out:
1138 mutex_unlock(&dev->intf_state_mutex); 1139 mutex_unlock(&dev->intf_state_mutex);
1139 1140
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1207 } 1208 }
1208 1209
1209 mutex_lock(&dev->intf_state_mutex); 1210 mutex_lock(&dev->intf_state_mutex);
1210 if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { 1211 if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
1211 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1212 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
1212 __func__); 1213 __func__);
1213 goto out; 1214 goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1241 mlx5_cmd_cleanup(dev); 1242 mlx5_cmd_cleanup(dev);
1242 1243
1243out: 1244out:
1244 dev->interface_state = MLX5_INTERFACE_STATE_DOWN; 1245 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1246 set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1245 mutex_unlock(&dev->intf_state_mutex); 1247 mutex_unlock(&dev->intf_state_mutex);
1246 return err; 1248 return err;
1247} 1249}
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
1452 .resume = mlx5_pci_resume 1454 .resume = mlx5_pci_resume
1453}; 1455};
1454 1456
1457static void shutdown(struct pci_dev *pdev)
1458{
1459 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1460 struct mlx5_priv *priv = &dev->priv;
1461
1462 dev_info(&pdev->dev, "Shutdown was called\n");
1463 /* Notify mlx5 clients that the kernel is being shut down */
1464 set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
1465 mlx5_unload_one(dev, priv);
1466 mlx5_pci_disable_device(dev);
1467}
1468
1455static const struct pci_device_id mlx5_core_pci_table[] = { 1469static const struct pci_device_id mlx5_core_pci_table[] = {
1456 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ 1470 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
1457 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ 1471 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
1459 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ 1473 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
1460 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ 1474 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
1461 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ 1475 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
1476 { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
1477 { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
1462 { 0, } 1478 { 0, }
1463}; 1479};
1464 1480
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
1469 .id_table = mlx5_core_pci_table, 1485 .id_table = mlx5_core_pci_table,
1470 .probe = init_one, 1486 .probe = init_one,
1471 .remove = remove_one, 1487 .remove = remove_one,
1488 .shutdown = shutdown,
1472 .err_handler = &mlx5_err_handler, 1489 .err_handler = &mlx5_err_handler,
1473 .sriov_configure = mlx5_core_sriov_configure, 1490 .sriov_configure = mlx5_core_sriov_configure,
1474}; 1491};
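The main.c hunks above replace a single-valued interface_state with independent bits, which is what lets the new shutdown() hook raise a SHUTDOWN flag without disturbing the UP/DOWN transitions made under intf_state_mutex. A userspace sketch of the bit discipline; bit numbers are illustrative, and unlike the kernel's test_bit/set_bit these toy helpers are not atomic:

#include <stdio.h>

enum { STATE_UP, STATE_DOWN, STATE_SHUTDOWN };	/* illustrative bits */

static unsigned long intf_state;

static int  test_bit_(int nr, const unsigned long *w) { return (*w >> nr) & 1; }
static void set_bit_(int nr, unsigned long *w)        { *w |= 1UL << nr; }
static void clear_bit_(int nr, unsigned long *w)      { *w &= ~(1UL << nr); }

int main(void)
{
	set_bit_(STATE_DOWN, &intf_state);

	/* load_one(): DOWN -> UP */
	clear_bit_(STATE_DOWN, &intf_state);
	set_bit_(STATE_UP, &intf_state);

	/* shutdown(): an independent flag, UP/DOWN untouched */
	set_bit_(STATE_SHUTDOWN, &intf_state);

	printf("up=%d shutdown=%d\n",
	       test_bit_(STATE_UP, &intf_state),
	       test_bit_(STATE_SHUTDOWN, &intf_state));
	return 0;
}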
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae378c575deb..53cc1e2c693b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
247} 247}
248EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); 248EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
249 249
250static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, 250static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
251 int *max_mtu, int *oper_mtu, u8 port) 251 u16 *max_mtu, u16 *oper_mtu, u8 port)
252{ 252{
253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; 254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); 268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
269} 269}
270 270
271int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) 271int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
272{ 272{
273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; 274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
283} 283}
284EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); 284EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
285 285
286void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, 286void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
287 u8 port) 287 u8 port)
288{ 288{
289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); 289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
290} 290}
291EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); 291EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
292 292
293void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 293void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
294 u8 port) 294 u8 port)
295{ 295{
296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); 296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bd518405859e..b69dadcfb897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
196} 196}
197EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); 197EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
198 198
199int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
200{
201 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
202 u32 *out;
203 int err;
204
205 out = mlx5_vzalloc(outlen);
206 if (!out)
207 return -ENOMEM;
208
209 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
210 if (!err)
211 *mtu = MLX5_GET(query_nic_vport_context_out, out,
212 nic_vport_context.mtu);
213
214 kvfree(out);
215 return err;
216}
217EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
218
219int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
220{
221 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
222 void *in;
223 int err;
224
225 in = mlx5_vzalloc(inlen);
226 if (!in)
227 return -ENOMEM;
228
229 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
230 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
231
232 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
233
234 kvfree(in);
235 return err;
236}
237EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
238
199int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, 239int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
200 u32 vport, 240 u32 vport,
201 enum mlx5_list_type list_type, 241 enum mlx5_list_type list_type,
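Both new vport helpers above follow the same command pattern: allocate a zeroed command buffer, set a field-select bit alongside the new value so firmware knows which field is meant, execute, and free. A generic sketch of that shape; struct cmd and cmd_exec are stand-ins for the MLX5_SET()-built command layout and mlx5_modify_nic_vport_context():

#include <stdlib.h>

struct cmd {
	unsigned int field_select;	/* which fields are valid */
	unsigned int mtu;
};

static int cmd_exec(struct cmd *c) { (void)c; return 0; }	/* firmware call */

static int modify_vport_mtu(unsigned int mtu)
{
	struct cmd *in = calloc(1, sizeof(*in));	/* mlx5_vzalloc() */
	int err;

	if (!in)
		return -1;		/* -ENOMEM */

	in->field_select = 1u << 0;	/* "the MTU field is meant" */
	in->mtu = mtu;

	err = cmd_exec(in);
	free(in);			/* kvfree() */
	return err;
}

int main(void)
{
	return modify_vport_mtu(1500);
}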
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
750 return false; 750 return false;
751} 751}
752 752
753static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
754{
755 qed_chain_consume(&rxq->rx_bd_ring);
756 rxq->sw_rx_cons++;
757}
758
753/* This function reuses the buffer(from an offset) from 759/* This function reuses the buffer(from an offset) from
754 * consumer index to producer index in the bd ring 760 * consumer index to producer index in the bd ring
755 */ 761 */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
773 curr_cons->data = NULL; 779 curr_cons->data = NULL;
774} 780}
775 781
 782/* In case of allocation failures, reuse buffers from the
 783 * consumer index to supply buffers back to the firmware.
784 */
785static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
786 struct qede_dev *edev, u8 count)
787{
788 struct sw_rx_data *curr_cons;
789
790 for (; count > 0; count--) {
791 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
792 qede_reuse_page(edev, rxq, curr_cons);
793 qede_rx_bd_ring_consume(rxq);
794 }
795}
796
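qede_rx_bd_ring_consume() and qede_recycle_rx_bd_ring() above pair every hardware-chain consume with the software consumer index, and on allocation failure hand the same buffers straight back to the producer side. A userspace model of that consume/recycle discipline; ring size and contents are illustrative:

#include <stdio.h>

#define RING_SIZE 8			/* illustrative; power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	int buf[RING_SIZE];
	unsigned int cons, prod;	/* only ever masked on access */
};

static void ring_consume(struct ring *r)	/* qede_rx_bd_ring_consume() */
{
	r->cons++;
}

static void ring_recycle(struct ring *r, unsigned int count)
{
	while (count--) {
		/* hand the consumer's buffer back to the producer side */
		r->buf[r->prod++ & RING_MASK] = r->buf[r->cons & RING_MASK];
		ring_consume(r);
	}
}

int main(void)
{
	struct ring r = { .buf = { 1, 2, 3 }, .cons = 0, .prod = 3 };

	ring_recycle(&r, 2);
	printf("cons=%u prod=%u\n", r.cons, r.prod);
	return 0;
}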
776static inline int qede_realloc_rx_buffer(struct qede_dev *edev, 797static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
777 struct qede_rx_queue *rxq, 798 struct qede_rx_queue *rxq,
778 struct sw_rx_data *curr_cons) 799 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
781 curr_cons->page_offset += rxq->rx_buf_seg_size; 802 curr_cons->page_offset += rxq->rx_buf_seg_size;
782 803
783 if (curr_cons->page_offset == PAGE_SIZE) { 804 if (curr_cons->page_offset == PAGE_SIZE) {
784 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 805 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
 806 /* Since we failed to allocate a new buffer, the
 807 * current buffer can be used again.
808 */
809 curr_cons->page_offset -= rxq->rx_buf_seg_size;
810
785 return -ENOMEM; 811 return -ENOMEM;
812 }
786 813
787 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, 814 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
788 PAGE_SIZE, DMA_FROM_DEVICE); 815 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
901 len_on_bd); 928 len_on_bd);
902 929
903 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { 930 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
 904 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 931 /* Increment the page ref count so the buffer can be reused
 932 * on allocation failure and doesn't get freed with the SKB.
933 */
934 atomic_inc(&current_bd->data->_count);
905 goto out; 935 goto out;
906 } 936 }
907 937
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
915 return 0; 945 return 0;
916 946
917out: 947out:
948 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
949 qede_recycle_rx_bd_ring(rxq, edev, 1);
918 return -ENOMEM; 950 return -ENOMEM;
919} 951}
920 952
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
966 tpa_info->skb = netdev_alloc_skb(edev->ndev, 998 tpa_info->skb = netdev_alloc_skb(edev->ndev,
967 le16_to_cpu(cqe->len_on_first_bd)); 999 le16_to_cpu(cqe->len_on_first_bd));
968 if (unlikely(!tpa_info->skb)) { 1000 if (unlikely(!tpa_info->skb)) {
1001 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
969 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 1002 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
970 return; 1003 goto cons_buf;
971 } 1004 }
972 1005
973 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); 1006 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
990 /* This is needed in order to enable forwarding support */ 1023 /* This is needed in order to enable forwarding support */
991 qede_set_gro_params(edev, tpa_info->skb, cqe); 1024 qede_set_gro_params(edev, tpa_info->skb, cqe);
992 1025
1026cons_buf: /* We still need to handle bd_len_list to consume buffers */
993 if (likely(cqe->ext_bd_len_list[0])) 1027 if (likely(cqe->ext_bd_len_list[0]))
994 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, 1028 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
995 le16_to_cpu(cqe->ext_bd_len_list[0])); 1029 le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
1007 const struct iphdr *iph = ip_hdr(skb); 1041 const struct iphdr *iph = ip_hdr(skb);
1008 struct tcphdr *th; 1042 struct tcphdr *th;
1009 1043
1010 skb_set_network_header(skb, 0);
1011 skb_set_transport_header(skb, sizeof(struct iphdr)); 1044 skb_set_transport_header(skb, sizeof(struct iphdr));
1012 th = tcp_hdr(skb); 1045 th = tcp_hdr(skb);
1013 1046
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
1022 struct ipv6hdr *iph = ipv6_hdr(skb); 1055 struct ipv6hdr *iph = ipv6_hdr(skb);
1023 struct tcphdr *th; 1056 struct tcphdr *th;
1024 1057
1025 skb_set_network_header(skb, 0);
1026 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 1058 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1027 th = tcp_hdr(skb); 1059 th = tcp_hdr(skb);
1028 1060
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
1037 struct sk_buff *skb, 1069 struct sk_buff *skb,
1038 u16 vlan_tag) 1070 u16 vlan_tag)
1039{ 1071{
 1072 /* FW can send a single MTU-sized packet from the GRO flow
 1073 * (due to aggregation timeout, last segment, etc.) that is
 1074 * not really a GRO packet. If the skb has zero frags,
 1075 * simply push it up the stack as a non-GSO skb.
1076 */
1077 if (unlikely(!skb->data_len)) {
1078 skb_shinfo(skb)->gso_type = 0;
1079 skb_shinfo(skb)->gso_size = 0;
1080 goto send_skb;
1081 }
1082
1040#ifdef CONFIG_INET 1083#ifdef CONFIG_INET
1041 if (skb_shinfo(skb)->gso_size) { 1084 if (skb_shinfo(skb)->gso_size) {
1085 skb_set_network_header(skb, 0);
1086
1042 switch (skb->protocol) { 1087 switch (skb->protocol) {
1043 case htons(ETH_P_IP): 1088 case htons(ETH_P_IP):
1044 qede_gro_ip_csum(skb); 1089 qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
1053 } 1098 }
1054 } 1099 }
1055#endif 1100#endif
1101
1102send_skb:
1056 skb_record_rx_queue(skb, fp->rss_id); 1103 skb_record_rx_queue(skb, fp->rss_id);
1057 qede_skb_receive(edev, fp, skb, vlan_tag); 1104 qede_skb_receive(edev, fp, skb, vlan_tag);
1058} 1105}
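The guard added above handles firmware ending a GRO aggregation after a single MTU-sized packet: such an skb has no paged fragments, so its GSO metadata is cleared and it is delivered as a plain packet. A sketch of the decision, with struct pkt standing in for the skb fields used:

struct pkt {	/* stand-in for the skb fields used here */
	unsigned int data_len;	/* bytes held in paged fragments */
	unsigned int gso_size;
	unsigned int gso_type;
};

static void gro_receive(struct pkt *p)
{
	if (p->data_len == 0) {
		/* single MTU-sized packet: not really GRO */
		p->gso_size = 0;
		p->gso_type = 0;
	}
	/* ... checksum fixups would run only when gso_size != 0 ... */
	/* deliver to the stack either way */
}

int main(void)
{
	struct pkt p = { .data_len = 0, .gso_size = 1448, .gso_type = 1 };

	gro_receive(&p);
	return (int)p.gso_size;	/* 0: handed up as a plain packet */
}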
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1244 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", 1291 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1245 sw_comp_cons, parse_flag); 1292 sw_comp_cons, parse_flag);
1246 rxq->rx_hw_errors++; 1293 rxq->rx_hw_errors++;
1247 qede_reuse_page(edev, rxq, sw_rx_data); 1294 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1248 goto next_rx; 1295 goto next_cqe;
1249 } 1296 }
1250 1297
1251 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); 1298 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1252 if (unlikely(!skb)) { 1299 if (unlikely(!skb)) {
1253 DP_NOTICE(edev, 1300 DP_NOTICE(edev,
1254 "Build_skb failed, dropping incoming packet\n"); 1301 "Build_skb failed, dropping incoming packet\n");
1255 qede_reuse_page(edev, rxq, sw_rx_data); 1302 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1256 rxq->rx_alloc_errors++; 1303 rxq->rx_alloc_errors++;
1257 goto next_rx; 1304 goto next_cqe;
1258 } 1305 }
1259 1306
1260 /* Copy data into SKB */ 1307 /* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1288 if (unlikely(qede_realloc_rx_buffer(edev, rxq, 1335 if (unlikely(qede_realloc_rx_buffer(edev, rxq,
1289 sw_rx_data))) { 1336 sw_rx_data))) {
1290 DP_ERR(edev, "Failed to allocate rx buffer\n"); 1337 DP_ERR(edev, "Failed to allocate rx buffer\n");
 1338 /* Increment the page ref count so the buffer can be
 1339 * reused on allocation failure and doesn't get freed
 1340 * while the SKB is freed.
1341 */
1342
1343 atomic_inc(&sw_rx_data->data->_count);
1291 rxq->rx_alloc_errors++; 1344 rxq->rx_alloc_errors++;
1345 qede_recycle_rx_bd_ring(rxq, edev,
1346 fp_cqe->bd_num);
1347 dev_kfree_skb_any(skb);
1292 goto next_cqe; 1348 goto next_cqe;
1293 } 1349 }
1294 } 1350 }
1295 1351
1352 qede_rx_bd_ring_consume(rxq);
1353
1296 if (fp_cqe->bd_num != 1) { 1354 if (fp_cqe->bd_num != 1) {
1297 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); 1355 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
1298 u8 num_frags; 1356 u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1303 num_frags--) { 1361 num_frags--) {
1304 u16 cur_size = pkt_len > rxq->rx_buf_size ? 1362 u16 cur_size = pkt_len > rxq->rx_buf_size ?
1305 rxq->rx_buf_size : pkt_len; 1363 rxq->rx_buf_size : pkt_len;
1364 if (unlikely(!cur_size)) {
1365 DP_ERR(edev,
1366 "Still got %d BDs for mapping jumbo, but length became 0\n",
1367 num_frags);
1368 qede_recycle_rx_bd_ring(rxq, edev,
1369 num_frags);
1370 dev_kfree_skb_any(skb);
1371 goto next_cqe;
1372 }
1306 1373
1307 WARN_ONCE(!cur_size, 1374 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
1308 "Still got %d BDs for mapping jumbo, but length became 0\n", 1375 qede_recycle_rx_bd_ring(rxq, edev,
1309 num_frags); 1376 num_frags);
1310 1377 dev_kfree_skb_any(skb);
1311 if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
1312 goto next_cqe; 1378 goto next_cqe;
1379 }
1313 1380
1314 rxq->sw_rx_cons++;
1315 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; 1381 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1316 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; 1382 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1317 qed_chain_consume(&rxq->rx_bd_ring); 1383 qede_rx_bd_ring_consume(rxq);
1384
1318 dma_unmap_page(&edev->pdev->dev, 1385 dma_unmap_page(&edev->pdev->dev,
1319 sw_rx_data->mapping, 1386 sw_rx_data->mapping,
1320 PAGE_SIZE, DMA_FROM_DEVICE); 1387 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1330 pkt_len -= cur_size; 1397 pkt_len -= cur_size;
1331 } 1398 }
1332 1399
1333 if (pkt_len) 1400 if (unlikely(pkt_len))
1334 DP_ERR(edev, 1401 DP_ERR(edev,
1335 "Mapped all BDs of jumbo, but still have %d bytes\n", 1402 "Mapped all BDs of jumbo, but still have %d bytes\n",
1336 pkt_len); 1403 pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1349 skb_record_rx_queue(skb, fp->rss_id); 1416 skb_record_rx_queue(skb, fp->rss_id);
1350 1417
1351 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); 1418 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
1352
1353 qed_chain_consume(&rxq->rx_bd_ring);
1354next_rx:
1355 rxq->sw_rx_cons++;
1356next_rx_only: 1419next_rx_only:
1357 rx_pkt++; 1420 rx_pkt++;
1358 1421
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
2257 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 2320 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2258 struct sw_rx_data *replace_buf = &tpa_info->replace_buf; 2321 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2259 2322
2260 if (replace_buf) { 2323 if (replace_buf->data) {
2261 dma_unmap_page(&edev->pdev->dev, 2324 dma_unmap_page(&edev->pdev->dev,
2262 dma_unmap_addr(replace_buf, mapping), 2325 dma_unmap_addr(replace_buf, mapping),
2263 PAGE_SIZE, DMA_FROM_DEVICE); 2326 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
2377static int qede_alloc_mem_rxq(struct qede_dev *edev, 2440static int qede_alloc_mem_rxq(struct qede_dev *edev,
2378 struct qede_rx_queue *rxq) 2441 struct qede_rx_queue *rxq)
2379{ 2442{
2380 int i, rc, size, num_allocated; 2443 int i, rc, size;
2381 2444
2382 rxq->num_rx_buffers = edev->q_num_rx_buffers; 2445 rxq->num_rx_buffers = edev->q_num_rx_buffers;
2383 2446
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2394 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 2457 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
2395 if (!rxq->sw_rx_ring) { 2458 if (!rxq->sw_rx_ring) {
2396 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 2459 DP_ERR(edev, "Rx buffers ring allocation failed\n");
2460 rc = -ENOMEM;
2397 goto err; 2461 goto err;
2398 } 2462 }
2399 2463
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2421 /* Allocate buffers for the Rx ring */ 2485 /* Allocate buffers for the Rx ring */
2422 for (i = 0; i < rxq->num_rx_buffers; i++) { 2486 for (i = 0; i < rxq->num_rx_buffers; i++) {
2423 rc = qede_alloc_rx_buffer(edev, rxq); 2487 rc = qede_alloc_rx_buffer(edev, rxq);
2424 if (rc) 2488 if (rc) {
2425 break; 2489 DP_ERR(edev,
2426 } 2490 "Rx buffers allocation failed at index %d\n", i);
2427 num_allocated = i; 2491 goto err;
2428 if (!num_allocated) { 2492 }
2429 DP_ERR(edev, "Rx buffers allocation failed\n");
2430 goto err;
2431 } else if (num_allocated < rxq->num_rx_buffers) {
2432 DP_NOTICE(edev,
2433 "Allocated less buffers than desired (%d allocated)\n",
2434 num_allocated);
2435 } 2493 }
2436 2494
2437 qede_alloc_sge_mem(edev, rxq); 2495 rc = qede_alloc_sge_mem(edev, rxq);
2438
2439 return 0;
2440
2441err: 2496err:
2442 qede_free_mem_rxq(edev, rxq); 2497 return rc;
2443 return -ENOMEM;
2444} 2498}
2445 2499
2446static void qede_free_mem_txq(struct qede_dev *edev, 2500static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
2523 } 2577 }
2524 2578
2525 return 0; 2579 return 0;
2526
2527err: 2580err:
2528 qede_free_mem_fp(edev, fp); 2581 return rc;
2529 return -ENOMEM;
2530} 2582}
2531 2583
2532static void qede_free_mem_load(struct qede_dev *edev) 2584static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
2549 struct qede_fastpath *fp = &edev->fp_array[rss_id]; 2601 struct qede_fastpath *fp = &edev->fp_array[rss_id];
2550 2602
2551 rc = qede_alloc_mem_fp(edev, fp); 2603 rc = qede_alloc_mem_fp(edev, fp);
2552 if (rc) 2604 if (rc) {
2553 break;
2554 }
2555
2556 if (rss_id != QEDE_RSS_CNT(edev)) {
2557 /* Failed allocating memory for all the queues */
2558 if (!rss_id) {
2559 DP_ERR(edev, 2605 DP_ERR(edev,
2560 "Failed to allocate memory for the leading queue\n"); 2606 "Failed to allocate memory for fastpath - rss id = %d\n",
2561 rc = -ENOMEM; 2607 rss_id);
2562 } else { 2608 qede_free_mem_load(edev);
2563 DP_NOTICE(edev, 2609 return rc;
2564 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
2565 QEDE_RSS_CNT(edev), rss_id);
2566 } 2610 }
2567 edev->num_rss = rss_id;
2568 } 2611 }
2569 2612
2570 return 0; 2613 return 0;
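The allocation rework above removes the "run with fewer buffers/queues than requested" fallback: the first failure now rolls back everything already allocated and propagates the error. A minimal userspace version of that all-or-nothing loop; alloc_one/alloc_all stand in for qede_alloc_mem_fp() and its caller:

#include <stdlib.h>

static int alloc_one(void **slot)	/* qede_alloc_mem_fp() stand-in */
{
	*slot = malloc(64);
	return *slot ? 0 : -1;
}

static int alloc_all(void **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (alloc_one(&slots[i])) {
			while (i--)	/* full rollback, no partial mode */
				free(slots[i]);
			return -1;	/* -ENOMEM */
		}
	}
	return 0;
}

int main(void)
{
	void *slots[4];
	int i, err = alloc_all(slots, 4);

	if (!err)
		for (i = 0; i < 4; i++)
			free(slots[i]);
	return err;
}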
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1e6bbc..caf6ddb7ea76 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 63 40#define _QLCNIC_LINUX_SUBVERSION 64
41#define QLCNIC_LINUX_VERSIONID "5.3.63" 41#define QLCNIC_LINUX_VERSIONID "5.3.64"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a3fba7..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
1691 rate = clk_get_rate(clk); 1691 rate = clk_get_rate(clk);
1692 clk_put(clk); 1692 clk_put(clk);
1693 1693
1694 if (!rate)
1695 return -EINVAL;
1696
1694 inc = 1000000000ULL << 20; 1697 inc = 1000000000ULL << 20;
1695 do_div(inc, rate); 1698 do_div(inc, rate);
1696 1699
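The ravb fix above guards the do_div() that derives the gPTP increment: a clock rate of zero would otherwise mean division by zero. A tiny sketch of the same guard (plain 64-bit division models do_div):

static int set_gti(unsigned long rate, unsigned long long *inc)
{
	if (!rate)
		return -1;	/* the driver returns -EINVAL */

	*inc = (1000000000ULL << 20) / rate;	/* models do_div(inc, rate) */
	return 0;
}

int main(void)
{
	unsigned long long inc;

	return set_gti(0, &inc) == -1 ? 0 : 1;	/* zero rate is rejected */
}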
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
2194 __func__); 2194 __func__);
2195 return ret; 2195 return ret;
2196 } 2196 }
2197 ret = sh_eth_dev_init(ndev, false); 2197 ret = sh_eth_dev_init(ndev, true);
2198 if (ret < 0) { 2198 if (ret < 0) {
2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", 2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2200 __func__); 2200 __func__);
2201 return ret; 2201 return ret;
2202 } 2202 }
2203 2203
2204 mdp->irq_enabled = true;
2205 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2206 /* Setting the Rx mode will start the Rx process. */
2207 sh_eth_write(ndev, EDRRR_R, EDRRR);
2208 netif_device_attach(ndev); 2204 netif_device_attach(ndev);
2209 } 2205 }
2210 2206
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..afb90d129cb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
36 36
37#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
38#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
39
37#define EMAC_SPLITTER_CTRL_REG 0x0 40#define EMAC_SPLITTER_CTRL_REG 0x0
38#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 41#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
39#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 42#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -46,7 +49,6 @@ struct socfpga_dwmac {
46 u32 reg_shift; 49 u32 reg_shift;
47 struct device *dev; 50 struct device *dev;
48 struct regmap *sys_mgr_base_addr; 51 struct regmap *sys_mgr_base_addr;
49 struct reset_control *stmmac_rst;
50 void __iomem *splitter_base; 52 void __iomem *splitter_base;
51 bool f2h_ptp_ref_clk; 53 bool f2h_ptp_ref_clk;
52}; 54};
@@ -89,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
89 struct device_node *np_splitter; 91 struct device_node *np_splitter;
90 struct resource res_splitter; 92 struct resource res_splitter;
91 93
92 dwmac->stmmac_rst = devm_reset_control_get(dev,
93 STMMAC_RESOURCE_NAME);
94 if (IS_ERR(dwmac->stmmac_rst)) {
95 dev_info(dev, "Could not get reset control!\n");
96 if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
97 return -EPROBE_DEFER;
98 dwmac->stmmac_rst = NULL;
99 }
100
101 dwmac->interface = of_get_phy_mode(np); 94 dwmac->interface = of_get_phy_mode(np);
102 95
103 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); 96 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -148,7 +141,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
148 int phymode = dwmac->interface; 141 int phymode = dwmac->interface;
149 u32 reg_offset = dwmac->reg_offset; 142 u32 reg_offset = dwmac->reg_offset;
150 u32 reg_shift = dwmac->reg_shift; 143 u32 reg_shift = dwmac->reg_shift;
151 u32 ctrl, val; 144 u32 ctrl, val, module;
152 145
153 switch (phymode) { 146 switch (phymode) {
154 case PHY_INTERFACE_MODE_RGMII: 147 case PHY_INTERFACE_MODE_RGMII:
@@ -175,39 +168,39 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
175 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 168 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
176 ctrl |= val << reg_shift; 169 ctrl |= val << reg_shift;
177 170
178 if (dwmac->f2h_ptp_ref_clk) 171 if (dwmac->f2h_ptp_ref_clk) {
179 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 172 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
180 else 173 regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
174 &module);
175 module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
176 regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
177 module);
178 } else {
181 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); 179 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
180 }
182 181
183 regmap_write(sys_mgr_base_addr, reg_offset, ctrl); 182 regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
184 return 0;
185}
186
187static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
188{
189 struct socfpga_dwmac *dwmac = priv;
190 183
191 /* On socfpga platform exit, assert and hold reset to the 184 return 0;
192 * enet controller - the default state after a hard reset.
193 */
194 if (dwmac->stmmac_rst)
195 reset_control_assert(dwmac->stmmac_rst);
196} 185}
197 186
198static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) 187static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
199{ 188{
200 struct socfpga_dwmac *dwmac = priv; 189 struct socfpga_dwmac *dwmac = priv;
201 struct net_device *ndev = platform_get_drvdata(pdev); 190 struct net_device *ndev = platform_get_drvdata(pdev);
202 struct stmmac_priv *stpriv = NULL; 191 struct stmmac_priv *stpriv = NULL;
203 int ret = 0; 192 int ret = 0;
204 193
205 if (ndev) 194 if (!ndev)
206 stpriv = netdev_priv(ndev); 195 return -EINVAL;
196
197 stpriv = netdev_priv(ndev);
198 if (!stpriv)
199 return -EINVAL;
207 200
208 /* Assert reset to the enet controller before changing the phy mode */ 201 /* Assert reset to the enet controller before changing the phy mode */
209 if (dwmac->stmmac_rst) 202 if (stpriv->stmmac_rst)
210 reset_control_assert(dwmac->stmmac_rst); 203 reset_control_assert(stpriv->stmmac_rst);
211 204
212 /* Setup the phy mode in the system manager registers according to 205 /* Setup the phy mode in the system manager registers according to
213 * devicetree configuration 206 * devicetree configuration
@@ -217,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
217 /* Deassert reset for the phy configuration to be sampled by 210 /* Deassert reset for the phy configuration to be sampled by
218 * the enet controller, and operation to start in requested mode 211 * the enet controller, and operation to start in requested mode
219 */ 212 */
220 if (dwmac->stmmac_rst) 213 if (stpriv->stmmac_rst)
221 reset_control_deassert(dwmac->stmmac_rst); 214 reset_control_deassert(stpriv->stmmac_rst);
222 215
223 /* Before the enet controller is suspended, the phy is suspended. 216 /* Before the enet controller is suspended, the phy is suspended.
224 * This causes the phy clock to be gated. The enet controller is 217 * This causes the phy clock to be gated. The enet controller is
@@ -235,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
235 * control register 0, and can be modified by the phy driver 228 * control register 0, and can be modified by the phy driver
236 * framework. 229 * framework.
237 */ 230 */
238 if (stpriv && stpriv->phydev) 231 if (stpriv->phydev)
239 phy_resume(stpriv->phydev); 232 phy_resume(stpriv->phydev);
240 233
241 return ret; 234 return ret;
@@ -275,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
275 268
276 plat_dat->bsp_priv = dwmac; 269 plat_dat->bsp_priv = dwmac;
277 plat_dat->init = socfpga_dwmac_init; 270 plat_dat->init = socfpga_dwmac_init;
278 plat_dat->exit = socfpga_dwmac_exit;
279 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 271 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
280 272
281 ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); 273 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
282 if (ret) 274 if (!ret)
283 return ret; 275 ret = socfpga_dwmac_init(pdev, dwmac);
284 276
285 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 277 return ret;
286} 278}
287 279
288static const struct of_device_id socfpga_dwmac_match[] = { 280static const struct of_device_id socfpga_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 78464fa7fe1f..fcbd4be562e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -288,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
288 (priv->pcs == STMMAC_PCS_RTBI)) 288 (priv->pcs == STMMAC_PCS_RTBI))
289 goto out; 289 goto out;
290 290
291 /* Never init EEE in case of a switch is attached */
292 if (priv->phydev->is_pseudo_fixed_link)
293 goto out;
294
295 /* MAC core supports the EEE feature. */ 291 /* MAC core supports the EEE feature. */
296 if (priv->dma_cap.eee) { 292 if (priv->dma_cap.eee) {
297 int tx_lpi_timer = priv->tx_lpi_timer; 293 int tx_lpi_timer = priv->tx_lpi_timer;
@@ -771,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev)
771 767
772 spin_unlock_irqrestore(&priv->lock, flags); 768 spin_unlock_irqrestore(&priv->lock, flags);
773 769
774 /* At this stage, it could be needed to setup the EEE or adjust some 770 if (phydev->is_pseudo_fixed_link)
775 * MAC related HW registers. 771 /* Stop PHY layer to call the hook to adjust the link in case
776 */ 772 * of a switch is attached to the stmmac driver.
777 priv->eee_enabled = stmmac_eee_init(priv); 773 */
774 phydev->irq = PHY_IGNORE_INTERRUPT;
775 else
776 /* At this stage, init the EEE if supported.
777 * Never called in case of fixed_link.
778 */
779 priv->eee_enabled = stmmac_eee_init(priv);
778} 780}
779 781
780/** 782/**
@@ -865,10 +867,6 @@ static int stmmac_init_phy(struct net_device *dev)
865 return -ENODEV; 867 return -ENODEV;
866 } 868 }
867 869
868 /* If attached to a switch, there is no reason to poll phy handler */
869 if (phydev->is_pseudo_fixed_link)
870 phydev->irq = PHY_IGNORE_INTERRUPT;
871
872 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 870 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
873 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); 871 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
874 872
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..bbb77cd8ad67 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1251 int i, ret; 1251 int i, ret;
1252 u32 reg; 1252 u32 reg;
1253 1253
1254 pm_runtime_get_sync(&priv->pdev->dev);
1255
1254 if (!cpsw_common_res_usage_state(priv)) 1256 if (!cpsw_common_res_usage_state(priv))
1255 cpsw_intr_disable(priv); 1257 cpsw_intr_disable(priv);
1256 netif_carrier_off(ndev); 1258 netif_carrier_off(ndev);
1257 1259
1258 pm_runtime_get_sync(&priv->pdev->dev);
1259
1260 reg = priv->version; 1260 reg = priv->version;
1261 1261
1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..58d58f002559 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1878 pdata->hw_ram_addr = auxdata->hw_ram_addr; 1878 pdata->hw_ram_addr = auxdata->hw_ram_addr;
1879 } 1879 }
1880 1880
1881 pdev->dev.platform_data = pdata;
1882
1883 return pdata; 1881 return pdata;
1884} 1882}
1885 1883
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
2101 cpdma_ctlr_destroy(priv->dma); 2099 cpdma_ctlr_destroy(priv->dma);
2102 2100
2103 unregister_netdev(ndev); 2101 unregister_netdev(ndev);
2102 pm_runtime_disable(&pdev->dev);
2104 free_netdev(ndev); 2103 free_netdev(ndev);
2105 2104
2106 return 0; 2105 return 0;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 84d3e5ca8817..c6385617bfb2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
880 macsec_skb_cb(skb)->valid = false; 880 macsec_skb_cb(skb)->valid = false;
881 skb = skb_share_check(skb, GFP_ATOMIC); 881 skb = skb_share_check(skb, GFP_ATOMIC);
882 if (!skb) 882 if (!skb)
883 return NULL; 883 return ERR_PTR(-ENOMEM);
884 884
885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); 885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
886 if (!req) { 886 if (!req) {
887 kfree_skb(skb); 887 kfree_skb(skb);
888 return NULL; 888 return ERR_PTR(-ENOMEM);
889 } 889 }
890 890
891 hdr = (struct macsec_eth_header *)skb->data; 891 hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
905 skb = skb_unshare(skb, GFP_ATOMIC); 905 skb = skb_unshare(skb, GFP_ATOMIC);
906 if (!skb) { 906 if (!skb) {
907 aead_request_free(req); 907 aead_request_free(req);
908 return NULL; 908 return ERR_PTR(-ENOMEM);
909 } 909 }
910 } else { 910 } else {
911 /* integrity only: all headers + data authenticated */ 911 /* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
921 dev_hold(dev); 921 dev_hold(dev);
922 ret = crypto_aead_decrypt(req); 922 ret = crypto_aead_decrypt(req);
923 if (ret == -EINPROGRESS) { 923 if (ret == -EINPROGRESS) {
924 return NULL; 924 return ERR_PTR(ret);
925 } else if (ret != 0) { 925 } else if (ret != 0) {
926 /* decryption/authentication failed 926 /* decryption/authentication failed
927 * 10.6 if validateFrames is disabled, deliver anyway 927 * 10.6 if validateFrames is disabled, deliver anyway
928 */ 928 */
929 if (ret != -EBADMSG) { 929 if (ret != -EBADMSG) {
930 kfree_skb(skb); 930 kfree_skb(skb);
931 skb = NULL; 931 skb = ERR_PTR(ret);
932 } 932 }
933 } else { 933 } else {
934 macsec_skb_cb(skb)->valid = true; 934 macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1148 1148
1149 if (!skb) { 1149 if (IS_ERR(skb)) {
1150 macsec_rxsa_put(rx_sa); 1150 /* the decrypt callback needs the reference */
1151 if (PTR_ERR(skb) != -EINPROGRESS)
1152 macsec_rxsa_put(rx_sa);
1151 rcu_read_unlock(); 1153 rcu_read_unlock();
1152 *pskb = NULL; 1154 *pskb = NULL;
1153 return RX_HANDLER_CONSUMED; 1155 return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
1161 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1163 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1162 macsec_reset_skb(skb, secy->netdev); 1164 macsec_reset_skb(skb, secy->netdev);
1163 1165
1164 macsec_rxsa_put(rx_sa); 1166 if (rx_sa)
1167 macsec_rxsa_put(rx_sa);
1165 count_rx(dev, skb->len); 1168 count_rx(dev, skb->len);
1166 1169
1167 rcu_read_unlock(); 1170 rcu_read_unlock();
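The macsec_decrypt() conversion above stops returning bare NULL, which conflated "frame dropped" with "asynchronous decrypt still in progress", and instead encodes the errno in the returned pointer so macsec_handle_frame() can keep the SA reference for the -EINPROGRESS case. A userspace model of the ERR_PTR/IS_ERR/PTR_ERR convention (simplified from the kernel's err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *skb = ERR_PTR(-EINPROGRESS);	/* "decrypt still running" */

	if (IS_ERR(skb) && PTR_ERR(skb) == -EINPROGRESS)
		printf("async crypto: keep the rx_sa reference\n");
	return 0;
}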
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1622 } 1625 }
1623 1626
1624 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1627 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1625 if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, 1628 if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1626 secy->icv_len)) { 1629 secy->key_len, secy->icv_len)) {
1630 kfree(rx_sa);
1627 rtnl_unlock(); 1631 rtnl_unlock();
1628 return -ENOMEM; 1632 return -ENOMEM;
1629 } 1633 }
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1768 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1772 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
1769 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1773 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1770 secy->key_len, secy->icv_len)) { 1774 secy->key_len, secy->icv_len)) {
1775 kfree(tx_sa);
1771 rtnl_unlock(); 1776 rtnl_unlock();
1772 return -ENOMEM; 1777 return -ENOMEM;
1773 } 1778 }
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2227 return 1; 2232 return 1;
2228 2233
2229 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || 2234 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
2230 nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 2235 nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2236 MACSEC_DEFAULT_CIPHER_ID) ||
2231 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2237 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2232 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2238 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2233 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2239 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2268 if (!hdr) 2274 if (!hdr)
2269 return -EMSGSIZE; 2275 return -EMSGSIZE;
2270 2276
2271 rtnl_lock(); 2277 genl_dump_check_consistent(cb, hdr, &macsec_fam);
2272 2278
2273 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2279 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
2274 goto nla_put_failure; 2280 goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2429 2435
2430 nla_nest_end(skb, rxsc_list); 2436 nla_nest_end(skb, rxsc_list);
2431 2437
2432 rtnl_unlock();
2433
2434 genlmsg_end(skb, hdr); 2438 genlmsg_end(skb, hdr);
2435 2439
2436 return 0; 2440 return 0;
2437 2441
2438nla_put_failure: 2442nla_put_failure:
2439 rtnl_unlock();
2440 genlmsg_cancel(skb, hdr); 2443 genlmsg_cancel(skb, hdr);
2441 return -EMSGSIZE; 2444 return -EMSGSIZE;
2442} 2445}
2443 2446
2447static int macsec_generation = 1; /* protected by RTNL */
2448
2444static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2449static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2445{ 2450{
2446 struct net *net = sock_net(skb->sk); 2451 struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2450 dev_idx = cb->args[0]; 2455 dev_idx = cb->args[0];
2451 2456
2452 d = 0; 2457 d = 0;
2458 rtnl_lock();
2459
2460 cb->seq = macsec_generation;
2461
2453 for_each_netdev(net, dev) { 2462 for_each_netdev(net, dev) {
2454 struct macsec_secy *secy; 2463 struct macsec_secy *secy;
2455 2464
@@ -2467,6 +2476,7 @@ next:
2467 } 2476 }
2468 2477
2469done: 2478done:
2479 rtnl_unlock();
2470 cb->args[0] = d; 2480 cb->args[0] = d;
2471 return skb->len; 2481 return skb->len;
2472} 2482}
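The dump changes above trade the rtnl lock held across the whole dump for a generation counter: every newlink/dellink bumps it under RTNL, each dump records the value it started from, and genl_dump_check_consistent() lets userspace detect a change mid-dump. A sketch of the scheme with stand-in names:

static int generation = 1;	/* bumped under the same lock as changes */

struct dump { int seq; };

static void on_link_change(void)	/* macsec_newlink()/macsec_dellink() */
{
	generation++;
}

static void dump_start(struct dump *d)
{
	d->seq = generation;		/* cb->seq = macsec_generation */
}

static int dump_is_stale(const struct dump *d)
{
	return d->seq != generation;	/* genl_dump_check_consistent() */
}

int main(void)
{
	struct dump d;

	dump_start(&d);
	on_link_change();
	return dump_is_stale(&d) ? 0 : 1;	/* stale: userspace re-dumps */
}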
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
2920 struct net_device *real_dev = macsec->real_dev; 2930 struct net_device *real_dev = macsec->real_dev;
2921 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 2931 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
2922 2932
2933 macsec_generation++;
2934
2923 unregister_netdevice_queue(dev, head); 2935 unregister_netdevice_queue(dev, head);
2924 list_del_rcu(&macsec->secys); 2936 list_del_rcu(&macsec->secys);
2925 if (list_empty(&rxd->secys)) 2937 if (list_empty(&rxd->secys)) {
2926 netdev_rx_handler_unregister(real_dev); 2938 netdev_rx_handler_unregister(real_dev);
2939 kfree(rxd);
2940 }
2927 2941
2928 macsec_del_dev(macsec); 2942 macsec_del_dev(macsec);
2929} 2943}
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
2945 2959
2946 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 2960 err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
2947 rxd); 2961 rxd);
2948 if (err < 0) 2962 if (err < 0) {
2963 kfree(rxd);
2949 return err; 2964 return err;
2965 }
2950 } 2966 }
2951 2967
2952 list_add_tail_rcu(&macsec->secys, &rxd->secys); 2968 list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3066 if (err < 0) 3082 if (err < 0)
3067 goto del_dev; 3083 goto del_dev;
3068 3084
3085 macsec_generation++;
3086
3069 dev_hold(real_dev); 3087 dev_hold(real_dev);
3070 3088
3071 return 0; 3089 return 0;
@@ -3079,7 +3097,7 @@ unregister:
3079 3097
3080static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3098static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3081{ 3099{
3082 u64 csid = DEFAULT_CIPHER_ID; 3100 u64 csid = MACSEC_DEFAULT_CIPHER_ID;
3083 u8 icv_len = DEFAULT_ICV_LEN; 3101 u8 icv_len = DEFAULT_ICV_LEN;
3084 int flag; 3102 int flag;
3085 bool es, scb, sci; 3103 bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3094 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3112 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3095 3113
3096 switch (csid) { 3114 switch (csid) {
3097 case DEFAULT_CIPHER_ID: 3115 case MACSEC_DEFAULT_CIPHER_ID:
3098 case DEFAULT_CIPHER_ALT: 3116 case MACSEC_DEFAULT_CIPHER_ALT:
3099 if (icv_len < MACSEC_MIN_ICV_LEN || 3117 if (icv_len < MACSEC_MIN_ICV_LEN ||
3100 icv_len > MACSEC_MAX_ICV_LEN) 3118 icv_len > MACSEC_MAX_ICV_LEN)
3101 return -EINVAL; 3119 return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3129 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3147 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
3130 return -EINVAL; 3148 return -EINVAL;
3131 3149
3132 if ((data[IFLA_MACSEC_PROTECT] && 3150 if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
3133 nla_get_u8(data[IFLA_MACSEC_PROTECT])) && 3151 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
3134 !data[IFLA_MACSEC_WINDOW]) 3152 !data[IFLA_MACSEC_WINDOW])
3135 return -EINVAL; 3153 return -EINVAL;
3136 3154
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
3168 3186
3169 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || 3187 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
3170 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3188 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
3171 nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 3189 nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
3190 MACSEC_DEFAULT_CIPHER_ID) ||
3172 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3191 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
3173 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3192 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
3174 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3193 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
441 return -ENOMEM; 441 return -ENOMEM;
442 442
443 mutex_init(&ks->lock); 443 mutex_init(&ks->lock);
444 ks->spi = spi_dev_get(spi); 444 ks->spi = spi;
445 ks->chip = &ks8995_chip[variant]; 445 ks->chip = &ks8995_chip[variant];
446 446
447 if (ks->spi->dev.of_node) { 447 if (ks->spi->dev.of_node) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 510e90a6bb26..2c9e45f50edb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1015,7 +1015,6 @@ static void tun_net_init(struct net_device *dev)
1015 /* Zero header length */ 1015 /* Zero header length */
1016 dev->type = ARPHRD_NONE; 1016 dev->type = ARPHRD_NONE;
1017 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1017 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1018 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1019 break; 1018 break;
1020 1019
1021 case IFF_TAP: 1020 case IFF_TAP:
@@ -1027,7 +1026,6 @@ static void tun_net_init(struct net_device *dev)
1027 1026
1028 eth_hw_addr_random(dev); 1027 eth_hw_addr_random(dev);
1029 1028
1030 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1031 break; 1029 break;
1032 } 1030 }
1033} 1031}
@@ -1481,6 +1479,8 @@ static void tun_setup(struct net_device *dev)
1481 1479
1482 dev->ethtool_ops = &tun_ethtool_ops; 1480 dev->ethtool_ops = &tun_ethtool_ops;
1483 dev->destructor = tun_free_netdev; 1481 dev->destructor = tun_free_netdev;
1482 /* We prefer our own queue length */
1483 dev->tx_queue_len = TUN_READQ_SIZE;
1484} 1484}
1485 1485
1486/* Trivial set of netlink ops to allow deleting tun or tap 1486/* Trivial set of netlink ops to allow deleting tun or tap
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
618 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
619 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */ 620
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 621 /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
622 * (12d1:157d), are known to fail unless the NDP is placed
623 * after the IP packets. Applying the quirk to all Huawei
624 * devices is broader than necessary, but harmless.
625 */
626 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 627 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 }, 628 },
624 /* default entry */ 629 /* default entry */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1152 union Vmxnet3_GenericDesc *gdesc) 1152 union Vmxnet3_GenericDesc *gdesc)
1153{ 1153{
1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1155 /* typical case: TCP/UDP over IP and both csums are correct */ 1155 if (gdesc->rcd.v4 &&
1156 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1156 (le32_to_cpu(gdesc->dword[3]) &
1157 VMXNET3_RCD_CSUM_OK) { 1157 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(gdesc->rcd.frg);
1161 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1162 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY; 1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1164 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
1161 BUG_ON(gdesc->rcd.frg); 1165 BUG_ON(gdesc->rcd.frg);
1162 } else { 1166 } else {
1163 if (gdesc->rcd.csum) { 1167 if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
 	struct u64_stats_sync	syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-	/* TO-DO: return max ethernet size? */
-	return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-	/* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-	return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-	.family		= AF_INET,
-	.local_out	= vrf_ip_local_out,
-	.check		= vrf_ip_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
 /* neighbor handling is done with actual device; do not want
  * to flip skb->dev for those ndisc packets. This really fails
  * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-	.family		= AF_INET6,
-	.local_out	= ip6_local_out,
-	.check		= vrf_ip6_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-						     sizeof(struct rt6_info),
-						     0,
-						     SLAB_HWCACHE_ALIGN,
-						     NULL);
-
-	if (!vrf_dst_ops6.kmem_cachep)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-	skb->dev->stats.rx_errors++;
-	kfree_skb(skb);
-	return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
 			      struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-	dst_destroy(&vrf->rt6->dst);
-	free_percpu(vrf->rt6->rt6i_pcpu);
+	dst_release(&vrf->rt6->dst);
 	vrf->rt6 = NULL;
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct dst_entry *dst;
+	struct net *net = dev_net(dev);
 	struct rt6_info *rt6;
-	int cpu;
 	int rc = -ENOMEM;
 
-	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rt6 = ip6_dst_alloc(net, dev,
+			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
 	if (!rt6)
 		goto out;
 
-	dst = &rt6->dst;
-
-	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-	if (!rt6->rt6i_pcpu) {
-		dst_destroy(dst);
-		goto out;
-	}
-	for_each_possible_cpu(cpu) {
-		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-		*p = NULL;
-	}
-
-	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-	INIT_LIST_HEAD(&rt6->rt6i_siblings);
-	INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
-	rt6->dst.input	= vrf_input6;
 	rt6->dst.output	= vrf_output6;
-
-	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-	atomic_set(&rt6->dst.__refcnt, 2);
-
+	rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+	dst_hold(&rt6->dst);
 	vrf->rt6 = rt6;
 	rc = 0;
 out:
 	return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
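
Both vrf hunks above move from forcing the reference count behind the allocator's back (atomic_set(&...__refcnt, 2), dst_destroy()) to the paired accessors dst_hold()/dst_release(). A userspace analogue of that refcounting discipline, assuming an allocator that hands out one reference; all types and helpers below are invented for the sketch:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_dst { int refcnt; };

static struct fake_dst *fake_dst_alloc(void)
{
	struct fake_dst *d = calloc(1, sizeof(*d));

	if (d)
		d->refcnt = 1;	/* allocator hands out one reference */
	return d;
}

static void fake_dst_hold(struct fake_dst *d) { d->refcnt++; }

static void fake_dst_release(struct fake_dst *d)
{
	assert(d->refcnt > 0);
	if (--d->refcnt == 0)	/* last reference frees the object */
		free(d);
}

int main(void)
{
	struct fake_dst *d = fake_dst_alloc();

	fake_dst_hold(d);		/* the device's long-lived reference */
	fake_dst_release(d);		/* a transient user drops its reference */
	printf("refcnt now %d\n", d->refcnt);	/* 1: device ref remains */
	fake_dst_release(d);		/* device teardown frees it */
	return 0;
}

Taking the extra reference through the accessor keeps hold and release balanced, which is exactly what the old atomic_set(..., 2) plus dst_destroy() pairing could not guarantee.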
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
 	struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-	dst_destroy(dst);
+	dst_release(dst);
 	vrf->rth = NULL;
 }
 
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct rtable *rth;
 
-	rth = dst_alloc(&vrf_dst_ops, dev, 2,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
 	if (rth) {
 		rth->dst.output	= vrf_output;
-		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
-		rth->rt_flags	= 0;
-		rth->rt_type	= RTN_UNICAST;
-		rth->rt_is_input = 0;
-		rth->rt_iif	= 0;
-		rth->rt_pmtu	= 0;
-		rth->rt_gateway	= 0;
-		rth->rt_uses_gateway = 0;
 		rth->rt_table_id = vrf->tb_id;
-		INIT_LIST_HEAD(&rth->rt_uncached);
-		rth->rt_uncached_list = NULL;
 	}
 
 	return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
 	struct net_device *port_dev;
 	struct list_head *iter;
 
-	vrf_rtable_destroy(vrf);
-	vrf_rt6_destroy(vrf);
+	vrf_rtable_release(vrf);
+	vrf_rt6_release(vrf);
 
 	netdev_for_each_lower_dev(dev, port_dev, iter)
 		vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
 	return 0;
 
 out_rth:
-	vrf_rtable_destroy(vrf);
+	vrf_rtable_release(vrf);
 out_stats:
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
 		struct net_vrf *vrf = netdev_priv(dev);
 
 		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
+		dst_hold(&rth->dst);
 	}
 
 	return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
 		struct net_vrf *vrf = netdev_priv(dev);
 
 		rt = vrf->rt6;
-		atomic_inc(&rt->dst.__refcnt);
+		dst_hold(&rt->dst);
 	}
 
 	return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
 {
 	int rc;
 
-	vrf_dst_ops.kmem_cachep =
-		kmem_cache_create("vrf_ip_dst_cache",
-				  sizeof(struct rtable), 0,
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-
-	if (!vrf_dst_ops.kmem_cachep)
-		return -ENOMEM;
-
-	rc = init_dst_ops6_kmem_cachep();
-	if (rc != 0)
-		goto error2;
-
 	register_netdevice_notifier(&vrf_notifier_block);
 
 	rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
 
 error:
 	unregister_netdevice_notifier(&vrf_notifier_block);
-	free_dst_ops6_kmem_cachep();
-error2:
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
 	return rc;
 }
 
-static void __exit vrf_cleanup_module(void)
-{
-	rtnl_link_unregister(&vrf_link_ops);
-	unregister_netdevice_notifier(&vrf_notifier_block);
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-	free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
 	INIT_WORK(&wl->firmware_load, b43_request_firmware);
 	schedule_work(&wl->firmware_load);
 
-bcma_out:
 	return err;
 
 bcma_err_wireless_exit:
 	ieee80211_free_hw(wl->hw);
+bcma_out:
+	kfree(dev);
 	return err;
 }
 
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
 	b43_rng_exit(wl);
 
 	b43_leds_unregister(wl);
-
 	ieee80211_free_hw(wl->hw);
+	kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
 	b43_leds_unregister(wl);
 	b43_wireless_exit(dev, wl);
+	kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
+	iwl_free_fw_paging(mvm);
+
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
-	iwl_free_fw_paging(mvm);
-
 	iwl_mvm_tof_clean(mvm);
 
 	ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
 	 */
 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
 	if (val & (BIT(1) | BIT(17))) {
-		IWL_INFO(trans,
-			 "can't access the RSA semaphore it is write protected\n");
+		IWL_DEBUG_INFO(trans,
+			       "can't access the RSA semaphore it is write protected\n");
 		return 0;
 	}
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
 			rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
 			 rtldm->thermalvalue, thermal_value);
 		/*Record last Power Tracking Thermal Value*/
 		rtldm->thermalvalue = thermal_value;
 	}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index c32cbb593600..f068b6513cd2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, 0);
 	return 0;
 }
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index fc82743aefb6..19f822d7f652 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
 	[ND_CMD_IMPLEMENTED] = { },
 	[ND_CMD_SMART] = {
 		.out_num = 2,
-		.out_sizes = { 4, 8, },
+		.out_sizes = { 4, 128, },
 	},
 	[ND_CMD_SMART_THRESHOLD] = {
 		.out_num = 2,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 79646d0c3277..182a93fe3712 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
 		set_badblock(bb, start_sector, num_sectors);
 }
 
-static void namespace_add_poison(struct list_head *poison_list,
-		struct badblocks *bb, struct resource *res)
+static void badblocks_populate(struct list_head *poison_list,
+		struct badblocks *bb, const struct resource *res)
 {
 	struct nd_poison *pl;
 
@@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list,
 }
 
 /**
- * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
- * @ndns: the namespace containing poison ranges
+ * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
+ * @region: parent region of the range to interrogate
  * @bb: badblocks instance to populate
- * @offset: offset at the start of the namespace before 'sector 0'
+ * @res: resource range to consider
  *
- * The poison list generated during NFIT initialization may contain multiple,
- * possibly overlapping ranges in the SPA (System Physical Address) space.
- * Compare each of these ranges to the namespace currently being initialized,
- * and add badblocks to the gendisk for all matching sub-ranges
+ * The poison list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges.  Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
  */
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-		struct badblocks *bb, resource_size_t offset)
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+		struct badblocks *bb, const struct resource *res)
 {
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
 	struct nvdimm_bus *nvdimm_bus;
 	struct list_head *poison_list;
-	struct resource res = {
-		.start = nsio->res.start + offset,
-		.end = nsio->res.end,
-	};
 
-	nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
+	if (!is_nd_pmem(&nd_region->dev)) {
+		dev_WARN_ONCE(&nd_region->dev, 1,
+				"%s only valid for pmem regions\n", __func__);
+		return;
+	}
+	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
 	poison_list = &nvdimm_bus->poison_list;
 
 	nvdimm_bus_lock(&nvdimm_bus->dev);
-	namespace_add_poison(poison_list, bb, &res);
+	badblocks_populate(poison_list, bb, res);
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
-EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
 
 static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1799bd97a9ce..875c524fafb0 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
 int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 		char *name);
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-		struct badblocks *bb, resource_size_t offset);
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+		struct badblocks *bb, const struct resource *res);
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 254d3bc13f70..e071e214feba 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 	} else {
 		/* from init we validate */
 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
-			return -EINVAL;
+			return -ENODEV;
 	}
 
 	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index cc31c6f1f88e..f798899338ed 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -103,6 +103,20 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 			flush_dcache_page(page);
 		}
 	} else {
+		/*
+		 * Note that we write the data both before and after
+		 * clearing poison.  The write before clear poison
+		 * handles situations where the latest written data is
+		 * preserved and the clear poison operation simply marks
+		 * the address range as valid without changing the data.
+		 * In this case application software can assume that an
+		 * interrupted write will either return the new good
+		 * data or an error.
+		 *
+		 * However, if pmem_clear_poison() leaves the data in an
+		 * indeterminate state we need to perform the write
+		 * after clear poison.
+		 */
 		flush_dcache_page(page);
 		memcpy_to_pmem(pmem_addr, mem + off, len);
 		if (unlikely(bad_pmem)) {
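
The comment block added above describes a write/clear/write ordering. A runnable sketch of that same ordering with the clear-poison step stubbed out (in the driver it is a platform/firmware operation); every name below is invented for illustration:

#include <stdio.h>
#include <string.h>

static char media[16];
static int poisoned = 1;

static void clear_poison_stub(void)
{
	poisoned = 0;	/* may or may not preserve data, hence two writes */
}

static void pmem_write_sketch(const char *src, size_t len)
{
	memcpy(media, src, len);	/* write #1: covers "data preserved" */
	if (poisoned) {
		clear_poison_stub();
		memcpy(media, src, len);	/* write #2: covers "data clobbered" */
	}
}

int main(void)
{
	pmem_write_sketch("hello", 6);
	printf("%s\n", media);
	return 0;
}

Whichever behavior the clear operation has, the range ends up holding the new data or the caller saw an error, which is the guarantee the comment spells out for applications.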
@@ -151,7 +165,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 	int rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
 	if (rw & WRITE)
 		wmb_pmem();
 
@@ -244,7 +258,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	int nid = dev_to_node(dev);
+	struct resource bb_res;
 	struct gendisk *disk;
 
 	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +287,17 @@ static int pmem_attach_disk(struct device *dev,
 	devm_exit_badblocks(dev, &pmem->bb);
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
-
+	bb_res.start = nsio->res.start + pmem->data_offset;
+	bb_res.end = nsio->res.end;
+	if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
+		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+	}
+	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
+			&bb_res);
 	disk->bb = &pmem->bb;
 	add_disk(disk);
 	revalidate_disk(disk);
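
The hunk above builds the badblocks range from the namespace resource, the data offset, and, for pfn devices, the superblock's start_pad/end_trunc trim. A small sketch of just that arithmetic, with plain integers standing in for struct resource and example values that are not from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long ns_start = 0x100000000ULL, ns_end = 0x1ffffffffULL;
	unsigned long long data_offset = 0x200000;	/* example values */
	unsigned int start_pad = 0x1000, end_trunc = 0x2000;

	/* base range: namespace minus the data offset at the front */
	unsigned long long bb_start = ns_start + data_offset;
	unsigned long long bb_end   = ns_end;

	/* pfn case: superblock padding shrinks the range at both ends */
	bb_start += start_pad;
	bb_end   -= end_trunc;

	printf("badblocks range: %#llx-%#llx\n", bb_start, bb_end);
	return 0;
}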
@@ -553,7 +578,7 @@ static int nd_pmem_probe(struct device *dev)
 	ndns->rw_bytes = pmem_rw_bytes;
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
 
 	if (is_nd_btt(dev)) {
 		/* btt allocates its own request_queue */
@@ -595,14 +620,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
 	struct pmem_device *pmem = dev_get_drvdata(dev);
 	struct nd_namespace_common *ndns = pmem->ndns;
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct resource res = {
+		.start = nsio->res.start + pmem->data_offset,
+		.end = nsio->res.end,
+	};
 
 	if (event != NVDIMM_REVALIDATE_POISON)
 		return;
 
-	if (is_nd_btt(dev))
-		nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
-	else
-		nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+	if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+		res.start += __le32_to_cpu(pfn_sb->start_pad);
+		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+	}
+
+	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 }
 
 MODULE_ALIAS("pmem");
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 24ccda303efb..4fd733ff72b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result > 0) {
 		dev_err(dev->ctrl.device,
 			"Could not set queue count (%d)\n", result);
-		nr_io_queues = 0;
-		result = 0;
+		return 0;
 	}
 
 	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 * If we enable msix early due to not intx, disable it again before
 	 * setting up the full range we need.
 	 */
-	if (!pdev->irq)
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	else if (pdev->msix_enabled)
 		pci_disable_msix(pdev);
 
 	for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	if (pci_enable_device_mem(pdev))
 		return result;
 
-	dev->entry[0].vector = pdev->irq;
 	pci_set_master(pdev);
 
 	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	}
 
 	/*
-	 * Some devices don't advertse INTx interrupts, pre-enable a single
-	 * MSIX vec for setup. We'll adjust this later.
+	 * Some devices and/or platforms don't advertise or work with INTx
+	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+	 * adjust this later.
 	 */
-	if (!pdev->irq) {
-		result = pci_enable_msix(pdev, dev->entry, 1);
-		if (result < 0)
-			goto disable;
+	if (pci_enable_msix(pdev, dev->entry, 1)) {
+		pci_enable_msi(pdev);
+		dev->entry[0].vector = pdev->irq;
+	}
+
+	if (!dev->entry[0].vector) {
+		result = -ENODEV;
+		goto disable;
 	}
 
 	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
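
The rewritten setup above tries one MSI-X vector, falls back to MSI, and only errors out if neither path produced a vector. A userspace sketch of that fallback order; the stubs stand in for the PCI enable calls and are not the real API:

#include <stdio.h>

static int msix_ok, msi_ok = 1;	/* pretend MSI-X is unavailable */
static int vector;

static int enable_msix_stub(void)
{
	return msix_ok ? (vector = 42, 0) : -1;	/* 0 on success */
}

static void enable_msi_stub(void)
{
	if (msi_ok)
		vector = 7;
}

int main(void)
{
	if (enable_msix_stub())		/* non-zero: MSI-X failed */
		enable_msi_stub();	/* fall back to MSI */

	if (!vector) {			/* neither worked: hard error */
		fprintf(stderr, "no interrupt vector\n");
		return 1;
	}
	printf("using vector %d\n", vector);
	return 0;
}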
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
+	if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+		goto out;
+
 	set_bit(NVME_CTRL_RESETTING, &dev->flags);
 
 	result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	del_timer_sync(&dev->watchdog_timer);
-
 	set_bit(NVME_CTRL_REMOVING, &dev->flags);
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->async_work);
+	flush_work(&dev->reset_work);
 	flush_work(&dev->scan_work);
 	nvme_remove_namespaces(&dev->ctrl);
 	nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac6300c79..a0e5260bd006 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *root_inode;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = OPROFILEFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_time_gran = 1;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a00abc..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
 }
 EXPORT_SYMBOL(pci_write_vpd);
 
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev:	pci device struct
+ * @len:	size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
 
 /**
@@ -498,9 +511,23 @@ out:
 	return ret ? ret : count;
 }
 
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+	struct pci_vpd *vpd = dev->vpd;
+
+	if (len == 0 || len > PCI_VPD_MAX_SIZE)
+		return -EIO;
+
+	vpd->valid = 1;
+	vpd->len = len;
+
+	return 0;
+}
+
 static const struct pci_vpd_ops pci_vpd_ops = {
 	.read = pci_vpd_read,
 	.write = pci_vpd_write,
+	.set_size = pci_vpd_set_size,
 };
 
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
 	return ret;
 }
 
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+	struct pci_dev *tdev = pci_get_slot(dev->bus,
+					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+	int ret;
+
+	if (!tdev)
+		return -ENODEV;
+
+	ret = pci_set_vpd_size(tdev, len);
+	pci_dev_put(tdev);
+	return ret;
+}
+
 static const struct pci_vpd_ops pci_vpd_f0_ops = {
 	.read = pci_vpd_f0_read,
 	.write = pci_vpd_f0_write,
+	.set_size = pci_vpd_f0_set_size,
 };
 
 int pci_vpd_init(struct pci_dev *dev)
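
The three hunks above thread a new set_size hook through the pci_vpd_ops table, with the exported pci_set_vpd_size() validating pointers before dispatching. A trimmed, compilable stand-in for that ops-table dispatch; the fake_* types are invented for the sketch, and the error codes simply mirror the -ENODEV/-EIO checks:

#include <stddef.h>
#include <stdio.h>

struct fake_dev;
struct fake_vpd_ops { int (*set_size)(struct fake_dev *dev, size_t len); };
struct fake_vpd { const struct fake_vpd_ops *ops; size_t len; };
struct fake_dev { struct fake_vpd *vpd; };

static int set_size_impl(struct fake_dev *dev, size_t len)
{
	if (len == 0)
		return -1;	/* mirrors the -EIO bounds check */
	dev->vpd->len = len;
	return 0;
}

static const struct fake_vpd_ops ops = { .set_size = set_size_impl };

static int fake_set_vpd_size(struct fake_dev *dev, size_t len)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -2;	/* mirrors the -ENODEV guard */
	return dev->vpd->ops->set_size(dev, len);	/* dispatch via table */
}

int main(void)
{
	struct fake_vpd vpd = { .ops = &ops };
	struct fake_dev dev = { .vpd = &vpd };

	printf("rc=%d len=%zu\n", fake_set_vpd_size(&dev, 128), vpd.len);
	return 0;
}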
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,7 +32,7 @@
 #define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)
 
 struct imx6_pcie {
-	struct gpio_desc	*reset_gpio;
+	int			reset_gpio;
 	struct clk		*pcie_bus;
 	struct clk		*pcie_phy;
 	struct clk		*pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 	usleep_range(200, 500);
 
 	/* Some boards don't have PCIe reset GPIO. */
-	if (imx6_pcie->reset_gpio) {
-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
 		msleep(100);
-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
 	}
 	return 0;
 
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 {
 	struct imx6_pcie *imx6_pcie;
 	struct pcie_port *pp;
+	struct device_node *np = pdev->dev.of_node;
 	struct resource *dbi_base;
 	struct device_node *node = pdev->dev.of_node;
 	int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(pp->dbi_base);
 
 	/* Fetch GPIOs */
-	imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-							GPIOD_OUT_LOW);
+	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+					    GPIOF_OUT_INIT_LOW, "PCIe reset");
+		if (ret) {
+			dev_err(&pdev->dev, "unable to get reset gpio\n");
+			return ret;
+		}
+	}
 
 	/* Fetch clocks */
 	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010f0ed1..342b6918bbde 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
 	u8 *data = (u8 *) buf;
 
 	/* Several chips lock up trying to read undefined config space */
-	if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
 		size = dev->cfg_size;
 	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
 		size = 128;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb93481573..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
 struct pci_vpd_ops {
 	ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 	ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+	int (*set_size)(struct pci_dev *dev, size_t len);
 };
 
 struct pci_vpd {
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
 	int	stschg_irq;	/* card-status-change irq */
 	int	card_irq;	/* card irq */
 	int	eject_irq;	/* db1200/pb1200 have these */
+	int	insert_gpio;	/* db1000 carddetect gpio */
 
 #define BOARD_TYPE_DEFAULT	0	/* most boards */
 #define BOARD_TYPE_DB1200	1	/* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
 /* carddetect gpio: low-active */
 static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
 {
-	return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+	return !gpio_get_value(sock->insert_gpio);
 }
 
 static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
 	sock->card_irq = r ? r->start : 0;
 
-	/* insert: irq which triggers on card insertion/ejection */
+	/* insert: irq which triggers on card insertion/ejection
+	 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+	 */
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
 	sock->insert_irq = r ? r->start : -1;
+	if (sock->board_type == BOARD_TYPE_DEFAULT) {
+		sock->insert_gpio = r ? r->start : -1;
+		sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+	}
 
 	/* stschg: irq which trigger on card status change (optional) */
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
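
The hunk above records the GPIO number separately and derives the IRQ from it with gpio_to_irq(), instead of relying on the removed reverse mapping irq_to_gpio(). A trivial sketch of that direction of mapping, with an arbitrary stand-in for the platform's gpio-to-irq translation:

#include <stdio.h>

/* hypothetical mapping; real platforms compute this from their irq domain */
static int gpio_to_irq_stub(int gpio)
{
	return 32 + gpio;
}

int main(void)
{
	int insert_gpio = 5;	/* what the board code passes in the resource */
	int insert_irq = gpio_to_irq_stub(insert_gpio);

	/* keep both: the gpio for level reads, the irq for event handling */
	printf("gpio %d -> irq %d\n", insert_gpio, insert_irq);
	return 0;
}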
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5a8a11..f70090897fdf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
 		break;
 	case CPU_PM_EXIT:
 	case CPU_PM_ENTER_FAILED:
-		 /* Restore and enable the counter */
-		armpmu_start(event, PERF_EF_RELOAD);
+		 /*
+		  * Restore and enable the counter.
+		  * armpmu_start() indirectly calls
+		  *
+		  * perf_event_update_userpage()
+		  *
+		  * that requires RCU read locking to be functional,
+		  * wrap the call within RCU_NONIDLE to make the
+		  * RCU subsystem aware this cpu is not idle from
+		  * an RCU perspective for the armpmu_start() call
+		  * duration.
+		  */
+		RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
 		break;
 	default:
 		break;
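
The comment above explains that RCU_NONIDLE() brackets armpmu_start() so RCU treats the CPU as non-idle for the call's duration. A userspace analogue of that bracketing; the kernel macro wraps the statement in RCU entry/exit hooks, while the flag and macro below are invented for the sketch:

#include <stdio.h>

static int rcu_watching;	/* stand-in for RCU's per-cpu idle state */

#define FAKE_RCU_NONIDLE(stmt) do {	\
	rcu_watching = 1;		\
	stmt;				\
	rcu_watching = 0;		\
} while (0)

static void needs_rcu(void)
{
	printf("RCU %s\n", rcu_watching ? "watching" : "NOT watching");
}

int main(void)
{
	needs_rcu();			/* outside: not watching */
	FAKE_RCU_NONIDLE(needs_rcu());	/* inside: watching for the call */
	return 0;
}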
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 77e2d02e6bee..793ecb6d87bc 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
 	if (!np)
 		return -ENODEV;
 
+	if (!dev->parent || !dev->parent->of_node)
+		return -ENODEV;
+
 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
 	if (IS_ERR(dp))
 		return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	dp->grf = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(dp->grf)) {
-		dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+		dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
 		return PTR_ERR(dp->grf);
 	}
 
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 887b4c27195f..6ebcf3e41c46 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
 	struct regmap *grf;
 	unsigned int reg_offset;
 
-	grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+	if (!dev->parent || !dev->parent->of_node)
+		return -ENODEV;
+
+	grf = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(grf)) {
 		dev_err(dev, "Missing rockchip,grf property\n");
 		return PTR_ERR(grf);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index debe1219d76d..fc8cbf611723 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,6 +2,7 @@ config PINCTRL_IMX
 	bool
 	select PINMUX
 	select PINCONF
+	select REGMAP
 
 config PINCTRL_IMX1_CORE
 	bool
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 46210512d8ec..9cfa544072b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
 
 	if (of_property_read_bool(dev_np, "fsl,input-sel")) {
 		np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
-		if (np) {
-			ipctl->input_sel_base = of_iomap(np, 0);
-			if (IS_ERR(ipctl->input_sel_base)) {
-				of_node_put(np);
-				dev_err(&pdev->dev,
-					"iomuxc input select base address not found\n");
-				return PTR_ERR(ipctl->input_sel_base);
-			}
-		} else {
+		if (!np) {
 			dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
 			return -EINVAL;
 		}
+
+		ipctl->input_sel_base = of_iomap(np, 0);
 		of_node_put(np);
+		if (!ipctl->input_sel_base) {
+			dev_err(&pdev->dev,
+				"iomuxc input select base address not found\n");
+			return -ENOMEM;
+		}
 	}
 
 	imx_pinctrl_desc.name = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85536b467c25..6c2c816f8e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
 	spin_unlock(&pctrl->lock);
 }
 
+static void intel_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct intel_community *community;
+	unsigned pin = irqd_to_hwirq(d);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pctrl->lock, flags);
+
+	community = intel_get_community(pctrl, pin);
+	if (community) {
+		unsigned padno = pin_to_padno(community, pin);
+		unsigned gpp_size = community->gpp_size;
+		unsigned gpp_offset = padno % gpp_size;
+		unsigned gpp = padno / gpp_size;
+		u32 value;
+
+		/* Clear interrupt status first to avoid unexpected interrupt */
+		writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+
+		value = readl(community->regs + community->ie_offset + gpp * 4);
+		value |= BIT(gpp_offset);
+		writel(value, community->regs + community->ie_offset + gpp * 4);
+	}
+
+	spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
 static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
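
The new intel_gpio_irq_enable() above acks any latched status bit before setting the enable bit, so a stale event cannot fire the moment the line is enabled. A sketch of that clear-then-enable ordering, with plain variables standing in for the status/enable registers:

#include <stdio.h>

static unsigned int gpi_is, gpi_ie;	/* fake status/enable registers */

static void irq_enable_sketch(unsigned int bit)
{
	gpi_is &= ~(1u << bit);	/* models the write-1-to-clear ack */
	gpi_ie |= 1u << bit;	/* only then enable the interrupt */
}

int main(void)
{
	gpi_is = 1u << 3;	/* stale event latched while disabled */
	irq_enable_sketch(3);
	printf("status %#x enable %#x\n", gpi_is, gpi_ie);
	return 0;
}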
@@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
 			value |= PADCFG0_RXINV;
 	} else if (type & IRQ_TYPE_EDGE_RISING) {
 		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
-	} else if (type & IRQ_TYPE_LEVEL_LOW) {
-		value |= PADCFG0_RXINV;
+	} else if (type & IRQ_TYPE_LEVEL_MASK) {
+		if (type & IRQ_TYPE_LEVEL_LOW)
+			value |= PADCFG0_RXINV;
 	} else {
 		value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
 	}
@@ -852,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
 	.name = "intel-gpio",
+	.irq_enable = intel_gpio_irq_enable,
 	.irq_ack = intel_gpio_irq_ack,
 	.irq_mask = intel_gpio_irq_mask,
 	.irq_unmask = intel_gpio_irq_unmask,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 2bbe6f7964a7..6ab8c3ccdeea 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
 	int eint_num, virq, eint_offset;
 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
-	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+						128000, 256000};
 	const struct mtk_desc_pin *pin;
 	struct irq_data *d;
 
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
 		return -ENOSYS;
 
-	dbnc = ARRAY_SIZE(dbnc_arr);
-	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
-		if (debounce <= dbnc_arr[i]) {
+	dbnc = ARRAY_SIZE(debounce_time);
+	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+		if (debounce <= debounce_time[i]) {
 			dbnc = i;
 			break;
 		}
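
With the table now in microseconds, the loop above picks the smallest supported debounce time that is at least the requested value. The same selection, runnable in userspace, using the values from the hunk:

#include <stdio.h>

static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
					     64000, 128000, 256000};

static unsigned int pick_debounce(unsigned int req_us)
{
	unsigned int i;

	for (i = 0; i < sizeof(debounce_time) / sizeof(debounce_time[0]); i++)
		if (req_us <= debounce_time[i])
			return debounce_time[i];	/* first entry that covers it */
	return 0;	/* out of range: caller must reject the request */
}

int main(void)
{
	printf("%u\n", pick_debounce(20000));	/* prints 32000 */
	return 0;
}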
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352406108fa0..c8969dd49449 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
 		int val;
 
 		if (pull)
-			pullidx = data_out ? 1 : 2;
+			pullidx = data_out ? 2 : 1;
 
 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
 			   gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736cb1a6..2673cd9d106e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
469 "mfio83", 469 "mfio83",
470}; 470};
471 471
472static const char * const pistachio_sys_pll_lock_groups[] = { 472static const char * const pistachio_audio_pll_lock_groups[] = {
473 "mfio84", 473 "mfio84",
474}; 474};
475 475
476static const char * const pistachio_wifi_pll_lock_groups[] = { 476static const char * const pistachio_rpu_v_pll_lock_groups[] = {
477 "mfio85", 477 "mfio85",
478}; 478};
479 479
480static const char * const pistachio_bt_pll_lock_groups[] = { 480static const char * const pistachio_rpu_l_pll_lock_groups[] = {
481 "mfio86", 481 "mfio86",
482}; 482};
483 483
484static const char * const pistachio_rpu_v_pll_lock_groups[] = { 484static const char * const pistachio_sys_pll_lock_groups[] = {
485 "mfio87", 485 "mfio87",
486}; 486};
487 487
488static const char * const pistachio_rpu_l_pll_lock_groups[] = { 488static const char * const pistachio_wifi_pll_lock_groups[] = {
489 "mfio88", 489 "mfio88",
490}; 490};
491 491
492static const char * const pistachio_audio_pll_lock_groups[] = { 492static const char * const pistachio_bt_pll_lock_groups[] = {
493 "mfio89", 493 "mfio89",
494}; 494};
495 495
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
 	PISTACHIO_FUNCTION_DREQ4,
 	PISTACHIO_FUNCTION_DREQ5,
 	PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
 	PISTACHIO_FUNCTION_SYS_PLL_LOCK,
 	PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
 	PISTACHIO_FUNCTION_BT_PLL_LOCK,
-	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
-	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
-	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
 	PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
 	PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
 	PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
 	FUNCTION(dreq4),
 	FUNCTION(dreq5),
 	FUNCTION(mips_pll_lock),
+	FUNCTION(audio_pll_lock),
+	FUNCTION(rpu_v_pll_lock),
+	FUNCTION(rpu_l_pll_lock),
 	FUNCTION(sys_pll_lock),
 	FUNCTION(wifi_pll_lock),
 	FUNCTION(bt_pll_lock),
-	FUNCTION(rpu_v_pll_lock),
-	FUNCTION(rpu_l_pll_lock),
-	FUNCTION(audio_pll_lock),
 	FUNCTION(debug_raw_cca_ind),
 	FUNCTION(debug_ed_sec20_cca_ind),
 	FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb126d56ad40..cf9bafa10acf 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
 
 	/* Parse pins in each row from LSB */
 	while (mask) {
-		bit_pos = ffs(mask);
+		bit_pos = __ffs(mask);
 		pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
-		mask_pos = ((pcs->fmask) << (bit_pos - 1));
+		mask_pos = ((pcs->fmask) << bit_pos);
 		val_pos = val & mask_pos;
 		submask = mask & mask_pos;
 
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
 				   &pcs->fmask);
 	if (!ret) {
-		pcs->fshift = ffs(pcs->fmask) - 1;
+		pcs->fshift = __ffs(pcs->fmask);
 		pcs->fmax = pcs->fmask >> pcs->fshift;
 	} else {
 		/* If mask property doesn't exist, function mux is invalid. */
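
Both hunks above replace 1-based ffs() with the kernel's 0-based __ffs(), dropping the manual "- 1". The distinction, demonstrated in userspace (the kernel's __ffs() is reimplemented locally so this builds anywhere):

#include <stdio.h>
#include <strings.h>	/* glibc ffs() */

/* local 0-based reimplementation of the kernel's __ffs(); word != 0 */
static unsigned int local___ffs(unsigned int word)
{
	unsigned int bit = 0;

	while (!(word & 1)) {
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned int mask = 0x10;	/* lowest set bit is bit 4 */

	/* ffs() is 1-based; using its result directly as a shift count,
	 * as the old bit_pos/mask_pos code did, lands one bit too high. */
	printf("ffs(mask) = %d (1-based)\n", ffs(mask));		/* 5 */
	printf("__ffs(mask) = %u (0-based)\n", local___ffs(mask));	/* 4 */
	return 0;
}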
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b78140a..a13f2b6f6fc0 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
 	return 0;
 }
 
+/*
+ * gpiolib gpiod_to_irq callback function.
+ * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
+ */
+static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
+	int i;
+
+	for (i = 0; i < info->num_exin; i++)
+		if (info->exin[i] == offset)
+			return ltq_eiu_get_irq(i);
+
+	return -1;
+}
+
 static struct gpio_chip xway_chip = {
 	.label = "gpio-xway",
 	.direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
 	.set = xway_gpio_set,
 	.request = gpiochip_generic_request,
 	.free = gpiochip_generic_free,
+	.to_irq = xway_gpio_to_irq,
 	.base = -1,
 };
 
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b5d81ced6ce6..b68ae424cee2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99);
 		.pins = gpio##id##_pins,		\
 		.npins = (unsigned)ARRAY_SIZE(gpio##id##_pins),	\
 		.funcs = (int[]){			\
-			qca_mux_NA, /* gpio mode */	\
+			qca_mux_gpio, /* gpio mode */	\
 			qca_mux_##f1,			\
 			qca_mux_##f2,			\
 			qca_mux_##f3,			\
@@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99);
 			qca_mux_##f14			\
 		},					\
 		.nfuncs = 15,				\
-		.ctl_reg = 0x1000 + 0x10 * id,		\
-		.io_reg = 0x1004 + 0x10 * id,		\
-		.intr_cfg_reg = 0x1008 + 0x10 * id,	\
-		.intr_status_reg = 0x100c + 0x10 * id,	\
-		.intr_target_reg = 0x400 + 0x4 * id,	\
+		.ctl_reg = 0x0 + 0x1000 * id,		\
+		.io_reg = 0x4 + 0x1000 * id,		\
+		.intr_cfg_reg = 0x8 + 0x1000 * id,	\
+		.intr_status_reg = 0xc + 0x1000 * id,	\
+		.intr_target_reg = 0x8 + 0x1000 * id,	\
 		.mux_bit = 2,				\
 		.pull_bit = 0,				\
 		.drv_bit = 6,				\
@@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
 	.nfunctions = ARRAY_SIZE(ipq4019_functions),
 	.groups = ipq4019_groups,
 	.ngroups = ARRAY_SIZE(ipq4019_groups),
-	.ngpios = 70,
+	.ngpios = 100,
 };
 
 static int ipq4019_pinctrl_probe(struct platform_device *pdev)
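
The register macros above switch from 0x10-spaced registers inside one block to a 0x1000-sized register block per GPIO. The corrected address arithmetic, runnable as a plain C loop using the offsets from the hunk:

#include <stdio.h>

int main(void)
{
	unsigned int id;

	/* each GPIO owns a 0x1000 block; ctl/io/cfg/status sit at small
	 * fixed offsets within it */
	for (id = 0; id < 3; id++)
		printf("gpio%u: ctl %#06x io %#06x cfg %#06x status %#06x\n",
		       id,
		       0x0 + 0x1000 * id, 0x4 + 0x1000 * id,
		       0x8 + 0x1000 * id, 0xc + 0x1000 * id);
	return 0;
}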
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index dc3609f0c60b..ee0c1f2567d9 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
 			return ret;
 	}
 
-	pinctrl_provide_dummies();
+	/* Enable dummy states for those platforms without pinctrl support */
+	if (!of_have_populated_dt())
+		pinctrl_provide_dummies();
 
 	ret = sh_pfc_init_ranges(pfc);
 	if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
 	.pins = sun8i_a33_pins,
 	.npins = ARRAY_SIZE(sun8i_a33_pins),
 	.irq_banks = 2,
+	.irq_bank_base = 1,
 };
 
 static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 12a1dfabb1af..3b017dbd289c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+	u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
 	unsigned long flags;
 	u32 regval;
@@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+	u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+					      pctl->desc->irq_bank_base);
 	u8 status_idx = sunxi_irq_status_offset(d->hwirq);
 
 	/* Clear the IRQ */
@@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
 	unsigned long flags;
 	u32 val;
@@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
 	unsigned long flags;
 	u32 val;
@@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
 	if (bank == pctl->desc->irq_banks)
 		return;
 
-	reg = sunxi_irq_status_reg_from_bank(bank);
+	reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
 	val = readl(pctl->membase + reg);
 
 	if (val) {
@@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
 
 	for (i = 0; i < pctl->desc->irq_banks; i++) {
 		/* Mask and clear all IRQs before registering a handler */
-		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+				pctl->desc->irq_bank_base));
 		writel(0xffffffff,
-		       pctl->membase + sunxi_irq_status_reg_from_bank(i));
+		       pctl->membase + sunxi_irq_status_reg_from_bank(i,
+				pctl->desc->irq_bank_base));
 
 		irq_set_chained_handler_and_data(pctl->irq[i],
 						 sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
 	int				npins;
 	unsigned			pin_base;
 	unsigned			irq_banks;
+	unsigned			irq_bank_base;
 	bool				irq_read_needs_mux;
 };
 
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
 	return pin_num * PULL_PINS_BITS;
 }
 
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 	u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
 
-	return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
 }
 
 static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
 	return irq_num * IRQ_CFG_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
 {
-	return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+	return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 
-	return sunxi_irq_ctrl_reg_from_bank(bank);
+	return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
 	return irq_num * IRQ_CTRL_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
 {
-	return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+	return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 
-	return sunxi_irq_status_reg_from_bank(bank);
+	return sunxi_irq_status_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_status_offset(u16 irq)
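Note on the sunxi hunks above: every per-bank register offset is now computed from (bank_base + bank), which lets the A33 — whose first IRQ-capable bank is the hardware's bank 1 — share the common code. A compilable sketch, with the register constants assumed for illustration (the real values live in pinctrl-sunxi.h):

#include <stdio.h>

/* Constants assumed for illustration only. */
#define IRQ_CFG_REG		0x200
#define IRQ_MEM_SIZE		0x20
#define IRQ_PER_BANK		32
#define IRQ_CFG_IRQ_PER_REG	8

/* Mirrors the patched sunxi_irq_cfg_reg(): bank_base shifts every
 * bank-relative offset by whole banks. */
static unsigned int irq_cfg_reg(unsigned int irq, unsigned int bank_base)
{
	unsigned int bank = irq / IRQ_PER_BANK;
	unsigned int reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;

	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}

int main(void)
{
	/* hwirq 0: plain SoCs use bank_base 0, the A33 uses bank_base 1 */
	printf("bank_base 0 -> 0x%x, bank_base 1 -> 0x%x\n",
	       irq_cfg_reg(0, 0), irq_cfg_reg(0, 1));
	return 0;
}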
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
 	arg0.integer.value = reg;
 
 	status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
 	*ret = lret;
-	return (status != AE_OK) ? -EINVAL : 0;
+	return 0;
 }
 
 /**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
 DEFINE_CONV(normal, 1, 2, 3);
 DEFINE_CONV(y_inverted, 1, -2, 3);
 DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
 DEFINE_CONV(z_inverted, 1, 2, -3);
 DEFINE_CONV(xy_swap, 2, 1, 3);
 DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
 	AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
 	AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
 	AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+	AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
 	AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
 	AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
 	AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8c1424..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
 }
 
 static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+	.freeze  = intel_hid_pl_suspend_handler,
+	.restore = intel_hid_pl_resume_handler,
 	.suspend = intel_hid_pl_suspend_handler,
 	.resume  = intel_hid_pl_resume_handler,
 };
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85c70a8..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
 	ipcdev.acpi_io_size = size;
 	dev_info(&pdev->dev, "io res: %pR\n", res);
 
-	/* This is index 0 to cover BIOS data register */
 	punit_res = punit_res_array;
+	/* This is index 0 to cover BIOS data register */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_BIOS_DATA_INDEX);
 	if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
 	*punit_res = *res;
 	dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
 
+	/* This is index 1 to cover BIOS interface register */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_BIOS_IFACE_INDEX);
 	if (!res) {
 		dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
 		return -ENXIO;
 	}
-	/* This is index 1 to cover BIOS interface register */
 	*++punit_res = *res;
 	dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
 
+	/* This is index 2 to cover ISP data register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_ISP_DATA_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 	}
-	/* This is index 2 to cover ISP data register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 
+	/* This is index 3 to cover ISP interface register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_ISP_IFACE_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 	}
-	/* This is index 3 to cover ISP interface register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 
+	/* This is index 4 to cover GTD data register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_GTD_DATA_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 	}
-	/* This is index 4 to cover GTD data register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 
+	/* This is index 5 to cover GTD interface register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_GTD_IFACE_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 	}
-	/* This is index 5 to cover GTD interface register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
 	struct resource *res;
 	void __iomem *addr;
 
+	/*
+	 * The following resources are required
+	 * - BIOS_IPC BASE_DATA
+	 * - BIOS_IPC BASE_IFACE
+	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	addr = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
 		return PTR_ERR(addr);
 	punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
 
+	/*
+	 * The following resources are optional
+	 * - ISPDRIVER_IPC BASE_DATA
+	 * - ISPDRIVER_IPC BASE_IFACE
+	 * - GTDRIVER_IPC BASE_DATA
+	 * - GTDRIVER_IPC BASE_IFACE
+	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+	}
 
 	return 0;
 }
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f83e82..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
 {
 	u32 telem_ctrl = 0;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&(telm_conf->telem_lock));
 	if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab541a22..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
 		fan_update_desired_level(s);
 	mutex_unlock(&fan_mutex);
 
+	if (rc)
+		return rc;
 	if (status)
 		*status = s;
 
-	return rc;
+	return 0;
 }
 
 static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
 /* Field definitions */
 #define HCI_ACCEL_MASK			0x7fff
 #define HCI_HOTKEY_DISABLE		0x0b
-#define HCI_HOTKEY_ENABLE		0x01
+#define HCI_HOTKEY_ENABLE		0x09
 #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
 #define HCI_LCD_BRIGHTNESS_BITS		3
 #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cdfd01f0adb8..8fad0a7044d3 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1091,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
 	RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+	RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
 	RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
 	RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
 	RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6b3df5..fad968eb75f6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
 
 	.max_register = FTM_PWMLOAD,
 	.volatile_reg = fsl_pwm_volatile_reg,
-	.cache_type = REGCACHE_RBTREE,
+	.cache_type = REGCACHE_FLAT,
 };
 
 static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5d4d91846357..96168b819044 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2669,9 +2669,9 @@ static int __init mport_init(void)
 
 	/* Create device class needed by udev */
 	dev_class = class_create(THIS_MODULE, DRV_NAME);
-	if (!dev_class) {
+	if (IS_ERR(dev_class)) {
 		rmcd_error("Unable to create " DRV_NAME " class");
-		return -EINVAL;
+		return PTR_ERR(dev_class);
 	}
 
 	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
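Note on the rio_mport_cdev hunk above: class_create() reports failure through an ERR_PTR-encoded pointer, never NULL, so the old NULL test could not catch an error. A minimal user-space sketch of that convention (simplified re-implementation for illustration, not the kernel's headers):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers live in the top 4095 values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_class_create(int fail)
{
	return fail ? ERR_PTR(-12L /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
	void *cls = fake_class_create(1);

	if (IS_ERR(cls))	/* a NULL check would miss this failure */
		printf("create failed: %ld\n", PTR_ERR(cls));
	return 0;
}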
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee5bae1..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@ out:
  * A user-initiated temperature conversion is not started by this function,
  * so the temperature is updated once every 64 seconds.
  */
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
 {
 	struct ds1307 *ds1307 = dev_get_drvdata(dev);
 	u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	int ret;
-	s16 temp;
+	s32 temp;
 
 	ret = ds3231_hwmon_read_temp(dev, &temp);
 	if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
 		return PTR_ERR(ds1307->rtc);
 	}
 
-	if (ds1307_can_wakeup_device) {
+	if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
 		/* Disable request for an IRQ */
 		want_irq = false;
 		dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf51b1e..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
-	device_unregister(&dev_info->dev);
 
 	/* unload all related segments */
 	list_for_each_entry(entry, &dev_info->seg_list, lh)
 		segment_unload(entry->segment_name);
 
-	put_device(&dev_info->dev);
 	up_write(&dcssblk_devices_sem);
 
+	device_unregister(&dev_info->dev);
+	put_device(&dev_info->dev);
+
 	rc = count;
 out_buf:
 	kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
 		if (req->cmd_type != REQ_TYPE_FS) {
 			blk_start_request(req);
 			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
-			blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, -EIO);
 			continue;
 		}
 
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 {
 	struct sclp_ctl_sccb ctl_sccb;
 	struct sccb_header *sccb;
+	unsigned long copied;
 	int rc;
 
 	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!sccb)
 		return -ENOMEM;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+	copied = PAGE_SIZE -
+		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+	if (offsetof(struct sccb_header, length) +
+	    sizeof(sccb->length) > copied || sccb->length > copied) {
 		rc = -EFAULT;
 		goto out_free;
 	}
-	if (sccb->length > PAGE_SIZE || sccb->length < 8)
-		return -EINVAL;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
-		rc = -EFAULT;
+	if (sccb->length < 8) {
+		rc = -EINVAL;
 		goto out_free;
 	}
 	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
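Note on the sclp_ctl hunk above: the SCCB header length is now fetched with a single bounded copy of one page and validated against the number of bytes that actually arrived, instead of being read from userspace twice. A compilable sketch of the pattern, with hypothetical names standing in for the driver's:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct hdr { uint16_t length; /* ... */ };

/* stand-in for copy_from_user(): returns the bytes NOT copied */
static size_t user_copy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int fetch_block(struct hdr *page, const void *user, size_t page_size)
{
	size_t copied = page_size - user_copy(page, user, page_size);

	/* the header field, then the claimed length, must fit in what arrived */
	if (offsetof(struct hdr, length) + sizeof(page->length) > copied ||
	    page->length > copied)
		return -1;	/* -EFAULT in the driver */
	if (page->length < 8)
		return -2;	/* -EINVAL */
	return 0;		/* length read once, validated once */
}

int main(void)
{
	static unsigned char user_page[4096], kernel_page[4096];
	struct hdr h = { .length = 64 };

	memcpy(user_page, &h, sizeof(h));
	return fetch_block((struct hdr *)kernel_page, user_page,
			   sizeof(kernel_page));
}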
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 21a67ed047e8..ff6caab8cc8b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -452,10 +452,11 @@ static int aac_slave_configure(struct scsi_device *sdev)
 		else if (depth < 2)
 			depth = 2;
 		scsi_change_queue_depth(sdev, depth);
-	} else
+	} else {
 		scsi_change_queue_depth(sdev, 1);
 
 		sdev->tagged_supported = 1;
+	}
 
 	return 0;
 }
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f3bb7af4e984..ead83a24bcd1 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
 {
 	struct flowi6 fl;
 
+	memset(&fl, 0, sizeof(fl));
 	if (saddr)
 		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
 	if (daddr)
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 35968bdb4866..8fb9643fe6e3 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
 		atomic64_set(&afu->room, room);
 		if (room)
 			goto write_rrin;
-		udelay(nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 
 	pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
 		if (rrin != 0x1)
 			break;
 		/* Double delay each time */
-		udelay(2 << nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 }
 
@@ -338,7 +338,7 @@ retry:
 		atomic64_set(&afu->room, room);
 		if (room)
 			goto write_ioarrin;
-		udelay(nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 
 	dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
 	 * afu->room.
 	 */
 	if (nretry++ < MC_ROOM_RETRY_CNT) {
-		udelay(nretry);
+		udelay(1 << nretry);
 		goto retry;
 	}
 
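Note on the four cxlflash retry hunks above: every loop now shares the same power-of-two backoff, so n retries wait 1, 2, 4, ... microseconds for a cumulative 2^n - 1. A standalone sketch of the schedule (RETRY_CNT assumed for the example; the driver's bound is MC_ROOM_RETRY_CNT):

#include <stdio.h>

#define RETRY_CNT 8	/* assumed for illustration */

int main(void)
{
	unsigned int nretry, total = 0;

	for (nretry = 0; nretry < RETRY_CNT; nretry++) {
		total += 1u << nretry;	/* udelay(1 << nretry) in the driver */
		printf("retry %u: wait %u us (cumulative %u us)\n",
		       nretry, 1u << nretry, total);
	}
	return 0;
}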
@@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg)
 }
 
 /**
- * term_mc() - terminates the master context
+ * term_intr() - disables all AFU interrupts
  * @cfg:	Internal structure associated with the host.
  * @level:	Depth of allocation, where to begin waterfall tear down.
  *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
-static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
 {
-	int rc = 0;
 	struct afu *afu = cfg->afu;
 	struct device *dev = &cfg->dev->dev;
 
 	if (!afu || !cfg->mcctx) {
-		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
-			__func__);
+		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
 		return;
 	}
 
 	switch (level) {
-	case UNDO_START:
-		rc = cxl_stop_context(cfg->mcctx);
-		BUG_ON(rc);
 	case UNMAP_THREE:
 		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
 	case UNMAP_TWO:
@@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
 		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
 	case FREE_IRQ:
 		cxl_free_afu_irqs(cfg->mcctx);
-	case RELEASE_CONTEXT:
-		cfg->mcctx = NULL;
+		/* fall through */
+	case UNDO_NOOP:
+		/* No action required */
+		break;
+	}
+}
+
+/**
+ * term_mc() - terminates the master context
+ * @cfg:	Internal structure associated with the host.
+ * @level:	Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg)
+{
+	int rc = 0;
+	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
+
+	if (!afu || !cfg->mcctx) {
+		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+		return;
 	}
+
+	rc = cxl_stop_context(cfg->mcctx);
+	WARN_ON(rc);
+	cfg->mcctx = NULL;
 }
 
 /**
@@ -726,10 +746,20 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
 */
 static void term_afu(struct cxlflash_cfg *cfg)
 {
+	/*
+	 * Tear down is carefully orchestrated to ensure
+	 * no interrupts can come in when the problem state
+	 * area is unmapped.
+	 *
+	 * 1) Disable all AFU interrupts
+	 * 2) Unmap the problem state area
+	 * 3) Stop the master context
+	 */
+	term_intr(cfg, UNMAP_THREE);
 	if (cfg->afu)
 		stop_afu(cfg);
 
-	term_mc(cfg, UNDO_START);
+	term_mc(cfg);
 
 	pr_debug("%s: returning\n", __func__);
 }
@@ -1597,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg)
 }
 
 /**
- * init_mc() - create and register as the master context
+ * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
-static int init_mc(struct cxlflash_cfg *cfg)
+static enum undo_level init_intr(struct cxlflash_cfg *cfg,
+				 struct cxl_context *ctx)
 {
-	struct cxl_context *ctx;
-	struct device *dev = &cfg->dev->dev;
 	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
 	int rc = 0;
-	enum undo_level level;
-
-	ctx = cxl_get_context(cfg->dev);
-	if (unlikely(!ctx))
-		return -ENOMEM;
-	cfg->mcctx = ctx;
-
-	/* Set it up as a master with the CXL */
-	cxl_set_master(ctx);
-
-	/* During initialization reset the AFU to start from a clean slate */
-	rc = cxl_afu_reset(cfg->mcctx);
-	if (unlikely(rc)) {
-		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
-			__func__, rc);
-		level = RELEASE_CONTEXT;
-		goto out;
-	}
+	enum undo_level level = UNDO_NOOP;
 
 	rc = cxl_allocate_afu_irqs(ctx, 3);
 	if (unlikely(rc)) {
 		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
 			__func__, rc);
-		level = RELEASE_CONTEXT;
+		level = UNDO_NOOP;
 		goto out;
 	}
 
@@ -1661,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg)
 		level = UNMAP_TWO;
 		goto out;
 	}
+out:
+	return level;
+}
 
-	rc = 0;
+/**
+ * init_mc() - create and register as the master context
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+	struct cxl_context *ctx;
+	struct device *dev = &cfg->dev->dev;
+	int rc = 0;
+	enum undo_level level;
+
+	ctx = cxl_get_context(cfg->dev);
+	if (unlikely(!ctx)) {
+		rc = -ENOMEM;
+		goto ret;
+	}
+	cfg->mcctx = ctx;
+
+	/* Set it up as a master with the CXL */
+	cxl_set_master(ctx);
+
+	/* During initialization reset the AFU to start from a clean slate */
+	rc = cxl_afu_reset(cfg->mcctx);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+			__func__, rc);
+		goto ret;
+	}
+
+	level = init_intr(cfg, ctx);
+	if (unlikely(level)) {
+		dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
 
 	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1678,7 +1730,7 @@ ret:
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 out:
-	term_mc(cfg, level);
+	term_intr(cfg, level);
 	goto ret;
 }
 
@@ -1751,7 +1803,8 @@ out:
 err2:
 	kref_put(&afu->mapcount, afu_unmap);
 err1:
-	term_mc(cfg, UNDO_START);
+	term_intr(cfg, UNMAP_THREE);
+	term_mc(cfg);
 	goto out;
 }
 
@@ -2488,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
 		if (unlikely(rc))
 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
 				__func__, rc);
-		stop_afu(cfg);
-		term_mc(cfg, UNDO_START);
+		term_afu(cfg);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		cfg->state = STATE_FAILTERM;
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0faed422c7f4..eb9d8f730b38 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -79,12 +79,11 @@
 #define WWPN_BUF_LEN	(WWPN_LEN + 1)
 
 enum undo_level {
-	RELEASE_CONTEXT = 0,
+	UNDO_NOOP = 0,
 	FREE_IRQ,
 	UNMAP_ONE,
 	UNMAP_TWO,
-	UNMAP_THREE,
-	UNDO_START
+	UNMAP_THREE
 };
 
 struct dev_dependent_vals {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a404a41e871c..8eaed0522aa3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1112,9 +1112,9 @@ static void alua_bus_detach(struct scsi_device *sdev)
 	h->sdev = NULL;
 	spin_unlock(&h->pg_lock);
 	if (pg) {
-		spin_lock(&pg->lock);
+		spin_lock_irq(&pg->lock);
 		list_del_rcu(&h->node);
-		spin_unlock(&pg->lock);
+		spin_unlock_irq(&pg->lock);
 		kref_put(&pg->kref, release_port_group);
 	}
 	sdev->handler_data = NULL;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e4db5fb3239a..8c44b9c424af 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5030,7 +5030,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 static int
 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 {
-	int r, i;
+	int r, i, index;
 	unsigned long flags;
 	u32 reply_address;
 	u16 smid;
@@ -5039,8 +5039,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
 	u8 hide_flag;
 	struct adapter_reply_queue *reply_q;
-	long reply_post_free;
-	u32 reply_post_free_sz, index = 0;
+	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -5124,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 	_base_assign_reply_queues(ioc);
 
 	/* initialize Reply Post Free Queue */
-	reply_post_free_sz = ioc->reply_post_queue_depth *
-	    sizeof(Mpi2DefaultReplyDescriptor_t);
-	reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+	index = 0;
+	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		/*
+		 * If RDPQ is enabled, switch to the next allocation.
+		 * Otherwise advance within the contiguous region.
+		 */
+		if (ioc->rdpq_array_enable) {
+			reply_q->reply_post_free =
+				ioc->reply_post[index++].reply_post_free;
+		} else {
+			reply_q->reply_post_free = reply_post_free_contig;
+			reply_post_free_contig += ioc->reply_post_queue_depth;
+		}
+
 		reply_q->reply_post_host_index = 0;
-		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
-		    reply_post_free;
 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
 			reply_q->reply_post_free[i].Words =
 			    cpu_to_le64(ULLONG_MAX);
 		if (!_base_is_controller_msix_enabled(ioc))
 			goto skip_init_reply_post_free_queue;
-		/*
-		 * If RDPQ is enabled, switch to the next allocation.
-		 * Otherwise advance within the contiguous region.
-		 */
-		if (ioc->rdpq_array_enable)
-			reply_post_free = (long)
-			    ioc->reply_post[++index].reply_post_free;
-		else
-			reply_post_free += reply_post_free_sz;
 	}
  skip_init_reply_post_free_queue:
 
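Note on the mpt3sas hunk above: in the non-RDPQ case all reply queues share one contiguous allocation, and the rewrite carves it up with typed pointer arithmetic rather than a byte count cast through long. A standalone sketch of the carving (types and sizes assumed for illustration; desc_t stands in for Mpi2ReplyDescriptorsUnion_t):

#include <stdio.h>

typedef unsigned long long desc_t;	/* stand-in descriptor type */
#define QUEUE_DEPTH 4
#define NQUEUES 3

int main(void)
{
	static desc_t pool[QUEUE_DEPTH * NQUEUES];	/* one contiguous region */
	desc_t *contig = pool;
	desc_t *queue[NQUEUES];
	int q;

	for (q = 0; q < NQUEUES; q++) {
		queue[q] = contig;	/* this queue's section */
		contig += QUEUE_DEPTH;	/* advance by descriptors, not bytes */
	}
	for (q = 0; q < NQUEUES; q++)
		printf("queue %d starts at element %ld\n",
		       q, (long)(queue[q] - pool));
	return 0;
}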
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b1bf42b93fcc..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
 	int pg83_supported = 0;
 	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
 
-	if (sdev->skip_vpd_pages)
+	if (!scsi_device_supports_vpd(sdev))
 		return;
+
 retry_pg0:
 	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
 	if (!vpd_buf)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 92ffd2406f97..2b642b145be1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,7 @@ const char *scsi_host_state_name(enum scsi_host_state state)
 	return name;
 }
 
+#ifdef CONFIG_SCSI_DH
 static const struct {
 	unsigned char	value;
 	char		*name;
@@ -94,7 +95,7 @@ static const struct {
 	{ SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
 };
 
-const char *scsi_access_state_name(unsigned char state)
+static const char *scsi_access_state_name(unsigned char state)
 {
 	int i;
 	char *name = NULL;
@@ -107,6 +108,7 @@ const char *scsi_access_state_name(unsigned char state)
 	}
 	return name;
 }
+#endif
 
 static int check_set(unsigned long long *val, char *src)
 {
@@ -226,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
 }
 
 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
-struct device_attribute dev_attr_hstate =
+static struct device_attribute dev_attr_hstate =
 	__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
 
 static ssize_t
@@ -401,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
 	NULL
 };
 
-struct attribute_group scsi_shost_attr_group = {
+static struct attribute_group scsi_shost_attr_group = {
 	.attrs =	scsi_sysfs_shost_attrs,
 };
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a5457ac9cdb..f52b74cf8d1e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
 	struct scsi_device *sdp = sdkp->device;
 	struct Scsi_Host *host = sdp->host;
+	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
 	int diskinfo[4];
 
 	/* default to most commonly used values */
 	diskinfo[0] = 0x40;	/* 1 << 6 */
 	diskinfo[1] = 0x20;	/* 1 << 5 */
-	diskinfo[2] = sdkp->capacity >> 11;
+	diskinfo[2] = capacity >> 11;
 
 	/* override with calculated, extended default, or driver values */
 	if (host->hostt->bios_param)
-		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
 	else
-		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+		scsicam_bios_param(bdev, capacity, diskinfo);
 
 	geo->heads = diskinfo[0];
 	geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
 	if (sdkp->capacity > 0xffffffff)
 		sdp->use_16_for_rw = 1;
 
-	/* Rescale capacity to 512-byte units */
-	if (sector_size == 4096)
-		sdkp->capacity <<= 3;
-	else if (sector_size == 2048)
-		sdkp->capacity <<= 2;
-	else if (sector_size == 1024)
-		sdkp->capacity <<= 1;
-
 	blk_queue_physical_block_size(sdp->request_queue,
 				      sdkp->physical_block_size);
 	sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
 		sdkp->ws10 = 1;
 }
 
-static int sd_try_extended_inquiry(struct scsi_device *sdp)
-{
-	/* Attempt VPD inquiry if the device blacklist explicitly calls
-	 * for it.
-	 */
-	if (sdp->try_vpd_pages)
-		return 1;
-	/*
-	 * Although VPD inquiries can go to SCSI-2 type devices,
-	 * some USB ones crash on receiving them, and the pages
-	 * we currently ask for are for SPC-3 and beyond
-	 */
-	if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
-		return 1;
-	return 0;
-}
-
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
-	return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
 /**
  * sd_revalidate_disk - called the first time a new disk is seen,
 *	performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	if (sdkp->media_present) {
 		sd_read_capacity(sdkp, buffer);
 
-		if (sd_try_extended_inquiry(sdp)) {
+		if (scsi_device_supports_vpd(sdp)) {
 			sd_read_block_provisioning(sdkp);
 			sd_read_block_limits(sdkp);
 			sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	if (sdkp->opt_xfer_blocks &&
 	    sdkp->opt_xfer_blocks <= dev_max &&
 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
 		rw_max = q->limits.io_opt =
 			sdkp->opt_xfer_blocks * sdp->sector_size;
 	else
@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	/* Combine with controller limits */
 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
-	set_capacity(disk, sdkp->capacity);
+	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
 	sd_config_write_same(sdkp);
 	kfree(buffer);
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
 	struct device	dev;
 	struct gendisk	*disk;
 	atomic_t	openers;
-	sector_t	capacity;	/* size in 512-byte sectors */
+	sector_t	capacity;	/* size in logical blocks */
 	u32		max_xfer_blocks;
 	u32		opt_xfer_blocks;
 	u32		max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
 	return 0;
 }
 
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+	return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /*
  * A DIF-capable target device can be formatted with different
 * protection schemes. Currently 0 through 3 are defined:
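Note on the sd changes above: capacity is now stored in logical blocks, and logical_to_sectors() converts to the 512-byte sectors the block layer expects by shifting ilog2(sector_size) - 9 bits (4096 -> 3, 2048 -> 2, 1024 -> 1, 512 -> 0), replacing the open-coded rescaling deleted from sd.c. A standalone sketch of the arithmetic:

#include <stdio.h>

static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned long long logical_to_sectors(unsigned int sector_size,
					     unsigned long long blocks)
{
	return blocks << (ilog2(sector_size) - 9);
}

int main(void)
{
	/* 1 TiB of 4096-byte blocks: 268435456 blocks -> 2147483648 sectors */
	printf("%llu\n", logical_to_sectors(4096, 268435456ULL));
	return 0;
}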
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71c5138ddf94..dbf1882cfbac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
  out_unmap:
 	if (res > 0) {
 		for (j=0; j < res; j++)
-			page_cache_release(pages[j]);
+			put_page(pages[j]);
 		res = 0;
 	}
 	kfree(pages);
@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
 		/* FIXME: cache flush missing for rw==READ
 		 * FIXME: call the correct reference counting function
 		 */
-		page_cache_release(page);
+		put_page(page);
 	}
 	kfree(STbp->mapped_pages);
 	STbp->mapped_pages = NULL;
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 57e781c71e67..837effe19907 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
 		genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
 
 		/*
-		 * With CONFIG_PM disabled turn on all domains to make the
-		 * hardware usable.
+		 * Initially turn on all domains to make the domains usable
+		 * with !CONFIG_PM and to get the hardware in sync with the
+		 * software. The unused domains will be switched off during
+		 * late_init time.
 		 */
-		if (!IS_ENABLED(CONFIG_PM))
-			genpd->power_on(genpd);
+		genpd->power_on(genpd);
 
-		pm_genpd_init(genpd, NULL, true);
+		pm_genpd_init(genpd, NULL, false);
 	}
 
 	/*
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e7a19be87c38..50769078e72e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 			 struct spi_transfer *transfer)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
-	unsigned int bpw = transfer->bits_per_word;
+	unsigned int bpw;
 
 	if (!master->dma_rx)
 		return false;
 
+	if (!transfer)
+		return false;
+
+	bpw = transfer->bits_per_word;
 	if (!bpw)
 		bpw = spi->bits_per_word;
 
@@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
 static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 				 struct spi_imx_config *config)
 {
-	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
 	u32 clk = config->speed_hz, delay, reg;
+	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
 	/*
 	 * The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 
 	if (config->mode & SPI_CPHA)
 		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+	else
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
 
 	if (config->mode & SPI_CPOL) {
 		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
 		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+	} else {
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
 	}
 	if (config->mode & SPI_CS_HIGH)
 		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+	else
+		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
 
 	if (spi_imx->usedma)
 		ctrl |= MX51_ECSPI_CTRL_SMC;
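Note on the spi-imx hunks above: the config register is now read back and every conditional set has a matching clear, so mode bits left over from a previous chip select cannot leak into the next transfer. A standalone sketch of the read-modify-write pattern (register access simulated; the bit layout is assumed, not the hardware's):

#include <stdio.h>

#define SCLKPOL(cs)	(1u << (4 + (cs)))	/* assumed bit position */

static unsigned int regval = 0xffffffff;	/* worst case: stale bits set */

int main(void)
{
	unsigned int cfg = regval;	/* readl() in the driver */
	int cs = 0, cpol = 0;		/* this device wants CPOL = 0 */

	if (cpol)
		cfg |= SCLKPOL(cs);
	else
		cfg &= ~SCLKPOL(cs);	/* without this clear, the stale 1 survives */

	regval = cfg;			/* writel() */
	printf("cfg=0x%08x\n", regval);
	return 0;
}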
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8bef46..43a02e377b3b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
 
 	if (mcspi_dma->dma_tx) {
 		struct dma_async_tx_descriptor *tx;
-		struct scatterlist sg;
 
 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
 
-		sg_init_table(&sg, 1);
-		sg_dma_address(&sg) = xfer->tx_dma;
-		sg_dma_len(&sg) = xfer->len;
-
-		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
-		    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+					     xfer->tx_sg.nents, DMA_MEM_TO_DEV,
+					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (tx) {
 			tx->callback = omap2_mcspi_tx_callback;
 			tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 
 	if (mcspi_dma->dma_rx) {
 		struct dma_async_tx_descriptor *tx;
-		struct scatterlist sg;
 
 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
 
 		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
 			dma_count -= es;
 
-		sg_init_table(&sg, 1);
-		sg_dma_address(&sg) = xfer->rx_dma;
-		sg_dma_len(&sg) = dma_count;
-
-		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
-				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
-				DMA_CTRL_ACK);
+		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
+					     xfer->rx_sg.nents, DMA_DEV_TO_MEM,
+					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (tx) {
 			tx->callback = omap2_mcspi_rx_callback;
 			tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 	omap2_mcspi_set_dma_req(spi, 1, 1);
 
 	wait_for_completion(&mcspi_dma->dma_rx_completion);
-	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
-			 DMA_FROM_DEVICE);
 
 	if (mcspi->fifo_depth > 0)
 		return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 
 	if (tx != NULL) {
 		wait_for_completion(&mcspi_dma->dma_tx_completion);
-		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
-				 DMA_TO_DEVICE);
 
 		if (mcspi->fifo_depth > 0) {
 			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
 		gpio_free(spi->cs_gpio);
 }
 
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	if (xfer->len < DMA_MIN_BYTES)
+		return false;
+
+	return true;
+}
+
 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
 				struct spi_device *spi, struct spi_transfer *t)
 {
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
 		return -EINVAL;
 	}
 
-	if (len < DMA_MIN_BYTES)
-		goto skip_dma_map;
-
-	if (mcspi_dma->dma_tx && tx_buf != NULL) {
-		t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
-				len, DMA_TO_DEVICE);
-		if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
-			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
-					'T', len);
-			return -EINVAL;
-		}
-	}
-	if (mcspi_dma->dma_rx && rx_buf != NULL) {
-		t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
-				DMA_FROM_DEVICE);
-		if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
-			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
-					'R', len);
-			if (tx_buf != NULL)
-				dma_unmap_single(mcspi->dev, t->tx_dma,
-						len, DMA_TO_DEVICE);
-			return -EINVAL;
-		}
-	}
-
-skip_dma_map:
 	return omap2_mcspi_work_one(mcspi, spi, t);
1298} 1269}
1299 1270
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1377 master->transfer_one = omap2_mcspi_transfer_one; 1348 master->transfer_one = omap2_mcspi_transfer_one;
1378 master->set_cs = omap2_mcspi_set_cs; 1349 master->set_cs = omap2_mcspi_set_cs;
1379 master->cleanup = omap2_mcspi_cleanup; 1350 master->cleanup = omap2_mcspi_cleanup;
1351 master->can_dma = omap2_mcspi_can_dma;
1380 master->dev.of_node = node; 1352 master->dev.of_node = node;
1381 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; 1353 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
1382 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; 1354 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
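Supplying a can_dma() callback tells the SPI core to DMA-map the buffers of any transfer the callback accepts, handing the driver ready-made scatterlists in xfer->tx_sg/rx_sg. That is what lets the hunks above drop the open-coded dma_map_single()/dma_unmap_single() calls and single-entry local scatterlists. Condensed from the diff (the if/return pair in the original collapses to one expression):

	static bool omap2_mcspi_can_dma(struct spi_master *master,
					struct spi_device *spi,
					struct spi_transfer *xfer)
	{
		/* only transfers worth the DMA setup cost */
		return xfer->len >= DMA_MIN_BYTES;
	}

	/* in probe: */
	master->can_dma = omap2_mcspi_can_dma;

	/* in the TX path, the core-built scatterlist replaces the local one: */
	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
				     xfer->tx_sg.nents, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);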
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 8f50a4020f6f..6c6c0013ec7a 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
534 if (WARN_ON(rs->speed > MAX_SCLK_OUT)) 534 if (WARN_ON(rs->speed > MAX_SCLK_OUT))
535 rs->speed = MAX_SCLK_OUT; 535 rs->speed = MAX_SCLK_OUT;
536 536
537 /* the minimum divsor is 2 */ 537 /* the minimum divisor is 2 */
538 if (rs->max_freq < 2 * rs->speed) { 538 if (rs->max_freq < 2 * rs->speed) {
539 clk_set_rate(rs->spiclk, 2 * rs->speed); 539 clk_set_rate(rs->spiclk, 2 * rs->speed);
540 rs->max_freq = clk_get_rate(rs->spiclk); 540 rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev)
730 master->transfer_one = rockchip_spi_transfer_one; 730 master->transfer_one = rockchip_spi_transfer_one;
731 master->handle_err = rockchip_spi_handle_err; 731 master->handle_err = rockchip_spi_handle_err;
732 732
733 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); 733 rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
734 if (IS_ERR_OR_NULL(rs->dma_tx.ch)) { 734 if (IS_ERR(rs->dma_tx.ch)) {
 735 /* Check tx to see if we need to defer probing the driver */ 735 /* Check tx to see if we need to defer probing the driver */
736 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) { 736 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
737 ret = -EPROBE_DEFER; 737 ret = -EPROBE_DEFER;
738 goto err_get_fifo_len; 738 goto err_get_fifo_len;
739 } 739 }
740 dev_warn(rs->dev, "Failed to request TX DMA channel\n"); 740 dev_warn(rs->dev, "Failed to request TX DMA channel\n");
741 rs->dma_tx.ch = NULL;
741 } 742 }
742 743
743 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx"); 744 rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
744 if (!rs->dma_rx.ch) { 745 if (IS_ERR(rs->dma_rx.ch)) {
745 if (rs->dma_tx.ch) { 746 if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
746 dma_release_channel(rs->dma_tx.ch); 747 dma_release_channel(rs->dma_tx.ch);
747 rs->dma_tx.ch = NULL; 748 rs->dma_tx.ch = NULL;
749 ret = -EPROBE_DEFER;
750 goto err_get_fifo_len;
748 } 751 }
749 dev_warn(rs->dev, "Failed to request RX DMA channel\n"); 752 dev_warn(rs->dev, "Failed to request RX DMA channel\n");
753 rs->dma_rx.ch = NULL;
750 } 754 }
751 755
752 if (rs->dma_tx.ch && rs->dma_rx.ch) { 756 if (rs->dma_tx.ch && rs->dma_rx.ch) {
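dma_request_slave_channel() returns NULL on any failure, which hides -EPROBE_DEFER (the old IS_ERR_OR_NULL/PTR_ERR checks above could never actually see it); dma_request_chan() returns an ERR_PTR, so the driver can defer probing when the DMA controller is not ready yet and fall back to PIO only on real errors. Condensed from the hunk:

	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(rs->dma_tx.ch)) {
		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;	/* retry probe later */
			goto err_get_fifo_len;
		}
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
		rs->dma_tx.ch = NULL;		/* PIO fallback */
	}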
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index de2f2f90d799..0239b45eed92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
1209 struct spi_master *master = 1209 struct spi_master *master =
1210 container_of(work, struct spi_master, pump_messages); 1210 container_of(work, struct spi_master, pump_messages);
1211 1211
1212 __spi_pump_messages(master, true, false); 1212 __spi_pump_messages(master, true, master->bus_lock_flag);
1213} 1213}
1214 1214
1215static int spi_init_queue(struct spi_master *master) 1215static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2853 */ 2853 */
2854int spi_sync(struct spi_device *spi, struct spi_message *message) 2854int spi_sync(struct spi_device *spi, struct spi_message *message)
2855{ 2855{
2856 return __spi_sync(spi, message, 0); 2856 return __spi_sync(spi, message, spi->master->bus_lock_flag);
2857} 2857}
2858EXPORT_SYMBOL_GPL(spi_sync); 2858EXPORT_SYMBOL_GPL(spi_sync);
2859 2859
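Both hunks replace a hard-coded "bus not locked" argument with the master's current bus_lock_flag, presumably so that spi_sync() and the message pump skip bus_lock_mutex while a client already holds the bus via spi_bus_lock(), instead of deadlocking on it. A hedged usage sketch of the scenario this targets (msg1/msg2 are placeholder messages):

	/* exclusive multi-message sequence, assuming this fix is applied */
	spi_bus_lock(spi->master);	/* sets master->bus_lock_flag */
	spi_sync(spi, &msg1);		/* __spi_sync() now sees the flag */
	spi_sync(spi, &msg2);
	spi_bus_unlock(spi->master);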
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index cf84581287b9..5bac28a3944e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,6 +30,8 @@ source "drivers/staging/wlan-ng/Kconfig"
30 30
31source "drivers/staging/comedi/Kconfig" 31source "drivers/staging/comedi/Kconfig"
32 32
33source "drivers/staging/olpc_dcon/Kconfig"
34
33source "drivers/staging/rtl8192u/Kconfig" 35source "drivers/staging/rtl8192u/Kconfig"
34 36
35source "drivers/staging/rtl8192e/Kconfig" 37source "drivers/staging/rtl8192e/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 7d6448d20464..a954242b0f2c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,6 +4,7 @@ obj-y += media/
4obj-$(CONFIG_SLICOSS) += slicoss/ 4obj-$(CONFIG_SLICOSS) += slicoss/
5obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 5obj-$(CONFIG_PRISM2_USB) += wlan-ng/
6obj-$(CONFIG_COMEDI) += comedi/ 6obj-$(CONFIG_COMEDI) += comedi/
7obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
7obj-$(CONFIG_RTL8192U) += rtl8192u/ 8obj-$(CONFIG_RTL8192U) += rtl8192u/
8obj-$(CONFIG_RTL8192E) += rtl8192e/ 9obj-$(CONFIG_RTL8192E) += rtl8192e/
9obj-$(CONFIG_R8712U) += rtl8712/ 10obj-$(CONFIG_R8712U) += rtl8712/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index dab486261154..13335437c69c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
88} while (0) 88} while (0)
89 89
90#ifndef LIBCFS_VMALLOC_SIZE 90#ifndef LIBCFS_VMALLOC_SIZE
91#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */ 91#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
92#endif 92#endif
93 93
94#define LIBCFS_ALLOC_PRE(size, mask) \ 94#define LIBCFS_ALLOC_PRE(size, mask) \
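This and the lustre/lnet hunks that follow are a mechanical rename: the PAGE_CACHE_* macros and the page_cache_get()/page_cache_release() helpers were straight aliases of the plain page macros and were removed kernel-wide in this cycle. Their old definitions (from include/linux/pagemap.h before the removal) were simply:

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)	get_page(page)
	#define page_cache_release(page)	put_page(page)

so the substitutions below (e.g. put_page() in llite/dir.c) do not change behavior.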
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79e5ec8..837eb22749c3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
57#include "../libcfs_cpu.h" 57#include "../libcfs_cpu.h"
58#endif 58#endif
59 59
60#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1)) 60#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
61#define page_index(p) ((p)->index) 61#define page_index(p) ((p)->index)
62 62
63#define memory_pressure_get() (current->flags & PF_MEMALLOC) 63#define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
67#if BITS_PER_LONG == 32 67#if BITS_PER_LONG == 32
68/* limit to lowmem on 32-bit systems */ 68/* limit to lowmem on 32-bit systems */
69#define NUM_CACHEPAGES \ 69#define NUM_CACHEPAGES \
70 min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4) 70 min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
71#else 71#else
72#define NUM_CACHEPAGES totalram_pages 72#define NUM_CACHEPAGES totalram_pages
73#endif 73#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c341c5..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@ typedef struct {
514 /** 514 /**
515 * Starting offset of the fragment within the page. Note that the 515 * Starting offset of the fragment within the page. Note that the
516 * end of the fragment must not pass the end of the page; i.e., 516 * end of the fragment must not pass the end of the page; i.e.,
517 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. 517 * kiov_len + kiov_offset <= PAGE_SIZE.
518 */ 518 */
519 unsigned int kiov_offset; 519 unsigned int kiov_offset;
520} lnet_kiov_t; 520} lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 3e1f24e77f64..d4ce06d0aeeb 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
291 291
292 for (nob = i = 0; i < niov; i++) { 292 for (nob = i = 0; i < niov; i++) {
293 if ((kiov[i].kiov_offset && i > 0) || 293 if ((kiov[i].kiov_offset && i > 0) ||
294 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) 294 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
295 return NULL; 295 return NULL;
296 296
297 pages[i] = kiov[i].kiov_page; 297 pages[i] = kiov[i].kiov_page;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c90e5102fe06..c3d628bac5b8 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
517 max = TCD_MAX_PAGES; 517 max = TCD_MAX_PAGES;
518 } else { 518 } else {
519 max = max / num_possible_cpus(); 519 max = max / num_possible_cpus();
520 max <<= (20 - PAGE_CACHE_SHIFT); 520 max <<= (20 - PAGE_SHIFT);
521 } 521 }
522 rc = cfs_tracefile_init(max); 522 rc = cfs_tracefile_init(max);
523 523
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index ec3bc04bd89f..244eb89eef68 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
182 if (tcd->tcd_cur_pages > 0) { 182 if (tcd->tcd_cur_pages > 0) {
183 __LASSERT(!list_empty(&tcd->tcd_pages)); 183 __LASSERT(!list_empty(&tcd->tcd_pages));
184 tage = cfs_tage_from_list(tcd->tcd_pages.prev); 184 tage = cfs_tage_from_list(tcd->tcd_pages.prev);
185 if (tage->used + len <= PAGE_CACHE_SIZE) 185 if (tage->used + len <= PAGE_SIZE)
186 return tage; 186 return tage;
187 } 187 }
188 188
@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
260 * from here: this will lead to infinite recursion. 260 * from here: this will lead to infinite recursion.
261 */ 261 */
262 262
263 if (len > PAGE_CACHE_SIZE) { 263 if (len > PAGE_SIZE) {
264 pr_err("cowardly refusing to write %lu bytes in a page\n", len); 264 pr_err("cowardly refusing to write %lu bytes in a page\n", len);
265 return NULL; 265 return NULL;
266 } 266 }
@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
349 for (i = 0; i < 2; i++) { 349 for (i = 0; i < 2; i++) {
350 tage = cfs_trace_get_tage(tcd, needed + known_size + 1); 350 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
351 if (!tage) { 351 if (!tage) {
352 if (needed + known_size > PAGE_CACHE_SIZE) 352 if (needed + known_size > PAGE_SIZE)
353 mask |= D_ERROR; 353 mask |= D_ERROR;
354 354
355 cfs_trace_put_tcd(tcd); 355 cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
360 string_buf = (char *)page_address(tage->page) + 360 string_buf = (char *)page_address(tage->page) +
361 tage->used + known_size; 361 tage->used + known_size;
362 362
363 max_nob = PAGE_CACHE_SIZE - tage->used - known_size; 363 max_nob = PAGE_SIZE - tage->used - known_size;
364 if (max_nob <= 0) { 364 if (max_nob <= 0) {
365 printk(KERN_EMERG "negative max_nob: %d\n", 365 printk(KERN_EMERG "negative max_nob: %d\n",
366 max_nob); 366 max_nob);
@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
424 __LASSERT(debug_buf == string_buf); 424 __LASSERT(debug_buf == string_buf);
425 425
426 tage->used += needed; 426 tage->used += needed;
427 __LASSERT(tage->used <= PAGE_CACHE_SIZE); 427 __LASSERT(tage->used <= PAGE_SIZE);
428 428
429console: 429console:
430 if ((mask & libcfs_printk) == 0) { 430 if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
835 835
836int cfs_trace_allocate_string_buffer(char **str, int nob) 836int cfs_trace_allocate_string_buffer(char **str, int nob)
837{ 837{
838 if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ 838 if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
839 return -EINVAL; 839 return -EINVAL;
840 840
841 *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); 841 *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
951 } 951 }
952 952
953 mb /= num_possible_cpus(); 953 mb /= num_possible_cpus();
954 pages = mb << (20 - PAGE_CACHE_SHIFT); 954 pages = mb << (20 - PAGE_SHIFT);
955 955
956 cfs_tracefile_write_lock(); 956 cfs_tracefile_write_lock();
957 957
@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
977 977
978 cfs_tracefile_read_unlock(); 978 cfs_tracefile_read_unlock();
979 979
980 return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; 980 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
981} 981}
982 982
983static int tracefiled(void *arg) 983static int tracefiled(void *arg)
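The recurring `(20 - PAGE_SHIFT)` shift in these hunks converts between megabytes and pages: 1 MB is 2^20 bytes and a page is 2^PAGE_SHIFT bytes, so pages = mb << (20 - PAGE_SHIFT) and, going the other way, mb = pages >> (20 - PAGE_SHIFT). A worked check with 4 KiB pages (PAGE_SHIFT == 12):

	/* 5 MB of trace buffer, 4 KiB pages */
	unsigned long mb = 5;
	unsigned long pages = mb << (20 - 12);	/* 5 * 256 = 1280 pages */
	/* and back: 1280 >> (20 - 12) == 5 MB */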
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 4c77f9044dd3..ac84e7f4c859 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
87extern int libcfs_panic_in_progress; 87extern int libcfs_panic_in_progress;
88int cfs_trace_max_debug_mb(void); 88int cfs_trace_max_debug_mb(void);
89 89
90#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) 90#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
91#define TCD_STOCK_PAGES (TCD_MAX_PAGES) 91#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
92#define CFS_TRACEFILE_SIZE (500 << 20) 92#define CFS_TRACEFILE_SIZE (500 << 20)
93 93
@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
96/* 96/*
 97 * Private declarations for tracefile 97 * Private declarations for tracefile
98 */ 98 */
99#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) 99#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
100#define TCD_STOCK_PAGES (TCD_MAX_PAGES) 100#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
101 101
102#define CFS_TRACEFILE_SIZE (500 << 20) 102#define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do { \
257do { \ 257do { \
258 __LASSERT(tage); \ 258 __LASSERT(tage); \
259 __LASSERT(tage->page); \ 259 __LASSERT(tage->page); \
260 __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ 260 __LASSERT(tage->used <= PAGE_SIZE); \
261 __LASSERT(page_count(tage->page) > 0); \ 261 __LASSERT(page_count(tage->page) > 0); \
262} while (0) 262} while (0)
263 263
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index c74514f99f90..75d31217bf92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
139 for (i = 0; i < (int)niov; i++) { 139 for (i = 0; i < (int)niov; i++) {
140 /* We take the page pointer on trust */ 140 /* We take the page pointer on trust */
141 if (lmd->md_iov.kiov[i].kiov_offset + 141 if (lmd->md_iov.kiov[i].kiov_offset +
142 lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) 142 lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
143 return -EINVAL; /* invalid length */ 143 return -EINVAL; /* invalid length */
144 144
145 total_length += lmd->md_iov.kiov[i].kiov_len; 145 total_length += lmd->md_iov.kiov[i].kiov_len;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 0009a8de77d5..f19aa9320e34 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
549 if (len <= frag_len) { 549 if (len <= frag_len) {
550 dst->kiov_len = len; 550 dst->kiov_len = len;
551 LASSERT(dst->kiov_offset + dst->kiov_len 551 LASSERT(dst->kiov_offset + dst->kiov_len
552 <= PAGE_CACHE_SIZE); 552 <= PAGE_SIZE);
553 return niov; 553 return niov;
554 } 554 }
555 555
556 dst->kiov_len = frag_len; 556 dst->kiov_len = frag_len;
557 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); 557 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
558 558
559 len -= frag_len; 559 len -= frag_len;
560 dst++; 560 dst++;
@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
887 rbp = &the_lnet.ln_rtrpools[cpt][0]; 887 rbp = &the_lnet.ln_rtrpools[cpt][0];
888 888
889 LASSERT(msg->msg_len <= LNET_MTU); 889 LASSERT(msg->msg_len <= LNET_MTU);
890 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { 890 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
891 rbp++; 891 rbp++;
892 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); 892 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
893 } 893 }
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index cc0c2753dd63..891fd59401d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
166 nalloc = 16; /* first guess at max interfaces */ 166 nalloc = 16; /* first guess at max interfaces */
167 toobig = 0; 167 toobig = 0;
168 for (;;) { 168 for (;;) {
169 if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { 169 if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
170 toobig = 1; 170 toobig = 1;
171 nalloc = PAGE_CACHE_SIZE / sizeof(*ifr); 171 nalloc = PAGE_SIZE / sizeof(*ifr);
172 CWARN("Too many interfaces: only enumerating first %d\n", 172 CWARN("Too many interfaces: only enumerating first %d\n",
173 nalloc); 173 nalloc);
174 } 174 }
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 61459cf9d58f..b01dc424c514 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -27,8 +27,8 @@
27#define LNET_NRB_SMALL_PAGES 1 27#define LNET_NRB_SMALL_PAGES 1
28#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ 28#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
29#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) 29#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
30#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \ 30#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
31 PAGE_CACHE_SHIFT) 31 PAGE_SHIFT)
32 32
33static char *forwarding = ""; 33static char *forwarding = "";
34module_param(forwarding, charp, 0444); 34module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1338 return NULL; 1338 return NULL;
1339 } 1339 }
1340 1340
1341 rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; 1341 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
1342 rb->rb_kiov[i].kiov_offset = 0; 1342 rb->rb_kiov[i].kiov_offset = 0;
1343 rb->rb_kiov[i].kiov_page = page; 1343 rb->rb_kiov[i].kiov_page = page;
1344 } 1344 }
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index eebc92412061..dcb6e506f592 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
90 * NB: this is not going to work for variable page size, 90 * NB: this is not going to work for variable page size,
91 * but we have to keep it for compatibility 91 * but we have to keep it for compatibility
92 */ 92 */
93 len = npg * PAGE_CACHE_SIZE; 93 len = npg * PAGE_SIZE;
94 94
95 } else { 95 } else {
96 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 96 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
104 opc = breq->blk_opc; 104 opc = breq->blk_opc;
105 flags = breq->blk_flags; 105 flags = breq->blk_flags;
106 len = breq->blk_len; 106 len = breq->blk_len;
107 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 107 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
108 } 108 }
109 109
110 if (npg > LNET_MAX_IOV || npg <= 0) 110 if (npg > LNET_MAX_IOV || npg <= 0)
@@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
167 167
168 if (pattern == LST_BRW_CHECK_SIMPLE) { 168 if (pattern == LST_BRW_CHECK_SIMPLE) {
169 memcpy(addr, &magic, BRW_MSIZE); 169 memcpy(addr, &magic, BRW_MSIZE);
170 addr += PAGE_CACHE_SIZE - BRW_MSIZE; 170 addr += PAGE_SIZE - BRW_MSIZE;
171 memcpy(addr, &magic, BRW_MSIZE); 171 memcpy(addr, &magic, BRW_MSIZE);
172 return; 172 return;
173 } 173 }
174 174
175 if (pattern == LST_BRW_CHECK_FULL) { 175 if (pattern == LST_BRW_CHECK_FULL) {
176 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) 176 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
177 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); 177 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
178 return; 178 return;
179 } 179 }
@@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
198 if (data != magic) 198 if (data != magic)
199 goto bad_data; 199 goto bad_data;
200 200
201 addr += PAGE_CACHE_SIZE - BRW_MSIZE; 201 addr += PAGE_SIZE - BRW_MSIZE;
202 data = *((__u64 *)addr); 202 data = *((__u64 *)addr);
203 if (data != magic) 203 if (data != magic)
204 goto bad_data; 204 goto bad_data;
@@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
207 } 207 }
208 208
209 if (pattern == LST_BRW_CHECK_FULL) { 209 if (pattern == LST_BRW_CHECK_FULL) {
210 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { 210 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
211 data = *(((__u64 *)addr) + i); 211 data = *(((__u64 *)addr) + i);
212 if (data != magic) 212 if (data != magic)
213 goto bad_data; 213 goto bad_data;
@@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
278 opc = breq->blk_opc; 278 opc = breq->blk_opc;
279 flags = breq->blk_flags; 279 flags = breq->blk_flags;
280 npg = breq->blk_npg; 280 npg = breq->blk_npg;
281 len = npg * PAGE_CACHE_SIZE; 281 len = npg * PAGE_SIZE;
282 282
283 } else { 283 } else {
284 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 284 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
292 opc = breq->blk_opc; 292 opc = breq->blk_opc;
293 flags = breq->blk_flags; 293 flags = breq->blk_flags;
294 len = breq->blk_len; 294 len = breq->blk_len;
295 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 295 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
296 } 296 }
297 297
298 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); 298 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
463 reply->brw_status = EINVAL; 463 reply->brw_status = EINVAL;
464 return 0; 464 return 0;
465 } 465 }
466 npg = reqst->brw_len >> PAGE_CACHE_SHIFT; 466 npg = reqst->brw_len >> PAGE_SHIFT;
467 467
468 } else { 468 } else {
469 npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 469 npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
470 } 470 }
471 471
472 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; 472 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 5c7cb72eac9a..79ee6c0bf7c1 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
743 if (args->lstio_tes_param && 743 if (args->lstio_tes_param &&
744 (args->lstio_tes_param_len <= 0 || 744 (args->lstio_tes_param_len <= 0 ||
745 args->lstio_tes_param_len > 745 args->lstio_tes_param_len >
746 PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) 746 PAGE_SIZE - sizeof(lstcon_test_t)))
747 return -EINVAL; 747 return -EINVAL;
748 748
749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); 749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
819 819
820 opc = data->ioc_u32[0]; 820 opc = data->ioc_u32[0];
821 821
822 if (data->ioc_plen1 > PAGE_CACHE_SIZE) 822 if (data->ioc_plen1 > PAGE_SIZE)
823 return -EINVAL; 823 return -EINVAL;
824 824
825 LIBCFS_ALLOC(buf, data->ioc_plen1); 825 LIBCFS_ALLOC(buf, data->ioc_plen1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index bcd78888f9cc..35a227d0c657 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
786 test_bulk_req_t *brq = &req->tsr_u.bulk_v0; 786 test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
787 787
788 brq->blk_opc = param->blk_opc; 788 brq->blk_opc = param->blk_opc;
789 brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / 789 brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
790 PAGE_CACHE_SIZE; 790 PAGE_SIZE;
791 brq->blk_flags = param->blk_flags; 791 brq->blk_flags = param->blk_flags;
792 792
793 return 0; 793 return 0;
@@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
822 if (transop == LST_TRANS_TSBCLIADD) { 822 if (transop == LST_TRANS_TSBCLIADD) {
823 npg = sfw_id_pages(test->tes_span); 823 npg = sfw_id_pages(test->tes_span);
824 nob = !(feats & LST_FEAT_BULK_LEN) ? 824 nob = !(feats & LST_FEAT_BULK_LEN) ?
825 npg * PAGE_CACHE_SIZE : 825 npg * PAGE_SIZE :
826 sizeof(lnet_process_id_packed_t) * test->tes_span; 826 sizeof(lnet_process_id_packed_t) * test->tes_span;
827 } 827 }
828 828
@@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
851 LASSERT(nob > 0); 851 LASSERT(nob > 0);
852 852
853 len = !(feats & LST_FEAT_BULK_LEN) ? 853 len = !(feats & LST_FEAT_BULK_LEN) ?
854 PAGE_CACHE_SIZE : 854 PAGE_SIZE :
855 min_t(int, nob, PAGE_CACHE_SIZE); 855 min_t(int, nob, PAGE_SIZE);
856 nob -= len; 856 nob -= len;
857 857
858 bulk->bk_iovs[i].kiov_offset = 0; 858 bulk->bk_iovs[i].kiov_offset = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 926c3970c498..e2c532399366 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
1161 int len; 1161 int len;
1162 1162
1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { 1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
1164 len = npg * PAGE_CACHE_SIZE; 1164 len = npg * PAGE_SIZE;
1165 1165
1166 } else { 1166 } else {
1167 len = sizeof(lnet_process_id_packed_t) * 1167 len = sizeof(lnet_process_id_packed_t) *
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 69be7d6f48fa..7d7748d96332 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
90static int 90static int
91srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) 91srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
92{ 92{
93 nob = min_t(int, nob, PAGE_CACHE_SIZE); 93 nob = min_t(int, nob, PAGE_SIZE);
94 94
95 LASSERT(nob > 0); 95 LASSERT(nob > 0);
96 LASSERT(i >= 0 && i < bk->bk_niov); 96 LASSERT(i >= 0 && i < bk->bk_niov);
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 288522d4d7b9..e689ca1846e1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,10 +390,10 @@ typedef struct sfw_test_instance {
390 } tsi_u; 390 } tsi_u;
391} sfw_test_instance_t; 391} sfw_test_instance_t;
392 392
393/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at 393/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
394 * the end of pages are not used */ 394 * pages are not used */
395#define SFW_MAX_CONCUR LST_MAX_CONCUR 395#define SFW_MAX_CONCUR LST_MAX_CONCUR
396#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) 396#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
397#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) 397#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
398#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) 398#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
399 399
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99e1fb4..c6c7f54637fb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
52 return; 52 return;
53 53
54 if (PagePrivate(page)) 54 if (PagePrivate(page))
55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
56 56
57 cancel_dirty_page(page); 57 cancel_dirty_page(page);
58 ClearPageMappedToDisk(page); 58 ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b13a305..242bb1ef6245 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@ struct lu_context_key {
1118 { \ 1118 { \
1119 type *value; \ 1119 type *value; \
1120 \ 1120 \
1121 CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ 1121 CLASSERT(PAGE_SIZE >= sizeof (*value)); \
1122 \ 1122 \
1123 value = kzalloc(sizeof(*value), GFP_NOFS); \ 1123 value = kzalloc(sizeof(*value), GFP_NOFS); \
1124 if (!value) \ 1124 if (!value) \
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index da8bc6eadd13..5aae1d06a5fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
1022 * MDS_READPAGE page size 1022 * MDS_READPAGE page size
1023 * 1023 *
1024 * This is the directory page size packed in MDS_READPAGE RPC. 1024 * This is the directory page size packed in MDS_READPAGE RPC.
1025 * It's different than PAGE_CACHE_SIZE because the client needs to 1025 * It's different than PAGE_SIZE because the client needs to
1026 * access the struct lu_dirpage header packed at the beginning of 1026 * access the struct lu_dirpage header packed at the beginning of
1027 * the "page" and without this there isn't any way to know find the 1027 * the "page" and without this there isn't any way to know find the
1028 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. 1028 * lu_dirpage header is if client and server PAGE_SIZE differ.
1029 */ 1029 */
1030#define LU_PAGE_SHIFT 12 1030#define LU_PAGE_SHIFT 12
1031#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) 1031#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
1032#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) 1032#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
1033 1033
1034#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) 1034#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
1035 1035
1036/** @} lu_dir */ 1036/** @} lu_dir */
1037 1037
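With LU_PAGE_SHIFT fixed at 12, a directory "page" on the wire is always 4 KiB regardless of the client's PAGE_SIZE, and LU_PAGE_COUNT is how many of them fit in one client page:

	/* 4 KiB client pages (PAGE_SHIFT == 12): LU_PAGE_COUNT = 1 << 0 = 1  */
	/* 64 KiB client pages (PAGE_SHIFT == 16): LU_PAGE_COUNT = 1 << 4 = 16 */

which is exactly the case the lu_dirpage comment above, and lmv_adjust_dirpages() referenced in llite/dir.c below, have to handle.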
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index df94f9f3bef2..af77eb359c43 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
155 if (cli->cl_max_mds_easize < body->max_mdsize) { 155 if (cli->cl_max_mds_easize < body->max_mdsize) {
156 cli->cl_max_mds_easize = body->max_mdsize; 156 cli->cl_max_mds_easize = body->max_mdsize;
157 cli->cl_default_mds_easize = 157 cli->cl_default_mds_easize =
158 min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); 158 min_t(__u32, body->max_mdsize, PAGE_SIZE);
159 } 159 }
160 if (cli->cl_max_mds_cookiesize < body->max_cookiesize) { 160 if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
161 cli->cl_max_mds_cookiesize = body->max_cookiesize; 161 cli->cl_max_mds_cookiesize = body->max_cookiesize;
162 cli->cl_default_mds_cookiesize = 162 cli->cl_default_mds_cookiesize =
163 min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); 163 min_t(__u32, body->max_cookiesize, PAGE_SIZE);
164 } 164 }
165 } 165 }
166} 166}
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 4fa1a18b7d15..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -99,21 +99,21 @@
99 */ 99 */
100#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) 100#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
101#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) 101#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
102#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 102#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
103 103
104#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) 104#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
105#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) 105#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
106#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 106#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
107#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE 107#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
108#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 108#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
109#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) 109#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
110 110
111/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */ 111/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
112# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) 112# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
113# error "PTLRPC_MAX_BRW_PAGES isn't a power of two" 113# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
114# endif 114# endif
115# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) 115# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
116# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" 116# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
117# endif 117# endif
118# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) 118# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
119# error "PTLRPC_MAX_BRW_SIZE too big" 119# error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4a0f2e8b19f6..4264d97650ec 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@ struct client_obd {
272 int cl_grant_shrink_interval; /* seconds */ 272 int cl_grant_shrink_interval; /* seconds */
273 273
274 /* A chunk is an optimal size used by osc_extent to determine 274 /* A chunk is an optimal size used by osc_extent to determine
275 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) 275 * the extent size. A chunk is max(PAGE_SIZE, OST block size)
276 */ 276 */
277 int cl_chunkbits; 277 int cl_chunkbits;
278 int cl_chunk; 278 int cl_chunk;
@@ -1318,7 +1318,7 @@ bad_format:
1318 1318
1319static inline int cli_brw_size(struct obd_device *obd) 1319static inline int cli_brw_size(struct obd_device *obd)
1320{ 1320{
1321 return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 1321 return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
1322} 1322}
1323 1323
1324#endif /* __OBD_H */ 1324#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 225262fa67b6..f8ee3a3254ba 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
500 500
501#ifdef POISON_BULK 501#ifdef POISON_BULK
502#define POISON_PAGE(page, val) do { \ 502#define POISON_PAGE(page, val) do { \
503 memset(kmap(page), val, PAGE_CACHE_SIZE); \ 503 memset(kmap(page), val, PAGE_SIZE); \
504 kunmap(page); \ 504 kunmap(page); \
505} while (0) 505} while (0)
506#else 506#else
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index aced41ab93a1..96141d17d07f 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
758 * --bug 17336 758 * --bug 17336
759 */ 759 */
760 loff_t size = cl_isize_read(inode); 760 loff_t size = cl_isize_read(inode);
761 loff_t cur_index = start >> PAGE_CACHE_SHIFT; 761 loff_t cur_index = start >> PAGE_SHIFT;
762 loff_t size_index = (size - 1) >> 762 loff_t size_index = (size - 1) >>
763 PAGE_CACHE_SHIFT; 763 PAGE_SHIFT;
764 764
765 if ((size == 0 && cur_index != 0) || 765 if ((size == 0 && cur_index != 0) ||
766 size_index < cur_index) 766 size_index < cur_index)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b586d5a88d00..7dd7df59aa1f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
307 cli->cl_avail_grant = 0; 307 cli->cl_avail_grant = 0;
308 /* FIXME: Should limit this for the sum of all cl_dirty_max. */ 308 /* FIXME: Should limit this for the sum of all cl_dirty_max. */
309 cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; 309 cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
310 if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) 310 if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
311 cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); 311 cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
312 INIT_LIST_HEAD(&cli->cl_cache_waiters); 312 INIT_LIST_HEAD(&cli->cl_cache_waiters);
313 INIT_LIST_HEAD(&cli->cl_loi_ready_list); 313 INIT_LIST_HEAD(&cli->cl_loi_ready_list);
314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); 314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
353 * In the future this should likely be increased. LU-1431 353 * In the future this should likely be increased. LU-1431
354 */ 354 */
355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, 355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
356 LNET_MTU >> PAGE_CACHE_SHIFT); 356 LNET_MTU >> PAGE_SHIFT);
357 357
358 if (!strcmp(name, LUSTRE_MDC_NAME)) { 358 if (!strcmp(name, LUSTRE_MDC_NAME)) {
359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; 359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
360 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { 360 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
361 cli->cl_max_rpcs_in_flight = 2; 361 cli->cl_max_rpcs_in_flight = 2;
362 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { 362 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
363 cli->cl_max_rpcs_in_flight = 3; 363 cli->cl_max_rpcs_in_flight = 3;
364 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { 364 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
365 cli->cl_max_rpcs_in_flight = 4; 365 cli->cl_max_rpcs_in_flight = 4;
366 } else { 366 } else {
367 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; 367 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3e937b050203..b913ba9cf97c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
107/* 107/*
108 * 50 ldlm locks for 1MB of RAM. 108 * 50 ldlm locks for 1MB of RAM.
109 */ 109 */
110#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) 110#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
111 111
112/* 112/*
113 * Maximal possible grant step plan in %. 113 * Maximal possible grant step plan in %.
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c7904a96f9af..74e193e52cd6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
546{ 546{
547 int avail; 547 int avail;
548 548
549 avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; 549 avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
550 if (likely(avail >= 0)) 550 if (likely(avail >= 0))
551 avail /= (int)sizeof(struct lustre_handle); 551 avail /= (int)sizeof(struct lustre_handle);
552 else 552 else
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4e0a3e583330..e4c82883e580 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
134 * a header lu_dirpage which describes the start/end hash, and whether this 134 * a header lu_dirpage which describes the start/end hash, and whether this
135 * page is empty (contains no dir entry) or its hash collides with the next page. 135 * page is empty (contains no dir entry) or its hash collides with the next page.
136 * After the client receives the reply, several pages will be integrated into dir page 136 * After the client receives the reply, several pages will be integrated into dir page
137 * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the 137 * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
138 * lu_dirpage for this integrated page will be adjusted. See 138 * for this integrated page will be adjusted. See lmv_adjust_dirpages().
139 * lmv_adjust_dirpages().
140 * 139 *
141 */ 140 */
142 141
@@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
153 struct page **page_pool; 152 struct page **page_pool;
154 struct page *page; 153 struct page *page;
155 struct lu_dirpage *dp; 154 struct lu_dirpage *dp;
156 int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; 155 int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
157 int nrdpgs = 0; /* number of pages read actually */ 156 int nrdpgs = 0; /* number of pages read actually */
158 int npages; 157 int npages;
159 int i; 158 int i;
@@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
193 if (body->valid & OBD_MD_FLSIZE) 192 if (body->valid & OBD_MD_FLSIZE)
194 cl_isize_write(inode, body->size); 193 cl_isize_write(inode, body->size);
195 194
196 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1) 195 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
197 >> PAGE_CACHE_SHIFT; 196 >> PAGE_SHIFT;
198 SetPageUptodate(page0); 197 SetPageUptodate(page0);
199 } 198 }
200 unlock_page(page0); 199 unlock_page(page0);
@@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
209 page = page_pool[i]; 208 page = page_pool[i];
210 209
211 if (rc < 0 || i >= nrdpgs) { 210 if (rc < 0 || i >= nrdpgs) {
212 page_cache_release(page); 211 put_page(page);
213 continue; 212 continue;
214 } 213 }
215 214
@@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
230 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n", 229 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
231 offset, ret); 230 offset, ret);
232 } 231 }
233 page_cache_release(page); 232 put_page(page);
234 } 233 }
235 234
236 if (page_pool != &page0) 235 if (page_pool != &page0)
@@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
247 truncate_complete_page(page->mapping, page); 246 truncate_complete_page(page->mapping, page);
248 unlock_page(page); 247 unlock_page(page);
249 } 248 }
250 page_cache_release(page); 249 put_page(page);
251} 250}
252 251
253/* 252/*
@@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
273 if (found > 0 && !radix_tree_exceptional_entry(page)) { 272 if (found > 0 && !radix_tree_exceptional_entry(page)) {
274 struct lu_dirpage *dp; 273 struct lu_dirpage *dp;
275 274
276 page_cache_get(page); 275 get_page(page);
277 spin_unlock_irq(&mapping->tree_lock); 276 spin_unlock_irq(&mapping->tree_lock);
278 /* 277 /*
279 * In contrast to find_lock_page() we are sure that directory 278 * In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
313 page = NULL; 312 page = NULL;
314 } 313 }
315 } else { 314 } else {
316 page_cache_release(page); 315 put_page(page);
317 page = ERR_PTR(-EIO); 316 page = ERR_PTR(-EIO);
318 } 317 }
319 318
@@ -1507,7 +1506,7 @@ skip_lmm:
1507 st.st_gid = body->gid; 1506 st.st_gid = body->gid;
1508 st.st_rdev = body->rdev; 1507 st.st_rdev = body->rdev;
1509 st.st_size = body->size; 1508 st.st_size = body->size;
1510 st.st_blksize = PAGE_CACHE_SIZE; 1509 st.st_blksize = PAGE_SIZE;
1511 st.st_blocks = body->blocks; 1510 st.st_blocks = body->blocks;
1512 st.st_atime = body->atime; 1511 st.st_atime = body->atime;
1513 st.st_mtime = body->mtime; 1512 st.st_mtime = body->mtime;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e1572cb457b..e3c0f1dd4d31 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
310/* default to about 40meg of readahead on a given system. That much tied 310/* default to about 40meg of readahead on a given system. That much tied
311 * up in 512k readahead requests serviced at 40ms each is about 1GB/s. 311 * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
312 */ 312 */
313#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) 313#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
314 314
315/* default to read-ahead full files smaller than 2MB on the second read */ 315/* default to read-ahead full files smaller than 2MB on the second read */
316#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) 316#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
317 317
318enum ra_stat { 318enum ra_stat {
319 RA_STAT_HIT = 0, 319 RA_STAT_HIT = 0,
@@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
975static inline void ll_invalidate_page(struct page *vmpage) 975static inline void ll_invalidate_page(struct page *vmpage)
976{ 976{
977 struct address_space *mapping = vmpage->mapping; 977 struct address_space *mapping = vmpage->mapping;
978 loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; 978 loff_t offset = vmpage->index << PAGE_SHIFT;
979 979
980 LASSERT(PageLocked(vmpage)); 980 LASSERT(PageLocked(vmpage));
981 if (!mapping) 981 if (!mapping)
982 return; 982 return;
983 983
984 ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); 984 ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
985 truncate_complete_page(mapping, vmpage); 985 truncate_complete_page(mapping, vmpage);
986} 986}
987 987
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6d6bb33e3655..b57a992688a8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
85 85
86 si_meminfo(&si); 86 si_meminfo(&si);
87 pages = si.totalram - si.totalhigh; 87 pages = si.totalram - si.totalhigh;
88 if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) 88 if (pages >> (20 - PAGE_SHIFT) < 512)
89 lru_page_max = pages / 2; 89 lru_page_max = pages / 2;
90 else 90 else
91 lru_page_max = (pages / 4) * 3; 91 lru_page_max = (pages / 4) * 3;
@@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
272 valid != CLIENT_CONNECT_MDT_REQD) { 272 valid != CLIENT_CONNECT_MDT_REQD) {
273 char *buf; 273 char *buf;
274 274
275 buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 275 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
276 if (!buf) { 276 if (!buf) {
277 err = -ENOMEM; 277 err = -ENOMEM;
278 goto out_md_fid; 278 goto out_md_fid;
279 } 279 }
280 obd_connect_flags2str(buf, PAGE_CACHE_SIZE, 280 obd_connect_flags2str(buf, PAGE_SIZE,
281 valid ^ CLIENT_CONNECT_MDT_REQD, ","); 281 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
282 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n", 282 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
283 sbi->ll_md_exp->exp_obd->obd_name, buf); 283 sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
335 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) 335 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
336 sbi->ll_md_brw_size = data->ocd_brw_size; 336 sbi->ll_md_brw_size = data->ocd_brw_size;
337 else 337 else
338 sbi->ll_md_brw_size = PAGE_CACHE_SIZE; 338 sbi->ll_md_brw_size = PAGE_SIZE;
339 339
340 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { 340 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
341 LCONSOLE_INFO("Layout lock feature supported.\n"); 341 LCONSOLE_INFO("Layout lock feature supported.\n");
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 69445a9f2011..5b484e62ffd0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
58 size_t count) 58 size_t count)
59{ 59{
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + 60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); 61 (vma->vm_pgoff << PAGE_SHIFT);
62 policy->l_extent.end = (policy->l_extent.start + count - 1) | 62 policy->l_extent.end = (policy->l_extent.start + count - 1) |
63 ~CFS_PAGE_MASK; 63 ~CFS_PAGE_MASK;
64} 64}
@@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
321 321
322 vmpage = vio->u.fault.ft_vmpage; 322 vmpage = vio->u.fault.ft_vmpage;
323 if (result != 0 && vmpage) { 323 if (result != 0 && vmpage) {
324 page_cache_release(vmpage); 324 put_page(vmpage);
325 vmf->page = NULL; 325 vmf->page = NULL;
326 } 326 }
327 } 327 }
@@ -360,7 +360,7 @@ restart:
360 lock_page(vmpage); 360 lock_page(vmpage);
361 if (unlikely(!vmpage->mapping)) { /* unlucky */ 361 if (unlikely(!vmpage->mapping)) { /* unlucky */
362 unlock_page(vmpage); 362 unlock_page(vmpage);
363 page_cache_release(vmpage); 363 put_page(vmpage);
364 vmf->page = NULL; 364 vmf->page = NULL;
365 365
366 if (!printed && ++count > 16) { 366 if (!printed && ++count > 16) {
@@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
457 LASSERTF(last > first, "last %llu first %llu\n", last, first); 457 LASSERTF(last > first, "last %llu first %llu\n", last, first);
458 if (mapping_mapped(mapping)) { 458 if (mapping_mapped(mapping)) {
459 rc = 0; 459 rc = 0;
460 unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, 460 unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
461 last - first + 1, 0); 461 last - first + 1, 0);
462 } 462 }
463 463
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b725fc16cf49..f169c0db63b4 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 	bio_for_each_segment(bvec, bio, iter) {
 		BUG_ON(bvec.bv_offset != 0);
-		BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+		BUG_ON(bvec.bv_len != PAGE_SIZE);
 
 		pages[page_count] = bvec.bv_page;
 		offsets[page_count] = offset;
@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 		    (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
 		    page_count);
 
-	pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+	pvec->ldp_size = page_count << PAGE_SHIFT;
 	pvec->ldp_nr = page_count;
 
 	/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 
 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
-	lo->lo_blocksize = PAGE_CACHE_SIZE;
+	lo->lo_blocksize = PAGE_SIZE;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 	lo->lo_queue->queuedata = lo;
 
 	/* queue parameters */
-	CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+	CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
 	blk_queue_logical_block_size(lo->lo_queue,
-				     (unsigned short)PAGE_CACHE_SIZE);
+				     (unsigned short)PAGE_SIZE);
 	blk_queue_max_hw_sectors(lo->lo_queue,
-				 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+				 LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
 	blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
 
 	set_capacity(disks[lo->lo_number], size);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 45941a6600fe..27ab1261400e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number > totalram_pages / 2) {
 
 		CERROR("can't set file readahead more than %lu MB\n",
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+		       totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
 		return -ERANGE;
 	}
 
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
 	 */
 	if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
 		CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
-		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	struct super_block *sb = m->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct cl_client_cache *cache = &sbi->ll_cache;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 	int max_cached_mb;
 	int unused_mb;
 
@@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 	if (pages_number < 0 || pages_number > totalram_pages) {
 		CERROR("%s: can't set max cache more than %lu MB\n",
 		       ll_get_fsname(sb, NULL, 0),
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+		       totalram_pages >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 34614acf3f8e..edab6c5b7e50 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
 		 */
 		io->ci_lockreq = CILR_NEVER;
 
-		pos = vmpage->index << PAGE_CACHE_SHIFT;
+		pos = vmpage->index << PAGE_SHIFT;
 
 		/* Create a temp IO to serve write. */
-		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
 		if (result == 0) {
 			cio->cui_fd = LUSTRE_FPRIVATE(file);
 			cio->cui_iter = NULL;
@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 		}
 		if (rc != 1)
 			unlock_page(vmpage);
-		page_cache_release(vmpage);
+		put_page(vmpage);
 	} else {
 		which = RA_STAT_FAILED_GRAB_PAGE;
 		msg = "g_c_p_n failed";
@@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
  * striped over, rather than having a constant value for all files here.
  */
 
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
  * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
  * by default, this should be adjusted corresponding with max_read_ahead_mb
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
  * up quickly which will affect read performance significantly. See LU-2816
  */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {
@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 			end = rpc_boundary;
 
 		/* Truncate RA window to end of file */
-		end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+		end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
 
 		ras->ras_next_readahead = max(end, end + 1);
 		RAS_CDEBUG(ras);
@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 	if (reserved != 0)
 		ll_ra_count_put(ll_i2sbi(inode), reserved);
 
-	if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+	if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
 		ll_ra_stats_inc(mapping, RA_STAT_EOF);
 
 	/* if we didn't get to the end of the region we reserved from
@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 	if (ras->ras_requests == 2 && !ras->ras_request_index) {
 		__u64 kms_pages;
 
-		kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-			    PAGE_CACHE_SHIFT;
+		kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+			    PAGE_SHIFT;
 
 		CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
 		       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 	 * PageWriteback or clean the page.
 	 */
 	result = cl_sync_file_range(inode, offset,
-				    offset + PAGE_CACHE_SIZE - 1,
+				    offset + PAGE_SIZE - 1,
 				    CL_FSYNC_LOCAL, 1);
 	if (result > 0) {
 		/* actually we may have written more than one page.
@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	int ignore_layout = 0;
 
 	if (wbc->range_cyclic) {
-		start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+		start = mapping->writeback_index << PAGE_SHIFT;
 		end = OBD_OBJECT_EOF;
 	} else {
 		start = wbc->range_start;
@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
 		if (end == OBD_OBJECT_EOF)
 			end = i_size_read(inode);
-		mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+		mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
 	}
 	return result;
 }
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 7a5db67bc680..69aa15e8e3ef 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
 	 * below because they are run with page locked and all our io is
 	 * happening with locked page too
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE) {
+	if (offset == 0 && length == PAGE_SIZE) {
 		env = cl_env_get(&refcheck);
 		if (!IS_ERR(env)) {
 			inode = vmpage->mapping->host;
@@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
 		return -EFBIG;
 	}
 
-	*max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	*max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+	*max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	*max_pages -= user_addr >> PAGE_SHIFT;
 
 	*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
 	if (*pages) {
@@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
 	for (i = 0; i < npages; i++) {
 		if (do_dirty)
 			set_page_dirty_lock(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	kvfree(pages);
 }
@@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
  */
 #define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
-		       PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
+		       PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 			       loff_t file_offset)
 {
@@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 	CDEBUG(D_VFSTRACE,
 	       "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
 	       inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
-	       file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
-	       MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+	       file_offset, file_offset, count >> PAGE_SHIFT,
+	       MAX_DIO_SIZE >> PAGE_SHIFT);
 
 	/* Check that all user buffers are aligned as well */
 	if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 			 * page worth of page pointers = 4MB on i386.
 			 */
 			if (result == -ENOMEM &&
-			    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
-			    PAGE_CACHE_SIZE) {
+			    size > (PAGE_SIZE / sizeof(*pages)) *
+			    PAGE_SIZE) {
 				size = ((((size / 2) - 1) |
 					 ~CFS_PAGE_MASK) + 1) &
 					CFS_PAGE_MASK;
@@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 			  loff_t pos, unsigned len, unsigned flags,
 			  struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int rc;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 	rc = ll_prepare_write(file, page, from, from + len);
 	if (rc) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return rc;
 }
@@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int rc;
 
 	rc = ll_commit_write(file, page, from, from + copied);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return rc ?: copied;
 }
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index fb0c26ee7ff3..85a835976174 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
 		vio->cui_ra_window_set = 1;
 		bead->lrr_start = cl_index(obj, pos);
 		/*
-		 * XXX: explicit PAGE_CACHE_SIZE
+		 * XXX: explicit PAGE_SIZE
 		 */
-		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+		bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
 		ll_ra_read_in(file, bead);
 	}
 
@@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
 	 * We're completely overwriting an existing page, so _don't_
 	 * set it up to date until commit_write
 	 */
-	if (from == 0 && to == PAGE_CACHE_SIZE) {
+	if (from == 0 && to == PAGE_SIZE) {
 		CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
 		POISON_PAGE(page, 0x11);
 	} else
@@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
 		set_page_dirty(vmpage);
 		vvp_write_pending(cl2ccc(obj), cp);
 	} else if (result == -EDQUOT) {
-		pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+		pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
 		bool need_clip = true;
 
 		/*
@@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
 		 * being.
 		 */
 		if (last_index > pg->cp_index) {
-			to = PAGE_CACHE_SIZE;
+			to = PAGE_SIZE;
 			need_clip = false;
 		} else if (last_index == pg->cp_index) {
 			int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 850bae734075..33ca3eb34965 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
 	struct page *vmpage = cp->cpg_page;
 
 	LASSERT(vmpage);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static void vvp_page_fini(const struct lu_env *env,
@@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
 	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
-	offset = vmpage->index << PAGE_CACHE_SHIFT;
+	offset = vmpage->index << PAGE_SHIFT;
 
 	/*
 	 * XXX is it safe to call this with the page lock held?
 	 */
-	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
 	return 0;
 }
 
@@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
 	cpg->cpg_page = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 
 	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
 	if (page->cp_type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 0f776cf8a5aa..9abb7c2b9231 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
  * |s|e|f|p|ent| 0 | ... | 0 |
  * '----------------- -----'
  *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
  * larger than LU_PAGE_SIZE, a single host page may contain multiple
  * lu_dirpages. After reading the lu_dirpages from the MDS, the
  * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
 * to the first entry of the next lu_dirpage.
 */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 {
 	int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 }
 #else
 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
 
 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 			struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 	struct lmv_obd *lmv = &obd->u.lmv;
 	__u64 offset = op_data->op_offset;
 	int rc;
-	int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+	int ncfspgs; /* pages read in PAGE_SIZE */
 	int nlupgs; /* pages read in LU_PAGE_SIZE */
 	struct lmv_tgt_desc *tgt;
 
@@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 	if (rc != 0)
 		return rc;
 
-	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
-		  >> PAGE_CACHE_SHIFT;
+	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+		  >> PAGE_SHIFT;
 	nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
 	LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
 	LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 55dd8ef9525b..b91d3ff18b02 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1002,10 +1002,10 @@ restart_bulk:
 
 	/* NB req now owns desc and will free it when it gets freed */
 	for (i = 0; i < op_data->op_npages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	mdc_readdir_pack(req, op_data->op_offset,
-			 PAGE_CACHE_SIZE * op_data->op_npages,
+			 PAGE_SIZE * op_data->op_npages,
 			 &op_data->op_fid1);
 
 	ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@ restart_bulk:
 	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
 		CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
 		       req->rq_bulk->bd_nob_transferred,
-		       PAGE_CACHE_SIZE * op_data->op_npages);
+		       PAGE_SIZE * op_data->op_npages);
 		ptlrpc_req_finished(req);
 		return -EPROTO;
 	}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index b7dc87248032..3924b095bfb0 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
 }
 
 enum {
-	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
 	CONFIG_READ_NRPAGES = 4
 };
 
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
 	LASSERT(cfg->cfg_instance);
 	LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
-	inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+	inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
 
-	pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
-	if (pos >= PAGE_CACHE_SIZE) {
+	pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+	if (pos >= PAGE_SIZE) {
 		kfree(inst);
 		return -E2BIG;
 	}
 
 	++pos;
 	buf = inst + pos;
-	bufsz = PAGE_CACHE_SIZE - pos;
+	bufsz = PAGE_SIZE - pos;
 
 	while (datalen > 0) {
 		int entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
 		/* Keep this swab for normal mixed endian handling. LU-1644 */
 		if (mne_swab)
 			lustre_swab_mgs_nidtbl_entry(entry);
-		if (entry->mne_length > PAGE_CACHE_SIZE) {
+		if (entry->mne_length > PAGE_SIZE) {
 			CERROR("MNE too large (%u)\n", entry->mne_length);
 			break;
 		}
@@ -1371,7 +1371,7 @@ again:
 	}
 	body->mcb_offset = cfg->cfg_last_idx + 1;
 	body->mcb_type = cld->cld_type;
-	body->mcb_bits = PAGE_CACHE_SHIFT;
+	body->mcb_bits = PAGE_SHIFT;
 	body->mcb_units = nrpages;
 
 	/* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@ again:
 	}
 
 	for (i = 0; i < nrpages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	ptlrpc_request_set_replen(req);
 	rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@ again:
 		goto out;
 	}
 
-	if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+	if (ealen > nrpages << PAGE_SHIFT) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -1439,7 +1439,7 @@ again:
 
 		ptr = kmap(pages[i]);
 		rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
-					     min_t(int, ealen, PAGE_CACHE_SIZE),
+					     min_t(int, ealen, PAGE_SIZE),
 					     mne_swab);
 		kunmap(pages[i]);
 		if (rc2 < 0) {
@@ -1448,7 +1448,7 @@ again:
 			break;
 		}
 
-		ealen -= PAGE_CACHE_SIZE;
+		ealen -= PAGE_SIZE;
 	}
 
 out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 231a2f26c693..394580016638 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
 	/*
 	 * XXX for now.
 	 */
-	return (loff_t)idx << PAGE_CACHE_SHIFT;
+	return (loff_t)idx << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 	/*
 	 * XXX for now.
 	 */
-	return offset >> PAGE_CACHE_SHIFT;
+	return offset >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-	return 1 << PAGE_CACHE_SHIFT;
+	return 1 << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
 
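cl_offset() and cl_index() are inverse shifts between a page index and a byte offset (the cl_object argument is unused for now, per the XXX comments). A userspace sketch of the round-trip, with toy_* stand-ins and PAGE_SHIFT assumed to be 12:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* stand-in for the kernel constant */

    /* Equivalent shifts to cl_offset()/cl_index() above. */
    static long long toy_cl_offset(unsigned long idx)
    {
            return (long long)idx << PAGE_SHIFT;
    }

    static unsigned long toy_cl_index(long long off)
    {
            return (unsigned long)(off >> PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long idx = 37;
            long long off = toy_cl_offset(idx);

            printf("page %lu starts at byte %lld\n", idx, off);
            printf("byte %lld lies in page %lu\n", off + 100,
                   toy_cl_index(off + 100));
            return 0;
    }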
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1a938e1376f9..c2cf015962dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
 		CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
 		ret = -EINVAL;
 	}
-	if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+	if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
 		CWARN("mask failed: u64val %llu >= %llu\n", u64val,
-		      (__u64)PAGE_CACHE_SIZE);
+		      (__u64)PAGE_SIZE);
 		ret = -EINVAL;
 	}
 
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
 	 * For clients with less memory, a larger fraction is needed
 	 * for other purposes (mostly for BGL).
 	 */
-	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+	if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
 		obd_max_dirty_pages = totalram_pages / 4;
 	else
 		obd_max_dirty_pages = totalram_pages / 2;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09b2b69..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
 #include "../../include/lustre/lustre_idl.h"
 
 #include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
 
 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 {
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 	if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
 		dst->i_blkbits = ffs(src->o_blksize) - 1;
 
-	if (dst->i_blkbits < PAGE_CACHE_SHIFT)
-		dst->i_blkbits = PAGE_CACHE_SHIFT;
+	if (dst->i_blkbits < PAGE_SHIFT)
+		dst->i_blkbits = PAGE_SHIFT;
 
 	/* allocation of space */
 	if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index fd333b9e968c..e6bf414a4444 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
 			char *buf)
 {
 	return sprintf(buf, "%ul\n",
-		       obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+		       obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
 }
 
 static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
 	if (rc)
 		return rc;
 
-	val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+	val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
 
 	if (val > ((totalram_pages / 10) * 9)) {
 		/* Somebody wants to assign too much memory to dirty pages */
 		return -EINVAL;
 	}
 
-	if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+	if (val < 4 << (20 - PAGE_SHIFT)) {
 		/* Less than 4 Mb for dirty cache is also bad */
 		return -EINVAL;
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 65a4746c89ca..978568ada8e9 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -840,8 +840,8 @@ static int lu_htable_order(void)
 
 #if BITS_PER_LONG == 32
 	/* limit hashtable size for lowmem systems to low RAM */
-	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
-		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+	if (cache_size > 1 << (30 - PAGE_SHIFT))
+		cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
 #endif
 
 	/* clear off unreasonable cache setting. */
@@ -853,7 +853,7 @@ static int lu_htable_order(void)
 		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
 	}
 	cache_size = cache_size / 100 * lu_cache_percent *
-		(PAGE_CACHE_SIZE / 1024);
+		(PAGE_SIZE / 1024);
 
 	for (bits = 1; (1 << bits) < cache_size; ++bits) {
 		;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 64ffe243f870..1e83669c204d 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
 	struct page *vmpage = ep->ep_vmpage;
 
 	atomic_dec(&eco->eo_npages);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static int echo_page_prep(const struct lu_env *env,
@@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
 	struct echo_object *eco = cl2echo_obj(obj);
 
 	ep->ep_vmpage = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 	mutex_init(&ep->ep_lock);
 	cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
 	atomic_inc(&eco->eo_npages);
@@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
 	LASSERT(rc == 0);
 
 	rc = cl_echo_enqueue0(env, eco, offset,
-			      offset + npages * PAGE_CACHE_SIZE - 1,
+			      offset + npages * PAGE_SIZE - 1,
 			      rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
 			      CEF_NEVER);
 	if (rc < 0)
@@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
 	int delta;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		if (rw == OBD_BRW_WRITE) {
 			stripe_off = offset + delta;
 			stripe_id = id;
@@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
 	int rc2;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		stripe_off = offset + delta;
 		stripe_id = id;
 
@@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 		return -EINVAL;
 
 	/* XXX think again with misaligned I/O */
-	npages = count >> PAGE_CACHE_SHIFT;
+	npages = count >> PAGE_SHIFT;
 
 	if (rw == OBD_BRW_WRITE)
 		brw_flags = OBD_BRW_ASYNC;
@@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 
 	for (i = 0, pgp = pga, off = offset;
 	     i < npages;
-	     i++, pgp++, off += PAGE_CACHE_SIZE) {
+	     i++, pgp++, off += PAGE_SIZE) {
 
 		LASSERT(!pgp->pg); /* for cleanup */
 
@@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 			goto out;
 
 		pages[i] = pgp->pg;
-		pgp->count = PAGE_CACHE_SIZE;
+		pgp->count = PAGE_SIZE;
 		pgp->off = off;
 		pgp->flag = brw_flags;
 
@@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
 	if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
 		return -EINVAL;
 
-	npages = batch >> PAGE_CACHE_SHIFT;
-	tot_pages = count >> PAGE_CACHE_SHIFT;
+	npages = batch >> PAGE_SHIFT;
+	tot_pages = count >> PAGE_SHIFT;
 
 	lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
 	rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
 		if (tot_pages < npages)
 			npages = tot_pages;
 
-		for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+		for (i = 0; i < npages; i++, off += PAGE_SIZE) {
 			rnb[i].offset = off;
-			rnb[i].len = PAGE_CACHE_SIZE;
+			rnb[i].len = PAGE_SIZE;
 			rnb[i].flags = brw_flags;
 		}
 
@@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
 {
 	LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
 
-	LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+	LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
 
 	return echo_client_init();
 }
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 57c43c506ef2..a3358c39b2f1 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number <= 0 ||
-	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
 	    pages_number > totalram_pages / 4) /* 1/4 of RAM */
 		return -ERANGE;
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+	cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
 	osc_wake_cache_waiters(cli);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
 {
 	struct obd_device *dev = m->private;
 	struct client_obd *cli = &dev->u.cli;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 
 	seq_printf(m,
 		   "used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
 
 	/* if the max_pages is specified in bytes, convert to pages */
 	if (val >= ONE_MB_BRW_SIZE)
-		val >>= PAGE_CACHE_SHIFT;
+		val >>= PAGE_SHIFT;
 
-	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
 	/* max_pages_per_rpc must be chunk aligned */
 	val = (val + ~chunk_mask) & chunk_mask;
-	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
 		return -ERANGE;
 	}
 	client_obd_list_lock(&cli->cl_loi_list_lock);
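The rounding in max_pages_per_rpc_store() works in pages-per-chunk units: chunk_mask clears the low bits, and `(val + ~chunk_mask) & chunk_mask` rounds val up to the next chunk boundary. A sketch with assumed values (cl_chunkbits = 16 and PAGE_SHIFT = 12, i.e. a hypothetical 16 pages per chunk):

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* stand-in */

    int main(void)
    {
            int chunkbits = 16; /* hypothetical cl_chunkbits: 64 KiB chunks */
            unsigned long chunk_mask = ~((1UL << (chunkbits - PAGE_SHIFT)) - 1);
            unsigned long val = 21; /* requested max_pages_per_rpc */

            /* round up to a whole number of chunks, exactly as above:
             * ~chunk_mask is (pages-per-chunk - 1), so adding it and
             * masking rounds up. */
            unsigned long aligned = (val + ~chunk_mask) & chunk_mask;

            printf("%lu pages rounds up to %lu (chunks of %lu pages)\n",
                   val, aligned, 1UL << (chunkbits - PAGE_SHIFT));
            return 0;
    }

With these numbers, 21 pages rounds up to 32, two whole 16-page chunks.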
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 63363111380c..5f25bf83dcfc 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
544 return -ERANGE; 544 return -ERANGE;
545 545
546 LASSERT(cur->oe_osclock == victim->oe_osclock); 546 LASSERT(cur->oe_osclock == victim->oe_osclock);
547 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; 547 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
548 chunk_start = cur->oe_start >> ppc_bits; 548 chunk_start = cur->oe_start >> ppc_bits;
549 chunk_end = cur->oe_end >> ppc_bits; 549 chunk_end = cur->oe_end >> ppc_bits;
550 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && 550 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
647 lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); 647 lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
648 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); 648 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
649 649
650 LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); 650 LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
651 ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 651 ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
652 chunk_mask = ~((1 << ppc_bits) - 1); 652 chunk_mask = ~((1 << ppc_bits) - 1);
653 chunksize = 1 << cli->cl_chunkbits; 653 chunksize = 1 << cli->cl_chunkbits;
654 chunk = index >> ppc_bits; 654 chunk = index >> ppc_bits;
@@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
871 871
872 if (!sent) { 872 if (!sent) {
873 lost_grant = ext->oe_grants; 873 lost_grant = ext->oe_grants;
874 } else if (blocksize < PAGE_CACHE_SIZE && 874 } else if (blocksize < PAGE_SIZE &&
875 last_count != PAGE_CACHE_SIZE) { 875 last_count != PAGE_SIZE) {
876 /* For short writes we shouldn't count parts of pages that 876 /* For short writes we shouldn't count parts of pages that
877 * span a whole chunk on the OST side, or our accounting goes 877 * span a whole chunk on the OST side, or our accounting goes
878 * wrong. Should match the code in filter_grant_check. 878 * wrong. Should match the code in filter_grant_check.
@@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
884 if (end) 884 if (end)
885 count += blocksize - end; 885 count += blocksize - end;
886 886
887 lost_grant = PAGE_CACHE_SIZE - count; 887 lost_grant = PAGE_SIZE - count;
888 } 888 }
889 if (ext->oe_grants > 0) 889 if (ext->oe_grants > 0)
890 osc_free_grant(cli, nr_pages, lost_grant); 890 osc_free_grant(cli, nr_pages, lost_grant);
@@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
967 struct osc_async_page *oap; 967 struct osc_async_page *oap;
968 struct osc_async_page *tmp; 968 struct osc_async_page *tmp;
969 int pages_in_chunk = 0; 969 int pages_in_chunk = 0;
970 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 970 int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
971 __u64 trunc_chunk = trunc_index >> ppc_bits; 971 __u64 trunc_chunk = trunc_index >> ppc_bits;
972 int grants = 0; 972 int grants = 0;
973 int nr_pages = 0; 973 int nr_pages = 0;
@@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
1125 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { 1125 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
1126 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); 1126 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
1127 LASSERT(last->oap_count > 0); 1127 LASSERT(last->oap_count > 0);
1128 LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); 1128 LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
1129 last->oap_async_flags |= ASYNC_COUNT_STABLE; 1129 last->oap_async_flags |= ASYNC_COUNT_STABLE;
1130 } 1130 }
1131 1131
@@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
1134 */ 1134 */
1135 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { 1135 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1136 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { 1136 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
1137 oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; 1137 oap->oap_count = PAGE_SIZE - oap->oap_page_off;
1138 oap->oap_async_flags |= ASYNC_COUNT_STABLE; 1138 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1139 } 1139 }
1140 } 1140 }
@@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
1158 struct osc_object *obj = ext->oe_obj; 1158 struct osc_object *obj = ext->oe_obj;
1159 struct client_obd *cli = osc_cli(obj); 1159 struct client_obd *cli = osc_cli(obj);
1160 struct osc_extent *next; 1160 struct osc_extent *next;
1161 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 1161 int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
1162 pgoff_t chunk = index >> ppc_bits; 1162 pgoff_t chunk = index >> ppc_bits;
1163 pgoff_t end_chunk; 1163 pgoff_t end_chunk;
1164 pgoff_t end_index; 1164 pgoff_t end_index;
@@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
1293 return 0; 1293 return 0;
1294 else if (cl_offset(obj, page->cp_index + 1) > kms) 1294 else if (cl_offset(obj, page->cp_index + 1) > kms)
1295 /* catch sub-page write at end of file */ 1295 /* catch sub-page write at end of file */
1296 return kms % PAGE_CACHE_SIZE; 1296 return kms % PAGE_SIZE;
1297 else 1297 else
1298 return PAGE_CACHE_SIZE; 1298 return PAGE_SIZE;
1299} 1299}
1300 1300
1301static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, 1301static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
1376 assert_spin_locked(&cli->cl_loi_list_lock.lock); 1376 assert_spin_locked(&cli->cl_loi_list_lock.lock);
1377 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); 1377 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
1378 atomic_inc(&obd_dirty_pages); 1378 atomic_inc(&obd_dirty_pages);
1379 cli->cl_dirty += PAGE_CACHE_SIZE; 1379 cli->cl_dirty += PAGE_SIZE;
1380 pga->flag |= OBD_BRW_FROM_GRANT; 1380 pga->flag |= OBD_BRW_FROM_GRANT;
1381 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", 1381 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
1382 PAGE_CACHE_SIZE, pga, pga->pg); 1382 PAGE_SIZE, pga, pga->pg);
1383 osc_update_next_shrink(cli); 1383 osc_update_next_shrink(cli);
1384} 1384}
1385 1385
@@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
1396 1396
1397 pga->flag &= ~OBD_BRW_FROM_GRANT; 1397 pga->flag &= ~OBD_BRW_FROM_GRANT;
1398 atomic_dec(&obd_dirty_pages); 1398 atomic_dec(&obd_dirty_pages);
1399 cli->cl_dirty -= PAGE_CACHE_SIZE; 1399 cli->cl_dirty -= PAGE_SIZE;
1400 if (pga->flag & OBD_BRW_NOCACHE) { 1400 if (pga->flag & OBD_BRW_NOCACHE) {
1401 pga->flag &= ~OBD_BRW_NOCACHE; 1401 pga->flag &= ~OBD_BRW_NOCACHE;
1402 atomic_dec(&obd_dirty_transit_pages); 1402 atomic_dec(&obd_dirty_transit_pages);
1403 cli->cl_dirty_transit -= PAGE_CACHE_SIZE; 1403 cli->cl_dirty_transit -= PAGE_SIZE;
1404 } 1404 }
1405} 1405}
1406 1406
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
1456 * used, we should return these grants to OST. There're two cases where grants 1456 * used, we should return these grants to OST. There're two cases where grants
1457 * can be lost: 1457 * can be lost:
1458 * 1. truncate; 1458 * 1. truncate;
1459 * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was 1459 * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
1460 * written. In this case OST may use less chunks to serve this partial 1460 * written. In this case OST may use less chunks to serve this partial
1461 * write. OSTs don't actually know the page size on the client side. so 1461 * write. OSTs don't actually know the page size on the client side. so
1462 * clients have to calculate lost grant by the blocksize on the OST. 1462 * clients have to calculate lost grant by the blocksize on the OST.
@@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1469 1469
1470 client_obd_list_lock(&cli->cl_loi_list_lock); 1470 client_obd_list_lock(&cli->cl_loi_list_lock);
1471 atomic_sub(nr_pages, &obd_dirty_pages); 1471 atomic_sub(nr_pages, &obd_dirty_pages);
1472 cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; 1472 cli->cl_dirty -= nr_pages << PAGE_SHIFT;
1473 cli->cl_lost_grant += lost_grant; 1473 cli->cl_lost_grant += lost_grant;
1474 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { 1474 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
1475 /* borrow some grant from truncate to avoid the case that 1475 /* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
1512 if (rc < 0) 1512 if (rc < 0)
1513 return 0; 1513 return 0;
1514 1514
1515 if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && 1515 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1516 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { 1516 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
1517 osc_consume_write_grant(cli, &oap->oap_brw_page); 1517 osc_consume_write_grant(cli, &oap->oap_brw_page);
1518 if (transient) { 1518 if (transient) {
1519 cli->cl_dirty_transit += PAGE_CACHE_SIZE; 1519 cli->cl_dirty_transit += PAGE_SIZE;
1520 atomic_inc(&obd_dirty_transit_pages); 1520 atomic_inc(&obd_dirty_transit_pages);
1521 oap->oap_brw_flags |= OBD_BRW_NOCACHE; 1521 oap->oap_brw_flags |= OBD_BRW_NOCACHE;
1522 } 1522 }
@@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1562 * of queued writes and create a discontiguous rpc stream 1562 * of queued writes and create a discontiguous rpc stream
1563 */ 1563 */
1564 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || 1564 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
1565 cli->cl_dirty_max < PAGE_CACHE_SIZE || 1565 cli->cl_dirty_max < PAGE_SIZE ||
1566 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { 1566 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
1567 rc = -EDQUOT; 1567 rc = -EDQUOT;
1568 goto out; 1568 goto out;
@@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
1632 1632
1633 ocw->ocw_rc = -EDQUOT; 1633 ocw->ocw_rc = -EDQUOT;
1634 /* we can't dirty more */ 1634 /* we can't dirty more */
1635 if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || 1635 if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
1636 (atomic_read(&obd_dirty_pages) + 1 > 1636 (atomic_read(&obd_dirty_pages) + 1 >
1637 obd_max_dirty_pages)) { 1637 obd_max_dirty_pages)) {
1638 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", 1638 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
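The PAGE_CACHE_* names removed throughout this series were, by this point, pure aliases of the base page macros, which is why the substitution is mechanical. A sketch of the historical aliases (as defined in include/linux/pagemap.h before their removal):

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)		get_page(page)
	#define page_cache_release(page)	put_page(page)

Dropping the aliases changes no generated code; it only removes the misleading suggestion that the page cache could use a granularity other than PAGE_SIZE.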
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index d720b1a1c18c..ce9ddd515f64 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
410 int result; 410 int result;
411 411
412 opg->ops_from = 0; 412 opg->ops_from = 0;
413 opg->ops_to = PAGE_CACHE_SIZE; 413 opg->ops_to = PAGE_SIZE;
414 414
415 result = osc_prep_async_page(osc, opg, vmpage, 415 result = osc_prep_async_page(osc, opg, vmpage,
416 cl_offset(obj, page->cp_index)); 416 cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
487/* LRU pages are freed in batch mode. OSC should at least free this 487/* LRU pages are freed in batch mode. OSC should at least free this
488 * number of pages to avoid running out of LRU budget, and.. 488 * number of pages to avoid running out of LRU budget, and..
489 */ 489 */
490static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ 490static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
491/* free at most this number, otherwise it will take too long to finish. */ 491/* free at most this number, otherwise it will take too long to finish. */
492static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ 492static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
493 493
494/* Check if we can free LRU slots from this OSC. If there exists LRU waiters, 494/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
495 * we should free slots aggressively. In this way, slots are freed in a steady 495 * we should free slots aggressively. In this way, slots are freed in a steady
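For concreteness, with the common 4 KiB page size these bounds work out as:

	/* With PAGE_SHIFT == 12, "N << (20 - PAGE_SHIFT)" turns N MiB
	 * into a page count:
	 *   lru_shrink_min = 2  << 8 =  512 pages ( 2 MiB)
	 *   lru_shrink_max = 32 << 8 = 8192 pages (32 MiB)
	 */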
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 74805f1ae888..30526ebcad04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
826 oa->o_undirty = 0; 826 oa->o_undirty = 0;
827 } else { 827 } else {
828 long max_in_flight = (cli->cl_max_pages_per_rpc << 828 long max_in_flight = (cli->cl_max_pages_per_rpc <<
829 PAGE_CACHE_SHIFT)* 829 PAGE_SHIFT)*
830 (cli->cl_max_rpcs_in_flight + 1); 830 (cli->cl_max_rpcs_in_flight + 1);
831 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); 831 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
832 } 832 }
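As a worked example of the undirty estimate (the figures are illustrative assumptions, not values from this patch): with 256 pages per RPC, 4 KiB pages and 8 RPCs in flight,

	max_in_flight = (256 << 12) * (8 + 1) = 1 MiB * 9 = 9 MiB

so the client announces room for at least one full RPC pipeline of dirty data.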
@@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
909static int osc_shrink_grant(struct client_obd *cli) 909static int osc_shrink_grant(struct client_obd *cli)
910{ 910{
911 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * 911 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
912 (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); 912 (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
913 913
914 client_obd_list_lock(&cli->cl_loi_list_lock); 914 client_obd_list_lock(&cli->cl_loi_list_lock);
915 if (cli->cl_avail_grant <= target_bytes) 915 if (cli->cl_avail_grant <= target_bytes)
916 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 916 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
917 client_obd_list_unlock(&cli->cl_loi_list_lock); 917 client_obd_list_unlock(&cli->cl_loi_list_lock);
918 918
919 return osc_shrink_grant_to_target(cli, target_bytes); 919 return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
929 * We don't want to shrink below a single RPC, as that will negatively 929 * We don't want to shrink below a single RPC, as that will negatively
930 * impact block allocation and long-term performance. 930 * impact block allocation and long-term performance.
931 */ 931 */
932 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) 932 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
933 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 933 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
934 934
935 if (target_bytes >= cli->cl_avail_grant) { 935 if (target_bytes >= cli->cl_avail_grant) {
936 client_obd_list_unlock(&cli->cl_loi_list_lock); 936 client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
978 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) 978 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
979 * Keep comment here so that it can be found by searching. 979 * Keep comment here so that it can be found by searching.
980 */ 980 */
981 int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 981 int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
982 982
983 if (client->cl_import->imp_state == LUSTRE_IMP_FULL && 983 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
984 client->cl_avail_grant > brw_size) 984 client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1052 } 1052 }
1053 1053
1054 /* determine the appropriate chunk size used by osc_extent. */ 1054 /* determine the appropriate chunk size used by osc_extent. */
1055 cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); 1055 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
1056 client_obd_list_unlock(&cli->cl_loi_list_lock); 1056 client_obd_list_unlock(&cli->cl_loi_list_lock);
1057 1057
1058 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", 1058 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1317 LASSERT(pg->count > 0); 1317 LASSERT(pg->count > 0);
1318 /* make sure there is no gap in the middle of page array */ 1318 /* make sure there is no gap in the middle of page array */
1319 LASSERTF(page_count == 1 || 1319 LASSERTF(page_count == 1 ||
1320 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && 1320 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1321 ergo(i > 0 && i < page_count - 1, 1321 ergo(i > 0 && i < page_count - 1,
1322 poff == 0 && pg->count == PAGE_CACHE_SIZE) && 1322 poff == 0 && pg->count == PAGE_SIZE) &&
1323 ergo(i == page_count - 1, poff == 0)), 1323 ergo(i == page_count - 1, poff == 0)),
1324 "i: %d/%d pg: %p off: %llu, count: %u\n", 1324 "i: %d/%d pg: %p off: %llu, count: %u\n",
1325 i, page_count, pg, pg->off, pg->count); 1325 i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1877 oap->oap_count; 1877 oap->oap_count;
1878 else 1878 else
1879 LASSERT(oap->oap_page_off + oap->oap_count == 1879 LASSERT(oap->oap_page_off + oap->oap_count ==
1880 PAGE_CACHE_SIZE); 1880 PAGE_SIZE);
1881 } 1881 }
1882 } 1882 }
1883 1883
@@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1993 tmp->oap_request = ptlrpc_request_addref(req); 1993 tmp->oap_request = ptlrpc_request_addref(req);
1994 1994
1995 client_obd_list_lock(&cli->cl_loi_list_lock); 1995 client_obd_list_lock(&cli->cl_loi_list_lock);
1996 starting_offset >>= PAGE_CACHE_SHIFT; 1996 starting_offset >>= PAGE_SHIFT;
1997 if (cmd == OBD_BRW_READ) { 1997 if (cmd == OBD_BRW_READ) {
1998 cli->cl_r_in_flight++; 1998 cli->cl_r_in_flight++;
1999 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); 1999 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@ out:
2790 CFS_PAGE_MASK; 2790 CFS_PAGE_MASK;
2791 2791
2792 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= 2792 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2793 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) 2793 fm_key->fiemap.fm_start + PAGE_SIZE - 1)
2794 policy.l_extent.end = OBD_OBJECT_EOF; 2794 policy.l_extent.end = OBD_OBJECT_EOF;
2795 else 2795 else
2796 policy.l_extent.end = (fm_key->fiemap.fm_start + 2796 policy.l_extent.end = (fm_key->fiemap.fm_start +
2797 fm_key->fiemap.fm_length + 2797 fm_key->fiemap.fm_length +
2798 PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; 2798 PAGE_SIZE - 1) & CFS_PAGE_MASK;
2799 2799
2800 ostid_build_res_name(&fm_key->oa.o_oi, &res_id); 2800 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2801 mode = ldlm_lock_match(exp->exp_obd->obd_namespace, 2801 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 1b7673eec4d7..cf3ac8eee9ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
174 LASSERT(page); 174 LASSERT(page);
175 LASSERT(pageoffset >= 0); 175 LASSERT(pageoffset >= 0);
176 LASSERT(len > 0); 176 LASSERT(len > 0);
177 LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); 177 LASSERT(pageoffset + len <= PAGE_SIZE);
178 178
179 desc->bd_nob += len; 179 desc->bd_nob += len;
180 180
181 if (pin) 181 if (pin)
182 page_cache_get(page); 182 get_page(page);
183 183
184 ptlrpc_add_bulk_page(desc, page, pageoffset, len); 184 ptlrpc_add_bulk_page(desc, page, pageoffset, len);
185} 185}
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
206 206
207 if (unpin) { 207 if (unpin) {
208 for (i = 0; i < desc->bd_iov_count; i++) 208 for (i = 0; i < desc->bd_iov_count; i++)
209 page_cache_release(desc->bd_iov[i].kiov_page); 209 put_page(desc->bd_iov[i].kiov_page);
210 } 210 }
211 211
212 kfree(desc); 212 kfree(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index b4eddf291269..cd94fed0ffdf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1092,7 +1092,7 @@ finish:
1092 1092
1093 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) 1093 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1094 cli->cl_max_pages_per_rpc = 1094 cli->cl_max_pages_per_rpc =
1095 min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, 1095 min(ocd->ocd_brw_size >> PAGE_SHIFT,
1096 cli->cl_max_pages_per_rpc); 1096 cli->cl_max_pages_per_rpc);
1097 else if (imp->imp_connect_op == MDS_CONNECT || 1097 else if (imp->imp_connect_op == MDS_CONNECT ||
1098 imp->imp_connect_op == MGS_CONNECT) 1098 imp->imp_connect_op == MGS_CONNECT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cee04efb6fb5..c95a91ce26c9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
308 * hose a kernel by allowing the request history to grow too 308 * hose a kernel by allowing the request history to grow too
309 * far. 309 * far.
310 */ 310 */
311 bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 311 bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
312 if (val > totalram_pages / (2 * bufpages)) 312 if (val > totalram_pages / (2 * bufpages))
313 return -ERANGE; 313 return -ERANGE;
314 314
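The bufpages expression is the usual round-up-to-pages idiom; an equivalent, arguably clearer spelling would be:

	bufpages = DIV_ROUND_UP(svc->srv_buf_size, PAGE_SIZE);

With 4 KiB pages, a 4096-byte buffer needs one page and a 4097-byte buffer needs two, so the totalram_pages bound is checked against whole pages.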
@@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
1226 const char prefix[] = "connection="; 1226 const char prefix[] = "connection=";
1227 const int prefix_len = sizeof(prefix) - 1; 1227 const int prefix_len = sizeof(prefix) - 1;
1228 1228
1229 if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) 1229 if (count > PAGE_SIZE - 1 || count <= prefix_len)
1230 return -EINVAL; 1230 return -EINVAL;
1231 1231
1232 kbuf = kzalloc(count + 1, GFP_NOFS); 1232 kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 5f27d9c2e4ef..30d9a164e52d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
195 } 195 }
196 196
197 list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { 197 list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
198 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, 198 LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
199 "req %p bad\n", req); 199 "req %p bad\n", req);
200 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); 200 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
201 if (!ptlrpc_no_resend(req)) 201 if (!ptlrpc_no_resend(req))
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 72d5b9bf5b29..d3872b8c9a6e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -58,7 +58,7 @@
58 * bulk encryption page pools * 58 * bulk encryption page pools *
59 ****************************************/ 59 ****************************************/
60 60
61#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 61#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
62#define PAGES_PER_POOL (POINTERS_PER_PAGE) 62#define PAGES_PER_POOL (POINTERS_PER_PAGE)
63 63
64#define IDLE_IDX_MAX (100) 64#define IDLE_IDX_MAX (100)
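On a 64-bit machine with 4 KiB pages this evaluates to:

	/* POINTERS_PER_PAGE = 4096 / sizeof(void *) = 4096 / 8 = 512,
	 * so one descriptor page tracks 512 pool pages (2 MiB of bulk
	 * buffer) per pool.
	 */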
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
172static int vpfe_update_pipe_state(struct vpfe_video_device *video) 172static int vpfe_update_pipe_state(struct vpfe_video_device *video)
173{ 173{
174 struct vpfe_pipeline *pipe = &video->pipe; 174 struct vpfe_pipeline *pipe = &video->pipe;
175 int ret;
175 176
176 if (vpfe_prepare_pipeline(video)) 177 ret = vpfe_prepare_pipeline(video);
177 return vpfe_prepare_pipeline(video); 178 if (ret)
179 return ret;
178 180
179 /* 181 /*
180 * Find out if there is any input video 182 * Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
182 */ 184 */
183 if (pipe->input_num == 0) { 185 if (pipe->input_num == 0) {
184 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; 186 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
185 if (vpfe_update_current_ext_subdev(video)) { 187 ret = vpfe_update_current_ext_subdev(video);
188 if (ret) {
186 pr_err("Invalid external subdev\n"); 189 pr_err("Invalid external subdev\n");
187 return vpfe_update_current_ext_subdev(video); 190 return ret;
188 } 191 }
189 } else { 192 } else {
190 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; 193 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
667 struct v4l2_subdev *subdev; 670 struct v4l2_subdev *subdev;
668 struct v4l2_format format; 671 struct v4l2_format format;
669 struct media_pad *remote; 672 struct media_pad *remote;
673 int ret;
670 674
671 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); 675 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
672 676
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
695 sd_fmt.pad = remote->index; 699 sd_fmt.pad = remote->index;
696 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 700 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
697 /* get output format of remote subdev */ 701 /* get output format of remote subdev */
698 if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { 702 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
703 if (ret) {
699 v4l2_err(&vpfe_dev->v4l2_dev, 704 v4l2_err(&vpfe_dev->v4l2_dev,
700 "invalid remote subdev for video node\n"); 705 "invalid remote subdev for video node\n");
701 return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); 706 return ret;
702 } 707 }
703 /* convert to pix format */ 708 /* convert to pix format */
704 mbus.code = sd_fmt.format.code; 709 mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
725 struct vpfe_video_device *video = video_drvdata(file); 730 struct vpfe_video_device *video = video_drvdata(file);
726 struct vpfe_device *vpfe_dev = video->vpfe_dev; 731 struct vpfe_device *vpfe_dev = video->vpfe_dev;
727 struct v4l2_format format; 732 struct v4l2_format format;
733 int ret;
728 734
729 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); 735 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
730 /* If streaming is started, return error */ 736 /* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
733 return -EBUSY; 739 return -EBUSY;
734 } 740 }
735 /* get adjacent subdev's output pad format */ 741 /* get adjacent subdev's output pad format */
736 if (__vpfe_video_get_format(video, &format)) 742 ret = __vpfe_video_get_format(video, &format);
737 return __vpfe_video_get_format(video, &format); 743 if (ret)
744 return ret;
738 *fmt = format; 745 *fmt = format;
739 video->fmt = *fmt; 746 video->fmt = *fmt;
740 return 0; 747 return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
757 struct vpfe_video_device *video = video_drvdata(file); 764 struct vpfe_video_device *video = video_drvdata(file);
758 struct vpfe_device *vpfe_dev = video->vpfe_dev; 765 struct vpfe_device *vpfe_dev = video->vpfe_dev;
759 struct v4l2_format format; 766 struct v4l2_format format;
767 int ret;
760 768
761 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); 769 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
762 /* get adjacent subdev's output pad format */ 770 /* get adjacent subdev's output pad format */
763 if (__vpfe_video_get_format(video, &format)) 771 ret = __vpfe_video_get_format(video, &format);
764 return __vpfe_video_get_format(video, &format); 772 if (ret)
773 return ret;
765 774
766 *fmt = format; 775 *fmt = format;
767 return 0; 776 return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
838 847
839 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); 848 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
840 849
841 if (mutex_lock_interruptible(&video->lock)) 850 ret = mutex_lock_interruptible(&video->lock);
842 return mutex_lock_interruptible(&video->lock); 851 if (ret)
852 return ret;
843 /* 853 /*
844 * If streaming is started return device busy 854 * If streaming is started return device busy
845 * error 855 * error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
940 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); 950 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
941 951
942 /* Call decoder driver function to set the standard */ 952 /* Call decoder driver function to set the standard */
943 if (mutex_lock_interruptible(&video->lock)) 953 ret = mutex_lock_interruptible(&video->lock);
944 return mutex_lock_interruptible(&video->lock); 954 if (ret)
955 return ret;
945 sdinfo = video->current_ext_subdev; 956 sdinfo = video->current_ext_subdev;
946 /* If streaming is started, return device busy error */ 957 /* If streaming is started, return device busy error */
947 if (video->started) { 958 if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1327 return -EINVAL; 1338 return -EINVAL;
1328 } 1339 }
1329 1340
1330 if (mutex_lock_interruptible(&video->lock)) 1341 ret = mutex_lock_interruptible(&video->lock);
1331 return mutex_lock_interruptible(&video->lock); 1342 if (ret)
1343 return ret;
1332 1344
1333 if (video->io_usrs != 0) { 1345 if (video->io_usrs != 0) {
1334 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); 1346 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1354 q->buf_struct_size = sizeof(struct vpfe_cap_buffer); 1366 q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
1355 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1367 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1356 1368
1357 if (vb2_queue_init(q)) { 1369 ret = vb2_queue_init(q);
1370 if (ret) {
1358 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); 1371 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
1359 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); 1372 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
1360 return vb2_queue_init(q); 1373 return ret;
1361 } 1374 }
1362 1375
1363 fh->io_allowed = 1; 1376 fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
1533 return -EINVAL; 1546 return -EINVAL;
1534 } 1547 }
1535 1548
1536 if (mutex_lock_interruptible(&video->lock)) 1549 ret = mutex_lock_interruptible(&video->lock);
1537 return mutex_lock_interruptible(&video->lock); 1550 if (ret)
1551 return ret;
1538 1552
1539 vpfe_stop_capture(video); 1553 vpfe_stop_capture(video);
1540 ret = vb2_streamoff(&video->buffer_queue, buf_type); 1554 ret = vb2_streamoff(&video->buffer_queue, buf_type);
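All of the vpfe changes above fix the same anti-pattern: calling a function in the condition and then calling it again to produce the return value. The second call repeats the work and may not fail the same way. Reduced to a minimal sketch:

	/* before: two calls, duplicated work, possibly different results */
	if (vpfe_prepare_pipeline(video))
		return vpfe_prepare_pipeline(video);

	/* after: call once, cache the result */
	ret = vpfe_prepare_pipeline(video);
	if (ret)
		return ret;

For mutex_lock_interruptible() the old form was an outright bug: if the first call was interrupted, the retry could acquire the mutex and return 0, leaving the function to exit "successfully" while still holding the lock.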
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
new file mode 100644
index 000000000000..d277f048789e
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -0,0 +1,35 @@
1config FB_OLPC_DCON
2 tristate "One Laptop Per Child Display CONtroller support"
3 depends on OLPC && FB
4 depends on I2C
5 depends on (GPIO_CS5535 || GPIO_CS5535=n)
6 select BACKLIGHT_CLASS_DEVICE
7 ---help---
8 In order to support very low power operation, the XO laptop uses a
9 secondary Display CONtroller, or DCON. This secondary controller
10 is present in the video pipeline between the primary display
11 controller (integrated into the processor or chipset) and the LCD
12 panel. It allows the main processor/display controller to be
13 completely powered off while still retaining an image on the display.
14 This controller is only available on OLPC platforms. Unless you have
15 one of these platforms, you will want to say 'N'.
16
17config FB_OLPC_DCON_1
18 bool "OLPC XO-1 DCON support"
19 depends on FB_OLPC_DCON && GPIO_CS5535
20 default y
21 ---help---
22 Enable support for the DCON in XO-1 model laptops. The kernel
23 communicates with the DCON using model-specific code. If you
24 have an XO-1 (or if you're unsure what model you have), you should
25 say 'Y'.
26
27config FB_OLPC_DCON_1_5
28 bool "OLPC XO-1.5 DCON support"
29 depends on FB_OLPC_DCON && ACPI
30 default y
31 ---help---
32 Enable support for the DCON in XO-1.5 model laptops. The kernel
33 communicates with the DCON using model-specific code. If you
34 have an XO-1.5 (or if you're unsure what model you have), you
35 should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
new file mode 100644
index 000000000000..36c7e67fec20
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -0,0 +1,6 @@
1olpc-dcon-objs += olpc_dcon.o
2olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
3olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
4obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
5
6
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
new file mode 100644
index 000000000000..61c2e65ac354
--- /dev/null
+++ b/drivers/staging/olpc_dcon/TODO
@@ -0,0 +1,9 @@
1TODO:
2 - see if vx855 gpio API can be made similar enough to cs5535 so we can
3 share more code
4 - allow simultaneous XO-1 and XO-1.5 support
5
6Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
7copy:
8 Daniel Drake <dsd@laptop.org>
9 Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
new file mode 100644
index 000000000000..f45b2ef05f48
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -0,0 +1,813 @@
1/*
2 * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
3 *
4 * Copyright © 2006-2007 Red Hat, Inc.
5 * Copyright © 2006-2007 Advanced Micro Devices, Inc.
6 * Copyright © 2009 VIA Technology, Inc.
7 * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net>
8 *
9 * This program is free software. You can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/kernel.h>
17#include <linux/fb.h>
18#include <linux/console.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/module.h>
24#include <linux/backlight.h>
25#include <linux/device.h>
26#include <linux/uaccess.h>
27#include <linux/ctype.h>
28#include <linux/reboot.h>
29#include <linux/olpc-ec.h>
30#include <asm/tsc.h>
31#include <asm/olpc.h>
32
33#include "olpc_dcon.h"
34
35/* Module definitions */
36
37static ushort resumeline = 898;
38module_param(resumeline, ushort, 0444);
39
40static struct dcon_platform_data *pdata;
41
42/* I2C structures */
43
44/* Platform devices */
45static struct platform_device *dcon_device;
46
47static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
48
49static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
50{
51 return i2c_smbus_write_word_data(dcon->client, reg, val);
52}
53
54static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
55{
56 return i2c_smbus_read_word_data(dcon->client, reg);
57}
58
59/* ===== API functions - these are called by a variety of users ==== */
60
61static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
62{
63 u16 ver;
64 int rc = 0;
65
66 ver = dcon_read(dcon, DCON_REG_ID);
67 if ((ver >> 8) != 0xDC) {
68 pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
69 rc = -ENXIO;
70 goto err;
71 }
72
73 if (is_init) {
74 pr_info("Discovered DCON version %x\n", ver & 0xFF);
75 rc = pdata->init(dcon);
76 if (rc != 0) {
77 pr_err("Unable to init.\n");
78 goto err;
79 }
80 }
81
82 if (ver < 0xdc02) {
83 dev_err(&dcon->client->dev,
84 "DCON v1 is unsupported, giving up..\n");
85 rc = -ENODEV;
86 goto err;
87 }
88
89 /* SDRAM setup/hold time */
90 dcon_write(dcon, 0x3a, 0xc040);
91 dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
92 dcon_write(dcon, DCON_REG_MEM_OPT_A,
93 MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
94 dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
95
96 /* Colour swizzle, AA, no passthrough, backlight */
97 if (is_init) {
98 dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
99 MODE_CSWIZZLE | MODE_COL_AA;
100 }
101 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
102
103 /* Set the scanline to interrupt on during resume */
104 dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
105
106err:
107 return rc;
108}
109
110/*
111 * The smbus doesn't always come back due to what is believed to be
112 * hardware (power rail) bugs. For older models where this is known to
113 * occur, our solution is to attempt to wait for the bus to stabilize;
114 * if it doesn't happen, cut power to the dcon, repower it, and wait
115 * for the bus to stabilize. Rinse, repeat until we have a working
116 * smbus. For newer models, we simply BUG(); we want to know if this
117 * still happens despite the power fixes that have been made!
118 */
119static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
120{
121 unsigned long timeout;
122 u8 pm;
123 int x;
124
125power_up:
126 if (is_powered_down) {
127 pm = 1;
128 x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
129 if (x) {
130 pr_warn("unable to force dcon to power up: %d!\n", x);
131 return x;
132 }
133 usleep_range(10000, 11000); /* we'll be conservative */
134 }
135
136 pdata->bus_stabilize_wiggle();
137
138 for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
139 usleep_range(1000, 1100);
140 x = dcon_read(dcon, DCON_REG_ID);
141 }
142 if (x < 0) {
143 pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
144 BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
145 pm = 0;
146 olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
147 msleep(100);
148 is_powered_down = 1;
149 goto power_up; /* argh, stupid hardware.. */
150 }
151
152 if (is_powered_down)
153 return dcon_hw_init(dcon, 0);
154 return 0;
155}
156
157static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
158{
159 dcon->bl_val = level;
160 dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
161
162 /* Purposely turn off the backlight when we go to level 0 */
163 if (dcon->bl_val == 0) {
164 dcon->disp_mode &= ~MODE_BL_ENABLE;
165 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
166 } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
167 dcon->disp_mode |= MODE_BL_ENABLE;
168 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
169 }
170}
171
172/* Set the output type to either color or mono */
173static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
174{
175 if (dcon->mono == enable_mono)
176 return 0;
177
178 dcon->mono = enable_mono;
179
180 if (enable_mono) {
181 dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
182 dcon->disp_mode |= MODE_MONO_LUMA;
183 } else {
184 dcon->disp_mode &= ~(MODE_MONO_LUMA);
185 dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
186 }
187
188 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
189 return 0;
190}
191
192/* For now, this will be really stupid - we need to address how
193 * DCONLOAD works in a sleep and account for it accordingly
194 */
195
196static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
197{
198 int x;
199
200 /* Turn off the backlight and put the DCON to sleep */
201
202 if (dcon->asleep == sleep)
203 return;
204
205 if (!olpc_board_at_least(olpc_board(0xc2)))
206 return;
207
208 if (sleep) {
209 u8 pm = 0;
210
211 x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
212 if (x)
213 pr_warn("unable to force dcon to power down: %d!\n", x);
214 else
215 dcon->asleep = sleep;
216 } else {
217 /* Only re-enable the backlight if the backlight value is set */
218 if (dcon->bl_val != 0)
219 dcon->disp_mode |= MODE_BL_ENABLE;
220 x = dcon_bus_stabilize(dcon, 1);
221 if (x)
222 pr_warn("unable to reinit dcon hardware: %d!\n", x);
223 else
224 dcon->asleep = sleep;
225
226 /* Restore backlight */
227 dcon_set_backlight(dcon, dcon->bl_val);
228 }
229
230 /* We should turn off some stuff in the framebuffer - but what? */
231}
232
233/* the DCON seems to get confused if we change DCONLOAD too
234 * frequently -- i.e., approximately faster than frame time.
235 * normally we don't change it this fast, so in general we won't
236 * delay here.
237 */
238static void dcon_load_holdoff(struct dcon_priv *dcon)
239{
240 ktime_t delta_t, now;
241
242 while (1) {
243 now = ktime_get();
244 delta_t = ktime_sub(now, dcon->load_time);
245 if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
246 break;
247 mdelay(4);
248 }
249}
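The 20 ms threshold corresponds to one frame period at a 50 Hz refresh (1 / 50 Hz = 20 ms), the "approximately frame time" mentioned in the comment above; the loop polls in 4 ms steps until at least that long has elapsed since the last DCONLOAD change.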
250
251static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
252{
253 int err;
254
255 console_lock();
256 if (!lock_fb_info(dcon->fbinfo)) {
257 console_unlock();
258 dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
259 return false;
260 }
261
262 dcon->ignore_fb_events = true;
263 err = fb_blank(dcon->fbinfo,
264 blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
265 dcon->ignore_fb_events = false;
266 unlock_fb_info(dcon->fbinfo);
267 console_unlock();
268
269 if (err) {
270 dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
271 blank ? "" : "un");
272 return false;
273 }
274 return true;
275}
276
277/* Set the source of the display (CPU or DCON) */
278static void dcon_source_switch(struct work_struct *work)
279{
280 struct dcon_priv *dcon = container_of(work, struct dcon_priv,
281 switch_source);
282 int source = dcon->pending_src;
283
284 if (dcon->curr_src == source)
285 return;
286
287 dcon_load_holdoff(dcon);
288
289 dcon->switched = false;
290
291 switch (source) {
292 case DCON_SOURCE_CPU:
293 pr_info("dcon_source_switch to CPU\n");
294 /* Enable the scanline interrupt bit */
295 if (dcon_write(dcon, DCON_REG_MODE,
296 dcon->disp_mode | MODE_SCAN_INT))
297 pr_err("couldn't enable scanline interrupt!\n");
298 else
299 /* Wait up to one second for the scanline interrupt */
300 wait_event_timeout(dcon->waitq, dcon->switched, HZ);
301
302 if (!dcon->switched)
303 pr_err("Timeout entering CPU mode; expect a screen glitch.\n");
304
305 /* Turn off the scanline interrupt */
306 if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
307 pr_err("couldn't disable scanline interrupt!\n");
308
309 /*
310 * Ideally we'd like to disable interrupts here so that the
311 * fb unblanking and DCON turn on happen at a known time value;
312 * however, we can't do that right now with fb_blank
313 * messing with semaphores.
314 *
315 * For now, we just hope..
316 */
317 if (!dcon_blank_fb(dcon, false)) {
318 pr_err("Failed to enter CPU mode\n");
319 dcon->pending_src = DCON_SOURCE_DCON;
320 return;
321 }
322
323 /* And turn off the DCON */
324 pdata->set_dconload(1);
325 dcon->load_time = ktime_get();
326
327 pr_info("The CPU has control\n");
328 break;
329 case DCON_SOURCE_DCON:
330 {
331 ktime_t delta_t;
332
333 pr_info("dcon_source_switch to DCON\n");
334
335 /* Clear DCONLOAD - this implies that the DCON is in control */
336 pdata->set_dconload(0);
337 dcon->load_time = ktime_get();
338
339 wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);
340
341 if (!dcon->switched) {
342 pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
343 } else {
344 /* sometimes the DCON doesn't follow its own rules,
345 * and doesn't wait for two vsync pulses before
346 * ack'ing the frame load with an IRQ. the result
347 * is that the display shows the *previously*
348 * loaded frame. we can detect this by looking at
349 * the time between asserting DCONLOAD and the IRQ --
350 * if it's less than 20msec, then the DCON couldn't
351 * have seen two VSYNC pulses. in that case we
352 * deassert and reassert, and hope for the best.
353 * see http://dev.laptop.org/ticket/9664
354 */
355 delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
356 if (dcon->switched && ktime_to_ns(delta_t)
357 < NSEC_PER_MSEC * 20) {
358 pr_err("missed loading, retrying\n");
359 pdata->set_dconload(1);
360 mdelay(41);
361 pdata->set_dconload(0);
362 dcon->load_time = ktime_get();
363 mdelay(41);
364 }
365 }
366
367 dcon_blank_fb(dcon, true);
368 pr_info("The DCON has control\n");
369 break;
370 }
371 default:
372 BUG();
373 }
374
375 dcon->curr_src = source;
376}
377
378static void dcon_set_source(struct dcon_priv *dcon, int arg)
379{
380 if (dcon->pending_src == arg)
381 return;
382
383 dcon->pending_src = arg;
384
385 if (dcon->curr_src != arg)
386 schedule_work(&dcon->switch_source);
387}
388
389static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
390{
391 dcon_set_source(dcon, arg);
392 flush_scheduled_work();
393}
394
395static ssize_t dcon_mode_show(struct device *dev,
396 struct device_attribute *attr, char *buf)
397{
398 struct dcon_priv *dcon = dev_get_drvdata(dev);
399
400 return sprintf(buf, "%4.4X\n", dcon->disp_mode);
401}
402
403static ssize_t dcon_sleep_show(struct device *dev,
404 struct device_attribute *attr, char *buf)
405{
406 struct dcon_priv *dcon = dev_get_drvdata(dev);
407
408 return sprintf(buf, "%d\n", dcon->asleep);
409}
410
411static ssize_t dcon_freeze_show(struct device *dev,
412 struct device_attribute *attr, char *buf)
413{
414 struct dcon_priv *dcon = dev_get_drvdata(dev);
415
416 return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
417}
418
419static ssize_t dcon_mono_show(struct device *dev,
420 struct device_attribute *attr, char *buf)
421{
422 struct dcon_priv *dcon = dev_get_drvdata(dev);
423
424 return sprintf(buf, "%d\n", dcon->mono);
425}
426
427static ssize_t dcon_resumeline_show(struct device *dev,
428 struct device_attribute *attr, char *buf)
429{
430 return sprintf(buf, "%d\n", resumeline);
431}
432
433static ssize_t dcon_mono_store(struct device *dev,
434 struct device_attribute *attr, const char *buf, size_t count)
435{
436 unsigned long enable_mono;
437 int rc;
438
439 rc = kstrtoul(buf, 10, &enable_mono);
440 if (rc)
441 return rc;
442
443 dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
444
445 return count;
446}
447
448static ssize_t dcon_freeze_store(struct device *dev,
449 struct device_attribute *attr, const char *buf, size_t count)
450{
451 struct dcon_priv *dcon = dev_get_drvdata(dev);
452 unsigned long output;
453 int ret;
454
455 ret = kstrtoul(buf, 10, &output);
456 if (ret)
457 return ret;
458
459 pr_info("dcon_freeze_store: %lu\n", output);
460
461 switch (output) {
462 case 0:
463 dcon_set_source(dcon, DCON_SOURCE_CPU);
464 break;
465 case 1:
466 dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
467 break;
468 case 2: /* normally unused */
469 dcon_set_source(dcon, DCON_SOURCE_DCON);
470 break;
471 default:
472 return -EINVAL;
473 }
474
475 return count;
476}
477
478static ssize_t dcon_resumeline_store(struct device *dev,
479 struct device_attribute *attr, const char *buf, size_t count)
480{
481 unsigned short rl;
482 int rc;
483
484 rc = kstrtou16(buf, 10, &rl);
485 if (rc)
486 return rc;
487
488 resumeline = rl;
489 dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
490
491 return count;
492}
493
494static ssize_t dcon_sleep_store(struct device *dev,
495 struct device_attribute *attr, const char *buf, size_t count)
496{
497 unsigned long output;
498 int ret;
499
500 ret = kstrtoul(buf, 10, &output);
501 if (ret)
502 return ret;
503
504 dcon_sleep(dev_get_drvdata(dev), output ? true : false);
505 return count;
506}
507
508static struct device_attribute dcon_device_files[] = {
509 __ATTR(mode, 0444, dcon_mode_show, NULL),
510 __ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
511 __ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
512 __ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
513 __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
514};
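These attributes are the driver's userspace knobs; once the "dcon" platform device registers they would typically appear under /sys/devices/platform/dcon/ (the exact path depends on the platform bus layout). Writing 1 to freeze hands the display to the DCON synchronously, 0 returns it to the CPU, and 2 is the normally unused asynchronous variant, matching dcon_freeze_store() above.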
515
516static int dcon_bl_update(struct backlight_device *dev)
517{
518 struct dcon_priv *dcon = bl_get_data(dev);
519 u8 level = dev->props.brightness & 0x0F;
520
521 if (dev->props.power != FB_BLANK_UNBLANK)
522 level = 0;
523
524 if (level != dcon->bl_val)
525 dcon_set_backlight(dcon, level);
526
527 /* power down the DCON when the screen is blanked */
528 if (!dcon->ignore_fb_events)
529 dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
530
531 return 0;
532}
533
534static int dcon_bl_get(struct backlight_device *dev)
535{
536 struct dcon_priv *dcon = bl_get_data(dev);
537
538 return dcon->bl_val;
539}
540
541static const struct backlight_ops dcon_bl_ops = {
542 .update_status = dcon_bl_update,
543 .get_brightness = dcon_bl_get,
544};
545
546static struct backlight_properties dcon_bl_props = {
547 .max_brightness = 15,
548 .type = BACKLIGHT_RAW,
549 .power = FB_BLANK_UNBLANK,
550};
551
552static int dcon_reboot_notify(struct notifier_block *nb,
553 unsigned long foo, void *bar)
554{
555 struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
556
557 if (!dcon || !dcon->client)
558 return NOTIFY_DONE;
559
560 /* Turn off the DCON. Entirely. */
561 dcon_write(dcon, DCON_REG_MODE, 0x39);
562 dcon_write(dcon, DCON_REG_MODE, 0x32);
563 return NOTIFY_DONE;
564}
565
566static int unfreeze_on_panic(struct notifier_block *nb,
567 unsigned long e, void *p)
568{
569 pdata->set_dconload(1);
570 return NOTIFY_DONE;
571}
572
573static struct notifier_block dcon_panic_nb = {
574 .notifier_call = unfreeze_on_panic,
575};
576
577static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
578{
579 strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
580
581 return 0;
582}
583
584static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
585{
586 struct dcon_priv *dcon;
587 int rc, i, j;
588
589 if (!pdata)
590 return -ENXIO;
591
592 dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
593 if (!dcon)
594 return -ENOMEM;
595
596 dcon->client = client;
597 init_waitqueue_head(&dcon->waitq);
598 INIT_WORK(&dcon->switch_source, dcon_source_switch);
599 dcon->reboot_nb.notifier_call = dcon_reboot_notify;
600 dcon->reboot_nb.priority = -1;
601
602 i2c_set_clientdata(client, dcon);
603
604 if (num_registered_fb < 1) {
605 dev_err(&client->dev, "DCON driver requires a registered fb\n");
606 rc = -EIO;
607 goto einit;
608 }
609 dcon->fbinfo = registered_fb[0];
610
611 rc = dcon_hw_init(dcon, 1);
612 if (rc)
613 goto einit;
614
615 /* Add the DCON device */
616
617 dcon_device = platform_device_alloc("dcon", -1);
618
619 if (!dcon_device) {
620 pr_err("Unable to create the DCON device\n");
621 rc = -ENOMEM;
622 goto eirq;
623 }
624 rc = platform_device_add(dcon_device);
625 platform_set_drvdata(dcon_device, dcon);
626
627 if (rc) {
628 pr_err("Unable to add the DCON device\n");
629 goto edev;
630 }
631
632 for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
633 rc = device_create_file(&dcon_device->dev,
634 &dcon_device_files[i]);
635 if (rc) {
636 dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
637 goto ecreate;
638 }
639 }
640
641 dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
642
643 /* Add the backlight device for the DCON */
644 dcon_bl_props.brightness = dcon->bl_val;
645 dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
646 dcon, &dcon_bl_ops, &dcon_bl_props);
647 if (IS_ERR(dcon->bl_dev)) {
648 dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
649 PTR_ERR(dcon->bl_dev));
650 dcon->bl_dev = NULL;
651 }
652
653 register_reboot_notifier(&dcon->reboot_nb);
654 atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
655
656 return 0;
657
658 ecreate:
659 for (j = 0; j < i; j++)
660 device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
661 edev:
662 platform_device_unregister(dcon_device);
663 dcon_device = NULL;
664 eirq:
665 free_irq(DCON_IRQ, dcon);
666 einit:
667 kfree(dcon);
668 return rc;
669}
670
671static int dcon_remove(struct i2c_client *client)
672{
673 struct dcon_priv *dcon = i2c_get_clientdata(client);
674
675 unregister_reboot_notifier(&dcon->reboot_nb);
676 atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
677
678 free_irq(DCON_IRQ, dcon);
679
680 backlight_device_unregister(dcon->bl_dev);
681
682 if (dcon_device)
683 platform_device_unregister(dcon_device);
684 cancel_work_sync(&dcon->switch_source);
685
686 kfree(dcon);
687
688 return 0;
689}
690
691#ifdef CONFIG_PM
692static int dcon_suspend(struct device *dev)
693{
694 struct i2c_client *client = to_i2c_client(dev);
695 struct dcon_priv *dcon = i2c_get_clientdata(client);
696
697 if (!dcon->asleep) {
698 /* Set up the DCON to have the source */
699 dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
700 }
701
702 return 0;
703}
704
705static int dcon_resume(struct device *dev)
706{
707 struct i2c_client *client = to_i2c_client(dev);
708 struct dcon_priv *dcon = i2c_get_clientdata(client);
709
710 if (!dcon->asleep) {
711 dcon_bus_stabilize(dcon, 0);
712 dcon_set_source(dcon, DCON_SOURCE_CPU);
713 }
714
715 return 0;
716}
717
718#else
719
720#define dcon_suspend NULL
721#define dcon_resume NULL
722
723#endif /* CONFIG_PM */
724
725irqreturn_t dcon_interrupt(int irq, void *id)
726{
727 struct dcon_priv *dcon = id;
728 u8 status;
729
730 if (pdata->read_status(&status))
731 return IRQ_NONE;
732
733 switch (status & 3) {
734 case 3:
735 pr_debug("DCONLOAD_MISSED interrupt\n");
736 break;
737
738 case 2: /* switch to DCON mode */
739 case 1: /* switch to CPU mode */
740 dcon->switched = true;
741 dcon->irq_time = ktime_get();
742 wake_up(&dcon->waitq);
743 break;
744
745 case 0:
746 /* workaround resume case: the DCON (on 1.5) doesn't
747 * ever assert status 0x01 when switching to CPU mode
748 * during resume. this is because DCONLOAD is de-asserted
749 * _immediately_ upon exiting S3, so the actual release
750 * of the DCON happened long before this point.
751 * see http://dev.laptop.org/ticket/9869
752 */
753 if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
754 dcon->switched = true;
755 dcon->irq_time = ktime_get();
756 wake_up(&dcon->waitq);
757 pr_debug("switching w/ status 0/0\n");
758 } else {
759 pr_debug("scanline interrupt w/CPU\n");
760 }
761 }
762
763 return IRQ_HANDLED;
764}
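The numeric cases correspond one-to-one to the DCONSTAT_* values declared in olpc_dcon.h below; a mapping for reference:

	/* status & 3:
	 *   DCONSTAT_SCANINT       0  scanline interrupt, CPU has the source
	 *   DCONSTAT_SCANINT_DCON  1  switch-to-CPU handshake
	 *   DCONSTAT_DISPLAYLOAD   2  switch-to-DCON handshake
	 *   DCONSTAT_MISSED        3  DCONLOAD transition missed
	 */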
765
766static const struct dev_pm_ops dcon_pm_ops = {
767 .suspend = dcon_suspend,
768 .resume = dcon_resume,
769};
770
771static const struct i2c_device_id dcon_idtable[] = {
772 { "olpc_dcon", 0 },
773 { }
774};
775MODULE_DEVICE_TABLE(i2c, dcon_idtable);
776
777static struct i2c_driver dcon_driver = {
778 .driver = {
779 .name = "olpc_dcon",
780 .pm = &dcon_pm_ops,
781 },
782 .class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
783 .id_table = dcon_idtable,
784 .probe = dcon_probe,
785 .remove = dcon_remove,
786 .detect = dcon_detect,
787 .address_list = normal_i2c,
788};
789
790static int __init olpc_dcon_init(void)
791{
792#ifdef CONFIG_FB_OLPC_DCON_1_5
793 /* XO-1.5 */
794 if (olpc_board_at_least(olpc_board(0xd0)))
795 pdata = &dcon_pdata_xo_1_5;
796#endif
797#ifdef CONFIG_FB_OLPC_DCON_1
798 if (!pdata)
799 pdata = &dcon_pdata_xo_1;
800#endif
801
802 return i2c_add_driver(&dcon_driver);
803}
804
805static void __exit olpc_dcon_exit(void)
806{
807 i2c_del_driver(&dcon_driver);
808}
809
810module_init(olpc_dcon_init);
811module_exit(olpc_dcon_exit);
812
813MODULE_LICENSE("GPL");
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
new file mode 100644
index 000000000000..215e7ec4dea2
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -0,0 +1,111 @@
1#ifndef OLPC_DCON_H_
2#define OLPC_DCON_H_
3
4#include <linux/notifier.h>
5#include <linux/workqueue.h>
6
7/* DCON registers */
8
9#define DCON_REG_ID 0
10#define DCON_REG_MODE 1
11
12#define MODE_PASSTHRU (1<<0)
13#define MODE_SLEEP (1<<1)
14#define MODE_SLEEP_AUTO (1<<2)
15#define MODE_BL_ENABLE (1<<3)
16#define MODE_BLANK (1<<4)
17#define MODE_CSWIZZLE (1<<5)
18#define MODE_COL_AA (1<<6)
19#define MODE_MONO_LUMA (1<<7)
20#define MODE_SCAN_INT (1<<8)
21#define MODE_CLOCKDIV (1<<9)
22#define MODE_DEBUG (1<<14)
23#define MODE_SELFTEST (1<<15)
24
25#define DCON_REG_HRES 0x2
26#define DCON_REG_HTOTAL 0x3
27#define DCON_REG_HSYNC_WIDTH 0x4
28#define DCON_REG_VRES 0x5
29#define DCON_REG_VTOTAL 0x6
30#define DCON_REG_VSYNC_WIDTH 0x7
31#define DCON_REG_TIMEOUT 0x8
32#define DCON_REG_SCAN_INT 0x9
33#define DCON_REG_BRIGHT 0xa
34#define DCON_REG_MEM_OPT_A 0x41
35#define DCON_REG_MEM_OPT_B 0x42
36
37/* Load Delay Locked Loop (DLL) settings for clock delay */
38#define MEM_DLL_CLOCK_DELAY (1<<0)
39/* Memory controller power down function */
40#define MEM_POWER_DOWN (1<<8)
41/* Memory controller software reset */
42#define MEM_SOFT_RESET (1<<0)
43
44/* Status values */
45
46#define DCONSTAT_SCANINT 0
47#define DCONSTAT_SCANINT_DCON 1
48#define DCONSTAT_DISPLAYLOAD 2
49#define DCONSTAT_MISSED 3
50
51/* Source values */
52
53#define DCON_SOURCE_DCON 0
54#define DCON_SOURCE_CPU 1
55
56/* Interrupt */
57#define DCON_IRQ 6
58
59struct dcon_priv {
60 struct i2c_client *client;
61 struct fb_info *fbinfo;
62 struct backlight_device *bl_dev;
63
64 wait_queue_head_t waitq;
65 struct work_struct switch_source;
66 struct notifier_block reboot_nb;
67
68 /* Shadow register for the DCON_REG_MODE register */
69 u8 disp_mode;
70
71 /* The current backlight value - this saves us some smbus traffic */
72 u8 bl_val;
73
74 /* Current source, initialized at probe time */
75 int curr_src;
76
77 /* Desired source */
78 int pending_src;
79
80 /* Variables used during switches */
81 bool switched;
82 ktime_t irq_time;
83 ktime_t load_time;
84
85 /* Current output type; true == mono, false == color */
86 bool mono;
87 bool asleep;
89 /* This gets set while controlling fb blank state from the driver */
89 bool ignore_fb_events;
90};
91
92struct dcon_platform_data {
93 int (*init)(struct dcon_priv *);
94 void (*bus_stabilize_wiggle)(void);
95 void (*set_dconload)(int);
96 int (*read_status)(u8 *);
97};
98
99#include <linux/interrupt.h>
100
101irqreturn_t dcon_interrupt(int irq, void *id);
102
103#ifdef CONFIG_FB_OLPC_DCON_1
104extern struct dcon_platform_data dcon_pdata_xo_1;
105#endif
106
107#ifdef CONFIG_FB_OLPC_DCON_1_5
108extern struct dcon_platform_data dcon_pdata_xo_1_5;
109#endif
110
111#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
new file mode 100644
index 000000000000..0c5a10c69401
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -0,0 +1,205 @@
1/*
2 * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
3 *
4 * Copyright © 2006-2007 Red Hat, Inc.
5 * Copyright © 2006-2007 Advanced Micro Devices, Inc.
6 * Copyright © 2009 VIA Technology, Inc.
7 * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
8 *
9 * This program is free software. You can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/cs5535.h>
17#include <linux/gpio.h>
18#include <linux/delay.h>
19#include <asm/olpc.h>
20
21#include "olpc_dcon.h"
22
23static int dcon_init_xo_1(struct dcon_priv *dcon)
24{
25 unsigned char lob;
26
27 if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
28 pr_err("failed to request STAT0 GPIO\n");
29 return -EIO;
30 }
31 if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
32 pr_err("failed to request STAT1 GPIO\n");
33 goto err_gp_stat1;
34 }
35 if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
36 pr_err("failed to request IRQ GPIO\n");
37 goto err_gp_irq;
38 }
39 if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
40 pr_err("failed to request LOAD GPIO\n");
41 goto err_gp_load;
42 }
43 if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
44 pr_err("failed to request BLANK GPIO\n");
45 goto err_gp_blank;
46 }
47
48 /* Turn off the event enable for GPIO7 just to be safe */
49 cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
50
51 /*
52 * Determine the current state by reading the GPIO bit; earlier
53 * stages of the boot process have established the state.
54 *
55 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
56 * this is because OFW will disable input for the pin and set a value..
57 * READ_BACK will only contain a valid value if input is enabled and
58 * then a value is set. So, future readings of the pin can use
59 * READ_BACK, but the first one cannot. Awesome, huh?
60 */
61 dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
62 ? DCON_SOURCE_CPU
63 : DCON_SOURCE_DCON;
64 dcon->pending_src = dcon->curr_src;
65
66 /* Set the directions for the GPIO pins */
67 gpio_direction_input(OLPC_GPIO_DCON_STAT0);
68 gpio_direction_input(OLPC_GPIO_DCON_STAT1);
69 gpio_direction_input(OLPC_GPIO_DCON_IRQ);
70 gpio_direction_input(OLPC_GPIO_DCON_BLANK);
71 gpio_direction_output(OLPC_GPIO_DCON_LOAD,
72 dcon->curr_src == DCON_SOURCE_CPU);
73
74 /* Set up the interrupt mappings */
75
76 /* Set the IRQ to pair 2 */
77 cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
78
+	/* Enable group 2 to trigger the DCON interrupt */
+	cs5535_gpio_set_irq(2, DCON_IRQ);
+
+	/* Select edge level for interrupt (in PIC) */
+	lob = inb(0x4d0);
+	lob &= ~(1 << DCON_IRQ);
+	outb(lob, 0x4d0);
+
+	/* Register the interrupt handler */
+	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
+		pr_err("failed to request DCON's irq\n");
+		goto err_req_irq;
+	}
+
+	/* Clear INV_EN for GPIO7 (DCONIRQ) */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
+
+	/* Enable filter for GPIO12 (DCONBLANK) */
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
+
+	/* Disable filter for GPIO7 */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
+
+	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
+
+	/* Add GPIO12 to the Filter Event Pair #7 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
+
+	/* Turn off negative Edge Enable for GPIO12 */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
+
+	/* Enable negative Edge Enable for GPIO7 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
+
+	/* Zero the filter amount for Filter Event Pair #7 */
+	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
+
+	/* Clear the negative edge status for GPIO7 and GPIO12 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
+
+	/* FIXME: Clear the positive status as well, just to be sure */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
+
+	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
+
+	return 0;
+
+err_req_irq:
+	gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+	gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+	gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+	gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+	gpio_free(OLPC_GPIO_DCON_STAT0);
+	return -EIO;
+}
+
+static void dcon_wiggle_xo_1(void)
+{
+	int x;
+
+	/*
+	 * According to HiMax, when powering the DCON up we should hold
+	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
+	 * state machine to reset to a (sane) initial state.  Mitch Bradley
+	 * did some testing and discovered that holding for 16 SMB_CLK cycles
+	 * worked a lot more reliably, so that's what we do here.
+	 *
+	 * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
+	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
+	 * GPIO15.
+	 */
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+
+	for (x = 0; x < 16; x++) {
+		udelay(5);
+		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		udelay(5);
+		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	}
+	udelay(5);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+}
+
+static void dcon_set_dconload_1(int val)
+{
+	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
+}
+
+static int dcon_read_status_xo_1(u8 *status)
+{
+	*status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+	*status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
+	/* Clear the negative edge status for GPIO7 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+
+	return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1 = {
+	.init = dcon_init_xo_1,
+	.bus_stabilize_wiggle = dcon_wiggle_xo_1,
+	.set_dconload = dcon_set_dconload_1,
+	.read_status = dcon_read_status_xo_1,
+};
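The wiggle above is nothing more than a bit-banged clock train: data held high while the clock toggles sixteen times. A minimal sketch of the same pattern against the generic gpiod consumer API, assuming two hypothetical, already-requested descriptors scl and sda; the cs5535-specific AUX1/AUX2 pin muxing that brackets the loop has no generic equivalent and is omitted here.

	#include <linux/delay.h>
	#include <linux/gpio/consumer.h>

	/* Hypothetical helper: 16 clock pulses with data held high. */
	static void dcon_wiggle_sketch(struct gpio_desc *scl, struct gpio_desc *sda)
	{
		int i;

		gpiod_set_value(sda, 1);	/* SMB_DATA high for the whole train */
		gpiod_set_value(scl, 1);

		for (i = 0; i < 16; i++) {
			udelay(5);
			gpiod_set_value(scl, 0);	/* falling edge */
			udelay(5);
			gpiod_set_value(scl, 1);	/* rising edge */
		}
		udelay(5);
	}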
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
new file mode 100644
index 000000000000..6a4d379c16a3
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2009,2010	One Laptop per Child
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/olpc.h>
+
+/* TODO: this eventually belongs in linux/vx855.h */
+#define NR_VX855_GPI	14
+#define NR_VX855_GPO	13
+#define NR_VX855_GPIO	15
+
+#define VX855_GPI(n)	(n)
+#define VX855_GPO(n)	(NR_VX855_GPI + (n))
+#define VX855_GPIO(n)	(NR_VX855_GPI + NR_VX855_GPO + (n))
+
+#include "olpc_dcon.h"
+
+/* Hardware setup on the XO 1.5:
+ *	DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
+ *	DCONBLANK connects to VX855_GPIO8 (not SSPICLK)  unused in driver
+ *	DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
+ *	DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
+ *	DCONIRQ connects to VX855_GPIO12
+ *	DCONSMBDATA connects to VX855 graphics CRTSPD
+ *	DCONSMBCLK connects to VX855 graphics CRTSPCLK
+ */
+
+#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
+#define VX855_GPI_STATUS_CHG 0x450  /* PMIO_Rx50 */
+#define VX855_GPI_SCI_SMI 0x452  /* PMIO_Rx52 */
+#define BIT_GPIO12 0x40
+
+#define PREFIX "OLPC DCON:"
+
+static void dcon_clear_irq(void)
+{
+	/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
+	outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
+}
+
+static int dcon_was_irq(void)
+{
+	u_int8_t tmp;
+
+	/* irq status will appear in PMIO_Rx50[6] on gpio12 */
+	tmp = inb(VX855_GPI_STATUS_CHG);
+	return !!(tmp & BIT_GPIO12);
+}
+
+static int dcon_init_xo_1_5(struct dcon_priv *dcon)
+{
+	unsigned int irq;
+
+	dcon_clear_irq();
+
+	/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+
+	/* Determine the current state of DCONLOAD, likely set by firmware */
+	/* GPIO1 */
+	dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
+			DCON_SOURCE_CPU : DCON_SOURCE_DCON;
+	dcon->pending_src = dcon->curr_src;
+
+	/* we're sharing the IRQ with ACPI */
+	irq = acpi_gbl_FADT.sci_interrupt;
+	if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
+		pr_err("DCON (IRQ%d) allocation failed\n", irq);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void set_i2c_line(int sda, int scl)
+{
+	unsigned char tmp;
+	unsigned int port = 0x26;
+
+	/* FIXME: This directly accesses the CRT GPIO controller! */
+	outb(port, 0x3c4);
+	tmp = inb(0x3c5);
+
+	if (scl)
+		tmp |= 0x20;
+	else
+		tmp &= ~0x20;
+
+	if (sda)
+		tmp |= 0x10;
+	else
+		tmp &= ~0x10;
+
+	tmp |= 0x01;
+
+	outb(port, 0x3c4);
+	outb(tmp, 0x3c5);
+}
+
+static void dcon_wiggle_xo_1_5(void)
+{
+	int x;
+
+	/*
+	 * According to HiMax, when powering the DCON up we should hold
+	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
+	 * state machine to reset to a (sane) initial state.  Mitch Bradley
+	 * did some testing and discovered that holding for 16 SMB_CLK cycles
+	 * worked a lot more reliably, so that's what we do here.
+	 */
+	set_i2c_line(1, 1);
+
+	for (x = 0; x < 16; x++) {
+		udelay(5);
+		set_i2c_line(1, 0);
+		udelay(5);
+		set_i2c_line(1, 1);
+	}
+	udelay(5);
+
+	/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+}
+
+static void dcon_set_dconload_xo_1_5(int val)
+{
+	gpio_set_value(VX855_GPIO(1), val);
+}
+
+static int dcon_read_status_xo_1_5(u8 *status)
+{
+	if (!dcon_was_irq())
+		return -1;
+
+	/* I believe this is the same as "inb(0x44b) & 3" */
+	*status = gpio_get_value(VX855_GPI(10));
+	*status |= gpio_get_value(VX855_GPI(11)) << 1;
+
+	dcon_clear_irq();
+
+	return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1_5 = {
+	.init = dcon_init_xo_1_5,
+	.bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
+	.set_dconload = dcon_set_dconload_xo_1_5,
+	.read_status = dcon_read_status_xo_1_5,
+};
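set_i2c_line() above drives the VIA VGA sequencer's index/data register pair: the register index goes to port 0x3c4, the value is then read or written through port 0x3c5. The same read-modify-write as a stand-alone sketch, with the port numbers lifted from the driver (illustration only, not a supported interface):

	#include <linux/io.h>
	#include <linux/types.h>

	#define SEQ_INDEX	0x3c4	/* VGA sequencer index port */
	#define SEQ_DATA	0x3c5	/* VGA sequencer data port */

	/* Read-modify-write one bit of an indexed sequencer register. */
	static void seq_rmw_bit(u8 index, u8 mask, bool set)
	{
		u8 tmp;

		outb(index, SEQ_INDEX);		/* select the register */
		tmp = inb(SEQ_DATA);		/* read the current value */
		if (set)
			tmp |= mask;
		else
			tmp &= ~mask;
		outb(index, SEQ_INDEX);		/* re-select before writing back */
		outb(tmp, SEQ_DATA);
	}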
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
index 3e668d852f03..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_HFI1
2 tristate "Intel OPA Gen1 support" 2 tristate "Intel OPA Gen1 support"
3 depends on X86_64 && INFINIBAND_RDMAVT 3 depends on X86_64 && INFINIBAND_RDMAVT
4 select MMU_NOTIFIER 4 select MMU_NOTIFIER
5 select CRC32
5 default m 6 default m
6 ---help--- 7 ---help---
7 This is a low-level driver for Intel OPA Gen1 adapter. 8 This is a low-level driver for Intel OPA Gen1 adapter.
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
 - Remove unneeded file entries in sysfs
 - Remove software processing of IB protocol and place in library for use
   by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <rdma/ib.h>
+
 #include "hfi.h"
 #include "pio.h"
 #include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
 	int uctxt_required = 1;
 	int must_be_root = 0;
 
+	/* FIXME: This interface cannot continue out of staging */
+	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+		return -EACCES;
+
 	if (count < sizeof(cmd)) {
 		ret = -EINVAL;
 		goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
 
 	dd->rcd[uctxt->ctxt] = NULL;
+
+	hfi1_user_exp_rcv_free(fdata);
+	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
 	uctxt->rcvwait_to = 0;
 	uctxt->piowait_to = 0;
 	uctxt->rcvnowait = 0;
 	uctxt->pionowait = 0;
 	uctxt->event_flags = 0;
 
-	hfi1_user_exp_rcv_free(fdata);
-	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
 	hfi1_stats.sps_ctxts--;
 	if (++dd->freectxts == dd->num_user_contexts)
 		aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
 
 static int user_init(struct file *fp)
 {
-	int ret;
 	unsigned int rcvctrl_ops = 0;
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 
 	/* make sure that the context has already been setup */
-	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
-		ret = -EFAULT;
-		goto done;
-	}
-
-	/*
-	 * Subctxts don't need to initialize anything since master
-	 * has done it.
-	 */
-	if (fd->subctxt) {
-		ret = wait_event_interruptible(uctxt->wait, !test_bit(
-			HFI1_CTXT_MASTER_UNINIT,
-			&uctxt->event_flags));
-		goto expected;
-	}
+	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+		return -EFAULT;
 
 	/* initialize poll variables... */
 	uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
 		wake_up(&uctxt->wait);
 	}
 
-expected:
-	/*
-	 * Expected receive has to be setup for all processes (including
-	 * shared contexts). However, it has to be done after the master
-	 * context has been fully configured as it depends on the
-	 * eager/expected split of the RcvArray entries.
-	 * Setting it up here ensures that the subcontexts will be waiting
-	 * (due to the above wait_event_interruptible() until the master
-	 * is setup.
-	 */
-	ret = hfi1_user_exp_rcv_init(fp);
-done:
-	return ret;
+	return 0;
 }
 
 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
 	int ret = 0;
 
 	/*
-	 * Context should be set up only once (including allocation and
+	 * Context should be set up only once, including allocation and
 	 * programming of eager buffers. This is done if context sharing
 	 * is not requested or by the master process.
 	 */
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
 			if (ret)
 				goto done;
 		}
+	} else {
+		ret = wait_event_interruptible(uctxt->wait, !test_bit(
+			HFI1_CTXT_MASTER_UNINIT,
+			&uctxt->event_flags));
+		if (ret)
+			goto done;
 	}
+
 	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
 	if (ret)
 		goto done;
+	/*
+	 * Expected receive has to be setup for all processes (including
+	 * shared contexts). However, it has to be done after the master
+	 * context has been fully configured as it depends on the
+	 * eager/expected split of the RcvArray entries.
+	 * Setting it up here ensures that the subcontexts will be waiting
+	 * (due to the above wait_event_interruptible()) until the master
+	 * is setup.
+	 */
+	ret = hfi1_user_exp_rcv_init(fp);
+	if (ret)
+		goto done;
 
 	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
 done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
 {
 	struct hfi1_devdata *dd = filp->private_data;
 
-	switch (whence) {
-	case SEEK_SET:
-		break;
-	case SEEK_CUR:
-		offset += filp->f_pos;
-		break;
-	case SEEK_END:
-		offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
-			offset;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (offset < 0)
-		return -EINVAL;
-
-	if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
-		return -EINVAL;
-
-	filp->f_pos = offset;
-
-	return filp->f_pos;
+	return fixed_size_llseek(filp, offset, whence,
+				 (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
 }
 
 /* NOTE: assumes unsigned long is 8 bytes */
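The ui_lseek() rewrite above leans on fixed_size_llseek() from <linux/fs.h>, which folds the SEEK_SET/SEEK_CUR/SEEK_END switch and the bounds checks into one call. Roughly, it behaves like the sketch below (a simplification; the real helper delegates to generic_file_llseek_size()):

	#include <linux/fs.h>

	/* Approximate behaviour of fixed_size_llseek() over 'size' bytes. */
	static loff_t sketch_fixed_size_llseek(struct file *file, loff_t offset,
					       int whence, loff_t size)
	{
		switch (whence) {
		case SEEK_SET:
			break;
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_END:
			offset += size;		/* seek relative to the end */
			break;
		default:
			return -EINVAL;
		}
		if (offset < 0 || offset > size)
			return -EINVAL;
		file->f_pos = offset;
		return offset;
	}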
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
 					     struct mm_struct *,
 					     unsigned long, unsigned long);
 static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+					struct mm_struct *,
 					unsigned long, unsigned long);
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
 					   unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
 		rbnode = rb_entry(node, struct mmu_rb_node, node);
 		rb_erase(node, root);
 		if (handler->ops->remove)
-			handler->ops->remove(root, rbnode, false);
+			handler->ops->remove(root, rbnode, NULL);
 	}
 }
 
@@ -176,7 +177,7 @@ unlock:
 	return ret;
 }
 
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 					   unsigned long addr,
 					   unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 	return node;
 }
 
+/* Caller must *not* hold handler lock. */
 static void __mmu_rb_remove(struct mmu_rb_handler *handler,
-			    struct mmu_rb_node *node, bool arg)
+			    struct mmu_rb_node *node, struct mm_struct *mm)
 {
+	unsigned long flags;
+
 	/* Validity of handler and node pointers has been checked by caller. */
 	hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
 		  node->len);
+	spin_lock_irqsave(&handler->lock, flags);
 	__mmu_int_rb_remove(node, handler->root);
+	spin_unlock_irqrestore(&handler->lock, flags);
+
 	if (handler->ops->remove)
-		handler->ops->remove(handler->root, node, arg);
+		handler->ops->remove(handler->root, node, mm);
 }
 
 struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
 void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
 {
 	struct mmu_rb_handler *handler = find_mmu_handler(root);
-	unsigned long flags;
 
 	if (!handler || !node)
 		return;
 
-	spin_lock_irqsave(&handler->lock, flags);
-	__mmu_rb_remove(handler, node, false);
-	spin_unlock_irqrestore(&handler->lock, flags);
+	__mmu_rb_remove(handler, node, NULL);
 }
 
 static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
 static inline void mmu_notifier_page(struct mmu_notifier *mn,
 				     struct mm_struct *mm, unsigned long addr)
 {
-	mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
 }
 
 static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
 					    unsigned long start,
 					    unsigned long end)
 {
-	mmu_notifier_mem_invalidate(mn, start, end);
+	mmu_notifier_mem_invalidate(mn, mm, start, end);
 }
 
 static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+					struct mm_struct *mm,
 					unsigned long start, unsigned long end)
 {
 	struct mmu_rb_handler *handler =
 		container_of(mn, struct mmu_rb_handler, mn);
 	struct rb_root *root = handler->root;
-	struct mmu_rb_node *node;
+	struct mmu_rb_node *node, *ptr = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&handler->lock, flags);
-	for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
-	     node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+	     node; node = ptr) {
+		/* Guard against node removal. */
+		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
 		hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
 			  node->addr, node->len);
-		if (handler->ops->invalidate(root, node))
-			__mmu_rb_remove(handler, node, true);
+		if (handler->ops->invalidate(root, node)) {
+			spin_unlock_irqrestore(&handler->lock, flags);
+			__mmu_rb_remove(handler, node, mm);
+			spin_lock_irqsave(&handler->lock, flags);
+		}
 	}
 	spin_unlock_irqrestore(&handler->lock, flags);
 }
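The rewritten invalidate loop saves the next interval-tree node in ptr before the remove callback runs, and drops handler->lock around the callback since ops->remove may sleep. The same guard-the-iterator idea in its simplest form, on a plain kernel list (generic sketch, hypothetical item type):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head node;
	};

	/* list_for_each_entry_safe() prefetches the next node, so the
	 * current one may be unlinked and freed mid-walk. */
	static void invalidate_all(struct list_head *head, spinlock_t *lock,
				   bool (*stale)(struct item *))
	{
		struct item *cur, *next;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(cur, next, head, node) {
			if (stale(cur)) {
				list_del(&cur->node);
				kfree(cur);
			}
		}
		spin_unlock_irqrestore(lock, flags);
	}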
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
 struct mmu_rb_ops {
 	bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
 	int (*insert)(struct rb_root *, struct mmu_rb_node *);
-	void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+	void (*remove)(struct rb_root *, struct mmu_rb_node *,
+		       struct mm_struct *);
 	int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
 };
 
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
 	 * do the flush work until that QP's
 	 * sdma work has finished.
 	 */
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & RVT_S_WAIT_DMA) {
 		qp->s_flags &= ~RVT_S_WAIT_DMA;
 		hfi1_schedule_send(qp);
 	}
+	spin_unlock(&qp->s_lock);
 }
 
 /**
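Wrapping the RVT_S_WAIT_DMA test-and-clear in qp->s_lock matters because s_flags is updated read-modify-write from several contexts; without the lock a concurrent setter can be lost between the load and the store. The hazard in generic form (sketch, not hfi1 code):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Any read-modify-write of a shared flags word must hold the lock
	 * that every other writer of that word takes. */
	static void clear_flag_and_kick(spinlock_t *lock, u32 *flags_word,
					u32 flag, void (*kick)(void))
	{
		spin_lock(lock);
		if (*flags_word & flag) {
			*flags_word &= ~flag;	/* safe only under the lock */
			kick();
		}
		spin_unlock(lock);
	}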
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
 static int set_rcvarray_entry(struct file *, unsigned long, u32,
 			      struct tid_group *, struct page **, unsigned);
 static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+			  struct mm_struct *);
 static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
 static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
 			    struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct tid_group *grp, *gptr;
 
+	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+		return 0;
 	/*
 	 * The notifier would have been removed when the process's mm
 	 * was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
 	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
 		return -EBADF;
 	if (HFI1_CAP_IS_USET(TID_UNMAP))
-		mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+		mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
 	else
 		hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
 
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
 				continue;
 			if (HFI1_CAP_IS_USET(TID_UNMAP))
 				mmu_rb_remove(&fd->tid_rb_root,
-					      &node->mmu, false);
+					      &node->mmu, NULL);
 			else
 				hfi1_mmu_rb_remove(&fd->tid_rb_root,
 						   &node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
 }
 
 static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
-			  bool notifier)
+			  struct mm_struct *mm)
 {
 	struct hfi1_filedata *fdata =
 		container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
 			    struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+			       unsigned);
 static int check_header_template(struct user_sdma_request *,
 				 struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
 static void activate_packet_queue(struct iowait *, int);
 static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
 static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+			   struct mm_struct *);
 static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
 
 static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
 	rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
 				     (unsigned long)iovec->iov.iov_base,
 				     iovec->iov.iov_len);
-	if (rb_node)
+	if (rb_node && !IS_ERR(rb_node))
 		node = container_of(rb_node, struct sdma_mmu_node, rb);
+	else
+		rb_node = NULL;
 
 	if (!node) {
 		node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
 		goto bail;
 	}
 	if (pinned != npages) {
-		unpin_vector_pages(current->mm, pages, pinned);
+		unpin_vector_pages(current->mm, pages, node->npages,
+				   pinned);
 		ret = -EFAULT;
 		goto bail;
 	}
@@ -1147,9 +1152,9 @@ bail:
 }
 
 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned npages)
+			       unsigned start, unsigned npages)
 {
-	hfi1_release_user_pages(mm, pages, npages, 0);
+	hfi1_release_user_pages(mm, pages + start, npages, 0);
 	kfree(pages);
 }
 
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
 				&req->pq->sdma_rb_root,
 				(unsigned long)req->iovs[i].iov.iov_base,
 				req->iovs[i].iov.iov_len);
-			if (!mnode)
+			if (!mnode || IS_ERR(mnode))
 				continue;
 
 			node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
 }
 
 static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
-			   bool notifier)
+			   struct mm_struct *mm)
 {
 	struct sdma_mmu_node *node =
 		container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
 	node->pq->n_locked -= node->npages;
 	spin_unlock(&node->pq->evict_lock);
 
-	unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+	/*
+	 * If mm is set, we are being called by the MMU notifier and we
+	 * should not pass a mm_struct to unpin_vector_pages(). This is to
+	 * prevent a deadlock when hfi1_release_user_pages() attempts to
+	 * take the mmap_sem, which the MMU notifier has already taken.
+	 */
+	unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
 			   node->npages);
 	/*
 	 * If called by the MMU notifier, we have to adjust the pinned
 	 * page count ourselves.
 	 */
-	if (notifier)
-		current->mm->pinned_vm -= node->npages;
+	if (mm)
+		mm->pinned_vm -= node->npages;
 	kfree(node);
 }
 
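The mm parameter threaded through the remove callbacks encodes one rule: when called from the MMU notifier, mmap_sem is already held by the caller, and hfi1_release_user_pages() would try to take it again if handed an mm, so the notifier path passes NULL and fixes up pinned_vm by hand. Paraphrased shape of the decision (names from the driver, logic condensed):

	/* mm != NULL  => MMU-notifier context, mmap_sem already held */
	if (mm) {
		unpin_vector_pages(NULL, node->pages, 0, node->npages);
		mm->pinned_vm -= node->npages;	/* manual accounting */
	} else {
		/* process context: the helper may take mmap_sem itself */
		unpin_vector_pages(current->mm, node->pages, 0, node->npages);
	}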
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c37eedc35a24..3c3dc4a3d52c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -376,6 +376,8 @@ config MTK_THERMAL
 	tristate "Temperature sensor driver for mediatek SoCs"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	depends on HAS_IOMEM
+	depends on NVMEM || NVMEM=n
+	depends on RESET_CONTROLLER
 	default y
 	help
 	  Enable this option if you want to have support for thermal management
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
 	 * Every step equals (1 * 200) / 255 celsius, and finally
 	 * need convert to millicelsius.
 	 */
-	return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+	return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
 }
 
 static inline long _temp_to_step(long temp)
 {
-	return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+	return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
 }
 
 static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
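The hisi conversion fix is about integer-division order: the old formula divided by 255 before scaling to millicelsius, so every reading was quantized to whole degrees. Assuming HISI_TEMP_BASE is -60 as in this driver, step = 100 gives (-60 + 78) * 1000 = 18000 with the old code but -60000 + 78431 = 18431 with the new one. A user-space check:

	#include <stdio.h>

	#define HISI_TEMP_BASE (-60)	/* assumed, from the driver */

	int main(void)
	{
		int step = 100;
		int old = (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
		int new = HISI_TEMP_BASE * 1000 + step * 200000 / 255;

		printf("old=%d new=%d\n", old, new);	/* old=18000 new=18431 */
		return 0;
	}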
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 3d93b1c07cee..507632b9648e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -27,7 +27,6 @@
 #include <linux/thermal.h>
 #include <linux/reset.h>
 #include <linux/types.h>
-#include <linux/nvmem-consumer.h>
 
 /* AUXADC Registers */
 #define AUXADC_CON0_V	0x000
@@ -619,7 +618,7 @@
 
 module_platform_driver(mtk_thermal_driver);
 
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
 MODULE_DESCRIPTION("Mediatek thermal driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 49ac23d3e776..d8ec44b194d6 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
  * otherwise, it returns a corresponding ERR_PTR(). Caller must
  * check the return value with help of IS_ERR() helper.
  */
-static struct __thermal_zone *
-thermal_of_build_thermal_zone(struct device_node *np)
+static struct __thermal_zone
+__init *thermal_of_build_thermal_zone(struct device_node *np)
 {
 	struct device_node *child = NULL, *gchild;
 	struct __thermal_zone *tz;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..2f1a863a8e15 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 	capped_extra_power = 0;
 	extra_power = 0;
 	for (i = 0; i < num_actors; i++) {
-		u64 req_range = req_power[i] * power_range;
+		u64 req_range = (u64)req_power[i] * power_range;
 
 		granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
 							 total_req_power);
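The cast fixes a silent 32-bit overflow: req_power[i] and power_range are both u32, so without (u64) the product is computed modulo 2^32 and only then widened into req_range. For example, 100000 * 50000 should be 5,000,000,000 but wraps to 705,032,704:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t req_power = 100000, power_range = 50000;
		uint64_t wrong = req_power * power_range;	/* 32-bit multiply wraps */
		uint64_t right = (uint64_t)req_power * power_range;

		printf("wrong=%llu right=%llu\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}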
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4b54653ecf8..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	int temperature;
 
 	if (!tz->ops->set_trip_temp)
 		return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
 	if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
 		return -EINVAL;
 
-	if (kstrtoul(buf, 10, &temperature))
+	if (kstrtoint(buf, 10, &temperature))
 		return -EINVAL;
 
 	ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int ret = 0;
-	unsigned long temperature;
+	int temperature;
 
-	if (kstrtoul(buf, 10, &temperature))
+	if (kstrtoint(buf, 10, &temperature))
 		return -EINVAL;
 
 	if (!tz->ops->set_emul_temp) {
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
 	struct thermal_zone_device *tz = to_thermal_zone(dev);		\
 									\
 	if (tz->tzp)							\
-		return sprintf(buf, "%u\n", tz->tzp->name);		\
+		return sprintf(buf, "%d\n", tz->tzp->name);		\
 	else								\
 		return -EIO;						\
 	}								\
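The kstrtoul-to-kstrtoint switch is what actually makes negative values writable: kstrtoul() rejects a leading '-', so storing a below-zero trip or emulated temperature (in millicelsius) previously failed with -EINVAL, and the destination variable has to become int to match. Minimal sketch of the store-side parse after the change:

	#include <linux/kernel.h>

	/* sketch: a signed parser is required for sub-zero millicelsius */
	static int parse_temp(const char *buf, int *temp)
	{
		return kstrtoint(buf, 10, temp);	/* accepts "-38000" */
	}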
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b507ef..cf0dc51a2690 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
  */
 
 static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
-		struct inode *ptm_inode, int idx)
+		struct file *file, int idx)
 {
 	/* Master must be open via /dev/ptmx */
 	return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
  */
 
 static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
-		struct inode *pts_inode, int idx)
+		struct file *file, int idx)
 {
 	struct tty_struct *tty;
 
 	mutex_lock(&devpts_mutex);
-	tty = devpts_get_priv(pts_inode);
+	tty = devpts_get_priv(file->f_path.dentry);
 	mutex_unlock(&devpts_mutex);
 	/* Master must be open before slave */
 	if (!tty)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
 /* this is called once with whichever end is closed last */
 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-	struct inode *ptmx_inode;
+	struct pts_fs_info *fsi;
 
 	if (tty->driver->subtype == PTY_TYPE_MASTER)
-		ptmx_inode = tty->driver_data;
+		fsi = tty->driver_data;
 	else
-		ptmx_inode = tty->link->driver_data;
-	devpts_kill_index(ptmx_inode, tty->index);
-	devpts_del_ref(ptmx_inode);
+		fsi = tty->link->driver_data;
+	devpts_kill_index(fsi, tty->index);
+	devpts_put_ref(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@ static const struct tty_operations pty_unix98_ops = {
 
 static int ptmx_open(struct inode *inode, struct file *filp)
 {
+	struct pts_fs_info *fsi;
 	struct tty_struct *tty;
-	struct inode *slave_inode;
+	struct dentry *dentry;
 	int retval;
 	int index;
 
@@ -734,54 +735,46 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 	if (retval)
 		return retval;
 
+	fsi = devpts_get_ref(inode, filp);
+	retval = -ENODEV;
+	if (!fsi)
+		goto out_free_file;
+
 	/* find a device that is not in use. */
 	mutex_lock(&devpts_mutex);
-	index = devpts_new_index(inode);
-	if (index < 0) {
-		retval = index;
-		mutex_unlock(&devpts_mutex);
-		goto err_file;
-	}
-
+	index = devpts_new_index(fsi);
 	mutex_unlock(&devpts_mutex);
 
-	mutex_lock(&tty_mutex);
-	tty = tty_init_dev(ptm_driver, index);
+	retval = index;
+	if (index < 0)
+		goto out_put_ref;
 
-	if (IS_ERR(tty)) {
-		retval = PTR_ERR(tty);
-		goto out;
-	}
 
+	mutex_lock(&tty_mutex);
+	tty = tty_init_dev(ptm_driver, index);
 	/* The tty returned here is locked so we can safely
 	   drop the mutex */
 	mutex_unlock(&tty_mutex);
 
-	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-	tty->driver_data = inode;
+	retval = PTR_ERR(tty);
+	if (IS_ERR(tty))
+		goto out;
 
 	/*
-	 * In the case where all references to ptmx inode are dropped and we
-	 * still have /dev/tty opened pointing to the master/slave pair (ptmx
-	 * is closed/released before /dev/tty), we must make sure that the inode
-	 * is still valid when we call the final pty_unix98_shutdown, thus we
-	 * hold an additional reference to the ptmx inode. For the same /dev/tty
-	 * last close case, we also need to make sure the super_block isn't
-	 * destroyed (devpts instance unmounted), before /dev/tty is closed and
-	 * on its release devpts_kill_index is called.
+	 * From here on out, the tty is "live", and the index and
+	 * fsi will be killed/put by the tty_release()
 	 */
-	devpts_add_ref(inode);
+	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+	tty->driver_data = fsi;
 
 	tty_add_file(tty, filp);
 
-	slave_inode = devpts_pty_new(inode,
-			MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
-			tty->link);
-	if (IS_ERR(slave_inode)) {
-		retval = PTR_ERR(slave_inode);
+	dentry = devpts_pty_new(fsi, index, tty->link);
+	if (IS_ERR(dentry)) {
+		retval = PTR_ERR(dentry);
 		goto err_release;
 	}
-	tty->link->driver_data = slave_inode;
+	tty->link->driver_data = dentry;
 
 	retval = ptm_driver->ops->open(tty, filp);
 	if (retval)
@@ -793,12 +786,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 	return 0;
 err_release:
 	tty_unlock(tty);
+	/* This will also put-ref the fsi */
 	tty_release(inode, filp);
 	return retval;
 out:
-	mutex_unlock(&tty_mutex);
-	devpts_kill_index(inode, index);
-err_file:
+	devpts_kill_index(fsi, index);
+out_put_ref:
+	devpts_put_ref(fsi);
+out_free_file:
 	tty_free_file(filp);
 	return retval;
 }
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e213da01a3d7..00ad2637b08c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
 	/*
 	 * Empty the RX FIFO, we are not interested in anything
 	 * received during the half-duplex transmission.
+	 * Enable previously disabled RX interrupts.
 	 */
-	if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+	if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
 		serial8250_clear_fifos(p);
+
+		serial8250_rpm_get(p);
+
+		p->ier |= UART_IER_RLSI | UART_IER_RDI;
+		serial_port_out(&p->port, UART_IER, p->ier);
+
+		serial8250_rpm_put(p);
+	}
 }
 
 static void serial8250_em485_handle_stop_tx(unsigned long arg)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 64742a086ae3..4d7cb9c04fce 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
 config SERIAL_8250_RT288X
 	bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
 	depends on SERIAL_8250
-	depends on MIPS || COMPILE_TEST
 	default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
 	help
 	  Selecting this option will add support for the alternate register
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9fdfc8bf47f..d08baa668d5d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
 	iowrite32be(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
 	.in = uartlite_inbe32,
 	.out = uartlite_outbe32,
 };
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
 	iowrite32(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
 	.in = uartlite_inle32,
 	.out = uartlite_outle32,
 };
 
 static inline u32 uart_in32(u32 offset, struct uart_port *port)
 {
-	const struct uartlite_reg_ops *reg_ops = port->private_data;
+	struct uartlite_reg_ops *reg_ops = port->private_data;
 
 	return reg_ops->in(port->membase + offset);
 }
 
 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
 {
-	const struct uartlite_reg_ops *reg_ops = port->private_data;
+	struct uartlite_reg_ops *reg_ops = port->private_data;
 
 	reg_ops->out(val, port->membase + offset);
 }
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d26ed79bb4c..24d5491ef0da 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
  *	Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
  */
 static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
-		struct inode *inode, int idx)
+		struct file *file, int idx)
 {
 	struct tty_struct *tty;
 
 	if (driver->ops->lookup)
-		tty = driver->ops->lookup(driver, inode, idx);
+		tty = driver->ops->lookup(driver, file, idx);
 	else
 		tty = driver->ttys[idx];
 
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
 	}
 
 	/* check whether we're reopening an existing tty */
-	tty = tty_driver_lookup_tty(driver, inode, index);
+	tty = tty_driver_lookup_tty(driver, filp, index);
 	if (IS_ERR(tty)) {
 		mutex_unlock(&tty_mutex);
 		goto out;
@@ -2049,14 +2049,13 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
 	if (tty) {
 		mutex_unlock(&tty_mutex);
 		retval = tty_lock_interruptible(tty);
+		tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
 		if (retval) {
 			if (retval == -EINTR)
 				retval = -ERESTARTSYS;
 			tty = ERR_PTR(retval);
 			goto out;
 		}
-		/* safe to drop the kref from tty_driver_lookup_tty() */
-		tty_kref_put(tty);
 		retval = tty_reopen(tty);
 		if (retval < 0) {
 			tty_unlock(tty);
@@ -2158,7 +2157,7 @@ retry_open:
 	read_lock(&tasklist_lock);
 	spin_lock_irq(&current->sighand->siglock);
 	noctty = (filp->f_flags & O_NOCTTY) ||
-			device == MKDEV(TTY_MAJOR, 0) ||
+			(IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
 			device == MKDEV(TTYAUX_MAJOR, 1) ||
 			(tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 			 tty->driver->subtype == PTY_TYPE_MASTER);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 83fd30b0577c..a6c4a1b895bd 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
 	int err;
 	unsigned long flags;
 
+	if (!cur) /* nothing to do */
+		return;
+
 	acm->putbuffer = NULL;
 	err = usb_autopm_get_interface_async(acm->control);
 	spin_lock_irqsave(&acm->write_lock, flags);
 	if (err < 0) {
 		cur->use = 0;
+		acm->putbuffer = cur;
 		goto out;
 	}
 
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5eb1a87228b4..31ccdccd7a04 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -75,8 +75,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	 * be the first thing immediately following the endpoint descriptor.
 	 */
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
-	buffer += desc->bLength;
-	size -= desc->bLength;
 
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
 			size < USB_DT_SS_EP_COMP_SIZE) {
@@ -100,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 				ep->desc.wMaxPacketSize;
 		return;
 	}
-
+	buffer += desc->bLength;
+	size -= desc->bLength;
 	memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
 
 	/* Check the various values */
@@ -146,12 +145,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 		ep->ss_ep_comp.bmAttributes = 2;
 	}
 
-	/* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
-	if (usb_endpoint_xfer_isoc(&ep->desc) &&
-	    USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
-		usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
-						      ep, buffer, size);
-
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		max_tx = (desc->bMaxBurst + 1) *
 			(USB_SS_MULT(desc->bmAttributes)) *
@@ -171,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 			max_tx);
 		ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
 	}
+	/* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
+	if (usb_endpoint_xfer_isoc(&ep->desc) &&
+	    USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
+		usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+						      ep, buffer, size);
 }
 
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
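The reorder in usb_parse_ss_endpoint_companion() is a validate-before-consume fix: the buffer/size cursor must only advance past desc->bLength once the descriptor has been verified, otherwise a malformed (wrong-type or truncated) descriptor desynchronizes the walk. The general pattern, sketched over a raw byte buffer:

	#include <stdint.h>
	#include <stddef.h>

	struct desc_hdr {
		uint8_t bLength;
		uint8_t bDescriptorType;
	};

	/* Advance the cursor only after the header has been validated. */
	static const uint8_t *next_descriptor(const uint8_t *buf, size_t *size,
					      uint8_t want_type, size_t min_len)
	{
		const struct desc_hdr *h = (const struct desc_hdr *)buf;

		if (*size < sizeof(*h) || h->bDescriptorType != want_type ||
		    h->bLength < min_len || h->bLength > *size)
			return NULL;		/* reject first ... */

		*size -= h->bLength;		/* ... consume second */
		return buf + h->bLength;
	}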
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f9d42cf23e55..7859d738df41 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
 		if (companion->bus != pdev->bus ||
 				PCI_SLOT(companion->devfn) != slot)
 			continue;
+
+		/*
+		 * Companion device should be either UHCI, OHCI or EHCI host
+		 * controller, otherwise skip.
+		 */
+		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+				companion->class != CL_EHCI)
+			continue;
+
 		companion_hcd = pci_get_drvdata(companion);
 		if (!companion_hcd || !companion_hcd->self.root_hub)
 			continue;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e9940dd004e4..818f158232bb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 {
 	u32 intmsk;
 	u32 val;
+	u32 usbcfg;
 
 	/* Kill any ep0 requests as controller will be reinitialized */
 	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@
 	 * set configuration.
 	 */
 
+	/* keep other bits untouched (so e.g. forced modes are not lost) */
+	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+		GUSBCFG_HNPCAP);
+
 	/* set the PLL on, remove the HNP/SRP and set the PHY */
 	val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-	dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-	       (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
+	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+		(val << GUSBCFG_USBTRDTIM_SHIFT);
+	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
 	dwc2_hsotg_init_fifo(hsotg);
 
@@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = {
 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
 {
 	u32 trdtim;
+	u32 usbcfg;
 	/* unmask subset of endpoint interrupts */
 
 	dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
 
 	dwc2_hsotg_init_fifo(hsotg);
 
+	/* keep other bits untouched (so e.g. forced modes are not lost) */
+	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+		GUSBCFG_HNPCAP);
+
 	/* set the PLL on, remove the HNP/SRP and set the PHY */
 	trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-	dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-		(trdtim << GUSBCFG_USBTRDTIM_SHIFT),
-		hsotg->regs + GUSBCFG);
+	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+		(trdtim << GUSBCFG_USBTRDTIM_SHIFT);
+	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
 	if (using_dma(hsotg))
 		__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
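Both dwc2 hunks replace a blind register write with a read-modify-write: read GUSBCFG, clear only the fields about to be re-programmed (TOUTCAL, PHYIF16, SRP/HNP caps), OR in the new values, and write back. The generic shape of the idiom, as a sketch:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Update only the bits in 'mask', preserving everything else. */
	static inline void reg_update_bits(void __iomem *addr, u32 mask, u32 val)
	{
		u32 tmp = ioread32(addr);

		tmp &= ~mask;		/* clear the fields being re-programmed */
		tmp |= val & mask;	/* insert the new field values */
		iowrite32(tmp, addr);
	}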
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 17fd81447c9f..34277ced26bd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
 static int dwc3_core_soft_reset(struct dwc3 *dwc)
 {
 	u32		reg;
+	int		retries = 1000;
 	int		ret;
 
-	/* Before Resetting PHY, put Core in Reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg |= DWC3_GCTL_CORESOFTRESET;
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
-	/* Assert USB3 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-	reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
-
-	/* Assert USB2 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-	reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-
 	usb_phy_init(dwc->usb2_phy);
 	usb_phy_init(dwc->usb3_phy);
 	ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
 		phy_exit(dwc->usb2_generic_phy);
 		return ret;
 	}
-	mdelay(100);
 
-	/* Clear USB3 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-	reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+	/*
+	 * We're resetting only the device side because, if we're in host mode,
+	 * XHCI driver will reset the host block. If dwc3 was configured for
+	 * host-only mode, then we can return early.
+	 */
+	if (dwc->dr_mode == USB_DR_MODE_HOST)
+		return 0;
 
-	/* Clear USB2 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-	reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	reg |= DWC3_DCTL_CSFTRST;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
-	mdelay(100);
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		if (!(reg & DWC3_DCTL_CSFTRST))
+			return 0;
 
-	/* After PHYs are stable we can take Core out of reset state */
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg &= ~DWC3_GCTL_CORESOFTRESET;
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+		udelay(1);
+	} while (--retries);
 
-	return 0;
+	return -ETIMEDOUT;
 }
 
 /**
@@ -1162,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
1162 phy_exit(dwc->usb2_generic_phy); 1150 phy_exit(dwc->usb2_generic_phy);
1163 phy_exit(dwc->usb3_generic_phy); 1151 phy_exit(dwc->usb3_generic_phy);
1164 1152
1153 usb_phy_set_suspend(dwc->usb2_phy, 1);
1154 usb_phy_set_suspend(dwc->usb3_phy, 1);
1155 WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
1156 WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
1157
1165 pinctrl_pm_select_sleep_state(dev); 1158 pinctrl_pm_select_sleep_state(dev);
1166 1159
1167 return 0; 1160 return 0;
@@ -1175,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
1175 1168
1176 pinctrl_pm_select_default_state(dev); 1169 pinctrl_pm_select_default_state(dev);
1177 1170
1171 usb_phy_set_suspend(dwc->usb2_phy, 0);
1172 usb_phy_set_suspend(dwc->usb3_phy, 0);
1173 ret = phy_power_on(dwc->usb2_generic_phy);
1174 if (ret < 0)
1175 return ret;
1176
1177 ret = phy_power_on(dwc->usb3_generic_phy);
1178 if (ret < 0)
1179 goto err_usb2phy_power;
1180
1178 usb_phy_init(dwc->usb3_phy); 1181 usb_phy_init(dwc->usb3_phy);
1179 usb_phy_init(dwc->usb2_phy); 1182 usb_phy_init(dwc->usb2_phy);
1180 ret = phy_init(dwc->usb2_generic_phy); 1183 ret = phy_init(dwc->usb2_generic_phy);
1181 if (ret < 0) 1184 if (ret < 0)
1182 return ret; 1185 goto err_usb3phy_power;
1183 1186
1184 ret = phy_init(dwc->usb3_generic_phy); 1187 ret = phy_init(dwc->usb3_generic_phy);
1185 if (ret < 0) 1188 if (ret < 0)
@@ -1212,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
1212err_usb2phy_init: 1215err_usb2phy_init:
1213 phy_exit(dwc->usb2_generic_phy); 1216 phy_exit(dwc->usb2_generic_phy);
1214 1217
1218err_usb3phy_power:
1219 phy_power_off(dwc->usb3_generic_phy);
1220
1221err_usb2phy_power:
1222 phy_power_off(dwc->usb2_generic_phy);
1223
1215 return ret; 1224 return ret;
1216} 1225}
1217 1226
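
The reworked dwc3_core_soft_reset() above drops the two fixed mdelay(100) waits in favor of a bounded poll on the self-clearing DCTL.CSFTRST bit. A hedged, self-contained sketch of that loop shape, with a mocked register that clears after a few reads:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define CSFTRST (1u << 30)

static uint32_t dctl = CSFTRST;

/* mock: "hardware" clears the bit on the fourth read */
static uint32_t read_dctl(void)
{
	static int reads;

	if (++reads > 3)
		dctl &= ~CSFTRST;
	return dctl;
}

static int soft_reset(void)
{
	int retries = 1000;

	do {
		if (!(read_dctl() & CSFTRST))
			return 0;	/* hardware finished the reset */
		/* a udelay(1) would sit here in the kernel version */
	} while (--retries);

	return -ETIMEDOUT;		/* never spin forever on broken hardware */
}

int main(void)
{
	printf("soft_reset() = %d\n", soft_reset());
	return 0;
}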
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe1b6a7..cebf9e38b60a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 	file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
 	if (!file) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err2;
 	}
 
 	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 				dwc, &dwc3_mode_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 	}
 
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 				dwc, &dwc3_testmode_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 
 		file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
 				dwc, &dwc3_link_state_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 	}
 
 	return 0;
 
+err2:
+	kfree(dwc->regset);
+
 err1:
 	debugfs_remove_recursive(root);
 
@@ -686,5 +689,5 @@ err0:
 void dwc3_debugfs_exit(struct dwc3 *dwc)
 {
 	debugfs_remove_recursive(dwc->root);
-	dwc->root = NULL;
+	kfree(dwc->regset);
 }
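
The debugfs fix above introduces an err2 label so dwc->regset is freed on every failure after its allocation, falling through into err1 for the older cleanup. A small standalone sketch of this ordered-label unwind idiom, with malloc/free standing in for the real debugfs resources:

#include <stdio.h>
#include <stdlib.h>

static int register_something(void) { return -1; }	/* force the error path */

static int init_two(void)
{
	char *root, *regset;

	root = malloc(16);		/* analogue of the debugfs root */
	if (!root)
		return -1;

	regset = malloc(16);		/* analogue of dwc->regset */
	if (!regset)
		goto err1;

	if (register_something() < 0)
		goto err2;

	free(regset);
	free(root);
	return 0;

err2:
	free(regset);			/* undo the newest allocation first... */
err1:
	free(root);			/* ...then fall through to the older one */
	return -1;
}

int main(void)
{
	printf("init_two() = %d\n", init_two());
	return 0;
}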
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..72664700b8a2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -39,8 +39,6 @@
 #define USBSS_IRQ_COREIRQ_EN	BIT(0)
 #define USBSS_IRQ_COREIRQ_CLR	BIT(0)
 
-static u64 kdwc3_dma_mask;
-
 struct dwc3_keystone {
 	struct device		*dev;
 	struct clk		*clk;
@@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev)
 	if (IS_ERR(kdwc->usbss))
 		return PTR_ERR(kdwc->usbss);
 
-	kdwc3_dma_mask = dma_get_mask(dev);
-	dev->dma_mask = &kdwc3_dma_mask;
-
 	kdwc->clk = devm_clk_get(kdwc->dev, "usb");
 
 	error = clk_prepare_enable(kdwc->clk);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 22e9606d8e08..55da2c7f727f 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0) {
 		dev_err(dev, "get_sync failed with err %d\n", ret);
-		goto err0;
+		goto err1;
 	}
 
 	dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
-		goto err2;
+		goto err1;
 
 	ret = of_platform_populate(node, NULL, NULL, dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to create dwc3 core\n");
-		goto err3;
+		goto err2;
 	}
 
 	dwc3_omap_enable_irqs(omap);
 
 	return 0;
 
-err3:
+err2:
 	extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
 	extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
-err2:
-	dwc3_omap_disable_irqs(omap);
 
 err1:
 	pm_runtime_put_sync(dev);
-
-err0:
 	pm_runtime_disable(dev);
 
 	return ret;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..adc1e8a624cb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -35,6 +35,7 @@
 #define PCI_DEVICE_ID_INTEL_SPTLP	0x9d30
 #define PCI_DEVICE_ID_INTEL_SPTH	0xa130
 #define PCI_DEVICE_ID_INTEL_BXT		0x0aaa
+#define PCI_DEVICE_ID_INTEL_BXT_M	0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL		0x5aaa
 
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
 	{  }	/* Terminating Entry */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3ac170f9d94d..8e4a1b195e9b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
 		if (!usb_endpoint_xfer_isoc(desc))
-			return 0;
+			goto out;
 
 		/* Link TRB for ISOC. The HWO bit is never reset */
 		trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
 	}
 
+out:
 	switch (usb_endpoint_type(desc)) {
 	case USB_ENDPOINT_XFER_CONTROL:
-		strlcat(dep->name, "-control", sizeof(dep->name));
+		/* don't change name */
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -2487,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
 	 * implemented.
 	 */
 
-	dwc->gadget_driver->resume(&dwc->gadget);
+	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+		spin_unlock(&dwc->lock);
+		dwc->gadget_driver->resume(&dwc->gadget);
+		spin_lock(&dwc->lock);
+	}
 }
 
 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
@@ -2931,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
+	if (!dwc->gadget_driver)
+		return 0;
+
 	if (dwc->pullups_connected) {
 		dwc3_gadget_disable_irq(dwc);
 		dwc3_gadget_run_stop(dwc, true, true);
@@ -2949,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
 	struct dwc3_ep *dep;
 	int ret;
 
+	if (!dwc->gadget_driver)
+		return 0;
+
 	/* Start with SuperSpeed Default */
 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
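
dwc3_gadget_wakeup_interrupt() above now drops dwc->lock around the gadget driver's resume() callback and NULL-checks it first. A userspace sketch of the unlock-around-callback pattern, using a pthread mutex to model the spinlock; this is illustration only, not the dwc3 locking rules:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void resume_cb(void)
{
	/* a real callback may take its own locks or call back into us */
	puts("resume callback ran without our lock held");
}

static void wakeup_event(void (*resume)(void))
{
	pthread_mutex_lock(&lock);
	/* ... update internal state under the lock ... */
	if (resume) {				/* NULL check mirrors the fix */
		pthread_mutex_unlock(&lock);
		resume();			/* call out with the lock dropped */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wakeup_event(resume_cb);
	return 0;
}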
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a5c62093c26c..524e233d48de 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -651,12 +651,15 @@ static int bos_desc(struct usb_composite_dev *cdev)
 		ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
 		ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
 		ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+		ssp_cap->bReserved = 0;
+		ssp_cap->wReserved = 0;
 
 		/* SSAC = 1 (2 attributes) */
 		ssp_cap->bmAttributes = cpu_to_le32(1);
 
 		/* Min RX/TX Lane Count = 1 */
-		ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12);
+		ssp_cap->wFunctionalitySupport =
+			cpu_to_le16((1 << 8) | (1 << 12));
 
 		/*
 		 * bmSublinkSpeedAttr[0]:
@@ -666,7 +669,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
 		 * LSM = 10 (10 Gbps)
 		 */
 		ssp_cap->bmSublinkSpeedAttr[0] =
-			(3 << 4) | (1 << 14) | (0xa << 16);
+			cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
 		/*
 		 * bmSublinkSpeedAttr[1] =
 		 * ST = Symmetric, TX
@@ -675,7 +678,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
 		 * LSM = 10 (10 Gbps)
 		 */
 		ssp_cap->bmSublinkSpeedAttr[1] =
-			(3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7);
+			cpu_to_le32((3 << 4) | (1 << 14) |
+				    (0xa << 16) | (1 << 7));
 	}
 
 	return le16_to_cpu(bos->wTotalLength);
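
The composite.c hunks wrap multi-byte BOS descriptor fields in cpu_to_le16/cpu_to_le32 because USB descriptors are little-endian on the wire regardless of host byte order. A hedged userspace analogue using glibc's htole16/htole32 from endian.h (the field values echo the patch; the program itself is just a demonstration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t func_support = htole16((1 << 8) | (1 << 12));
	uint32_t attr0        = htole32((3 << 4) | (1 << 14) | (0xa << 16));

	/* the bytes below are what actually goes on the wire, in order */
	unsigned char *p = (unsigned char *)&attr0;

	printf("wFunctionalitySupport = %#06x\n", (unsigned int)func_support);
	printf("bmSublinkSpeedAttr[0] bytes: %02x %02x %02x %02x\n",
	       p[0], p[1], p[2], p[3]);
	return 0;
}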
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8cfce105c7ee..15b648cbc75c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 						   work);
 	int ret = io_data->req->status ? io_data->req->status :
 					 io_data->req->actual;
+	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
 	if (io_data->read && ret > 0) {
 		use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
 
 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
 
-	if (io_data->ffs->ffs_eventfd &&
-	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
 
 	usb_ep_free_request(io_data->ep, io_data->req);
 
-	io_data->kiocb->private = NULL;
 	if (io_data->read)
 		kfree(io_data->to_free);
 	kfree(io_data->buf);
@@ -1147,8 +1146,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 	ffs->sb              = sb;
 	data->ffs_data       = NULL;
 	sb->s_fs_info        = ffs;
-	sb->s_blocksize      = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize      = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic          = FUNCTIONFS_MAGIC;
 	sb->s_op             = &ffs_sb_operations;
 	sb->s_time_gran      = 1;
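
The f_fs change latches the IOCB_EVENTFD flag into a local before calling ki_complete(), since the completion may free the kiocb. A minimal sketch of the snapshot-before-completion rule; the struct and functions here are invented for illustration, not the f_fs types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct io_req {
	bool has_eventfd;
};

static void complete_and_free(struct io_req *req)
{
	free(req);			/* after this, req must not be touched */
}

static void finish_io(struct io_req *req)
{
	bool has_eventfd = req->has_eventfd;	/* snapshot first */

	complete_and_free(req);			/* may free req */

	if (!has_eventfd)			/* safe: we use the copy */
		puts("signalling eventfd");
}

int main(void)
{
	struct io_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	finish_io(req);
	return 0;
}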
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 84c0ee5ebd1e..58fc199a18ec 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/kfifo.h>
+#include <linux/spinlock.h>
 
 #include <sound/core.h>
 #include <sound/initval.h>
@@ -89,6 +90,7 @@ struct f_midi {
 	unsigned int buflen, qlen;
 	/* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
 	DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
+	spinlock_t transmit_lock;
 	unsigned int in_last_port;
 
 	struct gmidi_in_port	in_ports_array[/* in_ports */];
@@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 	/* allocate a bunch of read buffers and queue them all at once. */
 	for (i = 0; i < midi->qlen && err == 0; i++) {
 		struct usb_request *req =
-			midi_alloc_ep_req(midi->out_ep, midi->buflen);
+			midi_alloc_ep_req(midi->out_ep,
+				max_t(unsigned, midi->buflen,
+					bulk_out_desc.wMaxPacketSize));
 		if (req == NULL)
 			return -ENOMEM;
 
@@ -597,17 +601,24 @@ static void f_midi_transmit(struct f_midi *midi)
 {
 	struct usb_ep *ep = midi->in_ep;
 	int ret;
+	unsigned long flags;
 
 	/* We only care about USB requests if IN endpoint is enabled */
 	if (!ep || !ep->enabled)
 		goto drop_out;
 
+	spin_lock_irqsave(&midi->transmit_lock, flags);
+
 	do {
 		ret = f_midi_do_transmit(midi, ep);
-		if (ret < 0)
+		if (ret < 0) {
+			spin_unlock_irqrestore(&midi->transmit_lock, flags);
 			goto drop_out;
+		}
 	} while (ret);
 
+	spin_unlock_irqrestore(&midi->transmit_lock, flags);
+
 	return;
 
 drop_out:
@@ -1201,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
 	if (status)
 		goto setup_fail;
 
+	spin_lock_init(&midi->transmit_lock);
+
 	++opts->refcnt;
 	mutex_unlock(&opts->lock);
 
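
f_midi now serializes the whole transmit drain loop under a new transmit_lock, unlocking on every early exit. A rough userspace model of that shape, with a pthread mutex standing in for spin_lock_irqsave and a faked FIFO:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t transmit_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;

static int do_transmit(void)
{
	return pending ? pending-- : 0;	/* 0 means nothing left to send */
}

static void transmit(void)
{
	int ret;

	pthread_mutex_lock(&transmit_lock);
	do {
		ret = do_transmit();
		if (ret < 0) {
			pthread_mutex_unlock(&transmit_lock);
			return;		/* unlock on every early exit */
		}
	} while (ret);
	pthread_mutex_unlock(&transmit_lock);
}

int main(void)
{
	transmit();
	printf("pending = %d\n", pending);
	return 0;
}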
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5cdaf0150a4e..e64479f882a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
 		return -ENODEV;
 
 	/* superblock */
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = GADGETFS_MAGIC;
 	sb->s_op = &gadget_fs_operations;
 	sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 81d42cce885a..18569de06b04 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc)
 		list_del_init(&req->queue);
 		request_complete(ep, req, -ECONNRESET);
 	}
-
-	/* NOTE: normally, the next call to the gadget driver is in
-	 * charge of disabling endpoints... usually disconnect().
-	 * The exception would be entering a high speed test mode.
-	 *
-	 * FIXME remove this code ... and retest thoroughly.
-	 */
-	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
-		if (ep->ep.desc) {
-			spin_unlock(&udc->lock);
-			usba_ep_disable(&ep->ep);
-			spin_lock(&udc->lock);
-		}
-	}
 }
 
 static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 4151597e9d28..e4e70e11d0f6 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 	INIT_WORK(&gadget->work, usb_gadget_state_work);
 	gadget->dev.parent = parent;
 
-#ifdef CONFIG_HAS_DMA
-	dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
-	gadget->dev.dma_parms = parent->dma_parms;
-	gadget->dev.dma_mask = parent->dma_mask;
-#endif
-
 	if (release)
 		gadget->dev.release = release;
 	else
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 80c1de239e9a..bad0d1f9a41d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,6 +1861,12 @@ no_bw:
 	kfree(xhci->rh_bw);
 	kfree(xhci->ext_caps);
 
+	xhci->usb2_ports = NULL;
+	xhci->usb3_ports = NULL;
+	xhci->port_array = NULL;
+	xhci->rh_bw = NULL;
+	xhci->ext_caps = NULL;
+
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
 	xhci->bus_state[0].bus_suspended = 0;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7a1c42..48672fac7ff3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
 	struct xhci_hcd *xhci;
 
 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
 	if (xhci->shared_hcd) {
 		usb_remove_hcd(xhci->shared_hcd);
 		usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5c15e9bc5f7a..474b5fa14900 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
 
 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
 {
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
 	/*
 	 * As of now platform drivers don't provide MSI support so we ensure
 	 * here that the generic code does not try to make a pci_dev from our
 	 * dev struct in order to setup MSI
 	 */
 	xhci->quirks |= XHCI_PLAT;
+
+	/*
+	 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+	 * to 1. However, these SoCs don't support 64-bit address memory
+	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+	 * xhci_gen_setup().
+	 */
+	if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+	    xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
 }
 
 /* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3936c4..529c3c40f901 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
14#include "xhci.h" /* for hcd_to_xhci() */ 14#include "xhci.h" /* for hcd_to_xhci() */
15 15
16enum xhci_plat_type { 16enum xhci_plat_type {
17 XHCI_PLAT_TYPE_MARVELL_ARMADA, 17 XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
18 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2, 18 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
19 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3, 19 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
20}; 20};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7cf66212ceae..99b4ff42f7a0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
 	int ret;
 
-	if (xhci->xhc_state) {
+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
 		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
 		return -ESHUTDOWN;
 	}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d51ee0c3cf9f..9e71c96ad74a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
147 "waited %u microseconds.\n", 147 "waited %u microseconds.\n",
148 XHCI_MAX_HALT_USEC); 148 XHCI_MAX_HALT_USEC);
149 if (!ret) 149 if (!ret)
150 xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); 150 /* clear state flags. Including dying, halted or removing */
151 xhci->xhc_state = 0;
151 152
152 return ret; 153 return ret;
153} 154}
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1108 /* Resume root hubs only when have pending events. */ 1109 /* Resume root hubs only when have pending events. */
1109 status = readl(&xhci->op_regs->status); 1110 status = readl(&xhci->op_regs->status);
1110 if (status & STS_EINT) { 1111 if (status & STS_EINT) {
1111 usb_hcd_resume_root_hub(hcd);
1112 usb_hcd_resume_root_hub(xhci->shared_hcd); 1112 usb_hcd_resume_root_hub(xhci->shared_hcd);
1113 usb_hcd_resume_root_hub(hcd);
1113 } 1114 }
1114 } 1115 }
1115 1116
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1124 1125
1125 /* Re-enable port polling. */ 1126 /* Re-enable port polling. */
1126 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); 1127 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1127 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1128 usb_hcd_poll_rh_status(hcd);
1129 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 1128 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1130 usb_hcd_poll_rh_status(xhci->shared_hcd); 1129 usb_hcd_poll_rh_status(xhci->shared_hcd);
1130 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1131 usb_hcd_poll_rh_status(hcd);
1131 1132
1132 return retval; 1133 return retval;
1133} 1134}
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2773 if (ret <= 0) 2774 if (ret <= 0)
2774 return ret; 2775 return ret;
2775 xhci = hcd_to_xhci(hcd); 2776 xhci = hcd_to_xhci(hcd);
2776 if (xhci->xhc_state & XHCI_STATE_DYING) 2777 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2778 (xhci->xhc_state & XHCI_STATE_REMOVING))
2777 return -ENODEV; 2779 return -ENODEV;
2778 2780
2779 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2781 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3820 3822
3821 mutex_lock(&xhci->mutex); 3823 mutex_lock(&xhci->mutex);
3822 3824
3823 if (xhci->xhc_state) /* dying or halted */ 3825 if (xhci->xhc_state) /* dying, removing or halted */
3824 goto out; 3826 goto out;
3825 3827
3826 if (!udev->slot_id) { 3828 if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4948 return retval; 4950 return retval;
4949 xhci_dbg(xhci, "Reset complete\n"); 4951 xhci_dbg(xhci, "Reset complete\n");
4950 4952
4953 /*
4954 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
4955 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
4956 * address memory pointers actually. So, this driver clears the AC64
4957 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
4958 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
4959 */
4960 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
4961 xhci->hcc_params &= ~BIT(0);
4962
4951 /* Set dma_mask and coherent_dma_mask to 64-bits, 4963 /* Set dma_mask and coherent_dma_mask to 64-bits,
4952 * if xHC supports 64-bit addressing */ 4964 * if xHC supports 64-bit addressing */
4953 if (HCC_64BIT_ADDR(xhci->hcc_params) && 4965 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e293e0974f48..6c629c97f8ad 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
 	 */
 #define XHCI_STATE_DYING	(1 << 0)
 #define XHCI_STATE_HALTED	(1 << 1)
+#define XHCI_STATE_REMOVING	(1 << 2)
 	/* Statistics */
 	int			error_bitmask;
 	unsigned int		quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
 #define XHCI_PME_STUCK_QUIRK	(1 << 20)
 #define XHCI_MTK_HOST		(1 << 21)
 #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
+#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
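
xhci.h grows one more bit in each flag word above: XHCI_STATE_REMOVING and XHCI_NO_64BIT_SUPPORT. The underlying pattern is a single unsigned word with one bit per independent condition, tested with & and combined with |. A toy sketch; the names echo but are not the xHCI definitions:

#include <stdio.h>

#define STATE_DYING	(1u << 0)
#define STATE_HALTED	(1u << 1)
#define STATE_REMOVING	(1u << 2)	/* modelled on the new bit the patch adds */

int main(void)
{
	unsigned int state = 0;

	state |= STATE_REMOVING;			/* set on driver unbind */

	if (state & (STATE_DYING | STATE_REMOVING))	/* reject new work */
		puts("controller going away, refusing request");

	state = 0;					/* a restart clears everything */
	return 0;
}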
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 579587d97217..3d7af85aecb9 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -65,9 +65,7 @@ struct phy_8x16 {
65 void __iomem *regs; 65 void __iomem *regs;
66 struct clk *core_clk; 66 struct clk *core_clk;
67 struct clk *iface_clk; 67 struct clk *iface_clk;
68 struct regulator *v3p3; 68 struct regulator_bulk_data regulator[3];
69 struct regulator *v1p8;
70 struct regulator *vdd;
71 69
72 struct reset_control *phy_reset; 70 struct reset_control *phy_reset;
73 71
@@ -78,51 +76,6 @@ struct phy_8x16 {
78 struct notifier_block reboot_notify; 76 struct notifier_block reboot_notify;
79}; 77};
80 78
81static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
82{
83 int ret;
84
85 ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
86 if (ret)
87 return ret;
88
89 ret = regulator_enable(qphy->vdd);
90 if (ret)
91 return ret;
92
93 ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
94 if (ret)
95 goto off_vdd;
96
97 ret = regulator_enable(qphy->v3p3);
98 if (ret)
99 goto off_vdd;
100
101 ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
102 if (ret)
103 goto off_3p3;
104
105 ret = regulator_enable(qphy->v1p8);
106 if (ret)
107 goto off_3p3;
108
109 return 0;
110
111off_3p3:
112 regulator_disable(qphy->v3p3);
113off_vdd:
114 regulator_disable(qphy->vdd);
115
116 return ret;
117}
118
119static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
120{
121 regulator_disable(qphy->v1p8);
122 regulator_disable(qphy->v3p3);
123 regulator_disable(qphy->vdd);
124}
125
126static int phy_8x16_notify_connect(struct usb_phy *phy, 79static int phy_8x16_notify_connect(struct usb_phy *phy,
127 enum usb_device_speed speed) 80 enum usb_device_speed speed)
128{ 81{
@@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy)
261 214
262static int phy_8x16_read_devicetree(struct phy_8x16 *qphy) 215static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
263{ 216{
264 struct regulator_bulk_data regs[3];
265 struct device *dev = qphy->phy.dev; 217 struct device *dev = qphy->phy.dev;
266 int ret; 218 int ret;
267 219
@@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
273 if (IS_ERR(qphy->iface_clk)) 225 if (IS_ERR(qphy->iface_clk))
274 return PTR_ERR(qphy->iface_clk); 226 return PTR_ERR(qphy->iface_clk);
275 227
276 regs[0].supply = "v3p3"; 228 qphy->regulator[0].supply = "v3p3";
277 regs[1].supply = "v1p8"; 229 qphy->regulator[1].supply = "v1p8";
278 regs[2].supply = "vddcx"; 230 qphy->regulator[2].supply = "vddcx";
279 231
280 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs); 232 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
233 qphy->regulator);
281 if (ret) 234 if (ret)
282 return ret; 235 return ret;
283 236
284 qphy->v3p3 = regs[0].consumer;
285 qphy->v1p8 = regs[1].consumer;
286 qphy->vdd = regs[2].consumer;
287
288 qphy->phy_reset = devm_reset_control_get(dev, "phy"); 237 qphy->phy_reset = devm_reset_control_get(dev, "phy");
289 if (IS_ERR(qphy->phy_reset)) 238 if (IS_ERR(qphy->phy_reset))
290 return PTR_ERR(qphy->phy_reset); 239 return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev)
364 if (ret < 0) 313 if (ret < 0)
365 goto off_core; 314 goto off_core;
366 315
367 ret = phy_8x16_regulators_enable(qphy); 316 ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
368 if (0 && ret) 317 qphy->regulator);
318 if (WARN_ON(ret))
369 goto off_clks; 319 goto off_clks;
370 320
371 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify; 321 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@ off_extcon:
387 extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB, 337 extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
388 &qphy->vbus_notify); 338 &qphy->vbus_notify);
389off_power: 339off_power:
390 phy_8x16_regulators_disable(qphy); 340 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
391off_clks: 341off_clks:
392 clk_disable_unprepare(qphy->iface_clk); 342 clk_disable_unprepare(qphy->iface_clk);
393off_core: 343off_core:
@@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev)
413 363
414 clk_disable_unprepare(qphy->iface_clk); 364 clk_disable_unprepare(qphy->iface_clk);
415 clk_disable_unprepare(qphy->core_clk); 365 clk_disable_unprepare(qphy->core_clk);
416 phy_8x16_regulators_disable(qphy); 366 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
417 return 0; 367 return 0;
418} 368}
419 369
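
The phy-qcom-8x16 rework replaces three hand-rolled regulator enable/disable paths with the regulator_bulk_* helpers over one array, which also removes the dead "if (0 && ret)" check. A hedged sketch of the table-driven idea with mock enable/disable functions (not the regulator API):

#include <stdio.h>

struct supply {
	const char *name;
	int enabled;
};

static int enable_one(struct supply *s)  { s->enabled = 1; return 0; }
static void disable_one(struct supply *s) { s->enabled = 0; }

static int bulk_enable(struct supply *v, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = enable_one(&v[i]);
		if (ret) {
			while (--i >= 0)	/* roll back what succeeded */
				disable_one(&v[i]);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct supply supplies[3] = { {"v3p3", 0}, {"v1p8", 0}, {"vddcx", 0} };

	if (bulk_enable(supplies, 3) == 0)
		puts("all supplies enabled");
	return 0;
}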
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b4de70ee16d3..000f9750149f 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
190 goto __usbhs_pkt_handler_end; 190 goto __usbhs_pkt_handler_end;
191 } 191 }
192 192
193 ret = func(pkt, &is_done); 193 if (likely(func))
194 ret = func(pkt, &is_done);
194 195
195 if (is_done) 196 if (is_done)
196 __usbhsf_pkt_del(pkt); 197 __usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
889 890
890 pkt->trans = len; 891 pkt->trans = len;
891 892
893 usbhsf_tx_irq_ctrl(pipe, 0);
892 INIT_WORK(&pkt->work, xfer_work); 894 INIT_WORK(&pkt->work, xfer_work);
893 schedule_work(&pkt->work); 895 schedule_work(&pkt->work);
894 896
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 664b263e4b20..53d104b56ef1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
158 struct usbhs_pipe *pipe = pkt->pipe; 158 struct usbhs_pipe *pipe = pkt->pipe;
159 struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); 159 struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
160 struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); 160 struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
161 unsigned long flags;
161 162
162 ureq->req.actual = pkt->actual; 163 ureq->req.actual = pkt->actual;
163 164
164 usbhsg_queue_pop(uep, ureq, 0); 165 usbhs_lock(priv, flags);
166 if (uep)
167 __usbhsg_queue_pop(uep, ureq, 0);
168 usbhs_unlock(priv, flags);
165} 169}
166 170
167static void usbhsg_queue_push(struct usbhsg_uep *uep, 171static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbfe761c7fba..dd47823bb014 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index b283eb8b86d6..bbeeb2bd55a8 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
447 struct usb_serial *serial = port->serial; 447 struct usb_serial *serial = port->serial;
448 struct cypress_private *priv; 448 struct cypress_private *priv;
449 449
450 if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
451 dev_err(&port->dev, "required endpoint is missing\n");
452 return -ENODEV;
453 }
454
450 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); 455 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
451 if (!priv) 456 if (!priv)
452 return -ENOMEM; 457 return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
606 cypress_set_termios(tty, port, &priv->tmp_termios); 611 cypress_set_termios(tty, port, &priv->tmp_termios);
607 612
608 /* setup the port and start reading from the device */ 613 /* setup the port and start reading from the device */
609 if (!port->interrupt_in_urb) {
610 dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
611 __func__);
612 return -1;
613 }
614
615 usb_fill_int_urb(port->interrupt_in_urb, serial->dev, 614 usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
616 usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), 615 usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
617 port->interrupt_in_urb->transfer_buffer, 616 port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 010a42a92688..16e8e37b3b36 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
1251 1251
1252static int digi_startup(struct usb_serial *serial) 1252static int digi_startup(struct usb_serial *serial)
1253{ 1253{
1254 struct device *dev = &serial->interface->dev;
1254 struct digi_serial *serial_priv; 1255 struct digi_serial *serial_priv;
1255 int ret; 1256 int ret;
1257 int i;
1258
1259 /* check whether the device has the expected number of endpoints */
1260 if (serial->num_port_pointers < serial->type->num_ports + 1) {
1261 dev_err(dev, "OOB endpoints missing\n");
1262 return -ENODEV;
1263 }
1264
1265 for (i = 0; i < serial->type->num_ports + 1 ; i++) {
1266 if (!serial->port[i]->read_urb) {
1267 dev_err(dev, "bulk-in endpoint missing\n");
1268 return -ENODEV;
1269 }
1270 if (!serial->port[i]->write_urb) {
1271 dev_err(dev, "bulk-out endpoint missing\n");
1272 return -ENODEV;
1273 }
1274 }
1256 1275
1257 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL); 1276 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
1258 if (!serial_priv) 1277 if (!serial_priv)
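
The cypress_m8, digi_acceleport and mct_u232 fixes in this series all validate expected endpoints at probe time, so a malformed or malicious device cannot make the driver dereference a missing URB later. A self-contained sketch of that probe-time check; the types are invented for illustration:

#include <stdio.h>

#define ENODEV 19

struct port {
	void *read_urb;
	void *write_urb;
};

static int validate_ports(const struct port *ports, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!ports[i].read_urb || !ports[i].write_urb)
			return -ENODEV;	/* fail probe early, clean rejection */
	}
	return 0;
}

int main(void)
{
	struct port ports[2] = { { (void *)1, (void *)1 }, { 0, 0 } };

	printf("validate_ports() = %d\n", validate_ports(ports, 2));
	return 0;
}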
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 427ae43ee898..3a814e802dee 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+	/* ICP DAS I-756xU devices */
+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513994..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
 #define NOVITUS_BONO_E_PID		0x6010
 
 /*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID			0x1b5c
+#define ICPDAS_I7560U_PID		0x0103
+#define ICPDAS_I7561U_PID		0x0104
+#define ICPDAS_I7563U_PID		0x0105
+
+/*
  * RT Systems programming cables for various ham radios
  */
 #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 4446b8d70ac2..885655315de1 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
 
 static int mct_u232_port_probe(struct usb_serial_port *port)
 {
+	struct usb_serial *serial = port->serial;
 	struct mct_u232_private *priv;
 
+	/* check first to simplify error handling */
+	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+		dev_err(&port->dev, "expected endpoint missing\n");
+		return -ENODEV;
+	}
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
 	/* Use second interrupt-in endpoint for reading. */
-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+	priv->read_urb = serial->port[1]->interrupt_in_urb;
 	priv->read_urb->context = port;
 
 	spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834b83..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),	/* D-Link DWM-221 B1 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e2..90901861bfc0 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
 	unsigned int max_sectors = 64;
 
 	if (us->fflags & US_FL_MAX_SECTORS_MIN)
-		max_sectors = PAGE_CACHE_SIZE >> 9;
+		max_sectors = PAGE_SIZE >> 9;
 	if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
 		blk_queue_max_hw_sectors(sdev->request_queue,
 					 max_sectors);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 13e4cc31bc79..16bc679dc2fc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
  * USB Attached SCSI
  * Note that this is not the same as the USB Mass Storage driver
  *
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
  * Copyright Matthew Wilcox for Intel Corp, 2010
  * Copyright Sarah Sharp for Intel Corp, 2010
  *
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
 	return SUCCESS;
 }
 
+static int uas_target_alloc(struct scsi_target *starget)
+{
+	struct uas_dev_info *devinfo = (struct uas_dev_info *)
+			dev_to_shost(starget->dev.parent)->hostdata;
+
+	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+		starget->no_report_luns = 1;
+
+	return 0;
+}
+
 static int uas_slave_alloc(struct scsi_device *sdev)
 {
 	struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
 	if (devinfo->flags & US_FL_BROKEN_FUA)
 		sdev->broken_fua = 1;
 
-	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
 	return 0;
 }
 
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
 	.module = THIS_MODULE,
 	.name = "uas",
 	.queuecommand = uas_queuecommand,
+	.target_alloc = uas_target_alloc,
 	.slave_alloc = uas_slave_alloc,
 	.slave_configure = uas_slave_configure,
 	.eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	if (result)
 		goto set_alt0;
 
+	/*
+	 * 1 tag is reserved for untagged commands +
+	 * 1 tag to avoid off by one errors in some bridge firmwares
+	 */
+	shost->can_queue = devinfo->qdepth - 2;
+
 	usb_set_intfdata(intf, shost);
 	result = scsi_add_host(shost, &intf->dev);
 	if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_ATA_1X),
 
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+		"Seagate",
+		"Expansion Desk",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_LUNS),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
 		"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
482 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | 482 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
483 US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | 483 US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
484 US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES | 484 US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
485 US_FL_MAX_SECTORS_240); 485 US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
486 486
487 p = quirks; 487 p = quirks;
488 while (*p) { 488 while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
532 case 'i': 532 case 'i':
533 f |= US_FL_IGNORE_DEVICE; 533 f |= US_FL_IGNORE_DEVICE;
534 break; 534 break;
535 case 'j':
536 f |= US_FL_NO_REPORT_LUNS;
537 break;
535 case 'l': 538 case 'l':
536 f |= US_FL_NOT_LOCKABLE; 539 f |= US_FL_NOT_LOCKABLE;
537 break; 540 break;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf003f19..e40da7759a0e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
 	if (!(size > 0))
 		return 0;
 
+	if (size > urb->transfer_buffer_length) {
+		/* should not happen, probably malicious packet */
+		if (ud->side == USBIP_STUB) {
+			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+			return 0;
+		} else {
+			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+			return -EPIPE;
+		}
+	}
+
 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
 	if (ret != size) {
 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
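
The usbip hunk rejects any peer-supplied length larger than the URB's transfer buffer before receiving into it. A small userspace sketch of the same bounds check; the function and its signature are hypothetical, though the reject-oversized-length rule and the -EPIPE choice mirror the patch:

#include <stdio.h>
#include <string.h>

#define EPIPE 32

static int recv_xbuff(char *buf, size_t buf_len, size_t claimed_size,
		      const char *wire, size_t wire_len)
{
	if (claimed_size > buf_len)
		return -EPIPE;		/* malicious or corrupt header */

	if (claimed_size > wire_len)
		claimed_size = wire_len;
	memcpy(buf, wire, claimed_size);
	return (int)claimed_size;
}

int main(void)
{
	char buf[8];

	/* peer claims 64 bytes for an 8-byte buffer: rejected */
	printf("%d\n", recv_xbuff(buf, sizeof(buf), 64, "payload", 7));
	return 0;
}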
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5851c7..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
440 fb->off_ienb = CLCD_PL111_IENB; 440 fb->off_ienb = CLCD_PL111_IENB;
441 fb->off_cntl = CLCD_PL111_CNTL; 441 fb->off_cntl = CLCD_PL111_CNTL;
442 } else { 442 } else {
443#ifdef CONFIG_ARCH_VERSATILE 443 if (of_machine_is_compatible("arm,versatile-ab") ||
444 fb->off_ienb = CLCD_PL111_IENB; 444 of_machine_is_compatible("arm,versatile-pb")) {
445 fb->off_cntl = CLCD_PL111_CNTL; 445 fb->off_ienb = CLCD_PL111_IENB;
446#else 446 fb->off_cntl = CLCD_PL111_CNTL;
447 fb->off_ienb = CLCD_PL110_IENB; 447 } else {
448 fb->off_cntl = CLCD_PL110_CNTL; 448 fb->off_ienb = CLCD_PL110_IENB;
449#endif 449 fb->off_cntl = CLCD_PL110_CNTL;
450 }
450 } 451 }
451 452
452 fb->clk = clk_get(&fb->dev->dev, NULL); 453 fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, 200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
201 char *desc, struct gpio_desc **gpiod) 201 char *desc, struct gpio_desc **gpiod)
202{ 202{
203 struct gpio_desc *gd;
204 int r; 203 int r;
205 204
206 *gpiod = NULL;
207
208 r = devm_gpio_request_one(dev, gpio, flags, desc); 205 r = devm_gpio_request_one(dev, gpio, flags, desc);
209 if (r) 206 if (r) {
207 *gpiod = NULL;
210 return r == -ENOENT ? 0 : r; 208 return r == -ENOENT ? 0 : r;
209 }
211 210
212 gd = gpio_to_desc(gpio); 211 *gpiod = gpio_to_desc(gpio);
213 if (IS_ERR(gd))
214 return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
215 212
216 *gpiod = gd;
217 return 0; 213 return 0;
218} 214}
219 215
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 71a923e53f93..3b1ca4411073 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -735,7 +735,7 @@ out:
735 735
736out_unmap: 736out_unmap:
737 for (i = 0; i < nr_pages; i++) 737 for (i = 0; i < nr_pages; i++)
738 page_cache_release(pages[i]); 738 put_page(pages[i]);
739 739
740 kfree(pages); 740 kfree(pages);
741 741
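
This is one instance of the tree-wide removal of the PAGE_CACHE_* aliases: page_cache_release() had long been a trivial wrapper around put_page(), so call sites convert 1:1. The old alias was essentially:

/* pre-rename alias, roughly: */
#define page_cache_release(page) put_page(page)

/* so loops like the one above become direct put_page() calls:
 *	for (i = 0; i < nr_pages; i++)
 *		put_page(pages[i]);
 */
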
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f6f28cc7eb45..e76bd91a29da 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/delay.h>
20#define VIRTIO_PCI_NO_LEGACY 21#define VIRTIO_PCI_NO_LEGACY
21#include "virtio_pci_common.h" 22#include "virtio_pci_common.h"
22 23
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
271 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 272 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
272 /* 0 status means a reset. */ 273 /* 0 status means a reset. */
273 vp_iowrite8(0, &vp_dev->common->device_status); 274 vp_iowrite8(0, &vp_dev->common->device_status);
274 /* Flush out the status write, and flush in device writes, 275 /* After writing 0 to device_status, the driver MUST wait for a read of
275 * including MSI-X interrupts, if any. */ 276 * device_status to return 0 before reinitializing the device.
276 vp_ioread8(&vp_dev->common->device_status); 277 * This will flush out the status write, and flush in device writes,
278 * including MSI-X interrupts, if any.
279 */
280 while (vp_ioread8(&vp_dev->common->device_status))
281 msleep(1);
277 /* Flush pending VQ/configuration callbacks. */ 282 /* Flush pending VQ/configuration callbacks. */
278 vp_synchronize_vectors(vdev); 283 vp_synchronize_vectors(vdev);
279} 284}
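
vp_reset() previously issued a single dummy read to flush the status write; the virtio 1.0 spec instead requires the driver to poll device_status until it reads back 0 before reinitializing the device. A kernel-style sketch of the poll loop against a generic MMIO status register:

#include <linux/delay.h>
#include <linux/io.h>

static void mmio_device_reset(void __iomem *status)
{
	writeb(0, status);	/* writing 0 requests the reset */
	while (readb(status))	/* spec: wait until it reads back 0 */
		msleep(1);
}

Unlike the single flushing read, the loop guarantees the device has actually finished resetting before vp_synchronize_vectors() runs.
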
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a0806a..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
484 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; 484 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
485 int rc = 0; 485 int rc = 0;
486 486
487 irq_move_irq(data); 487 if (!VALID_EVTCHN(evtchn))
488 return;
488 489
489 if (VALID_EVTCHN(evtchn)) 490 if (unlikely(irqd_is_setaffinity_pending(data))) {
491 int masked = test_and_set_mask(evtchn);
492
493 clear_evtchn(evtchn);
494
495 irq_move_masked_irq(data);
496
497 if (!masked)
498 unmask_evtchn(evtchn);
499 } else
490 clear_evtchn(evtchn); 500 clear_evtchn(evtchn);
491 501
492 if (pirq_needs_eoi(data->irq)) { 502 if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
1357{ 1367{
1358 int evtchn = evtchn_from_irq(data->irq); 1368 int evtchn = evtchn_from_irq(data->irq);
1359 1369
1360 irq_move_irq(data); 1370 if (!VALID_EVTCHN(evtchn))
1371 return;
1361 1372
1362 if (VALID_EVTCHN(evtchn)) 1373 if (unlikely(irqd_is_setaffinity_pending(data))) {
1374 int masked = test_and_set_mask(evtchn);
1375
1376 clear_evtchn(evtchn);
1377
1378 irq_move_masked_irq(data);
1379
1380 if (!masked)
1381 unmask_evtchn(evtchn);
1382 } else
1363 clear_evtchn(evtchn); 1383 clear_evtchn(evtchn);
1364} 1384}
1365 1385
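
Both Xen ack paths (eoi_pirq() and ack_dynirq()) now bail out early on an invalid event channel and, when an affinity change is pending, mask the channel before clearing and migrating it, so the event cannot fire on the old CPU mid-move; the channel is unmasked afterwards only if it was not already masked. A self-contained toy model of the mask-move-unmask ordering (the helpers below are stand-ins for the evtchn primitives):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for an event channel's mask and pending bits. */
static bool chan_masked;
static bool chan_pending = true;

static bool test_and_set_mask(void)
{
	bool was = chan_masked;

	chan_masked = true;
	return was;
}

static void clear_pending(void)     { chan_pending = false; }
static void unmask(void)            { chan_masked = false; }
static void move_while_masked(void) { /* retarget the IRQ here */ }

static void ack_and_move(bool affinity_change_pending)
{
	if (affinity_change_pending) {
		bool was_masked = test_and_set_mask();	/* mask first */

		clear_pending();	/* ack while masked */
		move_while_masked();	/* no event can fire mid-move */
		if (!was_masked)	/* restore the prior mask state */
			unmask();
	} else {
		clear_pending();
	}
}

int main(void)
{
	ack_and_move(true);
	printf("masked=%d pending=%d\n", chan_masked, chan_pending);
	return 0;
}
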
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e9e04376c52c..ac9225e86bf3 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
153 * If called with zero offset, we should release 153 * If called with zero offset, we should release
154 * the private state associated with the page 154 * the private state associated with the page
155 */ 155 */
156 if (offset == 0 && length == PAGE_CACHE_SIZE) 156 if (offset == 0 && length == PAGE_SIZE)
157 v9fs_fscache_invalidate_page(page); 157 v9fs_fscache_invalidate_page(page);
158} 158}
159 159
@@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page)
166 struct bio_vec bvec; 166 struct bio_vec bvec;
167 int err, len; 167 int err, len;
168 168
169 if (page->index == size >> PAGE_CACHE_SHIFT) 169 if (page->index == size >> PAGE_SHIFT)
170 len = size & ~PAGE_CACHE_MASK; 170 len = size & ~PAGE_MASK;
171 else 171 else
172 len = PAGE_CACHE_SIZE; 172 len = PAGE_SIZE;
173 173
174 bvec.bv_page = page; 174 bvec.bv_page = page;
175 bvec.bv_offset = 0; 175 bvec.bv_offset = 0;
@@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
271 int retval = 0; 271 int retval = 0;
272 struct page *page; 272 struct page *page;
273 struct v9fs_inode *v9inode; 273 struct v9fs_inode *v9inode;
274 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 274 pgoff_t index = pos >> PAGE_SHIFT;
275 struct inode *inode = mapping->host; 275 struct inode *inode = mapping->host;
276 276
277 277
@@ -288,11 +288,11 @@ start:
288 if (PageUptodate(page)) 288 if (PageUptodate(page))
289 goto out; 289 goto out;
290 290
291 if (len == PAGE_CACHE_SIZE) 291 if (len == PAGE_SIZE)
292 goto out; 292 goto out;
293 293
294 retval = v9fs_fid_readpage(v9inode->writeback_fid, page); 294 retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
295 page_cache_release(page); 295 put_page(page);
296 if (!retval) 296 if (!retval)
297 goto start; 297 goto start;
298out: 298out:
@@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
313 /* 313 /*
314 * zero out the rest of the area 314 * zero out the rest of the area
315 */ 315 */
316 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 316 unsigned from = pos & (PAGE_SIZE - 1);
317 317
318 zero_user(page, from + copied, len - copied); 318 zero_user(page, from + copied, len - copied);
319 flush_dcache_page(page); 319 flush_dcache_page(page);
@@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
331 } 331 }
332 set_page_dirty(page); 332 set_page_dirty(page);
333 unlock_page(page); 333 unlock_page(page);
334 page_cache_release(page); 334 put_page(page);
335 335
336 return copied; 336 return copied;
337} 337}
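
The fs/9p changes belong to the same mechanical PAGE_CACHE_* removal: PAGE_CACHE_SIZE was always defined equal to PAGE_SIZE, so the index and offset arithmetic is unchanged, only spelled with the shorter names. The recurring idioms, shown for a typical 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pos = 0x12345;

	printf("index  = %lu\n", pos >> PAGE_SHIFT);	 /* which page  */
	printf("offset = %lu\n", pos & (PAGE_SIZE - 1)); /* within page */
	printf("base   = %#lx\n", pos & PAGE_MASK);	 /* page start  */
	return 0;
}
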
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index eadc894faea2..b84c291ba1eb 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
421 struct inode *inode = file_inode(file); 421 struct inode *inode = file_inode(file);
422 loff_t i_size; 422 loff_t i_size;
423 unsigned long pg_start, pg_end; 423 unsigned long pg_start, pg_end;
424 pg_start = origin >> PAGE_CACHE_SHIFT; 424 pg_start = origin >> PAGE_SHIFT;
425 pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; 425 pg_end = (origin + retval - 1) >> PAGE_SHIFT;
426 if (inode->i_mapping && inode->i_mapping->nrpages) 426 if (inode->i_mapping && inode->i_mapping->nrpages)
427 invalidate_inode_pages2_range(inode->i_mapping, 427 invalidate_inode_pages2_range(inode->i_mapping,
428 pg_start, pg_end); 428 pg_start, pg_end);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf495cedec26..de3ed8629196 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
87 sb->s_op = &v9fs_super_ops; 87 sb->s_op = &v9fs_super_ops;
88 sb->s_bdi = &v9ses->bdi; 88 sb->s_bdi = &v9ses->bdi;
89 if (v9ses->cache) 89 if (v9ses->cache)
90 sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; 90 sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
91 91
92 sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; 92 sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
93 if (!v9ses->cache) 93 if (!v9ses->cache)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 22fc7c802d69..0cde550050e8 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
510 510
511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, 511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
512 page->index, to); 512 page->index, to);
513 BUG_ON(to > PAGE_CACHE_SIZE); 513 BUG_ON(to > PAGE_SIZE);
514 bsize = AFFS_SB(sb)->s_data_blksize; 514 bsize = AFFS_SB(sb)->s_data_blksize;
515 tmp = page->index << PAGE_CACHE_SHIFT; 515 tmp = page->index << PAGE_SHIFT;
516 bidx = tmp / bsize; 516 bidx = tmp / bsize;
517 boff = tmp % bsize; 517 boff = tmp % bsize;
518 518
@@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page)
613 int err; 613 int err;
614 614
615 pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index); 615 pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
616 to = PAGE_CACHE_SIZE; 616 to = PAGE_SIZE;
617 if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { 617 if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
618 to = inode->i_size & ~PAGE_CACHE_MASK; 618 to = inode->i_size & ~PAGE_MASK;
619 memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to); 619 memset(page_address(page) + to, 0, PAGE_SIZE - to);
620 } 620 }
621 621
622 err = affs_do_readpage_ofs(page, to); 622 err = affs_do_readpage_ofs(page, to);
@@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
646 return err; 646 return err;
647 } 647 }
648 648
649 index = pos >> PAGE_CACHE_SHIFT; 649 index = pos >> PAGE_SHIFT;
650 page = grab_cache_page_write_begin(mapping, index, flags); 650 page = grab_cache_page_write_begin(mapping, index, flags);
651 if (!page) 651 if (!page)
652 return -ENOMEM; 652 return -ENOMEM;
@@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
656 return 0; 656 return 0;
657 657
658 /* XXX: inefficient but safe in the face of short writes */ 658 /* XXX: inefficient but safe in the face of short writes */
659 err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE); 659 err = affs_do_readpage_ofs(page, PAGE_SIZE);
660 if (err) { 660 if (err) {
661 unlock_page(page); 661 unlock_page(page);
662 page_cache_release(page); 662 put_page(page);
663 } 663 }
664 return err; 664 return err;
665} 665}
@@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
677 u32 tmp; 677 u32 tmp;
678 int written; 678 int written;
679 679
680 from = pos & (PAGE_CACHE_SIZE - 1); 680 from = pos & (PAGE_SIZE - 1);
681 to = pos + len; 681 to = pos + len;
682 /* 682 /*
683 * XXX: not sure if this can handle short copies (len < copied), but 683 * XXX: not sure if this can handle short copies (len < copied), but
@@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
692 692
693 bh = NULL; 693 bh = NULL;
694 written = 0; 694 written = 0;
695 tmp = (page->index << PAGE_CACHE_SHIFT) + from; 695 tmp = (page->index << PAGE_SHIFT) + from;
696 bidx = tmp / bsize; 696 bidx = tmp / bsize;
697 boff = tmp % bsize; 697 boff = tmp % bsize;
698 if (boff) { 698 if (boff) {
@@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
788 788
789done: 789done:
790 affs_brelse(bh); 790 affs_brelse(bh);
791 tmp = (page->index << PAGE_CACHE_SHIFT) + from; 791 tmp = (page->index << PAGE_SHIFT) + from;
792 if (tmp > inode->i_size) 792 if (tmp > inode->i_size)
793 inode->i_size = AFFS_I(inode)->mmu_private = tmp; 793 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
794 794
795err_first_bh: 795err_first_bh:
796 unlock_page(page); 796 unlock_page(page);
797 page_cache_release(page); 797 put_page(page);
798 798
799 return written; 799 return written;
800 800
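
In the affs hunks, affs_readpage_ofs() zeroes the tail of the final page when i_size ends inside it, so stale data never leaks past EOF. A runnable sketch of that tail computation (4 KiB page assumed):

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	char page[PAGE_SIZE];
	unsigned long index = 3;		/* 4th page of the file */
	unsigned long i_size = 3 * PAGE_SIZE + 100; /* EOF inside it */
	unsigned long to = PAGE_SIZE;

	if (((index + 1) << PAGE_SHIFT) > i_size) {
		to = i_size & ~PAGE_MASK;	      /* valid bytes   */
		memset(page + to, 0, PAGE_SIZE - to); /* zero the rest */
	}
	printf("valid bytes in last page: %lu\n", to);
	return 0;
}
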
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e10e17788f06..5fda2bc53cd7 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -181,7 +181,7 @@ error:
181static inline void afs_dir_put_page(struct page *page) 181static inline void afs_dir_put_page(struct page *page)
182{ 182{
183 kunmap(page); 183 kunmap(page);
184 page_cache_release(page); 184 put_page(page);
185} 185}
186 186
187/* 187/*
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 999bc3caec92..6344aee4ac4b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page)
164 _debug("cache said ENOBUFS"); 164 _debug("cache said ENOBUFS");
165 default: 165 default:
166 go_on: 166 go_on:
167 offset = page->index << PAGE_CACHE_SHIFT; 167 offset = page->index << PAGE_SHIFT;
168 len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); 168 len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
169 169
170 /* read the contents of the file from the server into the 170 /* read the contents of the file from the server into the
@@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
319 BUG_ON(!PageLocked(page)); 319 BUG_ON(!PageLocked(page));
320 320
321 /* we clean up only if the entire page is being invalidated */ 321 /* we clean up only if the entire page is being invalidated */
322 if (offset == 0 && length == PAGE_CACHE_SIZE) { 322 if (offset == 0 && length == PAGE_SIZE) {
323#ifdef CONFIG_AFS_FSCACHE 323#ifdef CONFIG_AFS_FSCACHE
324 if (PageFsCache(page)) { 324 if (PageFsCache(page)) {
325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index ccd0b212e82a..81dd075356b9 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
93 93
94 kunmap(page); 94 kunmap(page);
95out_free: 95out_free:
96 page_cache_release(page); 96 put_page(page);
97out: 97out:
98 _leave(" = %d", ret); 98 _leave(" = %d", ret);
99 return ret; 99 return ret;
@@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
189 buf = kmap_atomic(page); 189 buf = kmap_atomic(page);
190 memcpy(devname, buf, size); 190 memcpy(devname, buf, size);
191 kunmap_atomic(buf); 191 kunmap_atomic(buf);
192 page_cache_release(page); 192 put_page(page);
193 page = NULL; 193 page = NULL;
194 } 194 }
195 195
@@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
211 return mnt; 211 return mnt;
212 212
213error: 213error:
214 page_cache_release(page); 214 put_page(page);
215error_no_page: 215error_no_page:
216 free_page((unsigned long) options); 216 free_page((unsigned long) options);
217error_no_options: 217error_no_options:
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 81afefe7d8a6..fbdb022b75a2 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb,
315 _enter(""); 315 _enter("");
316 316
317 /* fill in the superblock */ 317 /* fill in the superblock */
318 sb->s_blocksize = PAGE_CACHE_SIZE; 318 sb->s_blocksize = PAGE_SIZE;
319 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 319 sb->s_blocksize_bits = PAGE_SHIFT;
320 sb->s_magic = AFS_FS_MAGIC; 320 sb->s_magic = AFS_FS_MAGIC;
321 sb->s_op = &afs_super_ops; 321 sb->s_op = &afs_super_ops;
322 sb->s_bdi = &as->volume->bdi; 322 sb->s_bdi = &as->volume->bdi;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f70667..65de439bdc4f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
93 _enter(",,%llu", (unsigned long long)pos); 93 _enter(",,%llu", (unsigned long long)pos);
94 94
95 i_size = i_size_read(&vnode->vfs_inode); 95 i_size = i_size_read(&vnode->vfs_inode);
96 if (pos + PAGE_CACHE_SIZE > i_size) 96 if (pos + PAGE_SIZE > i_size)
97 len = i_size - pos; 97 len = i_size - pos;
98 else 98 else
99 len = PAGE_CACHE_SIZE; 99 len = PAGE_SIZE;
100 100
101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page); 101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
102 if (ret < 0) { 102 if (ret < 0) {
@@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
123 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 123 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
124 struct page *page; 124 struct page *page;
125 struct key *key = file->private_data; 125 struct key *key = file->private_data;
126 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 126 unsigned from = pos & (PAGE_SIZE - 1);
127 unsigned to = from + len; 127 unsigned to = from + len;
128 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 128 pgoff_t index = pos >> PAGE_SHIFT;
129 int ret; 129 int ret;
130 130
131 _enter("{%x:%u},{%lx},%u,%u", 131 _enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
151 *pagep = page; 151 *pagep = page;
152 /* page won't leak in error case: it eventually gets cleaned off LRU */ 152 /* page won't leak in error case: it eventually gets cleaned off LRU */
153 153
154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { 154 if (!PageUptodate(page) && len != PAGE_SIZE) {
155 ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); 155 ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
156 if (ret < 0) { 156 if (ret < 0) {
157 kfree(candidate); 157 kfree(candidate);
158 _leave(" = %d [prep]", ret); 158 _leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
266 if (PageDirty(page)) 266 if (PageDirty(page))
267 _debug("dirtied"); 267 _debug("dirtied");
268 unlock_page(page); 268 unlock_page(page);
269 page_cache_release(page); 269 put_page(page);
270 270
271 return copied; 271 return copied;
272} 272}
@@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping,
480 480
481 if (page->index > end) { 481 if (page->index > end) {
482 *_next = index; 482 *_next = index;
483 page_cache_release(page); 483 put_page(page);
484 _leave(" = 0 [%lx]", *_next); 484 _leave(" = 0 [%lx]", *_next);
485 return 0; 485 return 0;
486 } 486 }
@@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping,
494 494
495 if (page->mapping != mapping) { 495 if (page->mapping != mapping) {
496 unlock_page(page); 496 unlock_page(page);
497 page_cache_release(page); 497 put_page(page);
498 continue; 498 continue;
499 } 499 }
500 500
@@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping,
515 515
516 ret = afs_write_back_from_locked_page(wb, page); 516 ret = afs_write_back_from_locked_page(wb, page);
517 unlock_page(page); 517 unlock_page(page);
518 page_cache_release(page); 518 put_page(page);
519 if (ret < 0) { 519 if (ret < 0) {
520 _leave(" = %d", ret); 520 _leave(" = %d", ret);
521 return ret; 521 return ret;
@@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping,
551 &next); 551 &next);
552 mapping->writeback_index = next; 552 mapping->writeback_index = next;
553 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 553 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
554 end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT); 554 end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
555 ret = afs_writepages_region(mapping, wbc, 0, end, &next); 555 ret = afs_writepages_region(mapping, wbc, 0, end, &next);
556 if (wbc->nr_to_write > 0) 556 if (wbc->nr_to_write > 0)
557 mapping->writeback_index = next; 557 mapping->writeback_index = next;
558 } else { 558 } else {
559 start = wbc->range_start >> PAGE_CACHE_SHIFT; 559 start = wbc->range_start >> PAGE_SHIFT;
560 end = wbc->range_end >> PAGE_CACHE_SHIFT; 560 end = wbc->range_end >> PAGE_SHIFT;
561 ret = afs_writepages_region(mapping, wbc, start, end, &next); 561 ret = afs_writepages_region(mapping, wbc, start, end, &next);
562 } 562 }
563 563
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d914c67a9d0..81381cc0dd17 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2292 void *kaddr = kmap(page); 2292 void *kaddr = kmap(page);
2293 stop = !dump_emit(cprm, kaddr, PAGE_SIZE); 2293 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
2294 kunmap(page); 2294 kunmap(page);
2295 page_cache_release(page); 2295 put_page(page);
2296 } else 2296 } else
2297 stop = !dump_skip(cprm, PAGE_SIZE); 2297 stop = !dump_skip(cprm, PAGE_SIZE);
2298 if (stop) 2298 if (stop)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index b1adb92e69de..083ea2bc60ab 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
1533 void *kaddr = kmap(page); 1533 void *kaddr = kmap(page);
1534 res = dump_emit(cprm, kaddr, PAGE_SIZE); 1534 res = dump_emit(cprm, kaddr, PAGE_SIZE);
1535 kunmap(page); 1535 kunmap(page);
1536 page_cache_release(page); 1536 put_page(page);
1537 } else { 1537 } else {
1538 res = dump_skip(cprm, PAGE_SIZE); 1538 res = dump_skip(cprm, PAGE_SIZE);
1539 } 1539 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3172c4e2f502..20a2c02b77c4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
331 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); 331 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
332 332
333 unlock_page(page); 333 unlock_page(page);
334 page_cache_release(page); 334 put_page(page);
335 335
336 return ret; 336 return ret;
337} 337}
@@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size)
1149 inode_lock(bdev->bd_inode); 1149 inode_lock(bdev->bd_inode);
1150 i_size_write(bdev->bd_inode, size); 1150 i_size_write(bdev->bd_inode, size);
1151 inode_unlock(bdev->bd_inode); 1151 inode_unlock(bdev->bd_inode);
1152 while (bsize < PAGE_CACHE_SIZE) { 1152 while (bsize < PAGE_SIZE) {
1153 if (size & bsize) 1153 if (size & bsize)
1154 break; 1154 break;
1155 bsize <<= 1; 1155 bsize <<= 1;
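
The loop in bd_set_size() above picks the largest power-of-two block size that is no bigger than a page and still divides the device size evenly. A runnable sketch of the selection logic:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pick_bsize(unsigned long long size)
{
	unsigned long bsize = 512;	/* minimum sector size */

	while (bsize < PAGE_SIZE) {
		if (size & bsize)	/* size not divisible by 2*bsize */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	printf("%lu\n", pick_bsize(1536));	/* -> 512  */
	printf("%lu\n", pick_bsize(1 << 20));	/* -> 4096 */
	return 0;
}
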
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e34a71b3e225..516e19d1d202 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
757 BUG_ON(NULL == l); 757 BUG_ON(NULL == l);
758 758
759 ret = btrfsic_read_block(state, &tmp_next_block_ctx); 759 ret = btrfsic_read_block(state, &tmp_next_block_ctx);
760 if (ret < (int)PAGE_CACHE_SIZE) { 760 if (ret < (int)PAGE_SIZE) {
761 printk(KERN_INFO 761 printk(KERN_INFO
762 "btrfsic: read @logical %llu failed!\n", 762 "btrfsic: read @logical %llu failed!\n",
763 tmp_next_block_ctx.start); 763 tmp_next_block_ctx.start);
@@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data(
1231 size_t offset_in_page; 1231 size_t offset_in_page;
1232 char *kaddr; 1232 char *kaddr;
1233 char *dst = (char *)dstv; 1233 char *dst = (char *)dstv;
1234 size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1); 1234 size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
1235 unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT; 1235 unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
1236 1236
1237 WARN_ON(offset + len > block_ctx->len); 1237 WARN_ON(offset + len > block_ctx->len);
1238 offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1); 1238 offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
1239 1239
1240 while (len > 0) { 1240 while (len > 0) {
1241 cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page)); 1241 cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
1242 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE)); 1242 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
1243 kaddr = block_ctx->datav[i]; 1243 kaddr = block_ctx->datav[i];
1244 memcpy(dst, kaddr + offset_in_page, cur); 1244 memcpy(dst, kaddr + offset_in_page, cur);
1245 1245
@@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
1605 1605
1606 BUG_ON(!block_ctx->datav); 1606 BUG_ON(!block_ctx->datav);
1607 BUG_ON(!block_ctx->pagev); 1607 BUG_ON(!block_ctx->pagev);
1608 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> 1608 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
1609 PAGE_CACHE_SHIFT; 1609 PAGE_SHIFT;
1610 while (num_pages > 0) { 1610 while (num_pages > 0) {
1611 num_pages--; 1611 num_pages--;
1612 if (block_ctx->datav[num_pages]) { 1612 if (block_ctx->datav[num_pages]) {
@@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1637 BUG_ON(block_ctx->datav); 1637 BUG_ON(block_ctx->datav);
1638 BUG_ON(block_ctx->pagev); 1638 BUG_ON(block_ctx->pagev);
1639 BUG_ON(block_ctx->mem_to_free); 1639 BUG_ON(block_ctx->mem_to_free);
1640 if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) { 1640 if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
1641 printk(KERN_INFO 1641 printk(KERN_INFO
1642 "btrfsic: read_block() with unaligned bytenr %llu\n", 1642 "btrfsic: read_block() with unaligned bytenr %llu\n",
1643 block_ctx->dev_bytenr); 1643 block_ctx->dev_bytenr);
1644 return -1; 1644 return -1;
1645 } 1645 }
1646 1646
1647 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> 1647 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
1648 PAGE_CACHE_SHIFT; 1648 PAGE_SHIFT;
1649 block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) + 1649 block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
1650 sizeof(*block_ctx->pagev)) * 1650 sizeof(*block_ctx->pagev)) *
1651 num_pages, GFP_NOFS); 1651 num_pages, GFP_NOFS);
@@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1676 1676
1677 for (j = i; j < num_pages; j++) { 1677 for (j = i; j < num_pages; j++) {
1678 ret = bio_add_page(bio, block_ctx->pagev[j], 1678 ret = bio_add_page(bio, block_ctx->pagev[j],
1679 PAGE_CACHE_SIZE, 0); 1679 PAGE_SIZE, 0);
1680 if (PAGE_CACHE_SIZE != ret) 1680 if (PAGE_SIZE != ret)
1681 break; 1681 break;
1682 } 1682 }
1683 if (j == i) { 1683 if (j == i) {
@@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1693 return -1; 1693 return -1;
1694 } 1694 }
1695 bio_put(bio); 1695 bio_put(bio);
1696 dev_bytenr += (j - i) * PAGE_CACHE_SIZE; 1696 dev_bytenr += (j - i) * PAGE_SIZE;
1697 i = j; 1697 i = j;
1698 } 1698 }
1699 for (i = 0; i < num_pages; i++) { 1699 for (i = 0; i < num_pages; i++) {
@@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
1769 u32 crc = ~(u32)0; 1769 u32 crc = ~(u32)0;
1770 unsigned int i; 1770 unsigned int i;
1771 1771
1772 if (num_pages * PAGE_CACHE_SIZE < state->metablock_size) 1772 if (num_pages * PAGE_SIZE < state->metablock_size)
1773 return 1; /* not metadata */ 1773 return 1; /* not metadata */
1774 num_pages = state->metablock_size >> PAGE_CACHE_SHIFT; 1774 num_pages = state->metablock_size >> PAGE_SHIFT;
1775 h = (struct btrfs_header *)datav[0]; 1775 h = (struct btrfs_header *)datav[0];
1776 1776
1777 if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE)) 1777 if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
1779 1779
1780 for (i = 0; i < num_pages; i++) { 1780 for (i = 0; i < num_pages; i++) {
1781 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE); 1781 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
1782 size_t sublen = i ? PAGE_CACHE_SIZE : 1782 size_t sublen = i ? PAGE_SIZE :
1783 (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE); 1783 (PAGE_SIZE - BTRFS_CSUM_SIZE);
1784 1784
1785 crc = btrfs_crc32c(crc, data, sublen); 1785 crc = btrfs_crc32c(crc, data, sublen);
1786 } 1786 }
@@ -1826,14 +1826,14 @@ again:
1826 if (block->is_superblock) { 1826 if (block->is_superblock) {
1827 bytenr = btrfs_super_bytenr((struct btrfs_super_block *) 1827 bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
1828 mapped_datav[0]); 1828 mapped_datav[0]);
1829 if (num_pages * PAGE_CACHE_SIZE < 1829 if (num_pages * PAGE_SIZE <
1830 BTRFS_SUPER_INFO_SIZE) { 1830 BTRFS_SUPER_INFO_SIZE) {
1831 printk(KERN_INFO 1831 printk(KERN_INFO
1832 "btrfsic: cannot work with too short bios!\n"); 1832 "btrfsic: cannot work with too short bios!\n");
1833 return; 1833 return;
1834 } 1834 }
1835 is_metadata = 1; 1835 is_metadata = 1;
1836 BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1)); 1836 BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
1837 processed_len = BTRFS_SUPER_INFO_SIZE; 1837 processed_len = BTRFS_SUPER_INFO_SIZE;
1838 if (state->print_mask & 1838 if (state->print_mask &
1839 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { 1839 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -1844,7 +1844,7 @@ again:
1844 } 1844 }
1845 if (is_metadata) { 1845 if (is_metadata) {
1846 if (!block->is_superblock) { 1846 if (!block->is_superblock) {
1847 if (num_pages * PAGE_CACHE_SIZE < 1847 if (num_pages * PAGE_SIZE <
1848 state->metablock_size) { 1848 state->metablock_size) {
1849 printk(KERN_INFO 1849 printk(KERN_INFO
1850 "btrfsic: cannot work with too short bios!\n"); 1850 "btrfsic: cannot work with too short bios!\n");
@@ -1880,7 +1880,7 @@ again:
1880 } 1880 }
1881 block->logical_bytenr = bytenr; 1881 block->logical_bytenr = bytenr;
1882 } else { 1882 } else {
1883 if (num_pages * PAGE_CACHE_SIZE < 1883 if (num_pages * PAGE_SIZE <
1884 state->datablock_size) { 1884 state->datablock_size) {
1885 printk(KERN_INFO 1885 printk(KERN_INFO
1886 "btrfsic: cannot work with too short bios!\n"); 1886 "btrfsic: cannot work with too short bios!\n");
@@ -2013,7 +2013,7 @@ again:
2013 block->logical_bytenr = bytenr; 2013 block->logical_bytenr = bytenr;
2014 block->is_metadata = 1; 2014 block->is_metadata = 1;
2015 if (block->is_superblock) { 2015 if (block->is_superblock) {
2016 BUG_ON(PAGE_CACHE_SIZE != 2016 BUG_ON(PAGE_SIZE !=
2017 BTRFS_SUPER_INFO_SIZE); 2017 BTRFS_SUPER_INFO_SIZE);
2018 ret = btrfsic_process_written_superblock( 2018 ret = btrfsic_process_written_superblock(
2019 state, 2019 state,
@@ -2172,8 +2172,8 @@ again:
2172continue_loop: 2172continue_loop:
2173 BUG_ON(!processed_len); 2173 BUG_ON(!processed_len);
2174 dev_bytenr += processed_len; 2174 dev_bytenr += processed_len;
2175 mapped_datav += processed_len >> PAGE_CACHE_SHIFT; 2175 mapped_datav += processed_len >> PAGE_SHIFT;
2176 num_pages -= processed_len >> PAGE_CACHE_SHIFT; 2176 num_pages -= processed_len >> PAGE_SHIFT;
2177 goto again; 2177 goto again;
2178} 2178}
2179 2179
@@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
2954 goto leave; 2954 goto leave;
2955 cur_bytenr = dev_bytenr; 2955 cur_bytenr = dev_bytenr;
2956 for (i = 0; i < bio->bi_vcnt; i++) { 2956 for (i = 0; i < bio->bi_vcnt; i++) {
2957 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); 2957 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
2958 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); 2958 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
2959 if (!mapped_datav[i]) { 2959 if (!mapped_datav[i]) {
2960 while (i > 0) { 2960 while (i > 0) {
@@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root,
3037 struct list_head *dev_head = &fs_devices->devices; 3037 struct list_head *dev_head = &fs_devices->devices;
3038 struct btrfs_device *device; 3038 struct btrfs_device *device;
3039 3039
3040 if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) { 3040 if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
3041 printk(KERN_INFO 3041 printk(KERN_INFO
3042 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", 3042 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
3043 root->nodesize, PAGE_CACHE_SIZE); 3043 root->nodesize, PAGE_SIZE);
3044 return -1; 3044 return -1;
3045 } 3045 }
3046 if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) { 3046 if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
3047 printk(KERN_INFO 3047 printk(KERN_INFO
3048 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", 3048 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
3049 root->sectorsize, PAGE_CACHE_SIZE); 3049 root->sectorsize, PAGE_SIZE);
3050 return -1; 3050 return -1;
3051 } 3051 }
3052 state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 3052 state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
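
Among the check-integrity conversions, btrfsic_test_for_metadata() checksums a metadata block page by page while excluding the first page's stored checksum bytes from the calculation (they hold the expected value itself). A toy sketch of that skip-the-csum loop (toy_csum stands in for btrfs_crc32c; CSUM_SIZE mirrors BTRFS_CSUM_SIZE):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define CSUM_SIZE 32UL	/* BTRFS_CSUM_SIZE */

/* Toy checksum standing in for btrfs_crc32c(). */
static uint32_t toy_csum(uint32_t crc, const uint8_t *data, size_t len)
{
	while (len--)
		crc = (crc << 1) ^ *data++;
	return crc;
}

static uint32_t csum_metablock(uint8_t **pages, size_t num_pages)
{
	uint32_t crc = ~(uint32_t)0;
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* first page: skip the bytes where the csum itself lives */
		const uint8_t *data = i ? pages[i] : pages[i] + CSUM_SIZE;
		size_t len = i ? PAGE_SIZE : PAGE_SIZE - CSUM_SIZE;

		crc = toy_csum(crc, data, len);
	}
	return crc;
}

int main(void)
{
	static uint8_t p0[PAGE_SIZE], p1[PAGE_SIZE];
	uint8_t *pages[] = { p0, p1 };

	printf("crc=%08x\n", csum_metablock(pages, 2));
	return 0;
}
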
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3346cd8f9910..ff61a41ac90b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode,
119 csum = ~(u32)0; 119 csum = ~(u32)0;
120 120
121 kaddr = kmap_atomic(page); 121 kaddr = kmap_atomic(page);
122 csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE); 122 csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
123 btrfs_csum_final(csum, (char *)&csum); 123 btrfs_csum_final(csum, (char *)&csum);
124 kunmap_atomic(kaddr); 124 kunmap_atomic(kaddr);
125 125
@@ -190,7 +190,7 @@ csum_failed:
190 for (index = 0; index < cb->nr_pages; index++) { 190 for (index = 0; index < cb->nr_pages; index++) {
191 page = cb->compressed_pages[index]; 191 page = cb->compressed_pages[index];
192 page->mapping = NULL; 192 page->mapping = NULL;
193 page_cache_release(page); 193 put_page(page);
194 } 194 }
195 195
196 /* do io completion on the original bio */ 196 /* do io completion on the original bio */
@@ -224,8 +224,8 @@ out:
224static noinline void end_compressed_writeback(struct inode *inode, 224static noinline void end_compressed_writeback(struct inode *inode,
225 const struct compressed_bio *cb) 225 const struct compressed_bio *cb)
226{ 226{
227 unsigned long index = cb->start >> PAGE_CACHE_SHIFT; 227 unsigned long index = cb->start >> PAGE_SHIFT;
228 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT; 228 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
229 struct page *pages[16]; 229 struct page *pages[16];
230 unsigned long nr_pages = end_index - index + 1; 230 unsigned long nr_pages = end_index - index + 1;
231 int i; 231 int i;
@@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
247 if (cb->errors) 247 if (cb->errors)
248 SetPageError(pages[i]); 248 SetPageError(pages[i]);
249 end_page_writeback(pages[i]); 249 end_page_writeback(pages[i]);
250 page_cache_release(pages[i]); 250 put_page(pages[i]);
251 } 251 }
252 nr_pages -= ret; 252 nr_pages -= ret;
253 index += ret; 253 index += ret;
@@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio)
304 for (index = 0; index < cb->nr_pages; index++) { 304 for (index = 0; index < cb->nr_pages; index++) {
305 page = cb->compressed_pages[index]; 305 page = cb->compressed_pages[index];
306 page->mapping = NULL; 306 page->mapping = NULL;
307 page_cache_release(page); 307 put_page(page);
308 } 308 }
309 309
310 /* finally free the cb struct */ 310 /* finally free the cb struct */
@@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
341 int ret; 341 int ret;
342 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 342 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
343 343
344 WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); 344 WARN_ON(start & ((u64)PAGE_SIZE - 1));
345 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); 345 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
346 if (!cb) 346 if (!cb)
347 return -ENOMEM; 347 return -ENOMEM;
@@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
374 page->mapping = inode->i_mapping; 374 page->mapping = inode->i_mapping;
375 if (bio->bi_iter.bi_size) 375 if (bio->bi_iter.bi_size)
376 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, 376 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
377 PAGE_CACHE_SIZE, 377 PAGE_SIZE,
378 bio, 0); 378 bio, 0);
379 else 379 else
380 ret = 0; 380 ret = 0;
381 381
382 page->mapping = NULL; 382 page->mapping = NULL;
383 if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < 383 if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
384 PAGE_CACHE_SIZE) { 384 PAGE_SIZE) {
385 bio_get(bio); 385 bio_get(bio);
386 386
387 /* 387 /*
@@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
410 BUG_ON(!bio); 410 BUG_ON(!bio);
411 bio->bi_private = cb; 411 bio->bi_private = cb;
412 bio->bi_end_io = end_compressed_bio_write; 412 bio->bi_end_io = end_compressed_bio_write;
413 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 413 bio_add_page(bio, page, PAGE_SIZE, 0);
414 } 414 }
415 if (bytes_left < PAGE_CACHE_SIZE) { 415 if (bytes_left < PAGE_SIZE) {
416 btrfs_info(BTRFS_I(inode)->root->fs_info, 416 btrfs_info(BTRFS_I(inode)->root->fs_info,
417 "bytes left %lu compress len %lu nr %lu", 417 "bytes left %lu compress len %lu nr %lu",
418 bytes_left, cb->compressed_len, cb->nr_pages); 418 bytes_left, cb->compressed_len, cb->nr_pages);
419 } 419 }
420 bytes_left -= PAGE_CACHE_SIZE; 420 bytes_left -= PAGE_SIZE;
421 first_byte += PAGE_CACHE_SIZE; 421 first_byte += PAGE_SIZE;
422 cond_resched(); 422 cond_resched();
423 } 423 }
424 bio_get(bio); 424 bio_get(bio);
@@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
457 int misses = 0; 457 int misses = 0;
458 458
459 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; 459 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
460 last_offset = (page_offset(page) + PAGE_CACHE_SIZE); 460 last_offset = (page_offset(page) + PAGE_SIZE);
461 em_tree = &BTRFS_I(inode)->extent_tree; 461 em_tree = &BTRFS_I(inode)->extent_tree;
462 tree = &BTRFS_I(inode)->io_tree; 462 tree = &BTRFS_I(inode)->io_tree;
463 463
464 if (isize == 0) 464 if (isize == 0)
465 return 0; 465 return 0;
466 466
467 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 467 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
468 468
469 while (last_offset < compressed_end) { 469 while (last_offset < compressed_end) {
470 pg_index = last_offset >> PAGE_CACHE_SHIFT; 470 pg_index = last_offset >> PAGE_SHIFT;
471 471
472 if (pg_index > end_index) 472 if (pg_index > end_index)
473 break; 473 break;
@@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
488 break; 488 break;
489 489
490 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { 490 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
491 page_cache_release(page); 491 put_page(page);
492 goto next; 492 goto next;
493 } 493 }
494 494
495 end = last_offset + PAGE_CACHE_SIZE - 1; 495 end = last_offset + PAGE_SIZE - 1;
496 /* 496 /*
497 * at this point, we have a locked page in the page cache 497 * at this point, we have a locked page in the page cache
498 * for these bytes in the file. But, we have to make 498 * for these bytes in the file. But, we have to make
@@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode,
502 lock_extent(tree, last_offset, end); 502 lock_extent(tree, last_offset, end);
503 read_lock(&em_tree->lock); 503 read_lock(&em_tree->lock);
504 em = lookup_extent_mapping(em_tree, last_offset, 504 em = lookup_extent_mapping(em_tree, last_offset,
505 PAGE_CACHE_SIZE); 505 PAGE_SIZE);
506 read_unlock(&em_tree->lock); 506 read_unlock(&em_tree->lock);
507 507
508 if (!em || last_offset < em->start || 508 if (!em || last_offset < em->start ||
509 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || 509 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
510 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { 510 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
511 free_extent_map(em); 511 free_extent_map(em);
512 unlock_extent(tree, last_offset, end); 512 unlock_extent(tree, last_offset, end);
513 unlock_page(page); 513 unlock_page(page);
514 page_cache_release(page); 514 put_page(page);
515 break; 515 break;
516 } 516 }
517 free_extent_map(em); 517 free_extent_map(em);
518 518
519 if (page->index == end_index) { 519 if (page->index == end_index) {
520 char *userpage; 520 char *userpage;
521 size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); 521 size_t zero_offset = isize & (PAGE_SIZE - 1);
522 522
523 if (zero_offset) { 523 if (zero_offset) {
524 int zeros; 524 int zeros;
525 zeros = PAGE_CACHE_SIZE - zero_offset; 525 zeros = PAGE_SIZE - zero_offset;
526 userpage = kmap_atomic(page); 526 userpage = kmap_atomic(page);
527 memset(userpage + zero_offset, 0, zeros); 527 memset(userpage + zero_offset, 0, zeros);
528 flush_dcache_page(page); 528 flush_dcache_page(page);
@@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
531 } 531 }
532 532
533 ret = bio_add_page(cb->orig_bio, page, 533 ret = bio_add_page(cb->orig_bio, page,
534 PAGE_CACHE_SIZE, 0); 534 PAGE_SIZE, 0);
535 535
536 if (ret == PAGE_CACHE_SIZE) { 536 if (ret == PAGE_SIZE) {
537 nr_pages++; 537 nr_pages++;
538 page_cache_release(page); 538 put_page(page);
539 } else { 539 } else {
540 unlock_extent(tree, last_offset, end); 540 unlock_extent(tree, last_offset, end);
541 unlock_page(page); 541 unlock_page(page);
542 page_cache_release(page); 542 put_page(page);
543 break; 543 break;
544 } 544 }
545next: 545next:
546 last_offset += PAGE_CACHE_SIZE; 546 last_offset += PAGE_SIZE;
547 } 547 }
548 return 0; 548 return 0;
549} 549}
@@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
567 struct extent_map_tree *em_tree; 567 struct extent_map_tree *em_tree;
568 struct compressed_bio *cb; 568 struct compressed_bio *cb;
569 struct btrfs_root *root = BTRFS_I(inode)->root; 569 struct btrfs_root *root = BTRFS_I(inode)->root;
570 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; 570 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
571 unsigned long compressed_len; 571 unsigned long compressed_len;
572 unsigned long nr_pages; 572 unsigned long nr_pages;
573 unsigned long pg_index; 573 unsigned long pg_index;
@@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
589 read_lock(&em_tree->lock); 589 read_lock(&em_tree->lock);
590 em = lookup_extent_mapping(em_tree, 590 em = lookup_extent_mapping(em_tree,
591 page_offset(bio->bi_io_vec->bv_page), 591 page_offset(bio->bi_io_vec->bv_page),
592 PAGE_CACHE_SIZE); 592 PAGE_SIZE);
593 read_unlock(&em_tree->lock); 593 read_unlock(&em_tree->lock);
594 if (!em) 594 if (!em)
595 return -EIO; 595 return -EIO;
@@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
617 cb->compress_type = extent_compress_type(bio_flags); 617 cb->compress_type = extent_compress_type(bio_flags);
618 cb->orig_bio = bio; 618 cb->orig_bio = bio;
619 619
620 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE); 620 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
621 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), 621 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
622 GFP_NOFS); 622 GFP_NOFS);
623 if (!cb->compressed_pages) 623 if (!cb->compressed_pages)
@@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
640 add_ra_bio_pages(inode, em_start + em_len, cb); 640 add_ra_bio_pages(inode, em_start + em_len, cb);
641 641
 642 /* include any pages we added in add_ra_bio_pages */ 642 /* include any pages we added in add_ra_bio_pages */
643 uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; 643 uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
644 cb->len = uncompressed_len; 644 cb->len = uncompressed_len;
645 645
646 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); 646 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
653 for (pg_index = 0; pg_index < nr_pages; pg_index++) { 653 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
654 page = cb->compressed_pages[pg_index]; 654 page = cb->compressed_pages[pg_index];
655 page->mapping = inode->i_mapping; 655 page->mapping = inode->i_mapping;
656 page->index = em_start >> PAGE_CACHE_SHIFT; 656 page->index = em_start >> PAGE_SHIFT;
657 657
658 if (comp_bio->bi_iter.bi_size) 658 if (comp_bio->bi_iter.bi_size)
659 ret = tree->ops->merge_bio_hook(READ, page, 0, 659 ret = tree->ops->merge_bio_hook(READ, page, 0,
660 PAGE_CACHE_SIZE, 660 PAGE_SIZE,
661 comp_bio, 0); 661 comp_bio, 0);
662 else 662 else
663 ret = 0; 663 ret = 0;
664 664
665 page->mapping = NULL; 665 page->mapping = NULL;
666 if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < 666 if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
667 PAGE_CACHE_SIZE) { 667 PAGE_SIZE) {
668 bio_get(comp_bio); 668 bio_get(comp_bio);
669 669
670 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 670 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
702 comp_bio->bi_private = cb; 702 comp_bio->bi_private = cb;
703 comp_bio->bi_end_io = end_compressed_bio_read; 703 comp_bio->bi_end_io = end_compressed_bio_read;
704 704
705 bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); 705 bio_add_page(comp_bio, page, PAGE_SIZE, 0);
706 } 706 }
707 cur_disk_byte += PAGE_CACHE_SIZE; 707 cur_disk_byte += PAGE_SIZE;
708 } 708 }
709 bio_get(comp_bio); 709 bio_get(comp_bio);
710 710
@@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1013 1013
1014 /* copy bytes from the working buffer into the pages */ 1014 /* copy bytes from the working buffer into the pages */
1015 while (working_bytes > 0) { 1015 while (working_bytes > 0) {
1016 bytes = min(PAGE_CACHE_SIZE - *pg_offset, 1016 bytes = min(PAGE_SIZE - *pg_offset,
1017 PAGE_CACHE_SIZE - buf_offset); 1017 PAGE_SIZE - buf_offset);
1018 bytes = min(bytes, working_bytes); 1018 bytes = min(bytes, working_bytes);
1019 kaddr = kmap_atomic(page_out); 1019 kaddr = kmap_atomic(page_out);
1020 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); 1020 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1027 current_buf_start += bytes; 1027 current_buf_start += bytes;
1028 1028
1029 /* check if we need to pick another page */ 1029 /* check if we need to pick another page */
1030 if (*pg_offset == PAGE_CACHE_SIZE) { 1030 if (*pg_offset == PAGE_SIZE) {
1031 (*pg_index)++; 1031 (*pg_index)++;
1032 if (*pg_index >= vcnt) 1032 if (*pg_index >= vcnt)
1033 return 0; 1033 return 0;
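
btrfs_decompress_buf2page() above copies decompressed bytes from a working buffer into destination pages, bounding each memcpy by the space left in the current output page and advancing to the next page when it fills. A simplified runnable model of that loop:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void copy_to_pages(char **pages, size_t npages,
			  const char *buf, size_t len)
{
	size_t pg = 0, pg_off = 0, off = 0;

	while (len > 0 && pg < npages) {
		size_t space = PAGE_SIZE - pg_off;  /* room in this page */
		size_t bytes = len < space ? len : space;

		memcpy(pages[pg] + pg_off, buf + off, bytes);
		pg_off += bytes;
		off += bytes;
		len -= bytes;
		if (pg_off == PAGE_SIZE) {	/* page full: advance */
			pg++;
			pg_off = 0;
		}
	}
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE];
	char *pages[] = { p0, p1 };
	char src[6000];

	memset(src, 'x', sizeof(src));
	copy_to_pages(pages, 2, src, sizeof(src));
	printf("p1[0]=%c\n", p1[0]);	/* copy spills into second page */
	return 0;
}
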
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 77592931ab4f..ec7928a27aaa 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -19,6 +19,7 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/rbtree.h> 21#include <linux/rbtree.h>
22#include <linux/vmalloc.h>
22#include "ctree.h" 23#include "ctree.h"
23#include "disk-io.h" 24#include "disk-io.h"
24#include "transaction.h" 25#include "transaction.h"
@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5361 goto out; 5362 goto out;
5362 } 5363 }
5363 5364
5364 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL); 5365 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5365 if (!tmp_buf) { 5366 if (!tmp_buf) {
5366 ret = -ENOMEM; 5367 tmp_buf = vmalloc(left_root->nodesize);
5367 goto out; 5368 if (!tmp_buf) {
5369 ret = -ENOMEM;
5370 goto out;
5371 }
5368 } 5372 }
5369 5373
5370 left_path->search_commit_root = 1; 5374 left_path->search_commit_root = 1;
@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5565out: 5569out:
5566 btrfs_free_path(left_path); 5570 btrfs_free_path(left_path);
5567 btrfs_free_path(right_path); 5571 btrfs_free_path(right_path);
5568 kfree(tmp_buf); 5572 kvfree(tmp_buf);
5569 return ret; 5573 return ret;
5570} 5574}
5571 5575
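
The ctree.c change switches tmp_buf to a kmalloc attempt with __GFP_NOWARN that falls back to vmalloc when contiguous pages are scarce, with kvfree() handling either kind of pointer. A hedged kernel-style sketch of the pattern:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_big(size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!buf)
		buf = vmalloc(size);	/* non-contiguous fallback */
	return buf;			/* free with kvfree() */
}

Later kernels wrap this exact idiom as kvmalloc().
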
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a1d6652e0c47..26bcb487f958 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
394 dev_replace->cursor_right = 0; 394 dev_replace->cursor_right = 0;
395 dev_replace->is_valid = 1; 395 dev_replace->is_valid = 1;
396 dev_replace->item_needs_writeback = 1; 396 dev_replace->item_needs_writeback = 1;
397 atomic64_set(&dev_replace->num_write_errors, 0);
398 atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
397 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; 399 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
398 btrfs_dev_replace_unlock(dev_replace, 1); 400 btrfs_dev_replace_unlock(dev_replace, 1);
399 401
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d01f89d130e0..4e47849d7427 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1062,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
1062 (unsigned long long)page_offset(page)); 1062 (unsigned long long)page_offset(page));
1063 ClearPagePrivate(page); 1063 ClearPagePrivate(page);
1064 set_page_private(page, 0); 1064 set_page_private(page, 0);
1065 page_cache_release(page); 1065 put_page(page);
1066 } 1066 }
1067} 1067}
1068 1068
@@ -1764,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1764 if (err) 1764 if (err)
1765 return err; 1765 return err;
1766 1766
1767 bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; 1767 bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
1768 bdi->congested_fn = btrfs_congested_fn; 1768 bdi->congested_fn = btrfs_congested_fn;
1769 bdi->congested_data = info; 1769 bdi->congested_data = info;
1770 bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; 1770 bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@ -2542,7 +2542,7 @@ int open_ctree(struct super_block *sb,
2542 err = ret; 2542 err = ret;
2543 goto fail_bdi; 2543 goto fail_bdi;
2544 } 2544 }
2545 fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * 2545 fs_info->dirty_metadata_batch = PAGE_SIZE *
2546 (1 + ilog2(nr_cpu_ids)); 2546 (1 + ilog2(nr_cpu_ids));
2547 2547
2548 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); 2548 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
@@ -2787,7 +2787,7 @@ int open_ctree(struct super_block *sb,
2787 * flag our filesystem as having big metadata blocks if 2787 * flag our filesystem as having big metadata blocks if
2788 * they are bigger than the page size 2788 * they are bigger than the page size
2789 */ 2789 */
2790 if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) { 2790 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2791 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) 2791 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2792 printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); 2792 printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
2793 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; 2793 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
@@ -2837,7 +2837,7 @@ int open_ctree(struct super_block *sb,
2837 2837
2838 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); 2838 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2839 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 2839 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2840 SZ_4M / PAGE_CACHE_SIZE); 2840 SZ_4M / PAGE_SIZE);
2841 2841
2842 tree_root->nodesize = nodesize; 2842 tree_root->nodesize = nodesize;
2843 tree_root->sectorsize = sectorsize; 2843 tree_root->sectorsize = sectorsize;
@@ -4076,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4076 ret = -EINVAL; 4076 ret = -EINVAL;
4077 } 4077 }
4078 /* Only PAGE SIZE is supported yet */ 4078 /* Only PAGE SIZE is supported yet */
4079 if (sectorsize != PAGE_CACHE_SIZE) { 4079 if (sectorsize != PAGE_SIZE) {
4080 printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n", 4080 printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
4081 sectorsize, PAGE_CACHE_SIZE); 4081 sectorsize, PAGE_SIZE);
4082 ret = -EINVAL; 4082 ret = -EINVAL;
4083 } 4083 }
4084 if (!is_power_of_2(nodesize) || nodesize < sectorsize || 4084 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 53e12977bfd0..84e060eb0de8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3452,7 +3452,7 @@ again:
3452 num_pages = 1; 3452 num_pages = 1;
3453 3453
3454 num_pages *= 16; 3454 num_pages *= 16;
3455 num_pages *= PAGE_CACHE_SIZE; 3455 num_pages *= PAGE_SIZE;
3456 3456
3457 ret = btrfs_check_data_free_space(inode, 0, num_pages); 3457 ret = btrfs_check_data_free_space(inode, 0, num_pages);
3458 if (ret) 3458 if (ret)
@@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4639 loops = 0; 4639 loops = 0;
4640 while (delalloc_bytes && loops < 3) { 4640 while (delalloc_bytes && loops < 3) {
4641 max_reclaim = min(delalloc_bytes, to_reclaim); 4641 max_reclaim = min(delalloc_bytes, to_reclaim);
4642 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT; 4642 nr_pages = max_reclaim >> PAGE_SHIFT;
4643 btrfs_writeback_inodes_sb_nr(root, nr_pages, items); 4643 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4644 /* 4644 /*
4645 * We need to wait for the async pages to actually start before 4645 * We need to wait for the async pages to actually start before
@@ -9386,15 +9386,23 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9386 u64 dev_min = 1; 9386 u64 dev_min = 1;
9387 u64 dev_nr = 0; 9387 u64 dev_nr = 0;
9388 u64 target; 9388 u64 target;
9389 int debug;
9389 int index; 9390 int index;
9390 int full = 0; 9391 int full = 0;
9391 int ret = 0; 9392 int ret = 0;
9392 9393
9394 debug = btrfs_test_opt(root, ENOSPC_DEBUG);
9395
9393 block_group = btrfs_lookup_block_group(root->fs_info, bytenr); 9396 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9394 9397
9395 /* odd, couldn't find the block group, leave it alone */ 9398 /* odd, couldn't find the block group, leave it alone */
9396 if (!block_group) 9399 if (!block_group) {
9400 if (debug)
9401 btrfs_warn(root->fs_info,
9402 "can't find block group for bytenr %llu",
9403 bytenr);
9397 return -1; 9404 return -1;
9405 }
9398 9406
9399 min_free = btrfs_block_group_used(&block_group->item); 9407 min_free = btrfs_block_group_used(&block_group->item);
9400 9408
@@ -9448,8 +9456,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9448 * this is just a balance, so if we were marked as full 9456 * this is just a balance, so if we were marked as full
9449 * we know there is no space for a new chunk 9457 * we know there is no space for a new chunk
9450 */ 9458 */
9451 if (full) 9459 if (full) {
9460 if (debug)
9461 btrfs_warn(root->fs_info,
9462 "no space to alloc new chunk for block group %llu",
9463 block_group->key.objectid);
9452 goto out; 9464 goto out;
9465 }
9453 9466
9454 index = get_block_group_index(block_group); 9467 index = get_block_group_index(block_group);
9455 } 9468 }
@@ -9496,6 +9509,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9496 ret = -1; 9509 ret = -1;
9497 } 9510 }
9498 } 9511 }
9512 if (debug && ret == -1)
9513 btrfs_warn(root->fs_info,
9514 "no space to allocate a new chunk for block group %llu",
9515 block_group->key.objectid);
9499 mutex_unlock(&root->fs_info->chunk_mutex); 9516 mutex_unlock(&root->fs_info->chunk_mutex);
9500 btrfs_end_transaction(trans, root); 9517 btrfs_end_transaction(trans, root);
9501out: 9518out:
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c8597d98..d247fc0eea19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
 		page = find_get_page(inode->i_mapping, index);
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		clear_page_dirty_for_io(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
 
 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
@@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		__set_page_dirty_nobuffers(page);
 		account_page_redirty(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
@@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
  */
 static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
 		page = find_get_page(tree->mapping, index);
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		set_page_writeback(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
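Review note: the mechanical PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT to PAGE_SIZE/PAGE_SHIFT rename relies on the two pairs having been identical for years. A small, runnable sketch of the byte-range-to-page-index arithmetic these loops use (assuming 4 KiB pages; the kernel gets PAGE_SHIFT from the architecture):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                     /* assumption: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        uint64_t start = 5000, end = 20000;   /* arbitrary byte range */
        unsigned long index = start >> PAGE_SHIFT;      /* first page: 1 */
        unsigned long end_index = end >> PAGE_SHIFT;    /* last page:  4 */

        /* the loops above visit every page the range touches */
        for (; index <= end_index; index++)
            printf("page %lu covers bytes [%lu, %lu]\n",
                   index, index * PAGE_SIZE, (index + 1) * PAGE_SIZE - 1);
        return 0;
    }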
@@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
 {
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 
@@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
 		for (i = 0; i < ret; i++) {
 			if (pages[i] != locked_page)
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode,
 				  u64 delalloc_start,
 				  u64 delalloc_end)
 {
-	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
+	unsigned long index = delalloc_start >> PAGE_SHIFT;
 	unsigned long start_index = index;
-	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
 	unsigned long pages_locked = 0;
 	struct page *pages[16];
 	unsigned long nrpages;
@@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode,
 			    pages[i]->mapping != inode->i_mapping) {
 				ret = -EAGAIN;
 				unlock_page(pages[i]);
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 				goto done;
 			}
 		}
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 		pages_locked++;
 	}
 	nrpages -= ret;
@@ -1636,7 +1636,7 @@ done:
 		__unlock_for_delalloc(inode, locked_page,
 			      delalloc_start,
 			      ((u64)(start_index + pages_locked - 1)) <<
-			      PAGE_CACHE_SHIFT);
+			      PAGE_SHIFT);
 	}
 	return ret;
 }
@@ -1696,7 +1696,7 @@ again:
 		free_extent_state(cached_state);
 		cached_state = NULL;
 		if (!loops) {
-			max_bytes = PAGE_CACHE_SIZE;
+			max_bytes = PAGE_SIZE;
 			loops = 1;
 			goto again;
 		} else {
@@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 
@@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				SetPagePrivate2(pages[i]);
 
 			if (pages[i] == locked_page) {
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 				continue;
 			}
 			if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				end_page_writeback(pages[i]);
 			if (page_ops & PAGE_UNLOCK)
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
 }
@@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
 		struct page *p = eb->pages[i];
 
 		ret = repair_io_failure(root->fs_info->btree_inode, start,
-					PAGE_CACHE_SIZE, start, p,
+					PAGE_SIZE, start, p,
 					start - page_offset(p), mirror_num);
 		if (ret)
 			break;
-		start += PAGE_CACHE_SIZE;
+		start += PAGE_SIZE;
 	}
 
 	return ret;
@@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio)
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page. */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
 				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
 				   "partial page write in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio)
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page. */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
 				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
 				   "partial page read in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio)
 readpage_ok:
 		if (likely(uptodate)) {
 			loff_t i_size = i_size_read(inode);
-			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+			pgoff_t end_index = i_size >> PAGE_SHIFT;
 			unsigned off;
 
 			/* Zero out the end if this page straddles i_size */
-			off = i_size & (PAGE_CACHE_SIZE-1);
+			off = i_size & (PAGE_SIZE-1);
 			if (page->index == end_index && off)
-				zero_user_segment(page, off, PAGE_CACHE_SIZE);
+				zero_user_segment(page, off, PAGE_SIZE);
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
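Review note: zeroing the tail of the page that straddles i_size keeps stale data from leaking past end-of-file. A runnable sketch of the offset test (4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1L << PAGE_SHIFT)

    int main(void)
    {
        long long i_size = 10000;                 /* file is 10000 bytes */
        unsigned long end_index = i_size >> PAGE_SHIFT;   /* page 2 */
        unsigned off = i_size & (PAGE_SIZE - 1);          /* 1808 */

        /* page 2 holds bytes 8192..12287; only 8192..9999 are valid */
        if (off)
            printf("zero page %lu from offset %u to %ld\n",
                   end_index, off, PAGE_SIZE);
        return 0;
    }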
@@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 	struct bio *bio;
 	int contig = 0;
 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
-	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
+	size_t page_size = min_t(size_t, size, PAGE_SIZE);
 
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
@@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long)eb);
 	} else {
 		WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page)
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, EXTENT_PAGE_PRIVATE);
 	}
 }
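Review note: page_cache_get()/page_cache_release() were always thin wrappers around get_page()/put_page(); the series just calls the real thing. The invariant is plain reference counting: whoever attaches private state takes a reference and must drop it on detach. A userspace analogue (hypothetical refcounted type, not the kernel's struct page):

    #include <assert.h>
    #include <stddef.h>

    struct page_like {
        int refcount;
        void *private;          /* stand-in for page->private */
    };

    static void get_page_like(struct page_like *p) { p->refcount++; }
    static void put_page_like(struct page_like *p) { p->refcount--; }

    static void attach(struct page_like *p, void *state)
    {
        if (!p->private) {
            get_page_like(p);   /* pin while private state exists */
            p->private = state;
        }
    }

    static void detach(struct page_like *p)
    {
        if (p->private) {
            p->private = NULL;
            put_page_like(p);   /* matches the get in attach() */
        }
    }

    int main(void)
    {
        struct page_like p = { .refcount = 1 };  /* caller's reference */
        attach(&p, &p);
        assert(p.refcount == 2);
        detach(&p);
        assert(p.refcount == 1);
        return 0;
    }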
@@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
 	u64 extent_offset;
@@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree,
 		}
 	}
 
-	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
+	if (page->index == last_byte >> PAGE_SHIFT) {
 		char *userpage;
-		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
+		size_t zero_offset = last_byte & (PAGE_SIZE - 1);
 
 		if (zero_offset) {
-			iosize = PAGE_CACHE_SIZE - zero_offset;
+			iosize = PAGE_SIZE - zero_offset;
 			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, iosize);
 			flush_dcache_page(page);
@@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree,
 		}
 	}
 	while (cur <= end) {
-		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+		unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
 		bool force_bio_submit = false;
 
 		if (cur >= last_byte) {
 			char *userpage;
 			struct extent_state *cached = NULL;
 
-			iosize = PAGE_CACHE_SIZE - pg_offset;
+			iosize = PAGE_SIZE - pg_offset;
 			userpage = kmap_atomic(page);
 			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
@@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
 			      mirror_num, bio_flags, rw, prev_em_start);
-		page_cache_release(pages[index]);
+		put_page(pages[index]);
 	}
 }
 
@@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree,
 		page_start = page_offset(pages[index]);
 		if (!end) {
 			start = page_start;
-			end = start + PAGE_CACHE_SIZE - 1;
+			end = start + PAGE_SIZE - 1;
 			first_index = index;
 		} else if (end + 1 == page_start) {
-			end += PAGE_CACHE_SIZE;
+			end += PAGE_SIZE;
 		} else {
 			__do_contiguous_readpages(tree, &pages[first_index],
 						  index - first_index, start,
@@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 						  bio, mirror_num, bio_flags,
 						  rw, prev_em_start);
 			start = page_start;
-			end = start + PAGE_CACHE_SIZE - 1;
+			end = start + PAGE_SIZE - 1;
 			first_index = index;
 		}
 	}
@@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	struct inode *inode = page->mapping->host;
 	struct btrfs_ordered_extent *ordered;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	int ret;
 
 	while (1) {
 		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_range(inode, start,
-						PAGE_CACHE_SIZE);
+						PAGE_SIZE);
 		if (!ordered)
 			break;
 		unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 			       unsigned long *nr_written)
 {
 	struct extent_io_tree *tree = epd->tree;
-	u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = delalloc_start + PAGE_SIZE - 1;
 	u64 nr_delalloc;
 	u64 delalloc_to_write = 0;
 	u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 			goto done;
 		}
 		/*
-		 * delalloc_end is already one less than the total
-		 * length, so we don't subtract one from
-		 * PAGE_CACHE_SIZE
+		 * delalloc_end is already one less than the total length, so
+		 * we don't subtract one from PAGE_SIZE
 		 */
 		delalloc_to_write += (delalloc_end - delalloc_start +
-				      PAGE_CACHE_SIZE) >>
-				      PAGE_CACHE_SHIFT;
+				      PAGE_SIZE) >> PAGE_SHIFT;
 		delalloc_start = delalloc_end + 1;
 	}
 	if (wbc->nr_to_write < delalloc_to_write) {
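Review note: the comment is worth unpacking. delalloc_end is the last byte of the range (inclusive), so the region holds `end - start + 1` bytes, which rounds up to `(end - start + PAGE_SIZE) >> PAGE_SHIFT` pages without the usual `- 1`. A quick runnable check (4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
        /* one 8 KiB delalloc region: bytes 0..8191 inclusive */
        unsigned long long start = 0, end = 8191;
        unsigned long long pages = (end - start + PAGE_SIZE) >> PAGE_SHIFT;

        printf("%llu pages\n", pages);      /* prints 2, not 3 */
        return 0;
    }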
@@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 {
 	struct extent_io_tree *tree = epd->tree;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
 	u64 extent_offset;
@@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 		if (ret) {
 			SetPageError(page);
 		} else {
-			unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
+			unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
 
 			set_range_writeback(tree, cur, cur + iosize - 1);
 			if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	struct inode *inode = page->mapping->host;
 	struct extent_page_data *epd = data;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	int ret;
 	int nr = 0;
 	size_t pg_offset = 0;
 	loff_t i_size = i_size_read(inode);
-	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = i_size >> PAGE_SHIFT;
 	int write_flags;
 	unsigned long nr_written = 0;
 
@@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 	ClearPageError(page);
 
-	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+	pg_offset = i_size & (PAGE_SIZE - 1);
 	if (page->index > end_index ||
 	   (page->index == end_index && !pg_offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		return 0;
 	}
@@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 		userpage = kmap_atomic(page);
 		memset(userpage + pg_offset, 0,
-		       PAGE_CACHE_SIZE - pg_offset);
+		       PAGE_SIZE - pg_offset);
 		kunmap_atomic(userpage);
 		flush_dcache_page(page);
 	}
@@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
 		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
-					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+					 PAGE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags, false);
 		epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 			ret = -EIO;
 			break;
 		}
-		offset += PAGE_CACHE_SIZE;
+		offset += PAGE_SIZE;
 		update_nr_written(p, wbc, 1);
 		unlock_page(p);
 	}
@@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping,
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		scanned = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		scanned = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 	int ret = 0;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
-		PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
+		PAGE_SHIFT;
 
 	struct extent_page_data epd = {
 		.bio = NULL,
@@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 	};
 
 	while (start <= end) {
-		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+		page = find_get_page(mapping, start >> PAGE_SHIFT);
 		if (clear_page_dirty_for_io(page))
 			ret = __extent_writepage(page, &wbc_writepages, &epd);
 		else {
 			if (tree->ops && tree->ops->writepage_end_io_hook)
 				tree->ops->writepage_end_io_hook(page, start,
-						 start + PAGE_CACHE_SIZE - 1,
+						 start + PAGE_SIZE - 1,
 						 NULL, 1);
 			unlock_page(page);
 		}
-		page_cache_release(page);
-		start += PAGE_CACHE_SIZE;
+		put_page(page);
+		start += PAGE_SIZE;
 	}
 
 	flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree,
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping,
 					page->index, GFP_NOFS)) {
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 {
 	struct extent_state *cached_state = NULL;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
 	start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map,
 			       struct page *page, gfp_t mask)
 {
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	int ret = 1;
 
 	if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 {
 	struct extent_map *em;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 			ClearPagePrivate(page);
 			set_page_private(page, 0);
 			/* One for the page private */
-			page_cache_release(page);
+			put_page(page);
 		}
 
 		if (mapped)
 			spin_unlock(&page->mapping->private_lock);
 
 		/* One for when we alloced the page */
-		page_cache_release(page);
+		put_page(page);
 	} while (index != 0);
 }
 
@@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 	rcu_read_lock();
 	eb = radix_tree_lookup(&fs_info->buffer_radix,
-			       start >> PAGE_CACHE_SHIFT);
+			       start >> PAGE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		/*
@@ -4829,7 +4827,7 @@ again:
 		goto free_eb;
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
-				start >> PAGE_CACHE_SHIFT, eb);
+				start >> PAGE_SHIFT, eb);
 	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	unsigned long len = fs_info->tree_root->nodesize;
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
@@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			if (atomic_inc_not_zero(&exists->refs)) {
 				spin_unlock(&mapping->private_lock);
 				unlock_page(p);
-				page_cache_release(p);
+				put_page(p);
 				mark_extent_buffer_accessed(exists, p);
 				goto free_eb;
 			}
@@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			 */
 			ClearPagePrivate(p);
 			WARN_ON(PageDirty(p));
-			page_cache_release(p);
+			put_page(p);
 		}
 		attach_extent_buffer_page(eb, p);
 		spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@ again:
 
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
-				start >> PAGE_CACHE_SHIFT, eb);
+				start >> PAGE_SHIFT, eb);
 	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 
 		spin_lock(&fs_info->buffer_lock);
 		radix_tree_delete(&fs_info->buffer_radix,
-				  eb->start >> PAGE_CACHE_SHIFT);
+				  eb->start >> PAGE_SHIFT);
 		spin_unlock(&fs_info->buffer_lock);
 	} else {
 		spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
 	if (start) {
 		WARN_ON(start < eb->start);
-		start_i = (start >> PAGE_CACHE_SHIFT) -
-			(eb->start >> PAGE_CACHE_SHIFT);
+		start_i = (start >> PAGE_SHIFT) -
+			(eb->start >> PAGE_SHIFT);
 	} else {
 		start_i = 0;
 	}
@@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 	struct page *page;
 	char *kaddr;
 	char *dst = (char *)dstv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
 		memcpy(dst, kaddr + offset, cur);
 
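Review note: read_extent_buffer() is the classic scatter-copy over an array of fixed-size pages: clamp each chunk to the end of the current page, advance, repeat. A self-contained sketch of the same loop over plain buffers (names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* copy len bytes starting at byte offset start out of an array of pages */
    static void read_paged(char pages[][PAGE_SIZE], size_t start, size_t len,
                           char *dst)
    {
        size_t i = start / PAGE_SIZE;
        size_t offset = start % PAGE_SIZE;

        while (len > 0) {
            /* never copy past the end of the current page */
            size_t cur = len < PAGE_SIZE - offset ? len : PAGE_SIZE - offset;

            memcpy(dst, pages[i] + offset, cur);
            dst += cur;
            len -= cur;
            offset = 0;     /* later pages are read from the start */
            i++;
        }
    }

    int main(void)
    {
        static char pages[2][PAGE_SIZE];
        char out[32];

        memset(pages[0], 'a', PAGE_SIZE);
        memset(pages[1], 'b', PAGE_SIZE);
        read_paged(pages, PAGE_SIZE - 4, 8, out);   /* straddles the seam */
        printf("%.8s\n", out);                      /* aaaabbbb */
        return 0;
    }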
@@ -5283,19 +5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
 	struct page *page;
 	char *kaddr;
 	char __user *dst = (char __user *)dstv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	int ret = 0;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
 		if (copy_to_user(dst, kaddr + offset, cur)) {
 			ret = -EFAULT;
@@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 			      unsigned long *map_start,
 			      unsigned long *map_len)
 {
-	size_t offset = start & (PAGE_CACHE_SIZE - 1);
+	size_t offset = start & (PAGE_SIZE - 1);
 	char *kaddr;
 	struct page *p;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	unsigned long end_i = (start_offset + start + min_len - 1) >>
-		PAGE_CACHE_SHIFT;
+		PAGE_SHIFT;
 
 	if (i != end_i)
 		return -EINVAL;
@@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		*map_start = 0;
 	} else {
 		offset = 0;
-		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
+		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
 	}
 
 	if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	p = eb->pages[i];
 	kaddr = page_address(p);
 	*map = kaddr + offset;
-	*map_len = PAGE_CACHE_SIZE - offset;
+	*map_len = PAGE_SIZE - offset;
 	return 0;
 }
 
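Review note: map_private_extent_buffer() can only hand back a direct pointer when the requested range sits inside one page, hence the `i != end_i` check. A worked check of that condition (4 KiB pages, illustrative numbers):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* returns 0 if [start, start + min_len) fits in one page of the buffer */
    static int can_map(unsigned long eb_start, unsigned long start,
                       unsigned long min_len)
    {
        unsigned long start_offset = eb_start & (PAGE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_SHIFT;
        unsigned long end_i = (start_offset + start + min_len - 1) >> PAGE_SHIFT;

        return i == end_i ? 0 : -1;
    }

    int main(void)
    {
        printf("%d\n", can_map(0, 4090, 32));   /* -1: crosses page 0 -> 1 */
        printf("%d\n", can_map(0, 4096, 32));   /*  0: fully inside page 1 */
        return 0;
    }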
@@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 	struct page *page;
 	char *kaddr;
 	char *ptr = (char *)ptrv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	int ret = 0;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 
 		kaddr = page_address(page);
 		ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 	struct page *page;
 	char *kaddr;
 	char *src = (char *)srcv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, PAGE_CACHE_SIZE - offset);
+		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
 		memcpy(kaddr + offset, src, cur);
 
@@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 	size_t offset;
 	struct page *page;
 	char *kaddr;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, PAGE_CACHE_SIZE - offset);
+		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
 		memset(kaddr + offset, c, cur);
 
@@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 	size_t offset;
 	struct page *page;
 	char *kaddr;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
 
 	WARN_ON(src->len != dst_len);
 
 	offset = (start_offset + dst_offset) &
-		(PAGE_CACHE_SIZE - 1);
+		(PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = dst->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
 
 		kaddr = page_address(page);
 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
 				    unsigned long *page_index,
 				    size_t *page_offset)
 {
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
 	size_t byte_offset = BIT_BYTE(nr);
 	size_t offset;
 
@@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
 	 */
 	offset = start_offset + start + byte_offset;
 
-	*page_index = offset >> PAGE_CACHE_SHIFT;
-	*page_offset = offset & (PAGE_CACHE_SIZE - 1);
+	*page_index = offset >> PAGE_SHIFT;
+	*page_offset = offset & (PAGE_SIZE - 1);
 }
 
 /**
@@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
 		len -= bits_to_set;
 		bits_to_set = BITS_PER_BYTE;
 		mask_to_set = ~0U;
-		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
 			WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
 		len -= bits_to_clear;
 		bits_to_clear = BITS_PER_BYTE;
 		mask_to_clear = ~0U;
-		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
 			WARN_ON(!PageUptodate(page));
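Review note: eb_bitmap_offset() turns a bit number into (page, byte-offset) coordinates. A runnable sketch of the same translation (BIT_BYTE here is just nr / 8, matching the kernel's little-endian bitmap layout; 4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define BIT_BYTE(nr) ((nr) / 8)

    int main(void)
    {
        unsigned long start = 100;      /* bitmap starts at byte 100 */
        unsigned long nr = 40000;       /* bit number within the bitmap */
        unsigned long offset = start + BIT_BYTE(nr);

        printf("page %lu, byte %lu, bit %lu\n",
               offset >> PAGE_SHIFT,            /* 1 */
               offset & (PAGE_SIZE - 1),        /* 1004 */
               nr % 8);                         /* 0 */
        return 0;
    }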
@@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	size_t cur;
 	size_t dst_off_in_page;
 	size_t src_off_in_page;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
 	unsigned long dst_i;
 	unsigned long src_i;
 
@@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 
 	while (len > 0) {
 		dst_off_in_page = (start_offset + dst_offset) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 		src_off_in_page = (start_offset + src_offset) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 
-		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
-		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
+		src_i = (start_offset + src_offset) >> PAGE_SHIFT;
 
-		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+		cur = min(len, (unsigned long)(PAGE_SIZE -
 					       src_off_in_page));
 		cur = min_t(unsigned long, cur,
-			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
+			(unsigned long)(PAGE_SIZE - dst_off_in_page));
 
 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
 			   dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	size_t src_off_in_page;
 	unsigned long dst_end = dst_offset + len - 1;
 	unsigned long src_end = src_offset + len - 1;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
 	unsigned long dst_i;
 	unsigned long src_i;
 
@@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		return;
 	}
 	while (len > 0) {
-		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
-		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
+		src_i = (start_offset + src_end) >> PAGE_SHIFT;
 
 		dst_off_in_page = (start_offset + dst_end) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 		src_off_in_page = (start_offset + src_end) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 
 		cur = min_t(unsigned long, len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 5dbf92e68fbd..b5e0ade90e88 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -120,7 +120,7 @@ struct extent_state {
 };
 
 #define INLINE_EXTENT_BUFFER_PAGES 16
-#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
 struct extent_buffer {
 	u64 start;
 	unsigned long len;
@@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
 static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-		(start >> PAGE_CACHE_SHIFT);
+	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
+		(start >> PAGE_SHIFT);
 }
 
 static inline void extent_buffer_get(struct extent_buffer *eb)
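Review note: num_extent_pages() counts the pages touched by [start, start+len): round the end up, round the start down, subtract. The unaligned case is the interesting one, so a runnable check (4 KiB pages assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static unsigned long num_extent_pages(uint64_t start, uint64_t len)
    {
        return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                (start >> PAGE_SHIFT);
    }

    int main(void)
    {
        /* 16 KiB buffer, page aligned: exactly 4 pages */
        printf("%lu\n", num_extent_pages(0, 16384));
        /* 16 KiB buffer starting mid-page: spills into a 5th page */
        printf("%lu\n", num_extent_pages(2048, 16384));
        return 0;
    }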
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b5baf5bdc8e1..7a7d6e253cfc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -32,7 +32,7 @@
 				   size) - 1))
 
 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
-				       PAGE_CACHE_SIZE))
+				       PAGE_SIZE))
 
 #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
 				   sizeof(struct btrfs_ordered_sum)) / \
@@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 		csum = (u8 *)dst;
 	}
 
-	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
+	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
 		path->reada = READA_FORWARD;
 
 	WARN_ON(bio->bi_vcnt <= 0);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15a09cb156ce..8d7b5a45c005 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 	size_t copied = 0;
 	size_t total_copied = 0;
 	int pg = 0;
-	int offset = pos & (PAGE_CACHE_SIZE - 1);
+	int offset = pos & (PAGE_SIZE - 1);
 
 	while (write_bytes > 0) {
 		size_t count = min_t(size_t,
-				     PAGE_CACHE_SIZE - offset, write_bytes);
+				     PAGE_SIZE - offset, write_bytes);
 		struct page *page = prepared_pages[pg];
 		/*
 		 * Copy data from userspace to the current page
@@ -448,7 +448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 		if (unlikely(copied == 0))
 			break;
 
-		if (copied < PAGE_CACHE_SIZE - offset) {
+		if (copied < PAGE_SIZE - offset) {
 			offset += copied;
 		} else {
 			pg++;
@@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 		 */
 		ClearPageChecked(pages[i]);
 		unlock_page(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 }
 
@@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode,
 {
 	int ret = 0;
 
-	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
 	    !PageUptodate(page)) {
 		ret = btrfs_readpage(NULL, page);
 		if (ret)
@@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 				  size_t write_bytes, bool force_uptodate)
 {
 	int i;
-	unsigned long index = pos >> PAGE_CACHE_SHIFT;
+	unsigned long index = pos >> PAGE_SHIFT;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 	int err = 0;
 	int faili;
@@ -1345,7 +1345,7 @@ again:
 			err = prepare_uptodate_page(inode, pages[i],
 						    pos + write_bytes, false);
 			if (err) {
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 				if (err == -EAGAIN) {
 					err = 0;
 					goto again;
@@ -1360,7 +1360,7 @@ again:
 fail:
 	while (faili >= 0) {
 		unlock_page(pages[faili]);
-		page_cache_release(pages[faili]);
+		put_page(pages[faili]);
 		faili--;
 	}
 	return err;
@@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 				     cached_state, GFP_NOFS);
 		for (i = 0; i < num_pages; i++) {
 			unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	bool force_page_uptodate = false;
 	bool need_unlock;
 
-	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
-		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
+	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
+		     PAGE_SIZE / (sizeof(struct page *)));
 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
 	nrptrs = max(nrptrs, 8);
 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		return -ENOMEM;
 
 	while (iov_iter_count(i) > 0) {
-		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+		size_t offset = pos & (PAGE_SIZE - 1);
 		size_t sector_offset;
 		size_t write_bytes = min(iov_iter_count(i),
-					 nrptrs * (size_t)PAGE_CACHE_SIZE -
+					 nrptrs * (size_t)PAGE_SIZE -
 					 offset);
 		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
-						PAGE_CACHE_SIZE);
+						PAGE_SIZE);
 		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
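Review note: the nrptrs bound does three things at once: enough page pointers for the whole iov, but no more than fit in one page of pointers, and no more than the dirty-throttling budget allows, with a floor of 8. A runnable sketch of the same clamping (the throttling number is illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

    int main(void)
    {
        unsigned long count = 3 * 1024 * 1024;   /* 3 MiB write */
        unsigned long dirty_budget = 200;        /* stand-in for nr_dirtied_pause */
        unsigned long nrptrs;

        nrptrs = min_ul(DIV_ROUND_UP(count, PAGE_SIZE),   /* 768 pages needed */
                        PAGE_SIZE / sizeof(void *));      /* 512 fit in a page */
        nrptrs = min_ul(nrptrs, dirty_budget);            /* throttled to 200 */
        nrptrs = max_ul(nrptrs, 8);                       /* but never below 8 */
        printf("batch %lu pages per loop\n", nrptrs);
        return 0;
    }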
@@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			 * write_bytes, so scale down.
 			 */
 			num_pages = DIV_ROUND_UP(write_bytes + offset,
-						 PAGE_CACHE_SIZE);
+						 PAGE_SIZE);
 			reserve_bytes = round_up(write_bytes + sector_offset,
 						 root->sectorsize);
 			goto reserve_metadata;
@@ -1609,7 +1609,7 @@ again:
 		} else {
 			force_page_uptodate = false;
 			dirty_pages = DIV_ROUND_UP(copied + offset,
-						   PAGE_CACHE_SIZE);
+						   PAGE_SIZE);
 		}
 
 		/*
@@ -1641,7 +1641,7 @@ again:
 			u64 __pos;
 
 			__pos = round_down(pos, root->sectorsize) +
-				(dirty_pages << PAGE_CACHE_SHIFT);
+				(dirty_pages << PAGE_SHIFT);
 			btrfs_delalloc_release_space(inode, __pos,
 						     release_bytes);
 		}
@@ -1682,7 +1682,7 @@ again:
 		cond_resched();
 
 		balance_dirty_pages_ratelimited(inode->i_mapping);
-		if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
+		if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
 			btrfs_btree_balance_dirty(root);
 
 		pos += copied;
@@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
 		goto out;
 	written += written_buffered;
 	iocb->ki_pos = pos + written_buffered;
-	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
-				 endbyte >> PAGE_CACHE_SHIFT);
+	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+				 endbyte >> PAGE_SHIFT);
 out:
 	return written ? written : err;
 }
@@ -1905,7 +1905,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
  */
 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
-	struct dentry *dentry = file->f_path.dentry;
+	struct dentry *dentry = file_dentry(file);
 	struct inode *inode = d_inode(dentry);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
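Review note: the btrfs_sync_file() hunk is more than the rename surrounding it. With overlayfs stacked on top of btrfs, file->f_path.dentry can point at the overlay's dentry rather than btrfs's own; file_dentry() (from linux/fs.h) resolves to the dentry of the filesystem the file really lives on, so the d_inode() call below yields the btrfs inode that fsync actually needs to flush.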
@@ -2682,9 +2682,12 @@ static long btrfs_fallocate(struct file *file, int mode,
 		return ret;
 
 	inode_lock(inode);
-	ret = inode_newsize_ok(inode, alloc_end);
-	if (ret)
-		goto out;
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
+		ret = inode_newsize_ok(inode, offset + len);
+		if (ret)
+			goto out;
+	}
 
 	/*
 	 * TODO: Move these two operations after we have checked
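Review note: the behavioural fix here is that a plain fallocate(2) within the current file size (or any FALLOC_FL_KEEP_SIZE call) no longer runs the inode_newsize_ok() limit check, which previously could fail a preallocation that does not grow the file at all. A runnable sketch of the new gating condition (mode bit value and sizes are illustrative):

    #include <stdio.h>

    #define FALLOC_FL_KEEP_SIZE 0x01

    /* returns 1 if the new-size check must run, 0 if it can be skipped */
    static int needs_size_check(int mode, long long offset, long long len,
                                long long i_size)
    {
        return !(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size;
    }

    int main(void)
    {
        long long i_size = 1 << 20;     /* 1 MiB file */

        /* preallocating inside the file: no size check */
        printf("%d\n", needs_size_check(0, 0, 4096, i_size));
        /* growing the file: must validate offset + len */
        printf("%d\n", needs_size_check(0, i_size, 4096, i_size));
        /* KEEP_SIZE never changes i_size, so never checked */
        printf("%d\n", needs_size_check(FALLOC_FL_KEEP_SIZE, i_size, 4096, i_size));
        return 0;
    }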
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8f835bfa1bdd..5e6062c26129 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP		(PAGE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG	SZ_32K
 
 struct btrfs_trim_range {
@@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode)
 		return -ENOMEM;
 
 	file_ra_state_init(ra, inode->i_mapping);
-	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
 	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
 
@@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 	int num_pages;
 	int check_crcs = 0;
 
-	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
 	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
 		check_crcs = 1;
 
 	/* Make sure we can fit our crcs into the first page */
 	if (write && check_crcs &&
-	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
 		return -ENOSPC;
 
 	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
@@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
354 io_ctl->page = io_ctl->pages[io_ctl->index++]; 354 io_ctl->page = io_ctl->pages[io_ctl->index++];
355 io_ctl->cur = page_address(io_ctl->page); 355 io_ctl->cur = page_address(io_ctl->page);
356 io_ctl->orig = io_ctl->cur; 356 io_ctl->orig = io_ctl->cur;
357 io_ctl->size = PAGE_CACHE_SIZE; 357 io_ctl->size = PAGE_SIZE;
358 if (clear) 358 if (clear)
359 memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); 359 memset(io_ctl->cur, 0, PAGE_SIZE);
360} 360}
361 361
362static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) 362static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
@@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
369 if (io_ctl->pages[i]) { 369 if (io_ctl->pages[i]) {
370 ClearPageChecked(io_ctl->pages[i]); 370 ClearPageChecked(io_ctl->pages[i]);
371 unlock_page(io_ctl->pages[i]); 371 unlock_page(io_ctl->pages[i]);
372 page_cache_release(io_ctl->pages[i]); 372 put_page(io_ctl->pages[i]);
373 } 373 }
374 } 374 }
375} 375}
@@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
475 offset = sizeof(u32) * io_ctl->num_pages; 475 offset = sizeof(u32) * io_ctl->num_pages;
476 476
477 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 477 crc = btrfs_csum_data(io_ctl->orig + offset, crc,
478 PAGE_CACHE_SIZE - offset); 478 PAGE_SIZE - offset);
479 btrfs_csum_final(crc, (char *)&crc); 479 btrfs_csum_final(crc, (char *)&crc);
480 io_ctl_unmap_page(io_ctl); 480 io_ctl_unmap_page(io_ctl);
481 tmp = page_address(io_ctl->pages[0]); 481 tmp = page_address(io_ctl->pages[0]);
@@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
503 503
504 io_ctl_map_page(io_ctl, 0); 504 io_ctl_map_page(io_ctl, 0);
505 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 505 crc = btrfs_csum_data(io_ctl->orig + offset, crc,
506 PAGE_CACHE_SIZE - offset); 506 PAGE_SIZE - offset);
507 btrfs_csum_final(crc, (char *)&crc); 507 btrfs_csum_final(crc, (char *)&crc);
508 if (val != crc) { 508 if (val != crc) {
509 btrfs_err_rl(io_ctl->root->fs_info, 509 btrfs_err_rl(io_ctl->root->fs_info,
@@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
561 io_ctl_map_page(io_ctl, 0); 561 io_ctl_map_page(io_ctl, 0);
562 } 562 }
563 563
564 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); 564 memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
565 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 565 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
566 if (io_ctl->index < io_ctl->num_pages) 566 if (io_ctl->index < io_ctl->num_pages)
567 io_ctl_map_page(io_ctl, 0); 567 io_ctl_map_page(io_ctl, 0);
@@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
621 if (ret) 621 if (ret)
622 return ret; 622 return ret;
623 623
624 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); 624 memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
625 io_ctl_unmap_page(io_ctl); 625 io_ctl_unmap_page(io_ctl);
626 626
627 return 0; 627 return 0;
@@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
775 } else { 775 } else {
776 ASSERT(num_bitmaps); 776 ASSERT(num_bitmaps);
777 num_bitmaps--; 777 num_bitmaps--;
778 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 778 e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
779 if (!e->bitmap) { 779 if (!e->bitmap) {
780 kmem_cache_free( 780 kmem_cache_free(
781 btrfs_free_space_cachep, e); 781 btrfs_free_space_cachep, e);
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1661 * we add more bitmaps. 1661 * we add more bitmaps.
1662 */ 1662 */
1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; 1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
1664 1664
1665 if (bitmap_bytes >= max_bytes) { 1665 if (bitmap_bytes >= max_bytes) {
1666 ctl->extents_thresh = 0; 1666 ctl->extents_thresh = 0;
@@ -2111,7 +2111,7 @@ new_bitmap:
2111 } 2111 }
2112 2112
2113 /* allocate the bitmap */ 2113 /* allocate the bitmap */
2114 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 2114 info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
2115 spin_lock(&ctl->tree_lock); 2115 spin_lock(&ctl->tree_lock);
2116 if (!info->bitmap) { 2116 if (!info->bitmap) {
2117 ret = -ENOMEM; 2117 ret = -ENOMEM;
@@ -3580,7 +3580,7 @@ again:
3580 } 3580 }
3581 3581
3582 if (!map) { 3582 if (!map) {
3583 map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 3583 map = kzalloc(PAGE_SIZE, GFP_NOFS);
3584 if (!map) { 3584 if (!map) {
3585 kmem_cache_free(btrfs_free_space_cachep, info); 3585 kmem_cache_free(btrfs_free_space_cachep, info);
3586 return -ENOMEM; 3586 return -ENOMEM;
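
In io_ctl_init() above, the free-space cache stores one crc32 per page at the head of the first page, so a write is refused with -ENOSPC when the crc array would no longer fit in front of the data. A runnable userspace sketch of that guard, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL  /* assumed page size */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint64_t i_size = 4UL * 1024 * 1024;  /* hypothetical cache file size */
        unsigned long num_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
        unsigned long crc_bytes = num_pages * sizeof(uint32_t);

        /* Mirrors the io_ctl_init() check: every per-page crc32 must fit
         * in the first page ahead of the cached data. */
        printf("%lu pages, crc array %lu bytes, %s\n", num_pages, crc_bytes,
               crc_bytes < PAGE_SIZE ? "fits" : "does not fit (-ENOSPC)");
        return 0;
    }
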
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 1f0ec19b23f6..70107f7c9307 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
283} 283}
284 284
285#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space)) 285#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
286#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) 286#define INODES_PER_BITMAP (PAGE_SIZE * 8)
287 287
288/* 288/*
289 * The goal is to keep the memory used by the free_ino tree so that it won't 289 * The goal is to keep the memory used by the free_ino tree so that it won't
@@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
317 } 317 }
318 318
319 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * 319 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
320 PAGE_CACHE_SIZE / sizeof(*info); 320 PAGE_SIZE / sizeof(*info);
321} 321}
322 322
323/* 323/*
@@ -481,12 +481,12 @@ again:
481 481
482 spin_lock(&ctl->tree_lock); 482 spin_lock(&ctl->tree_lock);
483 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; 483 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
484 prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); 484 prealloc = ALIGN(prealloc, PAGE_SIZE);
485 prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; 485 prealloc += ctl->total_bitmaps * PAGE_SIZE;
486 spin_unlock(&ctl->tree_lock); 486 spin_unlock(&ctl->tree_lock);
487 487
488 /* Just to make sure we have enough space */ 488 /* Just to make sure we have enough space */
489 prealloc += 8 * PAGE_CACHE_SIZE; 489 prealloc += 8 * PAGE_SIZE;
490 490
491 ret = btrfs_delalloc_reserve_space(inode, 0, prealloc); 491 ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
492 if (ret) 492 if (ret)
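
The inode-map hunk sizes its delalloc reservation from the cache contents: extent entries rounded up to whole pages, one page per bitmap, plus eight pages of slack. A userspace sketch of the same arithmetic, with hypothetical entry counts and an assumed 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* assumed page size */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long entry_size = 48;     /* assumed sizeof(struct btrfs_free_space) */
        unsigned long free_extents = 100;  /* hypothetical cache state */
        unsigned long total_bitmaps = 2;   /* hypothetical cache state */
        unsigned long prealloc;

        prealloc = ALIGN(entry_size * free_extents, PAGE_SIZE); /* extent entries */
        prealloc += total_bitmaps * PAGE_SIZE;  /* one page per bitmap */
        prealloc += 8 * PAGE_SIZE;              /* slack, as in the hunk above */
        printf("reserve %lu bytes of delalloc space\n", prealloc);
        return 0;
    }
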
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a5688ffdfe..2aaba58b4856 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
194 while (compressed_size > 0) { 194 while (compressed_size > 0) {
195 cpage = compressed_pages[i]; 195 cpage = compressed_pages[i];
196 cur_size = min_t(unsigned long, compressed_size, 196 cur_size = min_t(unsigned long, compressed_size,
197 PAGE_CACHE_SIZE); 197 PAGE_SIZE);
198 198
199 kaddr = kmap_atomic(cpage); 199 kaddr = kmap_atomic(cpage);
200 write_extent_buffer(leaf, kaddr, ptr, cur_size); 200 write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
208 compress_type); 208 compress_type);
209 } else { 209 } else {
210 page = find_get_page(inode->i_mapping, 210 page = find_get_page(inode->i_mapping,
211 start >> PAGE_CACHE_SHIFT); 211 start >> PAGE_SHIFT);
212 btrfs_set_file_extent_compression(leaf, ei, 0); 212 btrfs_set_file_extent_compression(leaf, ei, 0);
213 kaddr = kmap_atomic(page); 213 kaddr = kmap_atomic(page);
214 offset = start & (PAGE_CACHE_SIZE - 1); 214 offset = start & (PAGE_SIZE - 1);
215 write_extent_buffer(leaf, kaddr + offset, ptr, size); 215 write_extent_buffer(leaf, kaddr + offset, ptr, size);
216 kunmap_atomic(kaddr); 216 kunmap_atomic(kaddr);
217 page_cache_release(page); 217 put_page(page);
218 } 218 }
219 btrfs_mark_buffer_dirty(leaf); 219 btrfs_mark_buffer_dirty(leaf);
220 btrfs_release_path(path); 220 btrfs_release_path(path);
@@ -322,7 +322,7 @@ out:
322 * And at reserve time, it's always aligned to page size, so 322 * And at reserve time, it's always aligned to page size, so
323 * just free one page here. 323 * just free one page here.
324 */ 324 */
325 btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); 325 btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
326 btrfs_free_path(path); 326 btrfs_free_path(path);
327 btrfs_end_transaction(trans, root); 327 btrfs_end_transaction(trans, root);
328 return ret; 328 return ret;
@@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode,
435 actual_end = min_t(u64, isize, end + 1); 435 actual_end = min_t(u64, isize, end + 1);
436again: 436again:
437 will_compress = 0; 437 will_compress = 0;
438 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 438 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
439 nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE); 439 nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
440 440
441 /* 441 /*
442 * we don't want to send crud past the end of i_size through 442 * we don't want to send crud past the end of i_size through
@@ -514,7 +514,7 @@ again:
514 514
515 if (!ret) { 515 if (!ret) {
516 unsigned long offset = total_compressed & 516 unsigned long offset = total_compressed &
517 (PAGE_CACHE_SIZE - 1); 517 (PAGE_SIZE - 1);
518 struct page *page = pages[nr_pages_ret - 1]; 518 struct page *page = pages[nr_pages_ret - 1];
519 char *kaddr; 519 char *kaddr;
520 520
@@ -524,7 +524,7 @@ again:
524 if (offset) { 524 if (offset) {
525 kaddr = kmap_atomic(page); 525 kaddr = kmap_atomic(page);
526 memset(kaddr + offset, 0, 526 memset(kaddr + offset, 0,
527 PAGE_CACHE_SIZE - offset); 527 PAGE_SIZE - offset);
528 kunmap_atomic(kaddr); 528 kunmap_atomic(kaddr);
529 } 529 }
530 will_compress = 1; 530 will_compress = 1;
@@ -580,7 +580,7 @@ cont:
580 * one last check to make sure the compression is really a 580 * one last check to make sure the compression is really a
581 * win, compare the page count read with the blocks on disk 581 * win, compare the page count read with the blocks on disk
582 */ 582 */
583 total_in = ALIGN(total_in, PAGE_CACHE_SIZE); 583 total_in = ALIGN(total_in, PAGE_SIZE);
584 if (total_compressed >= total_in) { 584 if (total_compressed >= total_in) {
585 will_compress = 0; 585 will_compress = 0;
586 } else { 586 } else {
@@ -594,7 +594,7 @@ cont:
594 */ 594 */
595 for (i = 0; i < nr_pages_ret; i++) { 595 for (i = 0; i < nr_pages_ret; i++) {
596 WARN_ON(pages[i]->mapping); 596 WARN_ON(pages[i]->mapping);
597 page_cache_release(pages[i]); 597 put_page(pages[i]);
598 } 598 }
599 kfree(pages); 599 kfree(pages);
600 pages = NULL; 600 pages = NULL;
@@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed:
650free_pages_out: 650free_pages_out:
651 for (i = 0; i < nr_pages_ret; i++) { 651 for (i = 0; i < nr_pages_ret; i++) {
652 WARN_ON(pages[i]->mapping); 652 WARN_ON(pages[i]->mapping);
653 page_cache_release(pages[i]); 653 put_page(pages[i]);
654 } 654 }
655 kfree(pages); 655 kfree(pages);
656} 656}
@@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
664 664
665 for (i = 0; i < async_extent->nr_pages; i++) { 665 for (i = 0; i < async_extent->nr_pages; i++) {
666 WARN_ON(async_extent->pages[i]->mapping); 666 WARN_ON(async_extent->pages[i]->mapping);
667 page_cache_release(async_extent->pages[i]); 667 put_page(async_extent->pages[i]);
668 } 668 }
669 kfree(async_extent->pages); 669 kfree(async_extent->pages);
670 async_extent->nr_pages = 0; 670 async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode,
966 PAGE_END_WRITEBACK); 966 PAGE_END_WRITEBACK);
967 967
968 *nr_written = *nr_written + 968 *nr_written = *nr_written +
969 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 969 (end - start + PAGE_SIZE) / PAGE_SIZE;
970 *page_started = 1; 970 *page_started = 1;
971 goto out; 971 goto out;
972 } else if (ret < 0) { 972 } else if (ret < 0) {
@@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
1106 async_cow = container_of(work, struct async_cow, work); 1106 async_cow = container_of(work, struct async_cow, work);
1107 1107
1108 root = async_cow->root; 1108 root = async_cow->root;
1109 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1109 nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1110 PAGE_CACHE_SHIFT; 1110 PAGE_SHIFT;
1111 1111
1112 /* 1112 /*
1113 * atomic_sub_return implies a barrier for waitqueue_active 1113 * atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1164 async_cow_start, async_cow_submit, 1164 async_cow_start, async_cow_submit,
1165 async_cow_free); 1165 async_cow_free);
1166 1166
1167 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> 1167 nr_pages = (cur_end - start + PAGE_SIZE) >>
1168 PAGE_CACHE_SHIFT; 1168 PAGE_SHIFT;
1169 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); 1169 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1170 1170
1171 btrfs_queue_work(root->fs_info->delalloc_workers, 1171 btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1960int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 1960int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1961 struct extent_state **cached_state) 1961 struct extent_state **cached_state)
1962{ 1962{
1963 WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); 1963 WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1964 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1964 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1965 cached_state, GFP_NOFS); 1965 cached_state, GFP_NOFS);
1966} 1966}
@@ -1993,7 +1993,7 @@ again:
1993 1993
1994 inode = page->mapping->host; 1994 inode = page->mapping->host;
1995 page_start = page_offset(page); 1995 page_start = page_offset(page);
1996 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1996 page_end = page_offset(page) + PAGE_SIZE - 1;
1997 1997
1998 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 1998 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
1999 &cached_state); 1999 &cached_state);
@@ -2003,7 +2003,7 @@ again:
2003 goto out; 2003 goto out;
2004 2004
2005 ordered = btrfs_lookup_ordered_range(inode, page_start, 2005 ordered = btrfs_lookup_ordered_range(inode, page_start,
2006 PAGE_CACHE_SIZE); 2006 PAGE_SIZE);
2007 if (ordered) { 2007 if (ordered) {
2008 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 2008 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2009 page_end, &cached_state, GFP_NOFS); 2009 page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@ again:
2014 } 2014 }
2015 2015
2016 ret = btrfs_delalloc_reserve_space(inode, page_start, 2016 ret = btrfs_delalloc_reserve_space(inode, page_start,
2017 PAGE_CACHE_SIZE); 2017 PAGE_SIZE);
2018 if (ret) { 2018 if (ret) {
2019 mapping_set_error(page->mapping, ret); 2019 mapping_set_error(page->mapping, ret);
2020 end_extent_writepage(page, ret, page_start, page_end); 2020 end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@ out:
2030 &cached_state, GFP_NOFS); 2030 &cached_state, GFP_NOFS);
2031out_page: 2031out_page:
2032 unlock_page(page); 2032 unlock_page(page);
2033 page_cache_release(page); 2033 put_page(page);
2034 kfree(fixup); 2034 kfree(fixup);
2035} 2035}
2036 2036
@@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2063 return -EAGAIN; 2063 return -EAGAIN;
2064 2064
2065 SetPageChecked(page); 2065 SetPageChecked(page);
2066 page_cache_get(page); 2066 get_page(page);
2067 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2067 btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2068 btrfs_writepage_fixup_worker, NULL, NULL); 2068 btrfs_writepage_fixup_worker, NULL, NULL);
2069 fixup->page = page; 2069 fixup->page = page;
@@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode,
4247 4247
4248 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { 4248 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4249 loff_t offset = new_size; 4249 loff_t offset = new_size;
4250 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); 4250 loff_t page_end = ALIGN(offset, PAGE_SIZE);
4251 4251
4252 /* 4252 /*
4253 * Zero out the remaining of the last page of our inline extent, 4253 * Zero out the remaining of the last page of our inline extent,
@@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4633 struct extent_state *cached_state = NULL; 4633 struct extent_state *cached_state = NULL;
4634 char *kaddr; 4634 char *kaddr;
4635 u32 blocksize = root->sectorsize; 4635 u32 blocksize = root->sectorsize;
4636 pgoff_t index = from >> PAGE_CACHE_SHIFT; 4636 pgoff_t index = from >> PAGE_SHIFT;
4637 unsigned offset = from & (blocksize - 1); 4637 unsigned offset = from & (blocksize - 1);
4638 struct page *page; 4638 struct page *page;
4639 gfp_t mask = btrfs_alloc_write_mask(mapping); 4639 gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@ again:
4668 lock_page(page); 4668 lock_page(page);
4669 if (page->mapping != mapping) { 4669 if (page->mapping != mapping) {
4670 unlock_page(page); 4670 unlock_page(page);
4671 page_cache_release(page); 4671 put_page(page);
4672 goto again; 4672 goto again;
4673 } 4673 }
4674 if (!PageUptodate(page)) { 4674 if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@ again:
4686 unlock_extent_cached(io_tree, block_start, block_end, 4686 unlock_extent_cached(io_tree, block_start, block_end,
4687 &cached_state, GFP_NOFS); 4687 &cached_state, GFP_NOFS);
4688 unlock_page(page); 4688 unlock_page(page);
4689 page_cache_release(page); 4689 put_page(page);
4690 btrfs_start_ordered_extent(inode, ordered, 1); 4690 btrfs_start_ordered_extent(inode, ordered, 1);
4691 btrfs_put_ordered_extent(ordered); 4691 btrfs_put_ordered_extent(ordered);
4692 goto again; 4692 goto again;
@@ -4728,7 +4728,7 @@ out_unlock:
4728 btrfs_delalloc_release_space(inode, block_start, 4728 btrfs_delalloc_release_space(inode, block_start,
4729 blocksize); 4729 blocksize);
4730 unlock_page(page); 4730 unlock_page(page);
4731 page_cache_release(page); 4731 put_page(page);
4732out: 4732out:
4733 return ret; 4733 return ret;
4734} 4734}
@@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
6717 6717
6718 read_extent_buffer(leaf, tmp, ptr, inline_size); 6718 read_extent_buffer(leaf, tmp, ptr, inline_size);
6719 6719
6720 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 6720 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6721 ret = btrfs_decompress(compress_type, tmp, page, 6721 ret = btrfs_decompress(compress_type, tmp, page,
6722 extent_offset, inline_size, max_size); 6722 extent_offset, inline_size, max_size);
6723 kfree(tmp); 6723 kfree(tmp);
@@ -6879,8 +6879,8 @@ next:
6879 6879
6880 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6880 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6881 extent_offset = page_offset(page) + pg_offset - extent_start; 6881 extent_offset = page_offset(page) + pg_offset - extent_start;
6882 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 6882 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6883 size - extent_offset); 6883 size - extent_offset);
6884 em->start = extent_start + extent_offset; 6884 em->start = extent_start + extent_offset;
6885 em->len = ALIGN(copy_size, root->sectorsize); 6885 em->len = ALIGN(copy_size, root->sectorsize);
6886 em->orig_block_len = em->len; 6886 em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@ next:
6899 map = kmap(page); 6899 map = kmap(page);
6900 read_extent_buffer(leaf, map + pg_offset, ptr, 6900 read_extent_buffer(leaf, map + pg_offset, ptr,
6901 copy_size); 6901 copy_size);
6902 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 6902 if (pg_offset + copy_size < PAGE_SIZE) {
6903 memset(map + pg_offset + copy_size, 0, 6903 memset(map + pg_offset + copy_size, 0,
6904 PAGE_CACHE_SIZE - pg_offset - 6904 PAGE_SIZE - pg_offset -
6905 copy_size); 6905 copy_size);
6906 } 6906 }
6907 kunmap(page); 6907 kunmap(page);
@@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7336 int start_idx; 7336 int start_idx;
7337 int end_idx; 7337 int end_idx;
7338 7338
7339 start_idx = start >> PAGE_CACHE_SHIFT; 7339 start_idx = start >> PAGE_SHIFT;
7340 7340
7341 /* 7341 /*
7342 * end is the last byte in the last page. end == start is legal 7342 * end is the last byte in the last page. end == start is legal
7343 */ 7343 */
7344 end_idx = end >> PAGE_CACHE_SHIFT; 7344 end_idx = end >> PAGE_SHIFT;
7345 7345
7346 rcu_read_lock(); 7346 rcu_read_lock();
7347 7347
@@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7382 * include/linux/pagemap.h for details. 7382 * include/linux/pagemap.h for details.
7383 */ 7383 */
7384 if (unlikely(page != *pagep)) { 7384 if (unlikely(page != *pagep)) {
7385 page_cache_release(page); 7385 put_page(page);
7386 page = NULL; 7386 page = NULL;
7387 } 7387 }
7388 } 7388 }
@@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7390 if (page) { 7390 if (page) {
7391 if (page->index <= end_idx) 7391 if (page->index <= end_idx)
7392 found = true; 7392 found = true;
7393 page_cache_release(page); 7393 put_page(page);
7394 } 7394 }
7395 7395
7396 rcu_read_unlock(); 7396 rcu_read_unlock();
@@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8719 if (ret == 1) { 8719 if (ret == 1) {
8720 ClearPagePrivate(page); 8720 ClearPagePrivate(page);
8721 set_page_private(page, 0); 8721 set_page_private(page, 0);
8722 page_cache_release(page); 8722 put_page(page);
8723 } 8723 }
8724 return ret; 8724 return ret;
8725} 8725}
@@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8739 struct btrfs_ordered_extent *ordered; 8739 struct btrfs_ordered_extent *ordered;
8740 struct extent_state *cached_state = NULL; 8740 struct extent_state *cached_state = NULL;
8741 u64 page_start = page_offset(page); 8741 u64 page_start = page_offset(page);
8742 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 8742 u64 page_end = page_start + PAGE_SIZE - 1;
8743 u64 start; 8743 u64 start;
8744 u64 end; 8744 u64 end;
8745 int inode_evicting = inode->i_state & I_FREEING; 8745 int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@ again:
8822 * 2) Not written to disk 8822 * 2) Not written to disk
8823 * This means the reserved space should be freed here. 8823 * This means the reserved space should be freed here.
8824 */ 8824 */
8825 btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); 8825 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8826 if (!inode_evicting) { 8826 if (!inode_evicting) {
8827 clear_extent_bit(tree, page_start, page_end, 8827 clear_extent_bit(tree, page_start, page_end,
8828 EXTENT_LOCKED | EXTENT_DIRTY | 8828 EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@ again:
8837 if (PagePrivate(page)) { 8837 if (PagePrivate(page)) {
8838 ClearPagePrivate(page); 8838 ClearPagePrivate(page);
8839 set_page_private(page, 0); 8839 set_page_private(page, 0);
8840 page_cache_release(page); 8840 put_page(page);
8841 } 8841 }
8842} 8842}
8843 8843
@@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8874 u64 page_end; 8874 u64 page_end;
8875 u64 end; 8875 u64 end;
8876 8876
8877 reserved_space = PAGE_CACHE_SIZE; 8877 reserved_space = PAGE_SIZE;
8878 8878
8879 sb_start_pagefault(inode->i_sb); 8879 sb_start_pagefault(inode->i_sb);
8880 page_start = page_offset(page); 8880 page_start = page_offset(page);
8881 page_end = page_start + PAGE_CACHE_SIZE - 1; 8881 page_end = page_start + PAGE_SIZE - 1;
8882 end = page_end; 8882 end = page_end;
8883 8883
8884 /* 8884 /*
@@ -8934,15 +8934,15 @@ again:
8934 goto again; 8934 goto again;
8935 } 8935 }
8936 8936
8937 if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) { 8937 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8938 reserved_space = round_up(size - page_start, root->sectorsize); 8938 reserved_space = round_up(size - page_start, root->sectorsize);
8939 if (reserved_space < PAGE_CACHE_SIZE) { 8939 if (reserved_space < PAGE_SIZE) {
8940 end = page_start + reserved_space - 1; 8940 end = page_start + reserved_space - 1;
8941 spin_lock(&BTRFS_I(inode)->lock); 8941 spin_lock(&BTRFS_I(inode)->lock);
8942 BTRFS_I(inode)->outstanding_extents++; 8942 BTRFS_I(inode)->outstanding_extents++;
8943 spin_unlock(&BTRFS_I(inode)->lock); 8943 spin_unlock(&BTRFS_I(inode)->lock);
8944 btrfs_delalloc_release_space(inode, page_start, 8944 btrfs_delalloc_release_space(inode, page_start,
8945 PAGE_CACHE_SIZE - reserved_space); 8945 PAGE_SIZE - reserved_space);
8946 } 8946 }
8947 } 8947 }
8948 8948
@@ -8969,14 +8969,14 @@ again:
8969 ret = 0; 8969 ret = 0;
8970 8970
8971 /* page is wholly or partially inside EOF */ 8971 /* page is wholly or partially inside EOF */
8972 if (page_start + PAGE_CACHE_SIZE > size) 8972 if (page_start + PAGE_SIZE > size)
8973 zero_start = size & ~PAGE_CACHE_MASK; 8973 zero_start = size & ~PAGE_MASK;
8974 else 8974 else
8975 zero_start = PAGE_CACHE_SIZE; 8975 zero_start = PAGE_SIZE;
8976 8976
8977 if (zero_start != PAGE_CACHE_SIZE) { 8977 if (zero_start != PAGE_SIZE) {
8978 kaddr = kmap(page); 8978 kaddr = kmap(page);
8979 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 8979 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
8980 flush_dcache_page(page); 8980 flush_dcache_page(page);
8981 kunmap(page); 8981 kunmap(page);
8982 } 8982 }
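
The btrfs_page_mkwrite() hunks keep the same logic in page units: for a faulted page that straddles EOF only the bytes beyond i_size are zeroed, and a short last page reserves only round_up(size - page_start, sectorsize) instead of a full page. A userspace sketch of the zero_start computation, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL  /* assumed page size */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t size = 10000;       /* hypothetical i_size */
        uint64_t page_start = 8192;  /* last page straddles EOF */
        uint64_t zero_start;

        if (page_start + PAGE_SIZE > size)       /* wholly or partially inside EOF */
            zero_start = size & ~PAGE_MASK;      /* EOF offset within page: 1808 */
        else
            zero_start = PAGE_SIZE;              /* entirely before EOF: zero nothing */

        printf("zero bytes [%llu, %lu) of the faulted page\n",
               (unsigned long long)zero_start, PAGE_SIZE);
        return 0;
    }
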
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 053e677839fe..5a23806ae418 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -898,7 +898,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
898 u64 end; 898 u64 end;
899 899
900 read_lock(&em_tree->lock); 900 read_lock(&em_tree->lock);
901 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); 901 em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
902 read_unlock(&em_tree->lock); 902 read_unlock(&em_tree->lock);
903 903
904 if (em) { 904 if (em) {
@@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
988 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 988 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
989 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 989 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
990 struct extent_map *em; 990 struct extent_map *em;
991 u64 len = PAGE_CACHE_SIZE; 991 u64 len = PAGE_SIZE;
992 992
993 /* 993 /*
994 * hopefully we have this extent in the tree already, try without 994 * hopefully we have this extent in the tree already, try without
@@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode,
1124 struct extent_io_tree *tree; 1124 struct extent_io_tree *tree;
1125 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 1125 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1126 1126
1127 file_end = (isize - 1) >> PAGE_CACHE_SHIFT; 1127 file_end = (isize - 1) >> PAGE_SHIFT;
1128 if (!isize || start_index > file_end) 1128 if (!isize || start_index > file_end)
1129 return 0; 1129 return 0;
1130 1130
1131 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); 1131 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
1132 1132
1133 ret = btrfs_delalloc_reserve_space(inode, 1133 ret = btrfs_delalloc_reserve_space(inode,
1134 start_index << PAGE_CACHE_SHIFT, 1134 start_index << PAGE_SHIFT,
1135 page_cnt << PAGE_CACHE_SHIFT); 1135 page_cnt << PAGE_SHIFT);
1136 if (ret) 1136 if (ret)
1137 return ret; 1137 return ret;
1138 i_done = 0; 1138 i_done = 0;
@@ -1148,7 +1148,7 @@ again:
1148 break; 1148 break;
1149 1149
1150 page_start = page_offset(page); 1150 page_start = page_offset(page);
1151 page_end = page_start + PAGE_CACHE_SIZE - 1; 1151 page_end = page_start + PAGE_SIZE - 1;
1152 while (1) { 1152 while (1) {
1153 lock_extent_bits(tree, page_start, page_end, 1153 lock_extent_bits(tree, page_start, page_end,
1154 &cached_state); 1154 &cached_state);
@@ -1169,7 +1169,7 @@ again:
1169 */ 1169 */
1170 if (page->mapping != inode->i_mapping) { 1170 if (page->mapping != inode->i_mapping) {
1171 unlock_page(page); 1171 unlock_page(page);
1172 page_cache_release(page); 1172 put_page(page);
1173 goto again; 1173 goto again;
1174 } 1174 }
1175 } 1175 }
@@ -1179,7 +1179,7 @@ again:
1179 lock_page(page); 1179 lock_page(page);
1180 if (!PageUptodate(page)) { 1180 if (!PageUptodate(page)) {
1181 unlock_page(page); 1181 unlock_page(page);
1182 page_cache_release(page); 1182 put_page(page);
1183 ret = -EIO; 1183 ret = -EIO;
1184 break; 1184 break;
1185 } 1185 }
@@ -1187,7 +1187,7 @@ again:
1187 1187
1188 if (page->mapping != inode->i_mapping) { 1188 if (page->mapping != inode->i_mapping) {
1189 unlock_page(page); 1189 unlock_page(page);
1190 page_cache_release(page); 1190 put_page(page);
1191 goto again; 1191 goto again;
1192 } 1192 }
1193 1193
@@ -1208,7 +1208,7 @@ again:
1208 wait_on_page_writeback(pages[i]); 1208 wait_on_page_writeback(pages[i]);
1209 1209
1210 page_start = page_offset(pages[0]); 1210 page_start = page_offset(pages[0]);
1211 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; 1211 page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
1212 1212
1213 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1213 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1214 page_start, page_end - 1, &cached_state); 1214 page_start, page_end - 1, &cached_state);
@@ -1222,8 +1222,8 @@ again:
1222 BTRFS_I(inode)->outstanding_extents++; 1222 BTRFS_I(inode)->outstanding_extents++;
1223 spin_unlock(&BTRFS_I(inode)->lock); 1223 spin_unlock(&BTRFS_I(inode)->lock);
1224 btrfs_delalloc_release_space(inode, 1224 btrfs_delalloc_release_space(inode,
1225 start_index << PAGE_CACHE_SHIFT, 1225 start_index << PAGE_SHIFT,
1226 (page_cnt - i_done) << PAGE_CACHE_SHIFT); 1226 (page_cnt - i_done) << PAGE_SHIFT);
1227 } 1227 }
1228 1228
1229 1229
@@ -1240,17 +1240,17 @@ again:
1240 set_page_extent_mapped(pages[i]); 1240 set_page_extent_mapped(pages[i]);
1241 set_page_dirty(pages[i]); 1241 set_page_dirty(pages[i]);
1242 unlock_page(pages[i]); 1242 unlock_page(pages[i]);
1243 page_cache_release(pages[i]); 1243 put_page(pages[i]);
1244 } 1244 }
1245 return i_done; 1245 return i_done;
1246out: 1246out:
1247 for (i = 0; i < i_done; i++) { 1247 for (i = 0; i < i_done; i++) {
1248 unlock_page(pages[i]); 1248 unlock_page(pages[i]);
1249 page_cache_release(pages[i]); 1249 put_page(pages[i]);
1250 } 1250 }
1251 btrfs_delalloc_release_space(inode, 1251 btrfs_delalloc_release_space(inode,
1252 start_index << PAGE_CACHE_SHIFT, 1252 start_index << PAGE_SHIFT,
1253 page_cnt << PAGE_CACHE_SHIFT); 1253 page_cnt << PAGE_SHIFT);
1254 return ret; 1254 return ret;
1255 1255
1256} 1256}
@@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1273 int defrag_count = 0; 1273 int defrag_count = 0;
1274 int compress_type = BTRFS_COMPRESS_ZLIB; 1274 int compress_type = BTRFS_COMPRESS_ZLIB;
1275 u32 extent_thresh = range->extent_thresh; 1275 u32 extent_thresh = range->extent_thresh;
1276 unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT; 1276 unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1277 unsigned long cluster = max_cluster; 1277 unsigned long cluster = max_cluster;
1278 u64 new_align = ~((u64)SZ_128K - 1); 1278 u64 new_align = ~((u64)SZ_128K - 1);
1279 struct page **pages = NULL; 1279 struct page **pages = NULL;
@@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1317 /* find the last page to defrag */ 1317 /* find the last page to defrag */
1318 if (range->start + range->len > range->start) { 1318 if (range->start + range->len > range->start) {
1319 last_index = min_t(u64, isize - 1, 1319 last_index = min_t(u64, isize - 1,
1320 range->start + range->len - 1) >> PAGE_CACHE_SHIFT; 1320 range->start + range->len - 1) >> PAGE_SHIFT;
1321 } else { 1321 } else {
1322 last_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1322 last_index = (isize - 1) >> PAGE_SHIFT;
1323 } 1323 }
1324 1324
1325 if (newer_than) { 1325 if (newer_than) {
@@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1331 * we always align our defrag to help keep 1331 * we always align our defrag to help keep
1332 * the extents in the file evenly spaced 1332 * the extents in the file evenly spaced
1333 */ 1333 */
1334 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; 1334 i = (newer_off & new_align) >> PAGE_SHIFT;
1335 } else 1335 } else
1336 goto out_ra; 1336 goto out_ra;
1337 } else { 1337 } else {
1338 i = range->start >> PAGE_CACHE_SHIFT; 1338 i = range->start >> PAGE_SHIFT;
1339 } 1339 }
1340 if (!max_to_defrag) 1340 if (!max_to_defrag)
1341 max_to_defrag = last_index - i + 1; 1341 max_to_defrag = last_index - i + 1;
@@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1348 inode->i_mapping->writeback_index = i; 1348 inode->i_mapping->writeback_index = i;
1349 1349
1350 while (i <= last_index && defrag_count < max_to_defrag && 1350 while (i <= last_index && defrag_count < max_to_defrag &&
1351 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) { 1351 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
1352 /* 1352 /*
1353 * make sure we stop running if someone unmounts 1353 * make sure we stop running if someone unmounts
1354 * the FS 1354 * the FS
@@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1362 break; 1362 break;
1363 } 1363 }
1364 1364
1365 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1365 if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
1366 extent_thresh, &last_len, &skip, 1366 extent_thresh, &last_len, &skip,
1367 &defrag_end, range->flags & 1367 &defrag_end, range->flags &
1368 BTRFS_DEFRAG_RANGE_COMPRESS)) { 1368 BTRFS_DEFRAG_RANGE_COMPRESS)) {
@@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1371 * the should_defrag function tells us how much to skip 1371 * the should_defrag function tells us how much to skip
1372 * bump our counter by the suggested amount 1372 * bump our counter by the suggested amount
1373 */ 1373 */
1374 next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE); 1374 next = DIV_ROUND_UP(skip, PAGE_SIZE);
1375 i = max(i + 1, next); 1375 i = max(i + 1, next);
1376 continue; 1376 continue;
1377 } 1377 }
1378 1378
1379 if (!newer_than) { 1379 if (!newer_than) {
1380 cluster = (PAGE_CACHE_ALIGN(defrag_end) >> 1380 cluster = (PAGE_ALIGN(defrag_end) >>
1381 PAGE_CACHE_SHIFT) - i; 1381 PAGE_SHIFT) - i;
1382 cluster = min(cluster, max_cluster); 1382 cluster = min(cluster, max_cluster);
1383 } else { 1383 } else {
1384 cluster = max_cluster; 1384 cluster = max_cluster;
@@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1412 i += ret; 1412 i += ret;
1413 1413
1414 newer_off = max(newer_off + 1, 1414 newer_off = max(newer_off + 1,
1415 (u64)i << PAGE_CACHE_SHIFT); 1415 (u64)i << PAGE_SHIFT);
1416 1416
1417 ret = find_new_extents(root, inode, newer_than, 1417 ret = find_new_extents(root, inode, newer_than,
1418 &newer_off, SZ_64K); 1418 &newer_off, SZ_64K);
1419 if (!ret) { 1419 if (!ret) {
1420 range->start = newer_off; 1420 range->start = newer_off;
1421 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; 1421 i = (newer_off & new_align) >> PAGE_SHIFT;
1422 } else { 1422 } else {
1423 break; 1423 break;
1424 } 1424 }
1425 } else { 1425 } else {
1426 if (ret > 0) { 1426 if (ret > 0) {
1427 i += ret; 1427 i += ret;
1428 last_len += ret << PAGE_CACHE_SHIFT; 1428 last_len += ret << PAGE_SHIFT;
1429 } else { 1429 } else {
1430 i++; 1430 i++;
1431 last_len = 0; 1431 last_len = 0;
@@ -1654,7 +1654,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1654 1654
1655 src_inode = file_inode(src.file); 1655 src_inode = file_inode(src.file);
1656 if (src_inode->i_sb != file_inode(file)->i_sb) { 1656 if (src_inode->i_sb != file_inode(file)->i_sb) {
1657 btrfs_info(BTRFS_I(src_inode)->root->fs_info, 1657 btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1658 "Snapshot src from another FS"); 1658 "Snapshot src from another FS");
1659 ret = -EXDEV; 1659 ret = -EXDEV;
1660 } else if (!inode_owner_or_capable(src_inode)) { 1660 } else if (!inode_owner_or_capable(src_inode)) {
@@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1722 if (vol_args->flags & BTRFS_SUBVOL_RDONLY) 1722 if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
1723 readonly = true; 1723 readonly = true;
1724 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { 1724 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1725 if (vol_args->size > PAGE_CACHE_SIZE) { 1725 if (vol_args->size > PAGE_SIZE) {
1726 ret = -EINVAL; 1726 ret = -EINVAL;
1727 goto free_args; 1727 goto free_args;
1728 } 1728 }
@@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2806 lock_page(page); 2806 lock_page(page);
2807 if (!PageUptodate(page)) { 2807 if (!PageUptodate(page)) {
2808 unlock_page(page); 2808 unlock_page(page);
2809 page_cache_release(page); 2809 put_page(page);
2810 return ERR_PTR(-EIO); 2810 return ERR_PTR(-EIO);
2811 } 2811 }
2812 if (page->mapping != inode->i_mapping) { 2812 if (page->mapping != inode->i_mapping) {
2813 unlock_page(page); 2813 unlock_page(page);
2814 page_cache_release(page); 2814 put_page(page);
2815 return ERR_PTR(-EAGAIN); 2815 return ERR_PTR(-EAGAIN);
2816 } 2816 }
2817 } 2817 }
@@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
2823 int num_pages, u64 off) 2823 int num_pages, u64 off)
2824{ 2824{
2825 int i; 2825 int i;
2826 pgoff_t index = off >> PAGE_CACHE_SHIFT; 2826 pgoff_t index = off >> PAGE_SHIFT;
2827 2827
2828 for (i = 0; i < num_pages; i++) { 2828 for (i = 0; i < num_pages; i++) {
2829again: 2829again:
@@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2932 pg = cmp->src_pages[i]; 2932 pg = cmp->src_pages[i];
2933 if (pg) { 2933 if (pg) {
2934 unlock_page(pg); 2934 unlock_page(pg);
2935 page_cache_release(pg); 2935 put_page(pg);
2936 } 2936 }
2937 pg = cmp->dst_pages[i]; 2937 pg = cmp->dst_pages[i];
2938 if (pg) { 2938 if (pg) {
2939 unlock_page(pg); 2939 unlock_page(pg);
2940 page_cache_release(pg); 2940 put_page(pg);
2941 } 2941 }
2942 } 2942 }
2943 kfree(cmp->src_pages); 2943 kfree(cmp->src_pages);
@@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2949 u64 len, struct cmp_pages *cmp) 2949 u64 len, struct cmp_pages *cmp)
2950{ 2950{
2951 int ret; 2951 int ret;
2952 int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; 2952 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
2953 struct page **src_pgarr, **dst_pgarr; 2953 struct page **src_pgarr, **dst_pgarr;
2954 2954
2955 /* 2955 /*
@@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2987 int ret = 0; 2987 int ret = 0;
2988 int i; 2988 int i;
2989 struct page *src_page, *dst_page; 2989 struct page *src_page, *dst_page;
2990 unsigned int cmp_len = PAGE_CACHE_SIZE; 2990 unsigned int cmp_len = PAGE_SIZE;
2991 void *addr, *dst_addr; 2991 void *addr, *dst_addr;
2992 2992
2993 i = 0; 2993 i = 0;
2994 while (len) { 2994 while (len) {
2995 if (len < PAGE_CACHE_SIZE) 2995 if (len < PAGE_SIZE)
2996 cmp_len = len; 2996 cmp_len = len;
2997 2997
2998 BUG_ON(i >= cmp->num_pages); 2998 BUG_ON(i >= cmp->num_pages);
@@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
3191 if (olen > BTRFS_MAX_DEDUPE_LEN) 3191 if (olen > BTRFS_MAX_DEDUPE_LEN)
3192 olen = BTRFS_MAX_DEDUPE_LEN; 3192 olen = BTRFS_MAX_DEDUPE_LEN;
3193 3193
3194 if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) { 3194 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3195 /* 3195 /*
3196 * Btrfs does not support blocksize < page_size. As a 3196 * Btrfs does not support blocksize < page_size. As a
3197 * result, btrfs_cmp_data() won't correctly handle 3197 * result, btrfs_cmp_data() won't correctly handle
@@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3891 * data immediately and not the previous data. 3891 * data immediately and not the previous data.
3892 */ 3892 */
3893 truncate_inode_pages_range(&inode->i_data, 3893 truncate_inode_pages_range(&inode->i_data,
3894 round_down(destoff, PAGE_CACHE_SIZE), 3894 round_down(destoff, PAGE_SIZE),
3895 round_up(destoff + len, PAGE_CACHE_SIZE) - 1); 3895 round_up(destoff + len, PAGE_SIZE) - 1);
3896out_unlock: 3896out_unlock:
3897 if (!same_inode) 3897 if (!same_inode)
3898 btrfs_double_inode_unlock(src, inode); 3898 btrfs_double_inode_unlock(src, inode);
@@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
4124 /* we generally have at most 6 or so space infos, one for each raid 4124 /* we generally have at most 6 or so space infos, one for each raid
4125 * level. So, a whole page should be more than enough for everyone 4125 * level. So, a whole page should be more than enough for everyone
4126 */ 4126 */
4127 if (alloc_size > PAGE_CACHE_SIZE) 4127 if (alloc_size > PAGE_SIZE)
4128 return -ENOMEM; 4128 return -ENOMEM;
4129 4129
4130 space_args.total_spaces = 0; 4130 space_args.total_spaces = 0;
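
Besides the rename, the ioctl.c section fixes the snapshot-source message to use the fs_info of the ioctl's own filesystem (src_inode belongs to a different one in that branch) and leaves the defrag indexing intact: file offsets become page indices, and work proceeds in clusters capped at 256 KiB. A userspace sketch of that indexing, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12  /* assumed: 4 KiB pages */
    #define SZ_256K (256 * 1024UL)

    int main(void)
    {
        uint64_t isize = 1UL << 20;  /* hypothetical 1 MiB file */
        unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;     /* 64 pages */
        unsigned long last_index = (isize - 1) >> PAGE_SHIFT;  /* 255 */

        printf("defrag page indices 0..%lu in clusters of up to %lu pages\n",
               last_index, max_cluster);
        return 0;
    }
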
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a2f051347731..1adfbe7be6b8 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void)
55 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
56 56
57 workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); 57 workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); 58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); 59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
60 if (!workspace->mem || !workspace->buf || !workspace->cbuf) 60 if (!workspace->mem || !workspace->buf || !workspace->cbuf)
61 goto fail; 61 goto fail;
62 62
@@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws,
116 *total_out = 0; 116 *total_out = 0;
117 *total_in = 0; 117 *total_in = 0;
118 118
119 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 119 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
120 data_in = kmap(in_page); 120 data_in = kmap(in_page);
121 121
122 /* 122 /*
@@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws,
133 tot_out = LZO_LEN; 133 tot_out = LZO_LEN;
134 pages[0] = out_page; 134 pages[0] = out_page;
135 nr_pages = 1; 135 nr_pages = 1;
136 pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; 136 pg_bytes_left = PAGE_SIZE - LZO_LEN;
137 137
138 /* compress at most one page of data each time */ 138 /* compress at most one page of data each time */
139 in_len = min(len, PAGE_CACHE_SIZE); 139 in_len = min(len, PAGE_SIZE);
140 while (tot_in < len) { 140 while (tot_in < len) {
141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, 141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
142 &out_len, workspace->mem); 142 &out_len, workspace->mem);
@@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws,
201 cpage_out = kmap(out_page); 201 cpage_out = kmap(out_page);
202 pages[nr_pages++] = out_page; 202 pages[nr_pages++] = out_page;
203 203
204 pg_bytes_left = PAGE_CACHE_SIZE; 204 pg_bytes_left = PAGE_SIZE;
205 out_offset = 0; 205 out_offset = 0;
206 } 206 }
207 } 207 }
@@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws,
221 221
222 bytes_left = len - tot_in; 222 bytes_left = len - tot_in;
223 kunmap(in_page); 223 kunmap(in_page);
224 page_cache_release(in_page); 224 put_page(in_page);
225 225
226 start += PAGE_CACHE_SIZE; 226 start += PAGE_SIZE;
227 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 227 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
228 data_in = kmap(in_page); 228 data_in = kmap(in_page);
229 in_len = min(bytes_left, PAGE_CACHE_SIZE); 229 in_len = min(bytes_left, PAGE_SIZE);
230 } 230 }
231 231
232 if (tot_out > tot_in) 232 if (tot_out > tot_in)
@@ -248,7 +248,7 @@ out:
248 248
249 if (in_page) { 249 if (in_page) {
250 kunmap(in_page); 250 kunmap(in_page);
251 page_cache_release(in_page); 251 put_page(in_page);
252 } 252 }
253 253
254 return ret; 254 return ret;
@@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
266 char *data_in; 266 char *data_in;
267 unsigned long page_in_index = 0; 267 unsigned long page_in_index = 0;
268 unsigned long page_out_index = 0; 268 unsigned long page_out_index = 0;
269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); 269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
270 unsigned long buf_start; 270 unsigned long buf_start;
271 unsigned long buf_offset = 0; 271 unsigned long buf_offset = 0;
272 unsigned long bytes; 272 unsigned long bytes;
@@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
289 tot_in = LZO_LEN; 289 tot_in = LZO_LEN;
290 in_offset = LZO_LEN; 290 in_offset = LZO_LEN;
291 tot_len = min_t(size_t, srclen, tot_len); 291 tot_len = min_t(size_t, srclen, tot_len);
292 in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; 292 in_page_bytes_left = PAGE_SIZE - LZO_LEN;
293 293
294 tot_out = 0; 294 tot_out = 0;
295 pg_offset = 0; 295 pg_offset = 0;
@@ -345,12 +345,12 @@ cont:
345 345
346 data_in = kmap(pages_in[++page_in_index]); 346 data_in = kmap(pages_in[++page_in_index]);
347 347
348 in_page_bytes_left = PAGE_CACHE_SIZE; 348 in_page_bytes_left = PAGE_SIZE;
349 in_offset = 0; 349 in_offset = 0;
350 } 350 }
351 } 351 }
352 352
353 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); 353 out_len = lzo1x_worst_compress(PAGE_SIZE);
354 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, 354 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
355 &out_len); 355 &out_len);
356 if (need_unmap) 356 if (need_unmap)
@@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
399 in_len = read_compress_length(data_in); 399 in_len = read_compress_length(data_in);
400 data_in += LZO_LEN; 400 data_in += LZO_LEN;
401 401
402 out_len = PAGE_CACHE_SIZE; 402 out_len = PAGE_SIZE;
403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); 403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
404 if (ret != LZO_E_OK) { 404 if (ret != LZO_E_OK) {
405 printk(KERN_WARNING "BTRFS: decompress failed!\n"); 405 printk(KERN_WARNING "BTRFS: decompress failed!\n");
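
The LZO workspace buffers are sized for the worst case of compressing a single page. A userspace sketch using the formula from the kernel's <linux/lzo.h>, with an assumed 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* assumed page size */
    /* Formula as defined in the kernel's <linux/lzo.h>. */
    #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)

    int main(void)
    {
        /* buf and cbuf in lzo_alloc_workspace() are both this large. */
        printf("buf/cbuf must hold %lu bytes per %lu-byte page\n",
               lzo1x_worst_compress(PAGE_SIZE), PAGE_SIZE);
        return 0;
    }
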
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5279fdae7142..9e119552ed32 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1463,6 +1463,7 @@ struct btrfs_qgroup_extent_record
1463 u64 bytenr = record->bytenr; 1463 u64 bytenr = record->bytenr;
1464 1464
1465 assert_spin_locked(&delayed_refs->lock); 1465 assert_spin_locked(&delayed_refs->lock);
1466 trace_btrfs_qgroup_insert_dirty_extent(record);
1466 1467
1467 while (*p) { 1468 while (*p) {
1468 parent_node = *p; 1469 parent_node = *p;
@@ -1594,6 +1595,9 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1594 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 1595 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1595 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 1596 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1596 1597
1598 trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
1599 cur_new_count);
1600
1597 /* Rfer update part */ 1601 /* Rfer update part */
1598 if (cur_old_count == 0 && cur_new_count > 0) { 1602 if (cur_old_count == 0 && cur_new_count > 0) {
1599 qg->rfer += num_bytes; 1603 qg->rfer += num_bytes;
@@ -1683,6 +1687,9 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1683 goto out_free; 1687 goto out_free;
1684 BUG_ON(!fs_info->quota_root); 1688 BUG_ON(!fs_info->quota_root);
1685 1689
1690 trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
1691 nr_new_roots);
1692
1686 qgroups = ulist_alloc(GFP_NOFS); 1693 qgroups = ulist_alloc(GFP_NOFS);
1687 if (!qgroups) { 1694 if (!qgroups) {
1688 ret = -ENOMEM; 1695 ret = -ENOMEM;
@@ -1752,6 +1759,8 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
1752 record = rb_entry(node, struct btrfs_qgroup_extent_record, 1759 record = rb_entry(node, struct btrfs_qgroup_extent_record,
1753 node); 1760 node);
1754 1761
1762 trace_btrfs_qgroup_account_extents(record);
1763
1755 if (!ret) { 1764 if (!ret) {
1756 /* 1765 /*
1757 * Use (u64)-1 as time_seq to do special search, which 1766 * Use (u64)-1 as time_seq to do special search, which
@@ -1842,8 +1851,10 @@ out:
1842} 1851}
1843 1852
1844/* 1853/*
1845 * copy the accounting information between qgroups. This is necessary when a 1854 * Copy the accounting information between qgroups. This is necessary
1846 * snapshot or a subvolume is created 1855 * when a snapshot or a subvolume is created. Throwing an error will
1856 * cause a transaction abort so we take extra care here to only error
1857 * when a readonly fs is a reasonable outcome.
1847 */ 1858 */
1848int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, 1859int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1849 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, 1860 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
@@ -1873,15 +1884,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1873 2 * inherit->num_excl_copies; 1884 2 * inherit->num_excl_copies;
1874 for (i = 0; i < nums; ++i) { 1885 for (i = 0; i < nums; ++i) {
1875 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 1886 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1876 if (!srcgroup) {
1877 ret = -EINVAL;
1878 goto out;
1879 }
1880 1887
1881 if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) { 1888 /*
1882 ret = -EINVAL; 1889 * Zero out invalid groups so we can ignore
1883 goto out; 1890 * them later.
1884 } 1891 */
1892 if (!srcgroup ||
1893 ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
1894 *i_qgroups = 0ULL;
1895
1885 ++i_qgroups; 1896 ++i_qgroups;
1886 } 1897 }
1887 } 1898 }
@@ -1916,17 +1927,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1916 */ 1927 */
1917 if (inherit) { 1928 if (inherit) {
1918 i_qgroups = (u64 *)(inherit + 1); 1929 i_qgroups = (u64 *)(inherit + 1);
1919 for (i = 0; i < inherit->num_qgroups; ++i) { 1930 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
1931 if (*i_qgroups == 0)
1932 continue;
1920 ret = add_qgroup_relation_item(trans, quota_root, 1933 ret = add_qgroup_relation_item(trans, quota_root,
1921 objectid, *i_qgroups); 1934 objectid, *i_qgroups);
1922 if (ret) 1935 if (ret && ret != -EEXIST)
1923 goto out; 1936 goto out;
1924 ret = add_qgroup_relation_item(trans, quota_root, 1937 ret = add_qgroup_relation_item(trans, quota_root,
1925 *i_qgroups, objectid); 1938 *i_qgroups, objectid);
1926 if (ret) 1939 if (ret && ret != -EEXIST)
1927 goto out; 1940 goto out;
1928 ++i_qgroups;
1929 } 1941 }
1942 ret = 0;
1930 } 1943 }
1931 1944
1932 1945
@@ -1987,17 +2000,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1987 2000
1988 i_qgroups = (u64 *)(inherit + 1); 2001 i_qgroups = (u64 *)(inherit + 1);
1989 for (i = 0; i < inherit->num_qgroups; ++i) { 2002 for (i = 0; i < inherit->num_qgroups; ++i) {
1990 ret = add_relation_rb(quota_root->fs_info, objectid, 2003 if (*i_qgroups) {
1991 *i_qgroups); 2004 ret = add_relation_rb(quota_root->fs_info, objectid,
1992 if (ret) 2005 *i_qgroups);
1993 goto unlock; 2006 if (ret)
2007 goto unlock;
2008 }
1994 ++i_qgroups; 2009 ++i_qgroups;
1995 } 2010 }
1996 2011
1997 for (i = 0; i < inherit->num_ref_copies; ++i) { 2012 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
1998 struct btrfs_qgroup *src; 2013 struct btrfs_qgroup *src;
1999 struct btrfs_qgroup *dst; 2014 struct btrfs_qgroup *dst;
2000 2015
2016 if (!i_qgroups[0] || !i_qgroups[1])
2017 continue;
2018
2001 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2019 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2002 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2020 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2003 2021
@@ -2008,12 +2026,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2008 2026
2009 dst->rfer = src->rfer - level_size; 2027 dst->rfer = src->rfer - level_size;
2010 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2028 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2011 i_qgroups += 2;
2012 } 2029 }
2013 for (i = 0; i < inherit->num_excl_copies; ++i) { 2030 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2014 struct btrfs_qgroup *src; 2031 struct btrfs_qgroup *src;
2015 struct btrfs_qgroup *dst; 2032 struct btrfs_qgroup *dst;
2016 2033
2034 if (!i_qgroups[0] || !i_qgroups[1])
2035 continue;
2036
2017 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2037 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2018 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2038 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2019 2039
@@ -2024,7 +2044,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2024 2044
2025 dst->excl = src->excl + level_size; 2045 dst->excl = src->excl + level_size;
2026 dst->excl_cmpr = src->excl_cmpr + level_size; 2046 dst->excl_cmpr = src->excl_cmpr + level_size;
2027 i_qgroups += 2;
2028 } 2047 }
2029 2048
2030unlock: 2049unlock:
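
The qgroup.c changes do more than add tracepoints: btrfs_qgroup_inherit() no longer aborts the transaction on a stale source qgroup. Invalid ids are zeroed in place in the inherit array during validation (the patch uses 0 as an impossible qgroup id), every later loop skips zero entries, and -EEXIST from add_qgroup_relation_item() is tolerated because the relation already existing is the desired end state. A userspace sketch of the zero-sentinel pattern, with a hypothetical lookup:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical lookup standing in for find_qgroup_rb(). */
    static bool qgroup_exists(uint64_t id) { return id != 42; }

    int main(void)
    {
        uint64_t ids[] = { 7, 42, 9 };
        size_t i, n = sizeof(ids) / sizeof(ids[0]);

        for (i = 0; i < n; i++)      /* validation pass */
            if (!qgroup_exists(ids[i]))
                ids[i] = 0;          /* mark as invalid, do not fail */

        for (i = 0; i < n; i++) {    /* consumer pass */
            if (!ids[i])
                continue;            /* skip invalidated entries */
            printf("inherit from qgroup %llu\n", (unsigned long long)ids[i]);
        }
        return 0;
    }
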
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 55161369fab1..0b7792e02dd5 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
270 s = kmap(rbio->bio_pages[i]); 270 s = kmap(rbio->bio_pages[i]);
271 d = kmap(rbio->stripe_pages[i]); 271 d = kmap(rbio->stripe_pages[i]);
272 272
273 memcpy(d, s, PAGE_CACHE_SIZE); 273 memcpy(d, s, PAGE_SIZE);
274 274
275 kunmap(rbio->bio_pages[i]); 275 kunmap(rbio->bio_pages[i]);
276 kunmap(rbio->stripe_pages[i]); 276 kunmap(rbio->stripe_pages[i]);
@@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
962 */ 962 */
963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) 963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
964{ 964{
965 return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes; 965 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
966} 966}
967 967
968/* 968/*
@@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1078 u64 disk_start; 1078 u64 disk_start;
1079 1079
1080 stripe = &rbio->bbio->stripes[stripe_nr]; 1080 stripe = &rbio->bbio->stripes[stripe_nr];
1081 disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); 1081 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1082 1082
1083 /* if the device is missing, just fail this stripe */ 1083 /* if the device is missing, just fail this stripe */
1084 if (!stripe->dev->bdev) 1084 if (!stripe->dev->bdev)
@@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1096 if (last_end == disk_start && stripe->dev->bdev && 1096 if (last_end == disk_start && stripe->dev->bdev &&
1097 !last->bi_error && 1097 !last->bi_error &&
1098 last->bi_bdev == stripe->dev->bdev) { 1098 last->bi_bdev == stripe->dev->bdev) {
1099 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); 1099 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1100 if (ret == PAGE_CACHE_SIZE) 1100 if (ret == PAGE_SIZE)
1101 return 0; 1101 return 0;
1102 } 1102 }
1103 } 1103 }
@@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1111 bio->bi_bdev = stripe->dev->bdev; 1111 bio->bi_bdev = stripe->dev->bdev;
1112 bio->bi_iter.bi_sector = disk_start >> 9; 1112 bio->bi_iter.bi_sector = disk_start >> 9;
1113 1113
1114 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 1114 bio_add_page(bio, page, PAGE_SIZE, 0);
1115 bio_list_add(bio_list, bio); 1115 bio_list_add(bio_list, bio);
1116 return 0; 1116 return 0;
1117} 1117}
@@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1154 bio_list_for_each(bio, &rbio->bio_list) { 1154 bio_list_for_each(bio, &rbio->bio_list) {
1155 start = (u64)bio->bi_iter.bi_sector << 9; 1155 start = (u64)bio->bi_iter.bi_sector << 9;
1156 stripe_offset = start - rbio->bbio->raid_map[0]; 1156 stripe_offset = start - rbio->bbio->raid_map[0];
1157 page_index = stripe_offset >> PAGE_CACHE_SHIFT; 1157 page_index = stripe_offset >> PAGE_SHIFT;
1158 1158
1159 for (i = 0; i < bio->bi_vcnt; i++) { 1159 for (i = 0; i < bio->bi_vcnt; i++) {
1160 p = bio->bi_io_vec[i].bv_page; 1160 p = bio->bi_io_vec[i].bv_page;
@@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1253 } else { 1253 } else {
1254 /* raid5 */ 1254 /* raid5 */
1255 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 1255 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1256 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 1256 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1257 } 1257 }
1258 1258
1259 1259
@@ -1914,7 +1914,7 @@ pstripe:
1914 /* Copy parity block into failed block to start with */ 1914 /* Copy parity block into failed block to start with */
1915 memcpy(pointers[faila], 1915 memcpy(pointers[faila],
1916 pointers[rbio->nr_data], 1916 pointers[rbio->nr_data],
1917 PAGE_CACHE_SIZE); 1917 PAGE_SIZE);
1918 1918
1919 /* rearrange the pointer array */ 1919 /* rearrange the pointer array */
1920 p = pointers[faila]; 1920 p = pointers[faila];
@@ -1923,7 +1923,7 @@ pstripe:
1923 pointers[rbio->nr_data - 1] = p; 1923 pointers[rbio->nr_data - 1] = p;
1924 1924
1925 /* xor in the rest */ 1925 /* xor in the rest */
1926 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); 1926 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1927 } 1927 }
1928 /* if we're doing this rebuild as part of an rmw, go through 1928 /* if we're doing this rebuild as part of an rmw, go through
1929 * and set all of our private rbio pages in the 1929 * and set all of our private rbio pages in the
@@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + 2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2251 rbio->stripe_len * rbio->nr_data); 2251 rbio->stripe_len * rbio->nr_data);
2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); 2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2253 index = stripe_offset >> PAGE_CACHE_SHIFT; 2253 index = stripe_offset >> PAGE_SHIFT;
2254 rbio->bio_pages[index] = page; 2254 rbio->bio_pages[index] = page;
2255} 2255}
2256 2256
@@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2365 } else { 2365 } else {
2366 /* raid5 */ 2366 /* raid5 */
2367 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 2367 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2368 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 2368 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2369 } 2369 }
2370 2370
2371 /* Check scrubbing parity and repair it */ 2371 /* Check scrubbing parity and repair it */
2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); 2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2373 parity = kmap(p); 2373 parity = kmap(p);
2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) 2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2375 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); 2375 memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2376 else 2376 else
2377 /* Parity is right, no writeback needed */ 2377 /* Parity is right, no writeback needed */
2378 bitmap_clear(rbio->dbitmap, pagenr, 1); 2378 bitmap_clear(rbio->dbitmap, pagenr, 1);
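
Most hunks in raid56.c, and in the files that follow, are one mechanical rename: at this point in the tree PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK were plain aliases for PAGE_SIZE, PAGE_SHIFT and PAGE_MASK, and page_cache_release() an alias for put_page(), so substituting the base names changes no behavior. Roughly what include/linux/pagemap.h carried before this cleanup (reproduced from memory, so treat as illustrative):

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define page_cache_release(page)	put_page(page)
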
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index b892914968c1..298631eaee78 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
226 /* find extent */ 226 /* find extent */
227 spin_lock(&fs_info->reada_lock); 227 spin_lock(&fs_info->reada_lock);
228 re = radix_tree_lookup(&fs_info->reada_tree, 228 re = radix_tree_lookup(&fs_info->reada_tree,
229 start >> PAGE_CACHE_SHIFT); 229 start >> PAGE_SHIFT);
230 if (re) 230 if (re)
231 re->refcnt++; 231 re->refcnt++;
232 spin_unlock(&fs_info->reada_lock); 232 spin_unlock(&fs_info->reada_lock);
@@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
257 zone = NULL; 257 zone = NULL;
258 spin_lock(&fs_info->reada_lock); 258 spin_lock(&fs_info->reada_lock);
259 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 259 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
260 logical >> PAGE_CACHE_SHIFT, 1); 260 logical >> PAGE_SHIFT, 1);
261 if (ret == 1 && logical >= zone->start && logical <= zone->end) { 261 if (ret == 1 && logical >= zone->start && logical <= zone->end) {
262 kref_get(&zone->refcnt); 262 kref_get(&zone->refcnt);
263 spin_unlock(&fs_info->reada_lock); 263 spin_unlock(&fs_info->reada_lock);
@@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
294 294
295 spin_lock(&fs_info->reada_lock); 295 spin_lock(&fs_info->reada_lock);
296 ret = radix_tree_insert(&dev->reada_zones, 296 ret = radix_tree_insert(&dev->reada_zones,
297 (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), 297 (unsigned long)(zone->end >> PAGE_SHIFT),
298 zone); 298 zone);
299 299
300 if (ret == -EEXIST) { 300 if (ret == -EEXIST) {
301 kfree(zone); 301 kfree(zone);
302 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 302 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
303 logical >> PAGE_CACHE_SHIFT, 1); 303 logical >> PAGE_SHIFT, 1);
304 if (ret == 1 && logical >= zone->start && logical <= zone->end) 304 if (ret == 1 && logical >= zone->start && logical <= zone->end)
305 kref_get(&zone->refcnt); 305 kref_get(&zone->refcnt);
306 else 306 else
@@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
326 u64 length; 326 u64 length;
327 int real_stripes; 327 int real_stripes;
328 int nzones = 0; 328 int nzones = 0;
329 unsigned long index = logical >> PAGE_CACHE_SHIFT; 329 unsigned long index = logical >> PAGE_SHIFT;
330 int dev_replace_is_ongoing; 330 int dev_replace_is_ongoing;
331 int have_zone = 0; 331 int have_zone = 0;
332 332
@@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
495 struct reada_extent *re) 495 struct reada_extent *re)
496{ 496{
497 int i; 497 int i;
498 unsigned long index = re->logical >> PAGE_CACHE_SHIFT; 498 unsigned long index = re->logical >> PAGE_SHIFT;
499 499
500 spin_lock(&fs_info->reada_lock); 500 spin_lock(&fs_info->reada_lock);
501 if (--re->refcnt) { 501 if (--re->refcnt) {
@@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref)
538 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt); 538 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
539 539
540 radix_tree_delete(&zone->device->reada_zones, 540 radix_tree_delete(&zone->device->reada_zones,
541 zone->end >> PAGE_CACHE_SHIFT); 541 zone->end >> PAGE_SHIFT);
542 542
543 kfree(zone); 543 kfree(zone);
544} 544}
@@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
587static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock) 587static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
588{ 588{
589 int i; 589 int i;
590 unsigned long index = zone->end >> PAGE_CACHE_SHIFT; 590 unsigned long index = zone->end >> PAGE_SHIFT;
591 591
592 for (i = 0; i < zone->ndevs; ++i) { 592 for (i = 0; i < zone->ndevs; ++i) {
593 struct reada_zone *peer; 593 struct reada_zone *peer;
@@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
622 (void **)&zone, index, 1); 622 (void **)&zone, index, 1);
623 if (ret == 0) 623 if (ret == 0)
624 break; 624 break;
625 index = (zone->end >> PAGE_CACHE_SHIFT) + 1; 625 index = (zone->end >> PAGE_SHIFT) + 1;
626 if (zone->locked) { 626 if (zone->locked) {
627 if (zone->elems > top_locked_elems) { 627 if (zone->elems > top_locked_elems) {
628 top_locked_elems = zone->elems; 628 top_locked_elems = zone->elems;
@@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
673 * plugging to speed things up 673 * plugging to speed things up
674 */ 674 */
675 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 675 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
676 dev->reada_next >> PAGE_CACHE_SHIFT, 1); 676 dev->reada_next >> PAGE_SHIFT, 1);
677 if (ret == 0 || re->logical > dev->reada_curr_zone->end) { 677 if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
678 ret = reada_pick_zone(dev); 678 ret = reada_pick_zone(dev);
679 if (!ret) { 679 if (!ret) {
@@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
682 } 682 }
683 re = NULL; 683 re = NULL;
684 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 684 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
685 dev->reada_next >> PAGE_CACHE_SHIFT, 1); 685 dev->reada_next >> PAGE_SHIFT, 1);
686 } 686 }
687 if (ret == 0) { 687 if (ret == 0) {
688 spin_unlock(&fs_info->reada_lock); 688 spin_unlock(&fs_info->reada_lock);
@@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
838 printk(KERN_CONT " curr off %llu", 838 printk(KERN_CONT " curr off %llu",
839 device->reada_next - zone->start); 839 device->reada_next - zone->start);
840 printk(KERN_CONT "\n"); 840 printk(KERN_CONT "\n");
841 index = (zone->end >> PAGE_CACHE_SHIFT) + 1; 841 index = (zone->end >> PAGE_SHIFT) + 1;
842 } 842 }
843 cnt = 0; 843 cnt = 0;
844 index = 0; 844 index = 0;
@@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
864 } 864 }
865 } 865 }
866 printk(KERN_CONT "\n"); 866 printk(KERN_CONT "\n");
867 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 867 index = (re->logical >> PAGE_SHIFT) + 1;
868 if (++cnt > 15) 868 if (++cnt > 15)
869 break; 869 break;
870 } 870 }
@@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
880 if (ret == 0) 880 if (ret == 0)
881 break; 881 break;
882 if (!re->scheduled) { 882 if (!re->scheduled) {
883 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 883 index = (re->logical >> PAGE_SHIFT) + 1;
884 continue; 884 continue;
885 } 885 }
886 printk(KERN_DEBUG 886 printk(KERN_DEBUG
@@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
897 } 897 }
898 } 898 }
899 printk(KERN_CONT "\n"); 899 printk(KERN_CONT "\n");
900 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 900 index = (re->logical >> PAGE_SHIFT) + 1;
901 } 901 }
902 spin_unlock(&fs_info->reada_lock); 902 spin_unlock(&fs_info->reada_lock);
903} 903}
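
The reada.c changes keep the long-standing keying convention for both radix trees: a zone is inserted and deleted under the page index of its end offset, while an extent is keyed by the page index of its logical start; only the macro spelling changes. A small sketch of the key math, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages for the example */

int main(void)
{
	unsigned long long zone_end = 0x1fffff;	/* last byte of a zone */
	unsigned long long logical  = 0x100000;	/* start of an extent  */

	/* Same math as reada_zone_release() / reada_find_extent(). */
	printf("zone key   %llu\n", zone_end >> PAGE_SHIFT);
	printf("extent key %llu\n", logical >> PAGE_SHIFT);
	return 0;
}
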
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2bd0011450df..08ef890deca6 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1850,6 +1850,7 @@ again:
1850 eb = read_tree_block(dest, old_bytenr, old_ptr_gen); 1850 eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
1851 if (IS_ERR(eb)) { 1851 if (IS_ERR(eb)) {
1852 ret = PTR_ERR(eb); 1852 ret = PTR_ERR(eb);
1853 break;
1853 } else if (!extent_buffer_uptodate(eb)) { 1854 } else if (!extent_buffer_uptodate(eb)) {
1854 ret = -EIO; 1855 ret = -EIO;
1855 free_extent_buffer(eb); 1856 free_extent_buffer(eb);
@@ -3129,10 +3130,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
3129 if (ret) 3130 if (ret)
3130 goto out; 3131 goto out;
3131 3132
3132 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 3133 index = (cluster->start - offset) >> PAGE_SHIFT;
3133 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 3134 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3134 while (index <= last_index) { 3135 while (index <= last_index) {
3135 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 3136 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
3136 if (ret) 3137 if (ret)
3137 goto out; 3138 goto out;
3138 3139
@@ -3145,7 +3146,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3145 mask); 3146 mask);
3146 if (!page) { 3147 if (!page) {
3147 btrfs_delalloc_release_metadata(inode, 3148 btrfs_delalloc_release_metadata(inode,
3148 PAGE_CACHE_SIZE); 3149 PAGE_SIZE);
3149 ret = -ENOMEM; 3150 ret = -ENOMEM;
3150 goto out; 3151 goto out;
3151 } 3152 }
@@ -3162,16 +3163,16 @@ static int relocate_file_extent_cluster(struct inode *inode,
3162 lock_page(page); 3163 lock_page(page);
3163 if (!PageUptodate(page)) { 3164 if (!PageUptodate(page)) {
3164 unlock_page(page); 3165 unlock_page(page);
3165 page_cache_release(page); 3166 put_page(page);
3166 btrfs_delalloc_release_metadata(inode, 3167 btrfs_delalloc_release_metadata(inode,
3167 PAGE_CACHE_SIZE); 3168 PAGE_SIZE);
3168 ret = -EIO; 3169 ret = -EIO;
3169 goto out; 3170 goto out;
3170 } 3171 }
3171 } 3172 }
3172 3173
3173 page_start = page_offset(page); 3174 page_start = page_offset(page);
3174 page_end = page_start + PAGE_CACHE_SIZE - 1; 3175 page_end = page_start + PAGE_SIZE - 1;
3175 3176
3176 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3177 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3177 3178
@@ -3191,7 +3192,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3191 unlock_extent(&BTRFS_I(inode)->io_tree, 3192 unlock_extent(&BTRFS_I(inode)->io_tree,
3192 page_start, page_end); 3193 page_start, page_end);
3193 unlock_page(page); 3194 unlock_page(page);
3194 page_cache_release(page); 3195 put_page(page);
3195 3196
3196 index++; 3197 index++;
3197 balance_dirty_pages_ratelimited(inode->i_mapping); 3198 balance_dirty_pages_ratelimited(inode->i_mapping);
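
Besides the rename, the relocation.c hunk adds a missing `break` after `ret = PTR_ERR(eb)`: read_tree_block() reports failure by encoding a negative errno in the returned pointer, and without the break the loop continued with an invalid buffer. A userspace re-creation of the kernel's ERR_PTR convention, for illustration only:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Small negative errnos are smuggled through the pointer value; any
 * pointer in the top MAX_ERRNO bytes of the address space is an error.
 */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *eb = ERR_PTR(-EIO);	/* a failed read_tree_block() */

	if (IS_ERR(eb)) {
		printf("error %ld: break out, don't use eb\n", PTR_ERR(eb));
		return 1;
	}
	return 0;
}
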
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 39dbdcbf4d13..4678f03e878e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -703,7 +703,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
703 if (IS_ERR(inode)) 703 if (IS_ERR(inode))
704 return PTR_ERR(inode); 704 return PTR_ERR(inode);
705 705
706 index = offset >> PAGE_CACHE_SHIFT; 706 index = offset >> PAGE_SHIFT;
707 707
708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
709 if (!page) { 709 if (!page) {
@@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1636 if (spage->io_error) { 1636 if (spage->io_error) {
1637 void *mapped_buffer = kmap_atomic(spage->page); 1637 void *mapped_buffer = kmap_atomic(spage->page);
1638 1638
1639 memset(mapped_buffer, 0, PAGE_CACHE_SIZE); 1639 memset(mapped_buffer, 0, PAGE_SIZE);
1640 flush_dcache_page(spage->page); 1640 flush_dcache_page(spage->page);
1641 kunmap_atomic(mapped_buffer); 1641 kunmap_atomic(mapped_buffer);
1642 } 1642 }
@@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4294 goto out; 4294 goto out;
4295 } 4295 }
4296 4296
4297 while (len >= PAGE_CACHE_SIZE) { 4297 while (len >= PAGE_SIZE) {
4298 index = offset >> PAGE_CACHE_SHIFT; 4298 index = offset >> PAGE_SHIFT;
4299again: 4299again:
4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4301 if (!page) { 4301 if (!page) {
@@ -4326,7 +4326,7 @@ again:
4326 */ 4326 */
4327 if (page->mapping != inode->i_mapping) { 4327 if (page->mapping != inode->i_mapping) {
4328 unlock_page(page); 4328 unlock_page(page);
4329 page_cache_release(page); 4329 put_page(page);
4330 goto again; 4330 goto again;
4331 } 4331 }
4332 if (!PageUptodate(page)) { 4332 if (!PageUptodate(page)) {
@@ -4348,15 +4348,15 @@ again:
4348 ret = err; 4348 ret = err;
4349next_page: 4349next_page:
4350 unlock_page(page); 4350 unlock_page(page);
4351 page_cache_release(page); 4351 put_page(page);
4352 4352
4353 if (ret) 4353 if (ret)
4354 break; 4354 break;
4355 4355
4356 offset += PAGE_CACHE_SIZE; 4356 offset += PAGE_SIZE;
4357 physical_for_dev_replace += PAGE_CACHE_SIZE; 4357 physical_for_dev_replace += PAGE_SIZE;
4358 nocow_ctx_logical += PAGE_CACHE_SIZE; 4358 nocow_ctx_logical += PAGE_SIZE;
4359 len -= PAGE_CACHE_SIZE; 4359 len -= PAGE_SIZE;
4360 } 4360 }
4361 ret = COPY_COMPLETE; 4361 ret = COPY_COMPLETE;
4362out: 4362out:
@@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
4390 bio->bi_iter.bi_size = 0; 4390 bio->bi_iter.bi_size = 0;
4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; 4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4392 bio->bi_bdev = dev->bdev; 4392 bio->bi_bdev = dev->bdev;
4393 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 4393 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4394 if (ret != PAGE_CACHE_SIZE) { 4394 if (ret != PAGE_SIZE) {
4395leave_with_eio: 4395leave_with_eio:
4396 bio_put(bio); 4396 bio_put(bio);
4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
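
The size checks around bio_add_page() in scrub.c follow the same rename; the function returns the number of bytes it actually appended, so adding a whole page succeeded only when the return value equals PAGE_SIZE, which is exactly what write_page_nocow() tests above. The same "partial add means failure" idiom, sketched against a plain bounded buffer:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the example */

/* Toy stand-in for bio_add_page(): returns how many bytes fit, which
 * can be less than requested when the buffer is nearly full.
 */
static size_t buf_add(char *buf, size_t *used, size_t cap,
		      const char *data, size_t len)
{
	size_t room = cap - *used;
	size_t n = len < room ? len : room;

	memcpy(buf + *used, data, n);
	*used += n;
	return n;
}

int main(void)
{
	static char buf[PAGE_SIZE + 100];
	static const char page[PAGE_SIZE];
	size_t used = PAGE_SIZE;	/* buffer already nearly full */

	if (buf_add(buf, &used, sizeof(buf), page, PAGE_SIZE) != PAGE_SIZE)
		printf("partial add: treat as failure and start a new bio\n");
	return 0;
}
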
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 19b7bf4284ee..8d358c547c59 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4449 struct page *page; 4449 struct page *page;
4450 char *addr; 4450 char *addr;
4451 struct btrfs_key key; 4451 struct btrfs_key key;
4452 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 4452 pgoff_t index = offset >> PAGE_SHIFT;
4453 pgoff_t last_index; 4453 pgoff_t last_index;
4454 unsigned pg_offset = offset & ~PAGE_CACHE_MASK; 4454 unsigned pg_offset = offset & ~PAGE_MASK;
4455 ssize_t ret = 0; 4455 ssize_t ret = 0;
4456 4456
4457 key.objectid = sctx->cur_ino; 4457 key.objectid = sctx->cur_ino;
@@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4471 if (len == 0) 4471 if (len == 0)
4472 goto out; 4472 goto out;
4473 4473
4474 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; 4474 last_index = (offset + len - 1) >> PAGE_SHIFT;
4475 4475
4476 /* initial readahead */ 4476 /* initial readahead */
4477 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); 4477 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4481 4481
4482 while (index <= last_index) { 4482 while (index <= last_index) {
4483 unsigned cur_len = min_t(unsigned, len, 4483 unsigned cur_len = min_t(unsigned, len,
4484 PAGE_CACHE_SIZE - pg_offset); 4484 PAGE_SIZE - pg_offset);
4485 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); 4485 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
4486 if (!page) { 4486 if (!page) {
4487 ret = -ENOMEM; 4487 ret = -ENOMEM;
@@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4493 lock_page(page); 4493 lock_page(page);
4494 if (!PageUptodate(page)) { 4494 if (!PageUptodate(page)) {
4495 unlock_page(page); 4495 unlock_page(page);
4496 page_cache_release(page); 4496 put_page(page);
4497 ret = -EIO; 4497 ret = -EIO;
4498 break; 4498 break;
4499 } 4499 }
@@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4503 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); 4503 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4504 kunmap(page); 4504 kunmap(page);
4505 unlock_page(page); 4505 unlock_page(page);
4506 page_cache_release(page); 4506 put_page(page);
4507 index++; 4507 index++;
4508 pg_offset = 0; 4508 pg_offset = 0;
4509 len -= cur_len; 4509 len -= cur_len;
@@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx,
4804 type = btrfs_file_extent_type(leaf, ei); 4804 type = btrfs_file_extent_type(leaf, ei);
4805 if (type == BTRFS_FILE_EXTENT_INLINE) { 4805 if (type == BTRFS_FILE_EXTENT_INLINE) {
4806 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei); 4806 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
4807 ext_len = PAGE_CACHE_ALIGN(ext_len); 4807 ext_len = PAGE_ALIGN(ext_len);
4808 } else { 4808 } else {
4809 ext_len = btrfs_file_extent_num_bytes(leaf, ei); 4809 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
4810 } 4810 }
@@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
4886 * but there may be items after this page. Make 4886 * but there may be items after this page. Make
4887 * sure to send the whole thing 4887 * sure to send the whole thing
4888 */ 4888 */
4889 len = PAGE_CACHE_ALIGN(len); 4889 len = PAGE_ALIGN(len);
4890 } else { 4890 } else {
4891 len = btrfs_file_extent_num_bytes(path->nodes[0], ei); 4891 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
4892 } 4892 }
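
fill_read_buf() in send.c splits a byte offset into a page index and an offset within that page, then copies at most one page per loop iteration. The detail worth spelling out is the mask: because PAGE_MASK keeps the page-aligned bits, the in-page offset is `offset & ~PAGE_MASK`, not `offset & PAGE_MASK`. A sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))	/* keeps the aligned part */

int main(void)
{
	unsigned long long offset = 0x12345;
	unsigned long long index = offset >> PAGE_SHIFT;  /* which page  */
	unsigned long pg_offset = offset & ~PAGE_MASK;	  /* where in it */

	printf("offset 0x%llx -> page %llu, in-page offset 0x%lx\n",
	       offset, index, pg_offset);
	return 0;
}
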
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b0721..e05619f241be 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
66 \ 66 \
67 if (token && token->kaddr && token->offset <= offset && \ 67 if (token && token->kaddr && token->offset <= offset && \
68 token->eb == eb && \ 68 token->eb == eb && \
69 (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ 69 (token->offset + PAGE_SIZE >= offset + size)) { \
70 kaddr = token->kaddr; \ 70 kaddr = token->kaddr; \
71 p = kaddr + part_offset - token->offset; \ 71 p = kaddr + part_offset - token->offset; \
72 res = get_unaligned_le##bits(p + off); \ 72 res = get_unaligned_le##bits(p + off); \
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
104 \ 104 \
105 if (token && token->kaddr && token->offset <= offset && \ 105 if (token && token->kaddr && token->offset <= offset && \
106 token->eb == eb && \ 106 token->eb == eb && \
107 (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ 107 (token->offset + PAGE_SIZE >= offset + size)) { \
108 kaddr = token->kaddr; \ 108 kaddr = token->kaddr; \
109 p = kaddr + part_offset - token->offset; \ 109 p = kaddr + part_offset - token->offset; \
110 put_unaligned_le##bits(val, p + off); \ 110 put_unaligned_le##bits(val, p + off); \
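
The struct-funcs.c macros cache the most recent kmap()ed extent-buffer page in a token and reuse that mapping only when the requested field lies entirely inside it; since a mapping covers exactly one page, the bound in the condition is PAGE_SIZE. The reuse test, pulled out as a standalone predicate (a sketch, not the kernel macro):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Reuse the cached mapping at token_offset (one page long) only if the
 * whole [offset, offset + size) range fits inside it.
 */
static bool token_covers(unsigned long token_offset, unsigned long offset,
			 size_t size, unsigned long page_size)
{
	return token_offset <= offset &&
	       token_offset + page_size >= offset + size;
}

int main(void)
{
	printf("%d\n", token_covers(4096, 8180, 8, 4096));  /* 1: fits    */
	printf("%d\n", token_covers(4096, 8190, 8, 4096));  /* 0: crosses */
	return 0;
}
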
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 669b58201e36..70948b13bc81 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
32{ 32{
33 int ret; 33 int ret;
34 struct page *pages[16]; 34 struct page *pages[16];
35 unsigned long index = start >> PAGE_CACHE_SHIFT; 35 unsigned long index = start >> PAGE_SHIFT;
36 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 36 unsigned long end_index = end >> PAGE_SHIFT;
37 unsigned long nr_pages = end_index - index + 1; 37 unsigned long nr_pages = end_index - index + 1;
38 int i; 38 int i;
39 int count = 0; 39 int count = 0;
@@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
49 count++; 49 count++;
50 if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) 50 if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
51 unlock_page(pages[i]); 51 unlock_page(pages[i]);
52 page_cache_release(pages[i]); 52 put_page(pages[i]);
53 if (flags & PROCESS_RELEASE) 53 if (flags & PROCESS_RELEASE)
54 page_cache_release(pages[i]); 54 put_page(pages[i]);
55 } 55 }
56 nr_pages -= ret; 56 nr_pages -= ret;
57 index += ret; 57 index += ret;
@@ -93,7 +93,7 @@ static int test_find_delalloc(void)
93 * everything to make sure our pages don't get evicted and screw up our 93 * everything to make sure our pages don't get evicted and screw up our
94 * test. 94 * test.
95 */ 95 */
96 for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) { 96 for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
97 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); 97 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
98 if (!page) { 98 if (!page) {
99 test_msg("Failed to allocate test page\n"); 99 test_msg("Failed to allocate test page\n");
@@ -104,7 +104,7 @@ static int test_find_delalloc(void)
104 if (index) { 104 if (index) {
105 unlock_page(page); 105 unlock_page(page);
106 } else { 106 } else {
107 page_cache_get(page); 107 get_page(page);
108 locked_page = page; 108 locked_page = page;
109 } 109 }
110 } 110 }
@@ -129,7 +129,7 @@ static int test_find_delalloc(void)
129 } 129 }
130 unlock_extent(&tmp, start, end); 130 unlock_extent(&tmp, start, end);
131 unlock_page(locked_page); 131 unlock_page(locked_page);
132 page_cache_release(locked_page); 132 put_page(locked_page);
133 133
134 /* 134 /*
135 * Test this scenario 135 * Test this scenario
@@ -139,7 +139,7 @@ static int test_find_delalloc(void)
139 */ 139 */
140 test_start = SZ_64M; 140 test_start = SZ_64M;
141 locked_page = find_lock_page(inode->i_mapping, 141 locked_page = find_lock_page(inode->i_mapping,
142 test_start >> PAGE_CACHE_SHIFT); 142 test_start >> PAGE_SHIFT);
143 if (!locked_page) { 143 if (!locked_page) {
144 test_msg("Couldn't find the locked page\n"); 144 test_msg("Couldn't find the locked page\n");
145 goto out_bits; 145 goto out_bits;
@@ -165,7 +165,7 @@ static int test_find_delalloc(void)
165 } 165 }
166 unlock_extent(&tmp, start, end); 166 unlock_extent(&tmp, start, end);
167 /* locked_page was unlocked above */ 167 /* locked_page was unlocked above */
168 page_cache_release(locked_page); 168 put_page(locked_page);
169 169
170 /* 170 /*
171 * Test this scenario 171 * Test this scenario
@@ -174,7 +174,7 @@ static int test_find_delalloc(void)
174 */ 174 */
175 test_start = max_bytes + 4096; 175 test_start = max_bytes + 4096;
176 locked_page = find_lock_page(inode->i_mapping, test_start >> 176 locked_page = find_lock_page(inode->i_mapping, test_start >>
177 PAGE_CACHE_SHIFT); 177 PAGE_SHIFT);
178 if (!locked_page) { 178 if (!locked_page) {
179 test_msg("Couldn't find the locked page\n"); 179 test_msg("Couldn't find the locked page\n");
180 goto out_bits; 180 goto out_bits;
@@ -225,13 +225,13 @@ static int test_find_delalloc(void)
225 * range we want to find. 225 * range we want to find.
226 */ 226 */
227 page = find_get_page(inode->i_mapping, 227 page = find_get_page(inode->i_mapping,
228 (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT); 228 (max_bytes + SZ_1M) >> PAGE_SHIFT);
229 if (!page) { 229 if (!page) {
230 test_msg("Couldn't find our page\n"); 230 test_msg("Couldn't find our page\n");
231 goto out_bits; 231 goto out_bits;
232 } 232 }
233 ClearPageDirty(page); 233 ClearPageDirty(page);
234 page_cache_release(page); 234 put_page(page);
235 235
236 /* We unlocked it in the previous test */ 236 /* We unlocked it in the previous test */
237 lock_page(locked_page); 237 lock_page(locked_page);
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
239 end = 0; 239 end = 0;
240 /* 240 /*
241 * Currently if we fail to find dirty pages in the delalloc range we 241 * Currently if we fail to find dirty pages in the delalloc range we
242 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If 242 * will adjust max_bytes down to PAGE_SIZE and then re-search. If
243 * this changes at any point in the future we will need to fix this 243 * this changes at any point in the future we will need to fix this
244 * test's expected behavior. 244 * test's expected behavior.
245 */ 245 */
@@ -249,9 +249,9 @@ static int test_find_delalloc(void)
249 test_msg("Didn't find our range\n"); 249 test_msg("Didn't find our range\n");
250 goto out_bits; 250 goto out_bits;
251 } 251 }
252 if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) { 252 if (start != test_start && end != test_start + PAGE_SIZE - 1) {
253 test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n", 253 test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
254 test_start, test_start + PAGE_CACHE_SIZE - 1, start, 254 test_start, test_start + PAGE_SIZE - 1, start,
255 end); 255 end);
256 goto out_bits; 256 goto out_bits;
257 } 257 }
@@ -265,7 +265,7 @@ out_bits:
265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); 265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
266out: 266out:
267 if (locked_page) 267 if (locked_page)
268 page_cache_release(locked_page); 268 put_page(locked_page);
269 process_page_range(inode, 0, total_dirty - 1, 269 process_page_range(inode, 0, total_dirty - 1,
270 PROCESS_UNLOCK | PROCESS_RELEASE); 270 PROCESS_UNLOCK | PROCESS_RELEASE);
271 iput(inode); 271 iput(inode);
@@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
298 return -EINVAL; 298 return -EINVAL;
299 } 299 }
300 300
301 bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, 301 bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
302 sizeof(long) * BITS_PER_BYTE); 302 sizeof(long) * BITS_PER_BYTE);
303 extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, 303 extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
304 sizeof(long) * BITS_PER_BYTE); 304 sizeof(long) * BITS_PER_BYTE);
305 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { 305 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
306 test_msg("Setting straddling pages failed\n"); 306 test_msg("Setting straddling pages failed\n");
@@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
309 309
310 bitmap_set(bitmap, 0, len * BITS_PER_BYTE); 310 bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
311 bitmap_clear(bitmap, 311 bitmap_clear(bitmap,
312 (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, 312 (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
313 sizeof(long) * BITS_PER_BYTE); 313 sizeof(long) * BITS_PER_BYTE);
314 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); 314 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
315 extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, 315 extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
316 sizeof(long) * BITS_PER_BYTE); 316 sizeof(long) * BITS_PER_BYTE);
317 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { 317 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
318 test_msg("Clearing straddling pages failed\n"); 318 test_msg("Clearing straddling pages failed\n");
@@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
353 353
354static int test_eb_bitmaps(void) 354static int test_eb_bitmaps(void)
355{ 355{
356 unsigned long len = PAGE_CACHE_SIZE * 4; 356 unsigned long len = PAGE_SIZE * 4;
357 unsigned long *bitmap; 357 unsigned long *bitmap;
358 struct extent_buffer *eb; 358 struct extent_buffer *eb;
359 int ret; 359 int ret;
@@ -379,7 +379,7 @@ static int test_eb_bitmaps(void)
379 379
380 /* Do it over again with an extent buffer which isn't page-aligned. */ 380 /* Do it over again with an extent buffer which isn't page-aligned. */
381 free_extent_buffer(eb); 381 free_extent_buffer(eb);
382 eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len); 382 eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
383 if (!eb) { 383 if (!eb) {
384 test_msg("Couldn't allocate test extent buffer\n"); 384 test_msg("Couldn't allocate test extent buffer\n");
385 kfree(bitmap); 385 kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c9ad97b1e690..514247515312 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
22#include "../disk-io.h" 22#include "../disk-io.h"
23#include "../free-space-cache.h" 23#include "../free-space-cache.h"
24 24
25#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 25#define BITS_PER_BITMAP (PAGE_SIZE * 8)
26 26
27/* 27/*
28 * This test just does basic sanity checking, making sure we can add an extent 28 * This test just does basic sanity checking, making sure we can add an extent
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 24d03c751149..517d0ccb351e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4415,6 +4415,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4415 return ret; 4415 return ret;
4416} 4416}
4417 4417
4418/*
4419 * When we are logging a new inode X, check if it doesn't have a reference that
4420 * matches the reference from some other inode Y created in a past transaction
4421 * and that was renamed in the current transaction. If we don't do this, then at
4422 * log replay time we can lose inode Y (and all its files if it's a directory):
4423 *
4424 * mkdir /mnt/x
4425 * echo "hello world" > /mnt/x/foobar
4426 * sync
4427 * mv /mnt/x /mnt/y
4428 * mkdir /mnt/x # or touch /mnt/x
4429 * xfs_io -c fsync /mnt/x
4430 * <power fail>
4431 * mount fs, trigger log replay
4432 *
4433 * After the log replay procedure, we would lose the first directory and all its
4434 * files (file foobar).
4435 * For the case where inode Y is not a directory we simply end up losing it:
4436 *
4437 * echo "123" > /mnt/foo
4438 * sync
4439 * mv /mnt/foo /mnt/bar
4440 * echo "abc" > /mnt/foo
4441 * xfs_io -c fsync /mnt/foo
4442 * <power fail>
4443 *
4444 * We also need this for cases where a snapshot entry is replaced by some other
4445 * entry (file or directory), otherwise we end up with an unreplayable log due to
4446 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4447 * if it were a regular entry:
4448 *
4449 * mkdir /mnt/x
4450 * btrfs subvolume snapshot /mnt /mnt/x/snap
4451 * btrfs subvolume delete /mnt/x/snap
4452 * rmdir /mnt/x
4453 * mkdir /mnt/x
4454 * fsync /mnt/x or fsync some new file inside it
4455 * <power fail>
4456 *
4457 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4458 * the same transaction.
4459 */
4460static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4461 const int slot,
4462 const struct btrfs_key *key,
4463 struct inode *inode)
4464{
4465 int ret;
4466 struct btrfs_path *search_path;
4467 char *name = NULL;
4468 u32 name_len = 0;
4469 u32 item_size = btrfs_item_size_nr(eb, slot);
4470 u32 cur_offset = 0;
4471 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4472
4473 search_path = btrfs_alloc_path();
4474 if (!search_path)
4475 return -ENOMEM;
4476 search_path->search_commit_root = 1;
4477 search_path->skip_locking = 1;
4478
4479 while (cur_offset < item_size) {
4480 u64 parent;
4481 u32 this_name_len;
4482 u32 this_len;
4483 unsigned long name_ptr;
4484 struct btrfs_dir_item *di;
4485
4486 if (key->type == BTRFS_INODE_REF_KEY) {
4487 struct btrfs_inode_ref *iref;
4488
4489 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4490 parent = key->offset;
4491 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4492 name_ptr = (unsigned long)(iref + 1);
4493 this_len = sizeof(*iref) + this_name_len;
4494 } else {
4495 struct btrfs_inode_extref *extref;
4496
4497 extref = (struct btrfs_inode_extref *)(ptr +
4498 cur_offset);
4499 parent = btrfs_inode_extref_parent(eb, extref);
4500 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4501 name_ptr = (unsigned long)&extref->name;
4502 this_len = sizeof(*extref) + this_name_len;
4503 }
4504
4505 if (this_name_len > name_len) {
4506 char *new_name;
4507
4508 new_name = krealloc(name, this_name_len, GFP_NOFS);
4509 if (!new_name) {
4510 ret = -ENOMEM;
4511 goto out;
4512 }
4513 name_len = this_name_len;
4514 name = new_name;
4515 }
4516
4517 read_extent_buffer(eb, name, name_ptr, this_name_len);
4518 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4519 search_path, parent,
4520 name, this_name_len, 0);
4521 if (di && !IS_ERR(di)) {
4522 ret = 1;
4523 goto out;
4524 } else if (IS_ERR(di)) {
4525 ret = PTR_ERR(di);
4526 goto out;
4527 }
4528 btrfs_release_path(search_path);
4529
4530 cur_offset += this_len;
4531 }
4532 ret = 0;
4533out:
4534 btrfs_free_path(search_path);
4535 kfree(name);
4536 return ret;
4537}
4538
4418/* log a single inode in the tree log. 4539/* log a single inode in the tree log.
4419 * At least one parent directory for this inode must exist in the tree 4540 * At least one parent directory for this inode must exist in the tree
4420 * or be logged already. 4541 * or be logged already.
@@ -4602,6 +4723,22 @@ again:
4602 if (min_key.type == BTRFS_INODE_ITEM_KEY) 4723 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4603 need_log_inode_item = false; 4724 need_log_inode_item = false;
4604 4725
4726 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4727 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4728 BTRFS_I(inode)->generation == trans->transid) {
4729 ret = btrfs_check_ref_name_override(path->nodes[0],
4730 path->slots[0],
4731 &min_key, inode);
4732 if (ret < 0) {
4733 err = ret;
4734 goto out_unlock;
4735 } else if (ret > 0) {
4736 err = 1;
4737 btrfs_set_log_full_commit(root->fs_info, trans);
4738 goto out_unlock;
4739 }
4740 }
4741
4605 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 4742 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4606 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 4743 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4607 if (ins_nr == 0) 4744 if (ins_nr == 0)
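
The new btrfs_check_ref_name_override() above walks every name stored in an INODE_REF or INODE_EXTREF item and probes the commit root for a dir item with the same parent and name; a match means another inode owned that name in an earlier transaction, so the caller gives up on the inode log and forces a full transaction commit via btrfs_set_log_full_commit(). Structurally, both ref flavors are variable-length records packed back to back inside one item, which is why the cursor advances by header size plus name length (`this_len`) each step. That packed-record walk, sketched over a plain byte buffer (hypothetical layout with a 2-byte little-endian length prefix, not the on-disk btrfs format):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* Two records: 2-byte name length, then the name bytes.
	 * Assumes a little-endian host for the memcpy decode.
	 */
	uint8_t item[] = { 3, 0, 'f', 'o', 'o',
			   6, 0, 'f', 'o', 'o', 'b', 'a', 'r' };
	uint32_t item_size = sizeof(item);
	uint32_t cur = 0;

	while (cur < item_size) {
		uint16_t name_len;

		memcpy(&name_len, item + cur, sizeof(name_len));
		printf("name: %.*s\n", name_len, (char *)item + cur + 2);
		cur += 2 + name_len;	/* header + name, like this_len */
	}
	return 0;
}
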
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e2b54d546b7c..bd0f45fb38c4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1025 } 1025 }
1026 1026
1027 /* make sure our super fits in the device */ 1027 /* make sure our super fits in the device */
1028 if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode)) 1028 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1029 goto error_bdev_put; 1029 goto error_bdev_put;
1030 1030
1031 /* make sure our super fits in the page */ 1031 /* make sure our super fits in the page */
1032 if (sizeof(*disk_super) > PAGE_CACHE_SIZE) 1032 if (sizeof(*disk_super) > PAGE_SIZE)
1033 goto error_bdev_put; 1033 goto error_bdev_put;
1034 1034
1035 /* make sure our super doesn't straddle pages on disk */ 1035 /* make sure our super doesn't straddle pages on disk */
1036 index = bytenr >> PAGE_CACHE_SHIFT; 1036 index = bytenr >> PAGE_SHIFT;
1037 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index) 1037 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1038 goto error_bdev_put; 1038 goto error_bdev_put;
1039 1039
1040 /* pull in the page with our super */ 1040 /* pull in the page with our super */
@@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1047 p = kmap(page); 1047 p = kmap(page);
1048 1048
1049 /* align our pointer to the offset of the super block */ 1049 /* align our pointer to the offset of the super block */
1050 disk_super = p + (bytenr & ~PAGE_CACHE_MASK); 1050 disk_super = p + (bytenr & ~PAGE_MASK);
1051 1051
1052 if (btrfs_super_bytenr(disk_super) != bytenr || 1052 if (btrfs_super_bytenr(disk_super) != bytenr ||
1053 btrfs_super_magic(disk_super) != BTRFS_MAGIC) 1053 btrfs_super_magic(disk_super) != BTRFS_MAGIC)
@@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1075 1075
1076error_unmap: 1076error_unmap:
1077 kunmap(page); 1077 kunmap(page);
1078 page_cache_release(page); 1078 put_page(page);
1079 1079
1080error_bdev_put: 1080error_bdev_put:
1081 blkdev_put(bdev, flags); 1081 blkdev_put(bdev, flags);
@@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6527 * but sb spans only this function. Add an explicit SetPageUptodate call 6527 * but sb spans only this function. Add an explicit SetPageUptodate call
6528 * to silence the warning eg. on PowerPC 64. 6528 * to silence the warning eg. on PowerPC 64.
6529 */ 6529 */
6530 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) 6530 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6531 SetPageUptodate(sb->pages[0]); 6531 SetPageUptodate(sb->pages[0]);
6532 6532
6533 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6533 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
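
btrfs_scan_one_device() reads the superblock through the block device's page cache one page at a time, which is what the three guards above protect: the super must end before the device does, must fit within a single page, and must not straddle a page boundary. The straddle test just compares the page index of the first and last byte; a sketch with an assumed 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

static int straddles_page(unsigned long long bytenr, unsigned long size)
{
	/* Same test as the hunk above: first and last byte must land in
	 * the same page.
	 */
	return (bytenr >> PAGE_SHIFT) != ((bytenr + size - 1) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", straddles_page(65536, 4096));  /* 0: exactly one page  */
	printf("%d\n", straddles_page(65537, 4096));  /* 1: crosses into next */
	return 0;
}
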
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 82990b8f872b..88d274e8ecf2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void)
59 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), 59 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
60 zlib_inflate_workspacesize()); 60 zlib_inflate_workspacesize());
61 workspace->strm.workspace = vmalloc(workspacesize); 61 workspace->strm.workspace = vmalloc(workspacesize);
62 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); 62 workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
63 if (!workspace->strm.workspace || !workspace->buf) 63 if (!workspace->strm.workspace || !workspace->buf)
64 goto fail; 64 goto fail;
65 65
@@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws,
103 workspace->strm.total_in = 0; 103 workspace->strm.total_in = 0;
104 workspace->strm.total_out = 0; 104 workspace->strm.total_out = 0;
105 105
106 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 106 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
107 data_in = kmap(in_page); 107 data_in = kmap(in_page);
108 108
109 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 109 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws,
117 117
118 workspace->strm.next_in = data_in; 118 workspace->strm.next_in = data_in;
119 workspace->strm.next_out = cpage_out; 119 workspace->strm.next_out = cpage_out;
120 workspace->strm.avail_out = PAGE_CACHE_SIZE; 120 workspace->strm.avail_out = PAGE_SIZE;
121 workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE); 121 workspace->strm.avail_in = min(len, PAGE_SIZE);
122 122
123 while (workspace->strm.total_in < len) { 123 while (workspace->strm.total_in < len) {
124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH); 124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws,
156 cpage_out = kmap(out_page); 156 cpage_out = kmap(out_page);
157 pages[nr_pages] = out_page; 157 pages[nr_pages] = out_page;
158 nr_pages++; 158 nr_pages++;
159 workspace->strm.avail_out = PAGE_CACHE_SIZE; 159 workspace->strm.avail_out = PAGE_SIZE;
160 workspace->strm.next_out = cpage_out; 160 workspace->strm.next_out = cpage_out;
161 } 161 }
162 /* we're all done */ 162 /* we're all done */
@@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws,
170 170
171 bytes_left = len - workspace->strm.total_in; 171 bytes_left = len - workspace->strm.total_in;
172 kunmap(in_page); 172 kunmap(in_page);
173 page_cache_release(in_page); 173 put_page(in_page);
174 174
175 start += PAGE_CACHE_SIZE; 175 start += PAGE_SIZE;
176 in_page = find_get_page(mapping, 176 in_page = find_get_page(mapping,
177 start >> PAGE_CACHE_SHIFT); 177 start >> PAGE_SHIFT);
178 data_in = kmap(in_page); 178 data_in = kmap(in_page);
179 workspace->strm.avail_in = min(bytes_left, 179 workspace->strm.avail_in = min(bytes_left,
180 PAGE_CACHE_SIZE); 180 PAGE_SIZE);
181 workspace->strm.next_in = data_in; 181 workspace->strm.next_in = data_in;
182 } 182 }
183 } 183 }
@@ -205,7 +205,7 @@ out:
205 205
206 if (in_page) { 206 if (in_page) {
207 kunmap(in_page); 207 kunmap(in_page);
208 page_cache_release(in_page); 208 put_page(in_page);
209 } 209 }
210 return ret; 210 return ret;
211} 211}
@@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
223 size_t total_out = 0; 223 size_t total_out = 0;
224 unsigned long page_in_index = 0; 224 unsigned long page_in_index = 0;
225 unsigned long page_out_index = 0; 225 unsigned long page_out_index = 0;
226 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); 226 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
227 unsigned long buf_start; 227 unsigned long buf_start;
228 unsigned long pg_offset; 228 unsigned long pg_offset;
229 229
230 data_in = kmap(pages_in[page_in_index]); 230 data_in = kmap(pages_in[page_in_index]);
231 workspace->strm.next_in = data_in; 231 workspace->strm.next_in = data_in;
232 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); 232 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
233 workspace->strm.total_in = 0; 233 workspace->strm.total_in = 0;
234 234
235 workspace->strm.total_out = 0; 235 workspace->strm.total_out = 0;
236 workspace->strm.next_out = workspace->buf; 236 workspace->strm.next_out = workspace->buf;
237 workspace->strm.avail_out = PAGE_CACHE_SIZE; 237 workspace->strm.avail_out = PAGE_SIZE;
238 pg_offset = 0; 238 pg_offset = 0;
239 239
240 /* If it's deflate, and it's got no preset dictionary, then 240 /* If it's deflate, and it's got no preset dictionary, then
@@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
274 } 274 }
275 275
276 workspace->strm.next_out = workspace->buf; 276 workspace->strm.next_out = workspace->buf;
277 workspace->strm.avail_out = PAGE_CACHE_SIZE; 277 workspace->strm.avail_out = PAGE_SIZE;
278 278
279 if (workspace->strm.avail_in == 0) { 279 if (workspace->strm.avail_in == 0) {
280 unsigned long tmp; 280 unsigned long tmp;
@@ -288,7 +288,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
288 workspace->strm.next_in = data_in; 288 workspace->strm.next_in = data_in;
289 tmp = srclen - workspace->strm.total_in; 289 tmp = srclen - workspace->strm.total_in;
290 workspace->strm.avail_in = min(tmp, 290 workspace->strm.avail_in = min(tmp,
291 PAGE_CACHE_SIZE); 291 PAGE_SIZE);
292 } 292 }
293 } 293 }
294 if (ret != Z_STREAM_END) 294 if (ret != Z_STREAM_END)
@@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
325 workspace->strm.total_in = 0; 325 workspace->strm.total_in = 0;
326 326
327 workspace->strm.next_out = workspace->buf; 327 workspace->strm.next_out = workspace->buf;
328 workspace->strm.avail_out = PAGE_CACHE_SIZE; 328 workspace->strm.avail_out = PAGE_SIZE;
329 workspace->strm.total_out = 0; 329 workspace->strm.total_out = 0;
330 /* If it's deflate, and it's got no preset dictionary, then 330 /* If it's deflate, and it's got no preset dictionary, then
331 we can tell zlib to skip the adler32 check. */ 331 we can tell zlib to skip the adler32 check. */
@@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
368 else 368 else
369 buf_offset = 0; 369 buf_offset = 0;
370 370
371 bytes = min(PAGE_CACHE_SIZE - pg_offset, 371 bytes = min(PAGE_SIZE - pg_offset,
372 PAGE_CACHE_SIZE - buf_offset); 372 PAGE_SIZE - buf_offset);
373 bytes = min(bytes, bytes_left); 373 bytes = min(bytes, bytes_left);
374 374
375 kaddr = kmap_atomic(dest_page); 375 kaddr = kmap_atomic(dest_page);
@@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
380 bytes_left -= bytes; 380 bytes_left -= bytes;
381next: 381next:
382 workspace->strm.next_out = workspace->buf; 382 workspace->strm.next_out = workspace->buf;
383 workspace->strm.avail_out = PAGE_CACHE_SIZE; 383 workspace->strm.avail_out = PAGE_SIZE;
384 } 384 }
385 385
386 if (ret != Z_STREAM_END && bytes_left != 0) 386 if (ret != Z_STREAM_END && bytes_left != 0)
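
All of the zlib.c hunks touch one streaming pattern: next_in/avail_in and next_out/avail_out are refilled a page at a time, with the final input chunk clamped by min(remaining, PAGE_SIZE). A compact userspace version of that refill loop against the ordinary zlib API (assumes zlib is available and linked with -lz; the kernel uses its own zlib_ wrappers, but the stream mechanics are the same):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 4096	/* stands in for PAGE_SIZE */

int main(void)
{
	static const char src[] = "hello hello hello hello hello hello";
	unsigned char out[CHUNK];
	size_t total = 0;
	z_stream strm;

	memset(&strm, 0, sizeof(strm));
	if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = sizeof(src);	/* min(len, CHUNK) in the kernel */

	/* Hand zlib a fresh output buffer whenever it drains the last
	 * one, the same shape as zlib_compress_pages() above.
	 */
	do {
		strm.next_out = out;
		strm.avail_out = CHUNK;
		if (deflate(&strm, Z_FINISH) == Z_STREAM_ERROR)
			return 1;
		total += CHUNK - strm.avail_out;
	} while (strm.avail_out == 0);

	deflateEnd(&strm);
	printf("compressed %zu input bytes into %zu\n", sizeof(src), total);
	return 0;
}
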
diff --git a/fs/buffer.c b/fs/buffer.c
index 33be29675358..af0d9a82a8ed 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page)
129{ 129{
130 ClearPagePrivate(page); 130 ClearPagePrivate(page);
131 set_page_private(page, 0); 131 set_page_private(page, 0);
132 page_cache_release(page); 132 put_page(page);
133} 133}
134 134
135static void buffer_io_error(struct buffer_head *bh, char *msg) 135static void buffer_io_error(struct buffer_head *bh, char *msg)
@@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
207 struct page *page; 207 struct page *page;
208 int all_mapped = 1; 208 int all_mapped = 1;
209 209
210 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); 210 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
211 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); 211 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
212 if (!page) 212 if (!page)
213 goto out; 213 goto out;
@@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
245 } 245 }
246out_unlock: 246out_unlock:
247 spin_unlock(&bd_mapping->private_lock); 247 spin_unlock(&bd_mapping->private_lock);
248 page_cache_release(page); 248 put_page(page);
249out: 249out:
250 return ret; 250 return ret;
251} 251}
@@ -1040,7 +1040,7 @@ done:
1040 ret = (block < end_block) ? 1 : -ENXIO; 1040 ret = (block < end_block) ? 1 : -ENXIO;
1041failed: 1041failed:
1042 unlock_page(page); 1042 unlock_page(page);
1043 page_cache_release(page); 1043 put_page(page);
1044 return ret; 1044 return ret;
1045} 1045}
1046 1046
@@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
1533 /* 1533 /*
1534 * Check for overflow 1534 * Check for overflow
1535 */ 1535 */
1536 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 1536 BUG_ON(stop > PAGE_SIZE || stop < length);
1537 1537
1538 head = page_buffers(page); 1538 head = page_buffers(page);
1539 bh = head; 1539 bh = head;
@@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1716 blocksize = bh->b_size; 1716 blocksize = bh->b_size;
1717 bbits = block_size_bits(blocksize); 1717 bbits = block_size_bits(blocksize);
1718 1718
1719 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1719 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1720 last_block = (i_size_read(inode) - 1) >> bbits; 1720 last_block = (i_size_read(inode) - 1) >> bbits;
1721 1721
1722 /* 1722 /*
@@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);
1894int __block_write_begin(struct page *page, loff_t pos, unsigned len, 1894int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1895 get_block_t *get_block) 1895 get_block_t *get_block)
1896{ 1896{
1897 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1897 unsigned from = pos & (PAGE_SIZE - 1);
1898 unsigned to = from + len; 1898 unsigned to = from + len;
1899 struct inode *inode = page->mapping->host; 1899 struct inode *inode = page->mapping->host;
1900 unsigned block_start, block_end; 1900 unsigned block_start, block_end;
@@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1904 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 1904 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1905 1905
1906 BUG_ON(!PageLocked(page)); 1906 BUG_ON(!PageLocked(page));
1907 BUG_ON(from > PAGE_CACHE_SIZE); 1907 BUG_ON(from > PAGE_SIZE);
1908 BUG_ON(to > PAGE_CACHE_SIZE); 1908 BUG_ON(to > PAGE_SIZE);
1909 BUG_ON(from > to); 1909 BUG_ON(from > to);
1910 1910
1911 head = create_page_buffers(page, inode, 0); 1911 head = create_page_buffers(page, inode, 0);
1912 blocksize = head->b_size; 1912 blocksize = head->b_size;
1913 bbits = block_size_bits(blocksize); 1913 bbits = block_size_bits(blocksize);
1914 1914
1915 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1915 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1916 1916
1917 for(bh = head, block_start = 0; bh != head || !block_start; 1917 for(bh = head, block_start = 0; bh != head || !block_start;
1918 block++, block_start=block_end, bh = bh->b_this_page) { 1918 block++, block_start=block_end, bh = bh->b_this_page) {
@@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
2020int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2020int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2021 unsigned flags, struct page **pagep, get_block_t *get_block) 2021 unsigned flags, struct page **pagep, get_block_t *get_block)
2022{ 2022{
2023 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 2023 pgoff_t index = pos >> PAGE_SHIFT;
2024 struct page *page; 2024 struct page *page;
2025 int status; 2025 int status;
2026 2026
@@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2031 status = __block_write_begin(page, pos, len, get_block); 2031 status = __block_write_begin(page, pos, len, get_block);
2032 if (unlikely(status)) { 2032 if (unlikely(status)) {
2033 unlock_page(page); 2033 unlock_page(page);
2034 page_cache_release(page); 2034 put_page(page);
2035 page = NULL; 2035 page = NULL;
2036 } 2036 }
2037 2037
@@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
2047 struct inode *inode = mapping->host; 2047 struct inode *inode = mapping->host;
2048 unsigned start; 2048 unsigned start;
2049 2049
2050 start = pos & (PAGE_CACHE_SIZE - 1); 2050 start = pos & (PAGE_SIZE - 1);
2051 2051
 	if (unlikely(copied < len)) {
 		/*
@@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
 
 	head = page_buffers(page);
 	blocksize = head->b_size;
-	to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
+	to = min_t(unsigned, PAGE_SIZE - from, count);
 	to = from + to;
-	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+	if (from < blocksize && to > PAGE_SIZE - blocksize)
 		return 0;
 
 	bh = head;
@@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
 	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
 	bh = head;
 	nr = 0;
@@ -2295,16 +2295,16 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 	unsigned zerofrom, offset, len;
 	int err = 0;
 
-	index = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ~PAGE_CACHE_MASK;
+	index = pos >> PAGE_SHIFT;
+	offset = pos & ~PAGE_MASK;
 
-	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
-		zerofrom = curpos & ~PAGE_CACHE_MASK;
+	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
+		zerofrom = curpos & ~PAGE_MASK;
 		if (zerofrom & (blocksize-1)) {
 			*bytes |= (blocksize-1);
 			(*bytes)++;
 		}
-		len = PAGE_CACHE_SIZE - zerofrom;
+		len = PAGE_SIZE - zerofrom;
 
 		err = pagecache_write_begin(file, mapping, curpos, len,
 						AOP_FLAG_UNINTERRUPTIBLE,
@@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 
 	/* page covers the boundary, find the boundary offset */
 	if (index == curidx) {
-		zerofrom = curpos & ~PAGE_CACHE_MASK;
+		zerofrom = curpos & ~PAGE_MASK;
 		/* if we will expand the thing last block will be filled */
 		if (offset <= zerofrom) {
 			goto out;
@@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 	if (err)
 		return err;
 
-	zerofrom = *bytes & ~PAGE_CACHE_MASK;
+	zerofrom = *bytes & ~PAGE_MASK;
 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
 		*bytes |= (blocksize-1);
 		(*bytes)++;
@@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	}
 
 	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
-		end = size & ~PAGE_CACHE_MASK;
+	if (((page->index + 1) << PAGE_SHIFT) > size)
+		end = size & ~PAGE_MASK;
 	else
-		end = PAGE_CACHE_SIZE;
+		end = PAGE_SIZE;
 
 	ret = __block_write_begin(page, 0, end, get_block);
 	if (!ret)
@@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping,
 	int ret = 0;
 	int is_mapped_to_disk = 1;
 
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	index = pos >> PAGE_SHIFT;
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
@@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping,
 		goto out_release;
 	}
 
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 
 	/*
 	 * We loop across all blocks in the page, whether or not they are
@@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping,
 	 * page is fully mapped-to-disk.
 	 */
 	for (block_start = 0, block_in_page = 0, bh = head;
-		  block_start < PAGE_CACHE_SIZE;
+		  block_start < PAGE_SIZE;
 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
 		int create;
 
@@ -2623,7 +2623,7 @@ failed:
 
 out_release:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	*pagep = NULL;
 
 	return ret;
@@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	while (head) {
 		bh = head;
@@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	const pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 	int ret;
 
@@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 		goto out;
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index >= end_index+1 || !offset) {
 		/*
 		 * The page may have dirty, unmapped buffers. For example,
@@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
@@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage);
 int nobh_truncate_page(struct address_space *mapping,
 			loff_t from, get_block_t *get_block)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize;
 	sector_t iblock;
 	unsigned length, pos;
@@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping,
 		return 0;
 
 	length = blocksize - length;
-	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping,
 	if (page_has_buffers(page)) {
 has_buffers:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return block_truncate_page(mapping, from, get_block);
 	}
 
@@ -2772,7 +2772,7 @@ has_buffers:
 	if (!PageUptodate(page)) {
 		err = mapping->a_ops->readpage(NULL, page);
 		if (err) {
-			page_cache_release(page);
+			put_page(page);
 			goto out;
 		}
 		lock_page(page);
@@ -2789,7 +2789,7 @@ has_buffers:
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return err;
 }
@@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page);
 int block_truncate_page(struct address_space *mapping,
 			loff_t from, get_block_t *get_block)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize;
 	sector_t iblock;
 	unsigned length, pos;
@@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping,
 		return 0;
 
 	length = blocksize - length;
-	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping,
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
out:
 	return err;
 }
@@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	const pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 
 	/* Is the page fully inside i_size? */
@@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 					       end_buffer_async_write);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index >= end_index+1 || !offset) {
 		/*
 		 * The page may have dirty, unmapped buffers. For example,
 		 * they may have been added in ext3_writepage(). Make them
 		 * freeable here, so the page does not leak.
 		 */
-		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		do_invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		return 0; /* don't care */
 	}
@@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	return __block_write_full_page(inode, page, get_block, wbc,
 							end_buffer_async_write);
 }
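
The fs/buffer.c hunks above are mechanical renames: PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK were aliases of PAGE_SIZE, PAGE_SHIFT and PAGE_MASK, so the index/offset arithmetic is unchanged. A minimal standalone sketch of that recurring arithmetic follows; pos_to_index() and pos_to_offset() are illustrative names, not kernel API, and a 4 KiB page is assumed.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* same math as "index = pos >> PAGE_SHIFT" in the hunks above */
static unsigned long pos_to_index(uint64_t pos)  { return pos >> PAGE_SHIFT; }
/* same math as "offset = pos & ~PAGE_MASK" */
static unsigned long pos_to_offset(uint64_t pos) { return pos & ~PAGE_MASK; }

int main(void)
{
	uint64_t pos = 10000;
	printf("pos %llu -> page index %lu, offset in page %lu\n",
	       (unsigned long long)pos, pos_to_index(pos), pos_to_offset(pos));
	return 0;                          /* prints index 2, offset 1808 */
}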
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0..afbdc418966d 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 			error = -EIO;
 		}
 
-		page_cache_release(monitor->back_page);
+		put_page(monitor->back_page);
 
 		fscache_end_io(op, monitor->netfs_page, error);
-		page_cache_release(monitor->netfs_page);
+		put_page(monitor->netfs_page);
 		fscache_retrieval_complete(op, 1);
 		fscache_put_retrieval(op);
 		kfree(monitor);
@@ -288,8 +288,8 @@ monitor_backing_page:
 	_debug("- monitor add");
 
 	/* install the monitor */
-	page_cache_get(monitor->netfs_page);
-	page_cache_get(backpage);
+	get_page(monitor->netfs_page);
+	get_page(backpage);
 	monitor->back_page = backpage;
 	monitor->monitor.private = backpage;
 	add_page_wait_queue(backpage, &monitor->monitor);
@@ -310,7 +310,7 @@ backing_page_already_present:
 	_debug("- present");
 
 	if (newpage) {
-		page_cache_release(newpage);
+		put_page(newpage);
 		newpage = NULL;
 	}
 
@@ -342,7 +342,7 @@ success:
 
 out:
 	if (backpage)
-		page_cache_release(backpage);
+		put_page(backpage);
 	if (monitor) {
 		fscache_put_retrieval(monitor->op);
 		kfree(monitor);
@@ -363,7 +363,7 @@ io_error:
 	goto out;
 
 nomem_page:
-	page_cache_release(newpage);
+	put_page(newpage);
 nomem_monitor:
 	fscache_put_retrieval(monitor->op);
 	kfree(monitor);
@@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
-				page_cache_release(netpage);
+				put_page(netpage);
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		}
 
 		/* install a monitor */
-		page_cache_get(netpage);
+		get_page(netpage);
 		monitor->netfs_page = netpage;
 
-		page_cache_get(backpage);
+		get_page(backpage);
 		monitor->back_page = backpage;
 		monitor->monitor.private = backpage;
 		add_page_wait_queue(backpage, &monitor->monitor);
@@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 			unlock_page(backpage);
 		}
 
-		page_cache_release(backpage);
+		put_page(backpage);
 		backpage = NULL;
 
-		page_cache_release(netpage);
+		put_page(netpage);
 		netpage = NULL;
 		continue;
 
@@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
-				page_cache_release(netpage);
+				put_page(netpage);
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 
 		copy_highpage(netpage, backpage);
 
-		page_cache_release(backpage);
+		put_page(backpage);
 		backpage = NULL;
 
 		fscache_mark_page_cached(op, netpage);
 
 		/* the netpage is unlocked and marked up to date here */
 		fscache_end_io(op, netpage, 0);
-		page_cache_release(netpage);
+		put_page(netpage);
 		netpage = NULL;
 		fscache_retrieval_complete(op, 1);
 		continue;
@@ -632,11 +632,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
out:
 	/* tidy up */
 	if (newpage)
-		page_cache_release(newpage);
+		put_page(newpage);
 	if (netpage)
-		page_cache_release(netpage);
+		put_page(netpage);
 	if (backpage)
-		page_cache_release(backpage);
+		put_page(backpage);
 	if (monitor) {
 		fscache_put_retrieval(op);
 		kfree(monitor);
@@ -644,7 +644,7 @@ out:
 
 	list_for_each_entry_safe(netpage, _n, list, lru) {
 		list_del(&netpage->lru);
-		page_cache_release(netpage);
+		put_page(netpage);
 		fscache_retrieval_complete(op, 1);
 	}
 
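
In these cachefiles hunks, page_cache_get() and page_cache_release() were one-to-one wrappers around get_page() and put_page(), so dropping the wrappers changes no reference-counting behaviour: every get must still be balanced by a put, and the last put frees the page. A toy model of that pairing follows; toy_page, toy_get_page and toy_put_page are made-up names, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct toy_page { int refcount; };

static void toy_get_page(struct toy_page *p) { p->refcount++; }

static void toy_put_page(struct toy_page *p)
{
	/* last reference dropped: the kernel would free the page here */
	if (--p->refcount == 0)
		free(p);
}

int main(void)
{
	struct toy_page *p = malloc(sizeof(*p));
	p->refcount = 1;          /* caller owns one reference */
	toy_get_page(p);          /* e.g. installing a monitor takes a ref */
	toy_put_page(p);          /* monitor is done with the page */
	toy_put_page(p);          /* caller's reference; page is freed */
	printf("balanced gets and puts: page freed\n");
	return 0;
}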
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2a0db2..4801571f51cb 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
 	inode = page->mapping->host;
 	ci = ceph_inode(inode);
 
-	if (offset != 0 || length != PAGE_CACHE_SIZE) {
+	if (offset != 0 || length != PAGE_SIZE) {
 		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
 		     inode, page, page->index, offset, length);
 		return;
@@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		&ceph_inode_to_client(inode)->client->osdc;
 	int err = 0;
 	u64 off = page_offset(page);
-	u64 len = PAGE_CACHE_SIZE;
+	u64 len = PAGE_SIZE;
 
 	if (off >= i_size_read(inode)) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		 */
 		if (off == 0)
 			return -EINVAL;
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		ceph_fscache_readpage_cancel(inode, page);
 		goto out;
 	}
-	if (err < PAGE_CACHE_SIZE)
+	if (err < PAGE_SIZE)
 		/* zero fill remainder of page */
-		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		zero_user_segment(page, err, PAGE_SIZE);
 	else
 		flush_dcache_page(page);
 
@@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 
 		if (rc < 0 && rc != -ENOENT)
 			goto unlock;
-		if (bytes < (int)PAGE_CACHE_SIZE) {
+		if (bytes < (int)PAGE_SIZE) {
 			/* zero (remainder of) page */
 			int s = bytes < 0 ? 0 : bytes;
-			zero_user_segment(page, s, PAGE_CACHE_SIZE);
+			zero_user_segment(page, s, PAGE_SIZE);
 		}
 		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
 		     page->index);
@@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 		ceph_readpage_to_fscache(inode, page);
unlock:
 		unlock_page(page);
-		page_cache_release(page);
-		bytes -= PAGE_CACHE_SIZE;
+		put_page(page);
+		bytes -= PAGE_SIZE;
 	}
 	kfree(osd_data->pages);
 }
@@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		if (max && nr_pages == max)
 			break;
 	}
-	len = nr_pages << PAGE_CACHE_SHIFT;
+	len = nr_pages << PAGE_SHIFT;
 	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
 	     off, len);
 	vino = ceph_vino(inode);
@@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
 					  GFP_KERNEL)) {
 			ceph_fscache_uncache_page(inode, page);
-			page_cache_release(page);
+			put_page(page);
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);
 			nr_pages = i;
@@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 	if (rc == 0)
 		goto out;
 
-	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
-		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
+	if (fsc->mount_options->rsize >= PAGE_SIZE)
+		max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
 			>> PAGE_SHIFT;
 
 	dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	long writeback_stat;
 	u64 truncate_size;
 	u32 truncate_seq;
-	int err = 0, len = PAGE_CACHE_SIZE;
+	int err = 0, len = PAGE_SIZE;
 
 	dout("writepage %p idx %lu\n", page, page->index);
 
@@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping,
 	}
 	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
 		wsize = fsc->mount_options->wsize;
-	if (wsize < PAGE_CACHE_SIZE)
-		wsize = PAGE_CACHE_SIZE;
-	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+	if (wsize < PAGE_SIZE)
+		wsize = PAGE_SIZE;
+	max_pages_ever = wsize >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 
@@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 		end = -1;
 		dout(" cyclic, start at %lu\n", start);
 	} else {
-		start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		start = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		should_loop = 0;
@@ -887,7 +887,7 @@ get_more_pages:
 
 				num_ops = 1 + do_sync;
 				strip_unit_end = page->index +
-					((len - 1) >> PAGE_CACHE_SHIFT);
+					((len - 1) >> PAGE_SHIFT);
 
 				BUG_ON(pages);
 				max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@ get_more_pages:
 
 				len = 0;
 			} else if (page->index !=
-				   (offset + len) >> PAGE_CACHE_SHIFT) {
+				   (offset + len) >> PAGE_SHIFT) {
 				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
 							CEPH_OSD_MAX_OPS)) {
 					redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@ get_more_pages:
 
 			pages[locked_pages] = page;
 			locked_pages++;
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		/* did we get anything? */
@@ -981,7 +981,7 @@ new_request:
 			BUG_ON(IS_ERR(req));
 		}
 		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
-			     PAGE_CACHE_SIZE - offset);
+			     PAGE_SIZE - offset);
 
 		req->r_callback = writepages_finish;
 		req->r_inode = inode;
@@ -1011,7 +1011,7 @@ new_request:
 			}
 
 			set_page_writeback(pages[i]);
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		if (snap_size != -1) {
@@ -1020,7 +1020,7 @@ new_request:
 			/* writepages_finish() clears writeback pages
 			 * according to the data length, so make sure
 			 * data length covers all locked pages */
-			u64 min_len = len + 1 - PAGE_CACHE_SIZE;
+			u64 min_len = len + 1 - PAGE_SIZE;
 			len = min(len, (u64)i_size_read(inode) - offset);
 			len = max(len, min_len);
 		}
@@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file,
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	loff_t page_off = pos & PAGE_CACHE_MASK;
-	int pos_in_page = pos & ~PAGE_CACHE_MASK;
+	loff_t page_off = pos & PAGE_MASK;
+	int pos_in_page = pos & ~PAGE_MASK;
 	int end_in_page = pos_in_page + len;
 	loff_t i_size;
 	int r;
@@ -1191,7 +1191,7 @@ retry_locked:
 	}
 
 	/* full page? */
-	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
+	if (pos_in_page == 0 && len == PAGE_SIZE)
 		return 0;
 
 	/* past end of file? */
@@ -1199,12 +1199,12 @@ retry_locked:
 
 	if (page_off >= i_size ||
 	    (pos_in_page == 0 && (pos+len) >= i_size &&
-	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
+	     end_in_page - pos_in_page != PAGE_SIZE)) {
 		dout(" zeroing %p 0 - %d and %d - %d\n",
-		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
+		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
 		zero_user_segments(page,
 				   0, pos_in_page,
-				   end_in_page, PAGE_CACHE_SIZE);
+				   end_in_page, PAGE_SIZE);
 		return 0;
 	}
 
@@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = file_inode(file);
 	struct page *page;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	int r;
 
 	do {
@@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 
 		r = ceph_update_writeable_page(file, pos, len, page);
 		if (r < 0)
-			page_cache_release(page);
+			put_page(page);
 		else
 			*pagep = page;
 	} while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 			  struct page *page, void *fsdata)
 {
 	struct inode *inode = file_inode(file);
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int check_cap = 0;
 
 	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 	set_page_dirty(page);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (check_cap)
 		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_file_info *fi = vma->vm_file->private_data;
 	struct page *pinned_page = NULL;
-	loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
+	loff_t off = vmf->pgoff << PAGE_SHIFT;
 	int want, got, ret;
 
 	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
-	     inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
+	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
 	else
@@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 	}
 	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
+	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
 	    ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
 	if (pinned_page)
-		page_cache_release(pinned_page);
+		put_page(pinned_page);
 	ceph_put_cap_refs(ci, got);
 
 	if (ret != -EAGAIN)
 		return ret;
 
 	/* read inline data */
-	if (off >= PAGE_CACHE_SIZE) {
+	if (off >= PAGE_SIZE) {
 		/* does not support inline data > PAGE_SIZE */
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 					 CEPH_STAT_CAP_INLINE_DATA, true);
 		if (ret1 < 0 || off >= i_size_read(inode)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			ret = VM_FAULT_SIGBUS;
 			goto out;
 		}
-		if (ret1 < PAGE_CACHE_SIZE)
-			zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+		if (ret1 < PAGE_SIZE)
+			zero_user_segment(page, ret1, PAGE_SIZE);
 		else
 			flush_dcache_page(page);
 		SetPageUptodate(page);
@@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
out:
 	dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ret);
+	     inode, off, (size_t)PAGE_SIZE, ret);
 	return ret;
 }
 
@@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 	}
 
-	if (off + PAGE_CACHE_SIZE <= size)
-		len = PAGE_CACHE_SIZE;
+	if (off + PAGE_SIZE <= size)
+		len = PAGE_SIZE;
 	else
-		len = size & ~PAGE_CACHE_MASK;
+		len = size & ~PAGE_MASK;
 
 	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
 	     inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 			return;
 		if (PageUptodate(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return;
 		}
 	}
@@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 	}
 
 	if (page != locked_page) {
-		if (len < PAGE_CACHE_SIZE)
-			zero_user_segment(page, len, PAGE_CACHE_SIZE);
+		if (len < PAGE_SIZE)
+			zero_user_segment(page, len, PAGE_SIZE);
 		else
 			flush_dcache_page(page);
 
 		SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 			from_pagecache = true;
 			lock_page(page);
 		} else {
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 		}
 	}
@@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 
 	if (page) {
 		len = i_size_read(inode);
-		if (len > PAGE_CACHE_SIZE)
-			len = PAGE_CACHE_SIZE;
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
 	} else {
 		page = __page_cache_alloc(GFP_NOFS);
 		if (!page) {
@@ -1670,7 +1670,7 @@ out:
 	if (page && page != locked_page) {
 		if (from_pagecache) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		} else
 			__free_pages(page, 0);
 	}
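
Several of the ceph hunks above share one idiom: when a read returns fewer bytes than a page holds, the tail of the page is zeroed with zero_user_segment() before the page is marked up to date, so stale data never leaks to userspace. A userspace sketch of that tail-zeroing follows; buf stands in for the kmapped page, and a 4096-byte page is assumed.

#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* zero buf[from, to) -- what zero_user_segment() does to a page */
static void zero_segment(unsigned char *buf, size_t from, size_t to)
{
	memset(buf + from, 0, to - from);
}

int main(void)
{
	unsigned char page[PAGE_SIZE];
	size_t got = 1500;                /* short read: only 1500 bytes valid */

	memset(page, 0xAA, sizeof(page)); /* pretend the page held stale data */
	if (got < PAGE_SIZE)
		zero_segment(page, got, PAGE_SIZE);
	printf("byte 1499=0x%02x byte 1500=0x%02x\n", page[1499], page[1500]);
	return 0;                         /* prints 0xaa then 0x00 */
}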
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index de17bb232ff8..cfaeef18cbca 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2510,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 				*pinned_page = page;
 				break;
 			}
-			page_cache_release(page);
+			put_page(page);
 		}
 		/*
 		 * drop cap refs first because getattr while
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fadc243dfb28..4fb2bbc2a272 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -129,7 +129,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
 	struct inode *dir = d_inode(parent);
 	struct dentry *dentry, *last = NULL;
 	struct ceph_dentry_info *di;
-	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
+	unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
 	int err = 0;
 	loff_t ptr_pos = 0;
 	struct ceph_readdir_cache_control cache_ctl = {};
@@ -154,7 +154,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
 		}
 
 		err = -EAGAIN;
-		pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
+		pgoff = ptr_pos >> PAGE_SHIFT;
 		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
 			ceph_readdir_cache_release(&cache_ctl);
 			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ef38f01c1795..a79f9269831e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -466,7 +466,7 @@ more:
 			ret += zlen;
 		}
 
-		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
+		didpages = (page_align + ret) >> PAGE_SHIFT;
 		pos += ret;
 		read = pos - off;
 		left -= ret;
@@ -806,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 
 	if (write) {
 		ret = invalidate_inode_pages2_range(inode->i_mapping,
-					pos >> PAGE_CACHE_SHIFT,
-					(pos + count) >> PAGE_CACHE_SHIFT);
+					pos >> PAGE_SHIFT,
+					(pos + count) >> PAGE_SHIFT);
 		if (ret < 0)
 			dout("invalidate_inode_pages2_range returned %d\n", ret);
 
@@ -872,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		 * may block.
 		 */
 		truncate_inode_pages_range(inode->i_mapping, pos,
-					(pos+len) | (PAGE_CACHE_SIZE - 1));
+					(pos+len) | (PAGE_SIZE - 1));
 
 		osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 	}
@@ -1006,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 		return ret;
 
 	ret = invalidate_inode_pages2_range(inode->i_mapping,
-					    pos >> PAGE_CACHE_SHIFT,
-					    (pos + count) >> PAGE_CACHE_SHIFT);
+					    pos >> PAGE_SHIFT,
+					    (pos + count) >> PAGE_SHIFT);
 	if (ret < 0)
 		dout("invalidate_inode_pages2_range returned %d\n", ret);
 
@@ -1036,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 		 * write from beginning of first page,
 		 * regardless of io alignment
 		 */
-		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 		if (IS_ERR(pages)) {
@@ -1159,7 +1159,7 @@ again:
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
 	if (pinned_page) {
-		page_cache_release(pinned_page);
+		put_page(pinned_page);
 		pinned_page = NULL;
 	}
 	ceph_put_cap_refs(ci, got);
@@ -1188,10 +1188,10 @@ again:
 		if (retry_op == READ_INLINE) {
 			BUG_ON(ret > 0 || read > 0);
 			if (iocb->ki_pos < i_size &&
-			    iocb->ki_pos < PAGE_CACHE_SIZE) {
+			    iocb->ki_pos < PAGE_SIZE) {
 				loff_t end = min_t(loff_t, i_size,
 						   iocb->ki_pos + len);
-				end = min_t(loff_t, end, PAGE_CACHE_SIZE);
+				end = min_t(loff_t, end, PAGE_SIZE);
 				if (statret < end)
 					zero_user_segment(page, statret, end);
 				ret = copy_page_to_iter(page,
@@ -1463,21 +1463,21 @@ static inline void ceph_zero_partial_page(
 	struct inode *inode, loff_t offset, unsigned size)
 {
 	struct page *page;
-	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+	pgoff_t index = offset >> PAGE_SHIFT;
 
 	page = find_lock_page(inode->i_mapping, index);
 	if (page) {
 		wait_on_page_writeback(page);
-		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
+		zero_user(page, offset & (PAGE_SIZE - 1), size);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
 				      loff_t length)
 {
-	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
+	loff_t nearly = round_up(offset, PAGE_SIZE);
 	if (offset < nearly) {
 		loff_t size = nearly - offset;
 		if (length < size)
@@ -1486,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
 		offset += size;
 		length -= size;
 	}
-	if (length >= PAGE_CACHE_SIZE) {
-		loff_t size = round_down(length, PAGE_CACHE_SIZE);
+	if (length >= PAGE_SIZE) {
+		loff_t size = round_down(length, PAGE_SIZE);
 		truncate_pagecache_range(inode, offset, offset + size - 1);
 		offset += size;
 		length -= size;
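
ceph_zero_pagecache_range() above splits an arbitrary byte range into an unaligned head, a run of whole pages, and an unaligned tail: only the whole-page middle can simply be dropped from the page cache, while the head and tail must be zeroed in place. A sketch of that three-way split follows; round_up() and round_down() are re-derived here for a power-of-two PAGE_SIZE, and the printed actions stand in for the kernel calls.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

static uint64_t round_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t round_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

int main(void)
{
	uint64_t offset = 1000, length = 10000;
	uint64_t nearly = round_up(offset, PAGE_SIZE);

	if (offset < nearly) {                     /* unaligned head */
		uint64_t head = nearly - offset;
		if (head > length)
			head = length;
		printf("zero head: %llu bytes at %llu\n",
		       (unsigned long long)head, (unsigned long long)offset);
		offset += head;
		length -= head;
	}
	if (length >= PAGE_SIZE) {                 /* whole pages: truncate */
		uint64_t mid = round_down(length, PAGE_SIZE);
		printf("truncate:  %llu bytes at %llu\n",
		       (unsigned long long)mid, (unsigned long long)offset);
		offset += mid;
		length -= mid;
	}
	if (length)                                /* unaligned tail */
		printf("zero tail: %llu bytes at %llu\n",
		       (unsigned long long)length, (unsigned long long)offset);
	return 0;
}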
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ed58b168904a..edfade037738 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1338,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
 {
 	if (ctl->page) {
 		kunmap(ctl->page);
-		page_cache_release(ctl->page);
+		put_page(ctl->page);
 		ctl->page = NULL;
 	}
 }
@@ -1348,7 +1348,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
 			      struct ceph_mds_request *req)
 {
 	struct ceph_inode_info *ci = ceph_inode(dir);
-	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
+	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
 	unsigned idx = ctl->index % nsize;
 	pgoff_t pgoff = ctl->index / nsize;
 
@@ -1367,7 +1367,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
 		unlock_page(ctl->page);
 		ctl->dentries = kmap(ctl->page);
 		if (idx == 0)
-			memset(ctl->dentries, 0, PAGE_CACHE_SIZE);
+			memset(ctl->dentries, 0, PAGE_SIZE);
 	}
 
 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 44852c3ae531..85b8517f17a0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
 	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
 	if (atomic_dec_and_test(&s->s_ref)) {
 		if (s->s_auth.authorizer)
-			ceph_auth_destroy_authorizer(
-				s->s_mdsc->fsc->client->monc.auth,
-				s->s_auth.authorizer);
+			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
 		kfree(s);
 	}
 }
@@ -1610,7 +1608,7 @@ again:
 	while (!list_empty(&tmp_list)) {
 		if (!msg) {
 			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
-					PAGE_CACHE_SIZE, GFP_NOFS, false);
+					PAGE_SIZE, GFP_NOFS, false);
 			if (!msg)
 				goto out_err;
 			head = msg->front.iov_base;
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
 	struct ceph_auth_handshake *auth = &s->s_auth;
 
 	if (force_new && auth->authorizer) {
-		ceph_auth_destroy_authorizer(ac, auth->authorizer);
+		ceph_auth_destroy_authorizer(auth->authorizer);
 		auth->authorizer = NULL;
 	}
 	if (!auth->authorizer) {
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 37712ccffcc6..ee69a537dba5 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed {
 /*
  * cap releases are batched and sent to the MDS en masse.
  */
-#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -			\
+#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE -			\
 				sizeof(struct ceph_mds_cap_release)) /	\
 			       sizeof(struct ceph_mds_cap_item))
 
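
The CEPH_CAPS_PER_RELEASE change keeps the same capacity computation: one page-sized message holds a ceph_mds_cap_release header followed by as many ceph_mds_cap_item entries as fit in the remaining space. A sketch of that items-per-page computation follows; the struct layouts here are placeholders chosen only to make the program compile, not the Ceph wire format.

#include <stdio.h>

#define PAGE_SIZE 4096

struct fake_release_head { unsigned int num; unsigned int pad; };      /* placeholder header */
struct fake_cap_item     { unsigned long ino; unsigned int seq;
			   unsigned int migrate_seq; };                /* placeholder entry */

/* same shape as the CEPH_CAPS_PER_RELEASE macro in the hunk above */
#define CAPS_PER_RELEASE ((PAGE_SIZE - sizeof(struct fake_release_head)) / \
			  sizeof(struct fake_cap_item))

int main(void)
{
	printf("%zu cap-release items fit in one %d-byte message\n",
	       (size_t)CAPS_PER_RELEASE, PAGE_SIZE);
	return 0;
}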
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c973043deb0e..f12d5e2955c2 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 
 	/* set up mempools */
 	err = -ENOMEM;
-	page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
+	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
 	size = sizeof (struct page *) * (page_count ? page_count : 1);
 	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
 	if (!fsc->wb_pagevec_pool)
@@ -912,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb,
 	int err;
 
 	/* set ra_pages based on rasize mount option? */
-	if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
+	if (fsc->mount_options->rasize >= PAGE_SIZE)
 		fsc->backing_dev_info.ra_pages =
-			(fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
+			(fsc->mount_options->rasize + PAGE_SIZE - 1)
 			>> PAGE_SHIFT;
 	else
 		fsc->backing_dev_info.ra_pages =
-			VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+			VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
 
 	err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
 			   atomic_long_inc_return(&bdi_seq));
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 1d86fc620e5c..89201564c346 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
 	cifs_dbg(FYI, "about to flush pages\n");
 	/* should we flush first and last page first */
 	truncate_inode_pages_range(&target_inode->i_data, destoff,
-				   PAGE_CACHE_ALIGN(destoff + len)-1);
+				   PAGE_ALIGN(destoff + len)-1);
 
 	if (target_tcon->ses->server->ops->duplicate_extents)
 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f05bae..f2cc0b3d1af7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
  *
  * Note that this might make for "interesting" allocation problems during
  * writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
  *
  * For reads, there is a similar problem as we need to allocate an array
  * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
  * a single wsize request with a single call.
 */
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 76fcb50295a3..a894bf809ff7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 
 		wsize = server->ops->wp_retry_size(inode);
 		if (wsize < rest_len) {
-			nr_pages = wsize / PAGE_CACHE_SIZE;
+			nr_pages = wsize / PAGE_SIZE;
 			if (!nr_pages) {
 				rc = -ENOTSUPP;
 				break;
 			}
-			cur_len = nr_pages * PAGE_CACHE_SIZE;
-			tailsz = PAGE_CACHE_SIZE;
+			cur_len = nr_pages * PAGE_SIZE;
+			tailsz = PAGE_SIZE;
 		} else {
-			nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE);
+			nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
 			cur_len = rest_len;
-			tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE;
+			tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
 		}
 
 		wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
@@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 		wdata2->sync_mode = wdata->sync_mode;
 		wdata2->nr_pages = nr_pages;
 		wdata2->offset = page_offset(wdata2->pages[0]);
-		wdata2->pagesz = PAGE_CACHE_SIZE;
+		wdata2->pagesz = PAGE_SIZE;
 		wdata2->tailsz = tailsz;
 		wdata2->bytes = cur_len;
 
@@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 			if (rc != 0 && rc != -EAGAIN) {
 				SetPageError(wdata2->pages[j]);
 				end_page_writeback(wdata2->pages[j]);
-				page_cache_release(wdata2->pages[j]);
+				put_page(wdata2->pages[j]);
 			}
 		}
 
@@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work)
 		else if (wdata->result < 0)
 			SetPageError(page);
 		end_page_writeback(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (wdata->result != -EAGAIN)
 		mapping_set_error(inode->i_mapping, wdata->result);
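
The cifs_writev_requeue() hunk above recomputes how a remaining byte count maps onto pages: nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE), and the final page carries tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE bytes. A sketch of that split follows, with DIV_ROUND_UP spelled out and a 4096-byte page assumed.

#include <stdio.h>

#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rest_len = 10000;               /* bytes left to rewrite */
	unsigned int nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
	unsigned int tailsz   = rest_len - (nr_pages - 1) * PAGE_SIZE;

	/* 10000 bytes -> 3 pages: 4096 + 4096 + 1808 */
	printf("%u bytes -> %u pages, tail page holds %u bytes\n",
	       rest_len, nr_pages, tailsz);
	return 0;
}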
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a763cd3d9e7c..6f62ac821a84 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3630,7 +3630,7 @@ try_mount_again:
 	cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
 
 	/* tune readahead according to rsize */
-	cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
+	cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;
 
remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ff882aeaccc6..c03d0744648b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1833,7 +1833,7 @@ refind_writable:
1833static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) 1833static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1834{ 1834{
1835 struct address_space *mapping = page->mapping; 1835 struct address_space *mapping = page->mapping;
1836 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 1836 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1837 char *write_data; 1837 char *write_data;
1838 int rc = -EFAULT; 1838 int rc = -EFAULT;
1839 int bytes_written = 0; 1839 int bytes_written = 0;
@@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1849 write_data = kmap(page); 1849 write_data = kmap(page);
1850 write_data += from; 1850 write_data += from;
1851 1851
1852 if ((to > PAGE_CACHE_SIZE) || (from > to)) { 1852 if ((to > PAGE_SIZE) || (from > to)) {
1853 kunmap(page); 1853 kunmap(page);
1854 return -EIO; 1854 return -EIO;
1855 } 1855 }
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1902 * find_get_pages_tag seems to return a max of 256 on each 1902 * find_get_pages_tag seems to return a max of 256 on each
1903 * iteration, so we must call it several times in order to 1903 * iteration, so we must call it several times in order to
1904 * fill the array or the wsize is effectively limited to 1904 * fill the array or the wsize is effectively limited to
1905 * 256 * PAGE_CACHE_SIZE. 1905 * 256 * PAGE_SIZE.
1906 */ 1906 */
1907 *found_pages = 0; 1907 *found_pages = 0;
1908 pages = wdata->pages; 1908 pages = wdata->pages;
@@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1991 1991
1992 /* put any pages we aren't going to use */ 1992 /* put any pages we aren't going to use */
1993 for (i = nr_pages; i < found_pages; i++) { 1993 for (i = nr_pages; i < found_pages; i++) {
1994 page_cache_release(wdata->pages[i]); 1994 put_page(wdata->pages[i]);
1995 wdata->pages[i] = NULL; 1995 wdata->pages[i] = NULL;
1996 } 1996 }
1997 1997
@@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2009 wdata->sync_mode = wbc->sync_mode; 2009 wdata->sync_mode = wbc->sync_mode;
2010 wdata->nr_pages = nr_pages; 2010 wdata->nr_pages = nr_pages;
2011 wdata->offset = page_offset(wdata->pages[0]); 2011 wdata->offset = page_offset(wdata->pages[0]);
2012 wdata->pagesz = PAGE_CACHE_SIZE; 2012 wdata->pagesz = PAGE_SIZE;
2013 wdata->tailsz = min(i_size_read(mapping->host) - 2013 wdata->tailsz = min(i_size_read(mapping->host) -
2014 page_offset(wdata->pages[nr_pages - 1]), 2014 page_offset(wdata->pages[nr_pages - 1]),
2015 (loff_t)PAGE_CACHE_SIZE); 2015 (loff_t)PAGE_SIZE);
2016 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; 2016 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2017 2017
2018 if (wdata->cfile != NULL) 2018 if (wdata->cfile != NULL)
2019 cifsFileInfo_put(wdata->cfile); 2019 cifsFileInfo_put(wdata->cfile);
@@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping,
 	 * If wsize is smaller than the page cache size, default to writing
 	 * one page at a time via cifs_writepage
 	 */
-	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
+	if (cifs_sb->wsize < PAGE_SIZE)
 		return generic_writepages(mapping, wbc);
 
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = true;
 		scanned = true;
@@ -2071,7 +2071,7 @@ retry:
 		if (rc)
 			break;
 
-		tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;
+		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
 
 		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
 						  &found_pages);
@@ -2111,7 +2111,7 @@ retry:
 			else
 				SetPageError(wdata->pages[i]);
 			end_page_writeback(wdata->pages[i]);
-			page_cache_release(wdata->pages[i]);
+			put_page(wdata->pages[i]);
 		}
 		if (rc != -EAGAIN)
 			mapping_set_error(mapping, rc);
@@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
 
 	xid = get_xid();
 /* BB add check for wbc flags */
-	page_cache_get(page);
+	get_page(page);
 	if (!PageUptodate(page))
 		cifs_dbg(FYI, "ppw - page not up to date\n");
 
@@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
 	 */
 	set_page_writeback(page);
 retry_write:
-	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
+	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
 	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
 		goto retry_write;
 	else if (rc == -EAGAIN)
@@ -2180,7 +2180,7 @@ retry_write:
 	else
 		SetPageUptodate(page);
 	end_page_writeback(page);
-	page_cache_release(page);
+	put_page(page);
 	free_xid(xid);
 	return rc;
 }
@@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 		if (copied == len)
 			SetPageUptodate(page);
 		ClearPageChecked(page);
-	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
+	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
 		SetPageUptodate(page);
 
 	if (!PageUptodate(page)) {
 		char *page_data;
-		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned offset = pos & (PAGE_SIZE - 1);
 		unsigned int xid;
 
 		xid = get_xid();
@@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return rc;
 }
@@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work)
 		    (rdata->result == -EAGAIN && got_bytes))
 			cifs_readpage_to_fscache(rdata->mapping->host, page);
 
-		got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);
+		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
 
-		page_cache_release(page);
+		put_page(page);
 		rdata->pages[i] = NULL;
 	}
 	kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 
 	/* determine the eof that the server (probably) has */
 	eof = CIFS_I(rdata->mapping->host)->server_eof;
-	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
 	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
 
 	rdata->got_bytes = 0;
-	rdata->tailsz = PAGE_CACHE_SIZE;
+	rdata->tailsz = PAGE_SIZE;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = rdata->pages[i];
 
-		if (len >= PAGE_CACHE_SIZE) {
+		if (len >= PAGE_SIZE) {
 			/* enough data to fill the page */
 			iov.iov_base = kmap(page);
-			iov.iov_len = PAGE_CACHE_SIZE;
+			iov.iov_len = PAGE_SIZE;
 			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
 				 i, page->index, iov.iov_base, iov.iov_len);
-			len -= PAGE_CACHE_SIZE;
+			len -= PAGE_SIZE;
 		} else if (len > 0) {
 			/* enough for partial page, fill and zero the rest */
 			iov.iov_base = kmap(page);
@@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
 				 i, page->index, iov.iov_base, iov.iov_len);
 			memset(iov.iov_base + len,
-			       '\0', PAGE_CACHE_SIZE - len);
+			       '\0', PAGE_SIZE - len);
 			rdata->tailsz = len;
 			len = 0;
 		} else if (page->index > eof_index) {
@@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 			 * to prevent the VFS from repeatedly attempting to
 			 * fill them until the writes are flushed.
 			 */
-			zero_user(page, 0, PAGE_CACHE_SIZE);
+			zero_user(page, 0, PAGE_SIZE);
 			lru_cache_add_file(page);
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
 			continue;
@@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 			/* no need to hold page hostage */
 			lru_cache_add_file(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
 			continue;
@@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	}
 
 	/* move first page to the tmplist */
-	*offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-	*bytes = PAGE_CACHE_SIZE;
+	*offset = (loff_t)page->index << PAGE_SHIFT;
+	*bytes = PAGE_SIZE;
 	*nr_pages = 1;
 	list_move_tail(&page->lru, tmplist);
 
@@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 			break;
 
 		/* would this page push the read over the rsize? */
-		if (*bytes + PAGE_CACHE_SIZE > rsize)
+		if (*bytes + PAGE_SIZE > rsize)
 			break;
 
 		__SetPageLocked(page);
@@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 			break;
 		}
 		list_move_tail(&page->lru, tmplist);
-		(*bytes) += PAGE_CACHE_SIZE;
+		(*bytes) += PAGE_SIZE;
 		expected_index++;
 		(*nr_pages)++;
 	}
@@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 	 * reach this point however since we set ra_pages to 0 when the
 	 * rsize is smaller than a cache page.
 	 */
-	if (unlikely(rsize < PAGE_CACHE_SIZE)) {
+	if (unlikely(rsize < PAGE_SIZE)) {
 		add_credits_and_wake_if(server, credits, 0);
 		return 0;
 	}
@@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 			list_del(&page->lru);
 			lru_cache_add_file(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 		rc = -ENOMEM;
 		add_credits_and_wake_if(server, credits, 0);
@@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		rdata->offset = offset;
 		rdata->bytes = bytes;
 		rdata->pid = pid;
-		rdata->pagesz = PAGE_CACHE_SIZE;
+		rdata->pagesz = PAGE_SIZE;
 		rdata->read_into_pages = cifs_readpages_read_into_pages;
 		rdata->credits = credits;
 
@@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 				page = rdata->pages[i];
 				lru_cache_add_file(page);
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 			}
 			/* Fallback to the readpage in error/reconnect cases */
 			kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
 	read_data = kmap(page);
 	/* for reads over a certain size could initiate async read ahead */
 
-	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
+	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
 
 	if (rc < 0)
 		goto io_error;
@@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
 		file_inode(file)->i_atime =
 			current_fs_time(file_inode(file)->i_sb);
 
-	if (PAGE_CACHE_SIZE > rc)
-		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
+	if (PAGE_SIZE > rc)
+		memset(read_data + rc, 0, PAGE_SIZE - rc);
 
 	flush_dcache_page(page);
 	SetPageUptodate(page);
@@ -3608,7 +3608,7 @@ read_complete:
 
 static int cifs_readpage(struct file *file, struct page *page)
 {
-	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
 	int rc = -EACCES;
 	unsigned int xid;
 
@@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
 			    struct page **pagep, void **fsdata)
 {
 	int oncethru = 0;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = pos >> PAGE_SHIFT;
+	loff_t offset = pos & (PAGE_SIZE - 1);
 	loff_t page_start = pos & PAGE_MASK;
 	loff_t i_size;
 	struct page *page;
@@ -3703,7 +3703,7 @@ start:
 	 * the server. If the write is short, we'll end up doing a sync write
 	 * instead.
 	 */
-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out;
 
 	/*
@@ -3718,7 +3718,7 @@ start:
 		    (offset == 0 && (pos + len) >= i_size)) {
 			zero_user_segments(page, 0, offset,
 					   offset + len,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
 			/*
 			 * PageChecked means that the parts of the page
 			 * to which we're not writing are considered up
@@ -3737,7 +3737,7 @@ start:
 		 * do a sync write instead since PG_uptodate isn't set.
 		 */
 		cifs_readpage_worker(file, page, &page_start);
-		page_cache_release(page);
+		put_page(page);
 		oncethru = 1;
 		goto start;
 	} else {
@@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset,
 {
 	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 }
 
@@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page)
 {
 	int rc = 0;
 	loff_t range_start = page_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
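
Several of the cifs hunks above share one batching rule: readpages_get_pages() keeps taking pages while one more full page still fits inside the negotiated rsize. A standalone model of that loop (the sample rsize is illustrative):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned long rsize = 61440;		/* hypothetical 60 KiB read size */
		unsigned long bytes = PAGE_SIZE;	/* the first page is always taken */
		unsigned int nr_pages = 1;

		/* mirrors: if (*bytes + PAGE_SIZE > rsize) break; */
		while (bytes + PAGE_SIZE <= rsize) {
			bytes += PAGE_SIZE;
			nr_pages++;
		}
		printf("one read batch: %u pages, %lu bytes\n", nr_pages, bytes);
		return 0;
	}
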
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index aeb26dbfa1bf..5f9ad5c42180 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode)
 
 	/* check if server can support readpages */
 	if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
-			PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+			PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 	else
 		inode->i_data.a_ops = &cifs_addr_ops;
@@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 
 static int cifs_truncate_page(struct address_space *mapping, loff_t from)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE - 1);
 	struct page *page;
 	int rc = 0;
 
@@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
 	if (!page)
 		return -ENOMEM;
 
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return rc;
 }
 
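
cifs_truncate_page() above splits a byte offset into a page index plus an in-page offset and zeroes the page tail from there. The same split, modelled in user space (PAGE_SHIFT of 12 assumed; the sample offset is arbitrary):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long from = 123456789;			/* sample truncation point */
		unsigned long index = from >> PAGE_SHIFT;	/* page containing 'from' */
		unsigned long offset = from & (PAGE_SIZE - 1);	/* position inside that page */

		/* cifs_truncate_page() zeroes [offset, PAGE_SIZE) of page 'index' */
		printf("from=%lu -> page %lu, zero [%lu, %lu)\n",
		       from, index, offset, PAGE_SIZE);
		return 0;
	}
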
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index a8f3b589a2df..cfd91320e869 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
 	struct inode *inode;
 	struct dentry *root;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = CONFIGFS_MAGIC;
 	sb->s_op = &configfs_ops;
 	sb->s_time_gran = 1;
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2d7646..9d4e7ea311f4 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@ Block Size
 
 (Block size in cramfs refers to the size of input data that is
 compressed at a time. It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
 
 The superblock ought to indicate the block size that the fs was
 written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
 correctly).
 
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
 turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
 This discrepancy is a bug, though it's not clear which should be
 changed.
 
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
 <asm/page.h>. Personally I don't like this option, but it does
 require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
 is that the generated cramfs cannot always be shared between different
 kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
 (currently possible with arm and ia64).
 
 The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size. The options are:
  1. Always 4096 bytes.
 
  2. Writer chooses blocksize; kernel adapts but rejects blocksize >
-    PAGE_CACHE_SIZE.
+    PAGE_SIZE.
 
  3. Writer chooses blocksize; kernel adapts even to blocksize >
-    PAGE_CACHE_SIZE.
+    PAGE_SIZE.
 
 It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
 
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
 value don't get as good compression as they can.
 
 The cost of option 2 relative to option 1 is that the code uses
 variables instead of #define'd constants. The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
 they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
 
 Option 3 is easy to implement if we don't mind being CPU-inefficient:
 e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
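
Option 2 above reduces to a one-line acceptance test on the block size recorded in the superblock. A standalone sketch of that rule; the power-of-two requirement is an added assumption here, not something the README states:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static int blksize_acceptable(unsigned long blksize)
	{
		/* non-zero, a power of two (assumed), and at most one page */
		return blksize && !(blksize & (blksize - 1)) && blksize <= PAGE_SIZE;
	}

	int main(void)
	{
		printf("1024:%d 4096:%d 8192:%d\n",
		       blksize_acceptable(1024),
		       blksize_acceptable(4096),
		       blksize_acceptable(8192));
		return 0;
	}
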
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b862bc219cd7..3a32ddf98095 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
 	 * page cache and dentry tree anyway..
 	 *
 	 * This also acts as a way to guarantee contiguous areas of up to
-	 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+	 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 	 * worry about end-of-buffer issues even when decompressing a full
 	 * page cache.
 	 */
@@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
  */
 #define BLKS_PER_BUF_SHIFT	(2)
 #define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
-#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_CACHE_SIZE)
+#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)
 
 static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
 static unsigned buffer_blocknr[READ_BUFFERS];
@@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 
 	if (!len)
 		return NULL;
-	blocknr = offset >> PAGE_CACHE_SHIFT;
-	offset &= PAGE_CACHE_SIZE - 1;
+	blocknr = offset >> PAGE_SHIFT;
+	offset &= PAGE_SIZE - 1;
 
 	/* Check if an existing buffer already has the data.. */
 	for (i = 0; i < READ_BUFFERS; i++) {
@@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 			continue;
 		if (blocknr < buffer_blocknr[i])
 			continue;
-		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
+		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
 		blk_offset += offset;
 		if (blk_offset + len > BUFFER_SIZE)
 			continue;
 		return read_buffers[i] + blk_offset;
 	}
 
-	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;
+	devsize = mapping->host->i_size >> PAGE_SHIFT;
 
 	/* Ok, read in BLKS_PER_BUF pages completely first. */
 	for (i = 0; i < BLKS_PER_BUF; i++) {
@@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
 			/* asynchronous error */
-			page_cache_release(page);
+			put_page(page);
 			pages[i] = NULL;
 		}
 	}
@@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 		struct page *page = pages[i];
 
 		if (page) {
-			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
+			memcpy(data, kmap(page), PAGE_SIZE);
 			kunmap(page);
-			page_cache_release(page);
+			put_page(page);
 		} else
-			memset(data, 0, PAGE_CACHE_SIZE);
-		data += PAGE_CACHE_SIZE;
+			memset(data, 0, PAGE_SIZE);
+		data += PAGE_SIZE;
 	}
 	return read_buffers[buffer] + offset;
 }
@@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
 	buf->f_type = CRAMFS_MAGIC;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_blocks = CRAMFS_SB(sb)->blocks;
 	buf->f_bfree = 0;
 	buf->f_bavail = 0;
@@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
 	int bytes_filled;
 	void *pgdata;
 
-	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	bytes_filled = 0;
 	pgdata = kmap(page);
 
@@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page)
 
 		if (compr_len == 0)
 			; /* hole */
-		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
+		else if (unlikely(compr_len > (PAGE_SIZE << 1))) {
 			pr_err("bad compressed blocksize %u\n",
 				compr_len);
 			goto err;
 		} else {
 			mutex_lock(&read_mutex);
 			bytes_filled = cramfs_uncompress_block(pgdata,
-				PAGE_CACHE_SIZE,
+				PAGE_SIZE,
 				cramfs_read(sb, start_offset, compr_len),
 				compr_len);
 			mutex_unlock(&read_mutex);
@@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
 		}
 	}
 
-	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
+	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
 	flush_dcache_page(page);
 	kunmap(page);
 	SetPageUptodate(page);
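
The cramfs_read() hunks above maintain a small cache of BLKS_PER_BUF-page windows; a request is served from a window only when its whole byte range falls inside the buffer. A user-space model of that hit test (window position and request values are illustrative):

	#include <stdio.h>

	#define PAGE_SHIFT   12
	#define PAGE_SIZE    (1UL << PAGE_SHIFT)
	#define BLKS_PER_BUF 4
	#define BUFFER_SIZE  (BLKS_PER_BUF * PAGE_SIZE)

	/* does a read of 'len' bytes at byte 'offset' land inside a window
	 * whose first cached page is 'buffer_blocknr'? */
	static int window_hit(unsigned long buffer_blocknr, unsigned long offset,
			      unsigned int len)
	{
		unsigned long blocknr = offset >> PAGE_SHIFT;
		unsigned long blk_offset;

		offset &= PAGE_SIZE - 1;
		if (blocknr < buffer_blocknr)
			return 0;
		blk_offset = ((blocknr - buffer_blocknr) << PAGE_SHIFT) + offset;
		return blk_offset + len <= BUFFER_SIZE;	/* kernel: > BUFFER_SIZE means miss */
	}

	int main(void)
	{
		/* a window caching pages 10..13 */
		printf("hit=%d\n", window_hit(10, (12UL << PAGE_SHIFT) + 200, 100));
		printf("hit=%d\n", window_hit(10, (13UL << PAGE_SHIFT) + 4000, 200));
		return 0;
	}
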
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 06cd1a22240b..2fc8c43ce531 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,6 +26,7 @@
 #include <linux/ratelimit.h>
 #include <linux/bio.h>
 #include <linux/dcache.h>
+#include <linux/namei.h>
 #include <linux/fscrypto.h>
 #include <linux/ecryptfs.h>
 
@@ -81,13 +82,14 @@ EXPORT_SYMBOL(fscrypt_release_ctx);
 /**
  * fscrypt_get_ctx() - Gets an encryption context
  * @inode:       The inode for which we are doing the crypto
+ * @gfp_flags:   The gfp flag for memory allocation
  *
  * Allocates and initializes an encryption context.
  *
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
 {
 	struct fscrypt_ctx *ctx = NULL;
 	struct fscrypt_info *ci = inode->i_crypt_info;
@@ -113,7 +115,7 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
 		list_del(&ctx->free_list);
 	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
 	if (!ctx) {
-		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
 		if (!ctx)
 			return ERR_PTR(-ENOMEM);
 		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
@@ -147,7 +149,8 @@ typedef enum {
 
 static int do_page_crypto(struct inode *inode,
 			fscrypt_direction_t rw, pgoff_t index,
-			struct page *src_page, struct page *dest_page)
+			struct page *src_page, struct page *dest_page,
+			gfp_t gfp_flags)
 {
 	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
 	struct skcipher_request *req = NULL;
@@ -157,7 +160,7 @@ static int do_page_crypto(struct inode *inode,
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
-	req = skcipher_request_alloc(tfm, GFP_NOFS);
+	req = skcipher_request_alloc(tfm, gfp_flags);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
 				"%s: crypto_request_alloc() failed\n",
@@ -175,10 +178,10 @@ static int do_page_crypto(struct inode *inode,
 		FS_XTS_TWEAK_SIZE - sizeof(index));
 
 	sg_init_table(&dst, 1);
-	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
 	sg_init_table(&src, 1);
-	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+	sg_set_page(&src, src_page, PAGE_SIZE, 0);
+	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
 				   xts_tweak);
 	if (rw == FS_DECRYPT)
 		res = crypto_skcipher_decrypt(req);
@@ -199,10 +202,9 @@ static int do_page_crypto(struct inode *inode,
 	return 0;
 }
 
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
 {
-	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
-					   GFP_NOWAIT);
+	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
 	if (ctx->w.bounce_page == NULL)
 		return ERR_PTR(-ENOMEM);
 	ctx->flags |= FS_WRITE_PATH_FL;
@@ -213,6 +215,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
  * fscypt_encrypt_page() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
  * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags:      The gfp flag for memory allocation
  *
  * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
  * encryption context.
@@ -225,7 +228,7 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
  * error value or NULL.
  */
 struct page *fscrypt_encrypt_page(struct inode *inode,
-				struct page *plaintext_page)
+				struct page *plaintext_page, gfp_t gfp_flags)
 {
 	struct fscrypt_ctx *ctx;
 	struct page *ciphertext_page = NULL;
@@ -233,18 +236,19 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
 
 	BUG_ON(!PageLocked(plaintext_page));
 
-	ctx = fscrypt_get_ctx(inode);
+	ctx = fscrypt_get_ctx(inode, gfp_flags);
 	if (IS_ERR(ctx))
 		return (struct page *)ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
 	if (IS_ERR(ciphertext_page))
 		goto errout;
 
 	ctx->w.control_page = plaintext_page;
 	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
-				plaintext_page, ciphertext_page);
+				plaintext_page, ciphertext_page,
+				gfp_flags);
 	if (err) {
 		ciphertext_page = ERR_PTR(err);
 		goto errout;
@@ -275,7 +279,7 @@ int fscrypt_decrypt_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	return do_page_crypto(page->mapping->host,
-			FS_DECRYPT, page->index, page, page);
+			FS_DECRYPT, page->index, page, page, GFP_NOFS);
 }
 EXPORT_SYMBOL(fscrypt_decrypt_page);
 
@@ -287,13 +291,13 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-	ctx = fscrypt_get_ctx(inode);
+	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
 	if (IS_ERR(ciphertext_page)) {
 		err = PTR_ERR(ciphertext_page);
 		goto errout;
@@ -301,11 +305,12 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 
 	while (len--) {
 		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
-					ZERO_PAGE(0), ciphertext_page);
+					ZERO_PAGE(0), ciphertext_page,
+					GFP_NOFS);
 		if (err)
 			goto errout;
 
-		bio = bio_alloc(GFP_KERNEL, 1);
+		bio = bio_alloc(GFP_NOWAIT, 1);
 		if (!bio) {
 			err = -ENOMEM;
 			goto errout;
@@ -345,13 +350,20 @@ EXPORT_SYMBOL(fscrypt_zeroout_range);
  */
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct fscrypt_info *ci = dir->i_crypt_info;
+	struct dentry *dir;
+	struct fscrypt_info *ci;
 	int dir_has_key, cached_with_key;
 
-	if (!dir->i_sb->s_cop->is_encrypted(dir))
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	dir = dget_parent(dentry);
+	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+		dput(dir);
 		return 0;
+	}
 
+	ci = d_inode(dir)->i_crypt_info;
 	if (ci && ci->ci_keyring_key &&
 	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
 					  (1 << KEY_FLAG_REVOKED) |
@@ -363,6 +375,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
 	spin_unlock(&dentry->d_lock);
 	dir_has_key = (ci != NULL);
+	dput(dir);
 
 	/*
 	 * If the dentry was cached without the key, and it is a
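
The fs/crypto changes above thread a gfp_t from each caller down to every allocation in the encryption path, so writeback can avoid blocking allocations. A hypothetical caller under the new fscrypt_encrypt_page() signature; this is not a real call site, and the fall-back-to-GFP_NOFS retry policy is an assumption for illustration:

	/* sketch only: try an atomic allocation first, as a writeback path
	 * would prefer, then retry allowing reclaim without FS recursion */
	static struct page *encrypt_for_writeback(struct inode *inode,
						  struct page *page)
	{
		struct page *cipher;

		cipher = fscrypt_encrypt_page(inode, page, GFP_NOWAIT);
		if (IS_ERR(cipher) && PTR_ERR(cipher) == -ENOMEM)
			cipher = fscrypt_encrypt_page(inode, page, GFP_NOFS);
		return cipher;
	}
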
diff --git a/fs/dax.c b/fs/dax.c
index 90322eb7498c..75ba46d82a76 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (vmf->pgoff >= size) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode,
 }
 
 #define NO_SECTOR -1
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
+#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 
 static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
 		sector_t sector, bool pmd_entry, bool dirty)
@@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 		return 0;
 
-	start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
-	end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
+	start_index = wbc->range_start >> PAGE_SHIFT;
+	end_index = wbc->range_end >> PAGE_SHIFT;
 	pmd_index = DAX_PMD_INDEX(start_index);
 
 	rcu_read_lock();
@@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	page = find_get_page(mapping, vmf->pgoff);
 	if (page) {
 		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-			page_cache_release(page);
+			put_page(page);
 			return VM_FAULT_RETRY;
 		}
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	if (page) {
 		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-							PAGE_CACHE_SIZE, 0);
+							PAGE_SIZE, 0);
 		delete_from_page_cache(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
  unlock_page:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	goto out;
 }
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
  * you are truncating a file, the helper function dax_truncate_page() may be
  * more convenient.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks. Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 		get_block_t get_block)
 {
 	struct buffer_head bh;
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	int err;
 
 	/* Block boundary? Nothing to do */
 	if (!length)
 		return 0;
-	BUG_ON((offset + length) > PAGE_CACHE_SIZE);
+	BUG_ON((offset + length) > PAGE_SIZE);
 
 	memset(&bh, 0, sizeof(bh));
 	bh.b_bdev = inode->i_sb->s_bdev;
-	bh.b_size = PAGE_CACHE_SIZE;
+	bh.b_size = PAGE_SIZE;
 	err = get_block(inode, index, &bh, 0);
 	if (err < 0)
 		return err;
@@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 		struct block_device *bdev = bh.b_bdev;
 		struct blk_dax_ctl dax = {
 			.sector = to_sector(&bh, inode),
-			.size = PAGE_CACHE_SIZE,
+			.size = PAGE_SIZE,
 		};
 
 		if (dax_map_atomic(bdev, &dax) < 0)
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  * Similar to block_truncate_page(), this function can be called by a
  * filesystem when it is truncating a DAX file to handle the partial page.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks. Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  */
 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
 {
-	unsigned length = PAGE_CACHE_ALIGN(from) - from;
+	unsigned length = PAGE_ALIGN(from) - from;
 	return dax_zero_page_range(inode, from, length, get_block);
 }
 EXPORT_SYMBOL_GPL(dax_truncate_page);
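
dax_truncate_page() above computes how many bytes separate the truncation point from the next page boundary. The same calculation in a standalone program (4 KiB page assumed; 'from' is a sample value):

	#include <stdio.h>

	#define PAGE_SIZE     4096UL
	#define PAGE_MASK     (~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long from = 10000;	/* sample new end-of-file */
		unsigned long length = PAGE_ALIGN(from) - from;

		/* a page-aligned 'from' yields length 0: nothing to zero */
		printf("from=%lu -> zero %lu bytes, up to %lu\n",
		       from, length, from + length);
		return 0;
	}
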
diff --git a/fs/dcache.c b/fs/dcache.c
index 32ceae3e6112..d5ecc6e477da 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 				DCACHE_OP_REVALIDATE	|
 				DCACHE_OP_WEAK_REVALIDATE	|
 				DCACHE_OP_DELETE	|
-				DCACHE_OP_SELECT_INODE));
+				DCACHE_OP_SELECT_INODE	|
+				DCACHE_OP_REAL));
 	dentry->d_op = op;
 	if (!op)
 		return;
@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 		dentry->d_flags |= DCACHE_OP_PRUNE;
 	if (op->d_select_inode)
 		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
+	if (op->d_real)
+		dentry->d_flags |= DCACHE_OP_REAL;
 
 }
 EXPORT_SYMBOL(d_set_d_op);
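
The d_set_d_op() hunks above extend an existing pattern: whether each dentry operation exists is cached once as a bit in d_flags, so hot paths test a flag word instead of chasing the d_op pointer. A reduced stand-alone model of that pattern (types and flag values are simplified, not the kernel's):

	#include <stdio.h>

	#define DCACHE_OP_REVALIDATE 0x1u
	#define DCACHE_OP_REAL       0x2u

	struct dentry_operations {
		int (*d_revalidate)(void);
		void *(*d_real)(void);
	};

	struct dentry {
		unsigned int d_flags;
		const struct dentry_operations *d_op;
	};

	static void set_d_op(struct dentry *dentry, const struct dentry_operations *op)
	{
		dentry->d_flags &= ~(DCACHE_OP_REVALIDATE | DCACHE_OP_REAL);
		dentry->d_op = op;
		if (!op)
			return;
		if (op->d_revalidate)
			dentry->d_flags |= DCACHE_OP_REVALIDATE;
		if (op->d_real)	/* the bit introduced by the hunk above */
			dentry->d_flags |= DCACHE_OP_REAL;
	}

	static void *my_d_real(void) { return NULL; }

	int main(void)
	{
		static const struct dentry_operations ops = { .d_real = my_d_real };
		struct dentry dentry = { 0, NULL };

		set_d_op(&dentry, &ops);
		printf("DCACHE_OP_REAL set: %d\n",
		       !!(dentry.d_flags & DCACHE_OP_REAL));
		return 0;
	}
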
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index bece948b363d..8580831ed237 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
 	if (unlikely(!inode))
 		return failed_creating(dentry);
 
-	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+	make_empty_dir_inode(inode);
 	inode->i_flags |= S_AUTOMOUNT;
 	inode->i_private = data;
 	dentry->d_fsdata = (void *)f;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 655f21f99160..0b2954d7172d 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
 struct pts_fs_info {
 	struct ida allocated_ptys;
 	struct pts_mount_opts mount_opts;
+	struct super_block *sb;
 	struct dentry *ptmx_dentry;
 };
 
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
 	.show_options	= devpts_show_options,
 };
 
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
 {
 	struct pts_fs_info *fsi;
 
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
 	ida_init(&fsi->allocated_ptys);
 	fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
 	fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+	fsi->sb = sb;
 
 	return fsi;
 }
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
 	s->s_op = &devpts_sops;
 	s->s_time_gran = 1;
 
-	s->s_fs_info = new_pts_fs_info();
+	s->s_fs_info = new_pts_fs_info(s);
 	if (!s->s_fs_info)
 		goto fail;
 
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
  * to the System V naming convention
  */
 
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-	struct pts_fs_info *fsi;
 	int index;
 	int ida_ret;
 
-	if (!sb)
+	if (!fsi)
 		return -ENODEV;
 
-	fsi = DEVPTS_SB(sb);
 retry:
 	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
 		return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
 	return index;
 }
 
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-	struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
 	mutex_lock(&allocated_ptys_lock);
 	ida_remove(&fsi->allocated_ptys, idx);
 	pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
 /*
  * pty code needs to hold extra references in case of last /dev/tty close
  */
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+	struct super_block *sb;
+	struct pts_fs_info *fsi;
+
+	sb = pts_sb_from_inode(ptmx_inode);
+	if (!sb)
+		return NULL;
+	fsi = DEVPTS_SB(sb);
+	if (!fsi)
+		return NULL;
 
 	atomic_inc(&sb->s_active);
-	ihold(ptmx_inode);
+	return fsi;
 }
 
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
-	iput(ptmx_inode);
-	deactivate_super(sb);
+	deactivate_super(fsi->sb);
 }
 
 /**
@@ -604,22 +604,20 @@ void devpts_del_ref(struct inode *ptmx_inode)
  *
  * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
  */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-		void *priv)
+struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
 	struct dentry *dentry;
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+	struct super_block *sb;
 	struct inode *inode;
 	struct dentry *root;
-	struct pts_fs_info *fsi;
 	struct pts_mount_opts *opts;
 	char s[12];
 
-	if (!sb)
+	if (!fsi)
 		return ERR_PTR(-ENODEV);
 
+	sb = fsi->sb;
 	root = sb->s_root;
-	fsi = DEVPTS_SB(sb);
 	opts = &fsi->mount_opts;
 
 	inode = new_inode(sb);
@@ -630,25 +628,21 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
 	inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
 	inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-	init_special_inode(inode, S_IFCHR|opts->mode, device);
-	inode->i_private = priv;
+	init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
 
 	sprintf(s, "%d", index);
 
-	inode_lock(d_inode(root));
-
 	dentry = d_alloc_name(root, s);
 	if (dentry) {
+		dentry->d_fsdata = priv;
 		d_add(dentry, inode);
 		fsnotify_create(d_inode(root), dentry);
 	} else {
 		iput(inode);
-		inode = ERR_PTR(-ENOMEM);
+		dentry = ERR_PTR(-ENOMEM);
 	}
 
-	inode_unlock(d_inode(root));
-
-	return inode;
+	return dentry;
 }
 
 /**
@@ -657,24 +651,10 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
  *
  * Returns whatever was passed as priv in devpts_pty_new for a given inode.
  */
-void *devpts_get_priv(struct inode *pts_inode)
+void *devpts_get_priv(struct dentry *dentry)
 {
-	struct dentry *dentry;
-	void *priv = NULL;
-
-	BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-	/* Ensure dentry has not been deleted by devpts_pty_kill() */
-	dentry = d_find_alias(pts_inode);
-	if (!dentry)
-		return NULL;
-
-	if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-		priv = pts_inode->i_private;
-
-	dput(dentry);
-
-	return priv;
+	WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+	return dentry->d_fsdata;
 }
 
 /**
@@ -683,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode)
  *
  * This is an inverse operation of devpts_pty_new.
  */
-void devpts_pty_kill(struct inode *inode)
+void devpts_pty_kill(struct dentry *dentry)
 {
-	struct super_block *sb = pts_sb_from_inode(inode);
-	struct dentry *root = sb->s_root;
-	struct dentry *dentry;
+	WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
 
-	BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-	inode_lock(d_inode(root));
-
-	dentry = d_find_alias(inode);
-
-	drop_nlink(inode);
+	dentry->d_fsdata = NULL;
+	drop_nlink(dentry->d_inode);
 	d_delete(dentry);
 	dput(dentry);	/* d_alloc_name() in devpts_pty_new() */
-	dput(dentry);	/* d_find_alias above */
-
-	inode_unlock(d_inode(root));
 }
 
 static int __init init_devpts_fs(void)
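
After the devpts rework above, callers hold a pts_fs_info handle instead of pinning the ptmx inode. A rough sketch of the expected call sequence under the new signatures; the function itself is hypothetical, not the actual drivers/tty code:

	static int open_slave_pty(struct inode *ptmx_inode, struct file *file,
				  void *priv)
	{
		struct pts_fs_info *fsi;
		struct dentry *dentry;
		int index;

		fsi = devpts_get_ref(ptmx_inode, file);	/* pins the devpts super */
		if (!fsi)
			return -ENODEV;

		index = devpts_new_index(fsi);
		if (index < 0) {
			devpts_put_ref(fsi);
			return index;
		}

		dentry = devpts_pty_new(fsi, index, priv);	/* /dev/pts/<index> */
		if (IS_ERR(dentry)) {
			devpts_kill_index(fsi, index);
			devpts_put_ref(fsi);
			return PTR_ERR(dentry);
		}

		/* teardown later: devpts_pty_kill(dentry);
		 * devpts_kill_index(fsi, index); devpts_put_ref(fsi); */
		return 0;
	}
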
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 476f1ecbd1f0..472037732daf 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 		 */
 		if (dio->page_errors == 0)
 			dio->page_errors = ret;
-		page_cache_get(page);
+		get_page(page);
 		dio->pages[0] = page;
 		sdio->head = 0;
 		sdio->tail = 1;
@@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
 {
 	while (sdio->head < sdio->tail)
-		page_cache_release(dio->pages[sdio->head++]);
+		put_page(dio->pages[sdio->head++]);
 }
 
 /*
@@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 			if (dio->rw == READ && !PageCompound(page) &&
 					dio->should_dirty)
 				set_page_dirty_lock(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 		err = bio->bi_error;
 		bio_put(bio);
@@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio)
 	 */
 	if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
 		sdio->pages_in_io--;
-	page_cache_get(sdio->cur_page);
+	get_page(sdio->cur_page);
 	sdio->final_block_in_bio = sdio->cur_page_block +
 		(sdio->cur_page_len >> sdio->blkbits);
 	ret = 0;
@@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
 	 */
 	if (sdio->cur_page) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
-		page_cache_release(sdio->cur_page);
+		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 		if (ret)
 			return ret;
 	}
 
-	page_cache_get(page);		/* It is in dio */
+	get_page(page);		/* It is in dio */
 	sdio->cur_page = page;
 	sdio->cur_page_offset = offset;
 	sdio->cur_page_len = len;
@@ -830,7 +830,7 @@ out:
 	if (sdio->boundary) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
 		dio_bio_submit(dio, sdio);
-		page_cache_release(sdio->cur_page);
+		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 	}
 	return ret;
@@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
 
 			ret = get_more_blocks(dio, sdio, map_bh);
 			if (ret) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 			if (!buffer_mapped(map_bh))
@@ -988,7 +988,7 @@ do_holes:
 
 				/* AKPM: eargh, -ENOTBLK is a hack */
 				if (dio->rw & WRITE) {
-					page_cache_release(page);
+					put_page(page);
 					return -ENOTBLK;
 				}
 
@@ -1001,7 +1001,7 @@ do_holes:
 				if (sdio->block_in_file >=
 						i_size_aligned >> blkbits) {
 					/* We hit eof */
-					page_cache_release(page);
+					put_page(page);
 					goto out;
 				}
 				zero_user(page, from, 1 << blkbits);
@@ -1041,7 +1041,7 @@ do_holes:
 						sdio->next_block_for_io,
 						map_bh);
 				if (ret) {
-					page_cache_release(page);
+					put_page(page);
 					goto out;
 				}
 				sdio->next_block_for_io += this_chunk_blocks;
@@ -1057,7 +1057,7 @@ next_block:
 	}
 
 	/* Drop the ref which was taken in get_user_pages() */
-	page_cache_release(page);
+	put_page(page);
 }
 out:
 	return ret;
@@ -1281,7 +1281,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
 		if (retval == 0)
 			retval = ret2;
-		page_cache_release(sdio.cur_page);
+		put_page(sdio.cur_page);
 		sdio.cur_page = NULL;
 	}
 	if (sdio.bio)
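
Every direct-io hunk is the same mechanical substitution; the reference-counting contract is untouched. A hedged sketch of the invariant the pairs above implement (abbreviated, not literal kernel code: the real get happens in submit_page_section()/dio_bio_add_page() and the put in dio_bio_complete()):

	/* Each page referenced by an in-flight dio holds one extra ref. */
	get_page(page);		/* was page_cache_get(): ref for the bio */
	/* ... page rides in a bio until I/O completion ... */
	put_page(page);		/* was page_cache_release(): ref dropped */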
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 00640e70ed7a..1ab012a27d9f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con)
 		con->rx_page = alloc_page(GFP_ATOMIC);
 		if (con->rx_page == NULL)
 			goto out_resched;
-		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+		cbuf_init(&con->cb, PAGE_SIZE);
 	}
 
 	/*
@@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con)
 	 * buffer and the start of the currently used section (cb.base)
 	 */
 	if (cbuf_data(&con->cb) >= con->cb.base) {
-		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
 		iov[1].iov_len = con->cb.base;
 		iov[1].iov_base = page_address(con->rx_page);
 		nvec = 2;
@@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con)
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 	if (ret == -EBADMSG) {
 		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
 			  page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	spin_lock(&con->writequeue_lock);
 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
 	if ((&e->list == &con->writequeue) ||
-	    (PAGE_CACHE_SIZE - e->end < len)) {
+	    (PAGE_SIZE - e->end < len)) {
 		e = NULL;
 	} else {
 		offset = e->end;
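
For context: the PAGE_CACHE_* names being removed throughout this series were defined in include/linux/pagemap.h as straight aliases, so hunks like the dlm ones above change no generated code. Quoted here (from the pre-removal header, for reference only):

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)		get_page(page)
	#define page_cache_release(page)	put_page(page)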
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 64026e53722a..d09cb4cdd09f 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
 		pg = virt_to_page(addr);
 		offset = offset_in_page(addr);
 		sg_set_page(&sg[i], pg, 0, offset);
-		remainder_of_page = PAGE_CACHE_SIZE - offset;
+		remainder_of_page = PAGE_SIZE - offset;
 		if (size >= remainder_of_page) {
 			sg[i].length = remainder_of_page;
 			addr += remainder_of_page;
@@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
 					struct page *page)
 {
 	return ecryptfs_lower_header_size(crypt_stat) +
-	       ((loff_t)page->index << PAGE_CACHE_SHIFT);
+	       ((loff_t)page->index << PAGE_SHIFT);
 }
 
 /**
@@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
 	size_t extent_size = crypt_stat->extent_size;
 	int rc;
 
-	extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size));
+	extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
 	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
 				(extent_base + extent_offset));
 	if (rc) {
@@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page)
 	}
 
 	for (extent_offset = 0;
-	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+	     extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
 	     extent_offset++) {
 		rc = crypt_extent(crypt_stat, enc_extent_page, page,
 				  extent_offset, ENCRYPT);
@@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page)
 	lower_offset = lower_offset_for_page(crypt_stat, page);
 	enc_extent_virt = kmap(enc_extent_page);
 	rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
-				  PAGE_CACHE_SIZE);
+				  PAGE_SIZE);
 	kunmap(enc_extent_page);
 	if (rc < 0) {
 		ecryptfs_printk(KERN_ERR,
@@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page)
 
 	lower_offset = lower_offset_for_page(crypt_stat, page);
 	page_virt = kmap(page);
-	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
+	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
 				 ecryptfs_inode);
 	kunmap(page);
 	if (rc < 0) {
@@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page)
 	}
 
 	for (extent_offset = 0;
-	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+	     extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
 	     extent_offset++) {
 		rc = crypt_extent(crypt_stat, page, page,
 				  extent_offset, DECRYPT);
@@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
 		crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
 	else {
-		if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
+		if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
 			crypt_stat->metadata_size =
 				ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
 		else
-			crypt_stat->metadata_size = PAGE_CACHE_SIZE;
+			crypt_stat->metadata_size = PAGE_SIZE;
 	}
 }
 
@@ -1442,7 +1442,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
 					ECRYPTFS_VALIDATE_HEADER_SIZE);
 	if (rc) {
 		/* metadata is not in the file header, so try xattrs */
-		memset(page_virt, 0, PAGE_CACHE_SIZE);
+		memset(page_virt, 0, PAGE_SIZE);
 		rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
 		if (rc) {
 			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
@@ -1475,7 +1475,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
 	}
 out:
 	if (page_virt) {
-		memset(page_virt, 0, PAGE_CACHE_SIZE);
+		memset(page_virt, 0, PAGE_SIZE);
 		kmem_cache_free(ecryptfs_header_cache, page_virt);
 	}
 	return rc;
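
The extent arithmetic these hunks touch is easiest to see with numbers. A worked example of the math above (not new code), assuming 4 KiB pages and eCryptfs's default 4 KiB extents:

	/*
	 * With PAGE_SIZE == 4096 and extent_size == 4096:
	 *   extents per page = PAGE_SIZE / extent_size = 1
	 *   extent_base(N)   = N * 1 = N            (IV index for page N)
	 *   lower_offset(N)  = header_size + (N << PAGE_SHIFT)
	 * so upper page 3 is read from and written to the lower file at
	 * header_size + 12288.
	 */

Since PAGE_CACHE_SIZE was always equal to PAGE_SIZE, none of these ratios or offsets change; only the spelling does.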
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 121114e9a464..224b49e71aa4 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
 	} else { /* ia->ia_size < i_size_read(inode) */
 		/* We're chopping off all the pages down to the page
 		 * in which ia->ia_size is located. Fill in the end of
-		 * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
-		 * PAGE_CACHE_SIZE with zeros. */
-		size_t num_zeros = (PAGE_CACHE_SIZE
-				    - (ia->ia_size & ~PAGE_CACHE_MASK));
+		 * that page from (ia->ia_size & ~PAGE_MASK) to
+		 * PAGE_SIZE with zeros. */
+		size_t num_zeros = (PAGE_SIZE
+				    - (ia->ia_size & ~PAGE_MASK));
 
 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 			truncate_setsize(inode, ia->ia_size);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 9893d1538122..3cf1546dca82 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1798,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
 	 * added the our &auth_tok_list */
 	next_packet_is_auth_tok_packet = 1;
 	while (next_packet_is_auth_tok_packet) {
-		size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i);
+		size_t max_packet_size = ((PAGE_SIZE - 8) - i);
 
 		switch (src[i]) {
 		case ECRYPTFS_TAG_3_PACKET_TYPE:
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8b0b4a73116d..1698132d0e57 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -695,12 +695,12 @@ static struct ecryptfs_cache_info {
 	{
 		.cache = &ecryptfs_header_cache,
 		.name = "ecryptfs_headers",
-		.size = PAGE_CACHE_SIZE,
+		.size = PAGE_SIZE,
 	},
 	{
 		.cache = &ecryptfs_xattr_cache,
 		.name = "ecryptfs_xattr_cache",
-		.size = PAGE_CACHE_SIZE,
+		.size = PAGE_SIZE,
 	},
 	{
 		.cache = &ecryptfs_key_record_cache,
@@ -818,7 +818,7 @@ static int __init ecryptfs_init(void)
 {
 	int rc;
 
-	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
+	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
 		rc = -EINVAL;
 		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
 				"larger than the host's page size, and so "
@@ -826,7 +826,7 @@ static int __init ecryptfs_init(void)
 				"default eCryptfs extent size is [%u] bytes; "
 				"the page size is [%lu] bytes.\n",
 				ECRYPTFS_DEFAULT_EXTENT_SIZE,
-				(unsigned long)PAGE_CACHE_SIZE);
+				(unsigned long)PAGE_SIZE);
 		goto out;
 	}
 	rc = ecryptfs_init_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 1f5865263b3e..e6b1d80952b9 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 				      struct ecryptfs_crypt_stat *crypt_stat)
 {
 	loff_t extent_num_in_page = 0;
-	loff_t num_extents_per_page = (PAGE_CACHE_SIZE
+	loff_t num_extents_per_page = (PAGE_SIZE
 				       / crypt_stat->extent_size);
 	int rc = 0;
 
@@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 			char *page_virt;
 
 			page_virt = kmap_atomic(page);
-			memset(page_virt, 0, PAGE_CACHE_SIZE);
+			memset(page_virt, 0, PAGE_SIZE);
 			/* TODO: Support more than one header extent */
 			if (view_extent_num == 0) {
 				size_t written;
@@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 					 - crypt_stat->metadata_size);
 
 			rc = ecryptfs_read_lower_page_segment(
-				page, (lower_offset >> PAGE_CACHE_SHIFT),
-				(lower_offset & ~PAGE_CACHE_MASK),
+				page, (lower_offset >> PAGE_SHIFT),
+				(lower_offset & ~PAGE_MASK),
 				crypt_stat->extent_size, page->mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
@@ -198,7 +198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 
 	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
-						      PAGE_CACHE_SIZE,
+						      PAGE_SIZE,
 						      page->mapping->host);
 	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
 		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
@@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 
 	} else {
 		rc = ecryptfs_read_lower_page_segment(
-			page, page->index, 0, PAGE_CACHE_SIZE,
+			page, page->index, 0, PAGE_SIZE,
 			page->mapping->host);
 		if (rc) {
 			printk(KERN_ERR "Error reading page; rc = "
@@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
 	struct inode *inode = page->mapping->host;
 	int end_byte_in_page;
 
-	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
+	if ((i_size_read(inode) / PAGE_SIZE) != page->index)
 		goto out;
-	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
+	end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
 	if (to > end_byte_in_page)
 		end_byte_in_page = to;
-	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
+	zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
 out:
 	return 0;
 }
@@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	loff_t prev_page_end_size;
 	int rc = 0;
@@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file,
 		return -ENOMEM;
 	*pagep = page;
 
-	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
+	prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
 	if (!PageUptodate(page)) {
 		struct ecryptfs_crypt_stat *crypt_stat =
 			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
 
 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 			rc = ecryptfs_read_lower_page_segment(
-				page, index, 0, PAGE_CACHE_SIZE, mapping->host);
+				page, index, 0, PAGE_SIZE, mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
 				       "lower page segment; rc = [%d]\n",
@@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file,
 			SetPageUptodate(page);
 		} else {
 			rc = ecryptfs_read_lower_page_segment(
-				page, index, 0, PAGE_CACHE_SIZE,
+				page, index, 0, PAGE_SIZE,
 				mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error reading "
@@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file,
 		} else {
 			if (prev_page_end_size
 			    >= i_size_read(page->mapping->host)) {
-				zero_user(page, 0, PAGE_CACHE_SIZE);
+				zero_user(page, 0, PAGE_SIZE);
 				SetPageUptodate(page);
-			} else if (len < PAGE_CACHE_SIZE) {
+			} else if (len < PAGE_SIZE) {
 				rc = ecryptfs_decrypt_page(page);
 				if (rc) {
 					printk(KERN_ERR "%s: Error decrypting "
@@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file,
 	 * of page?  Zero it out. */
 	if ((i_size_read(mapping->host) == prev_page_end_size)
 	    && (pos != 0))
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 out:
 	if (unlikely(rc)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		*pagep = NULL;
 	}
 	return rc;
@@ -437,7 +437,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
 	}
 	inode_lock(lower_inode);
 	size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
-					   xattr_virt, PAGE_CACHE_SIZE);
+					   xattr_virt, PAGE_SIZE);
 	if (size < 0)
 		size = 8;
 	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
@@ -479,8 +479,8 @@ static int ecryptfs_write_end(struct file *file,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = pos >> PAGE_SHIFT;
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + copied;
 	struct inode *ecryptfs_inode = mapping->host;
 	struct ecryptfs_crypt_stat *crypt_stat =
@@ -500,7 +500,7 @@ static int ecryptfs_write_end(struct file *file,
 		goto out;
 	}
 	if (!PageUptodate(page)) {
-		if (copied < PAGE_CACHE_SIZE) {
+		if (copied < PAGE_SIZE) {
 			rc = 0;
 			goto out;
 		}
@@ -533,7 +533,7 @@ static int ecryptfs_write_end(struct file *file,
 	rc = copied;
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return rc;
 }
 
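
The write path above keeps ecryptfs's partial-page rule intact. Condensing the fill_zeros_to_end_of_page() hunk into a standalone sketch (same logic as shown in the diff, extracted here for readability):

	/* Only the page that contains i_size needs its tail cleared. */
	if ((i_size_read(inode) / PAGE_SIZE) == page->index) {
		unsigned end = i_size_read(inode) % PAGE_SIZE;

		if (to > end)		/* this write extended the page */
			end = to;
		zero_user_segment(page, end, PAGE_SIZE);
	}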
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 09fe622274e4..158a3a39f82d 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
 	loff_t offset;
 	int rc;
 
-	offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+	offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
 		  + offset_in_page);
 	virt = kmap(page_for_lower);
 	rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
@@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
 	else
 		pos = offset;
 	while (pos < (offset + size)) {
-		pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
-		size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
-		size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+		pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
+		size_t start_offset_in_page = (pos & ~PAGE_MASK);
+		size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
 		loff_t total_remaining_bytes = ((offset + size) - pos);
 
 		if (fatal_signal_pending(current)) {
@@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
 			 * Fill in zero values to the end of the page */
 			memset(((char *)ecryptfs_page_virt
 				+ start_offset_in_page), 0,
-			       PAGE_CACHE_SIZE - start_offset_in_page);
+			       PAGE_SIZE - start_offset_in_page);
 		}
 
 		/* pos >= offset, we are now writing the data request */
@@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
 							ecryptfs_page,
 							start_offset_in_page,
 							data_offset);
-		page_cache_release(ecryptfs_page);
+		put_page(ecryptfs_page);
 		if (rc) {
 			printk(KERN_ERR "%s: Error encrypting "
 			       "page; rc = [%d]\n", __func__, rc);
@@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
 	loff_t offset;
 	int rc;
 
-	offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
+	offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
 	virt = kmap(page_for_ecryptfs);
 	rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
 	if (rc > 0)
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index dd029d13ea61..553c5d2db4a4 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 	efivarfs_sb = sb;
 
 	sb->s_maxbytes          = MAX_LFS_FILESIZE;
-	sb->s_blocksize         = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+	sb->s_blocksize         = PAGE_SIZE;
+	sb->s_blocksize_bits    = PAGE_SHIFT;
 	sb->s_magic             = EFIVARFS_MAGIC;
 	sb->s_op                = &efivarfs_ops;
 	sb->s_d_op              = &efivarfs_d_ops;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index e5bb2abf77f9..547b93cbea63 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode)
 static inline void exofs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	loff_t last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page)
 	unsigned chunk_size = exofs_chunk_size(dir);
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	struct exofs_dir_entry *p;
 	char *error;
 
 	/* if the page is the last one in the directory */
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -138,7 +138,7 @@ bad_entry:
 	EXOFS_ERR(
 		"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
 		"offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
-		dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		_LLU(le64_to_cpu(p->inode_no)),
 		rec_len, p->name_len);
 	goto fail;
@@ -147,7 +147,7 @@ Eend:
 	EXOFS_ERR("ERROR [exofs_check_page]: "
 		"entry in directory(0x%lx) spans the page boundary"
 		"offset=%lu, inode=0x%llx\n",
-		dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 		_LLU(le64_to_cpu(p->inode_no)));
 fail:
 	SetPageChecked(page);
@@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 {
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
 	int need_revalidate = (file->f_version != inode->i_version);
@@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 		if (IS_ERR(page)) {
 			EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
 				  inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return PTR_ERR(page);
 		}
 		kaddr = page_address(page);
@@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 			if (offset) {
 				offset = exofs_validate_entry(kaddr, offset,
 							      chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + exofs_last_byte(dir, n);
 		de = (struct exofs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				name_len = 0;
@@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
 	kunmap_atomic(kaddr);
 	err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
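
exofs_last_byte() above (and ext2_last_byte() further down) answer the same question: how much of directory page page_nr lies inside i_size. Pulled out of the diff as a standalone sketch, with a hypothetical name and an explicit cast added for illustration:

	static unsigned dir_last_byte(struct inode *inode, unsigned long page_nr)
	{
		loff_t last_byte = inode->i_size - ((loff_t)page_nr << PAGE_SHIFT);

		/* interior pages are full; the final page holds the remainder */
		return last_byte > PAGE_SIZE ? PAGE_SIZE : last_byte;
	}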
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 9eaf595aeaf8..49e1bd00b4ec 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol)
 
 	if (!pcol->ios) {
 		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
-					     pcol->pg_first << PAGE_CACHE_SHIFT,
+					     pcol->pg_first << PAGE_SHIFT,
 					     pcol->length, &pcol->ios);
 
 		if (ret)
@@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page)
 	struct inode *inode = pcol->inode;
 	struct exofs_i_info *oi = exofs_i(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	size_t len;
 	int ret;
 
@@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page)
 		pcol->that_locked_page = page;
 
 	if (page->index < end_index)
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	else if (page->index == end_index)
-		len = i_size & ~PAGE_CACHE_MASK;
+		len = i_size & ~PAGE_MASK;
 	else
 		len = 0;
 
@@ -442,8 +442,8 @@ try_again:
 			goto fail;
 	}
 
-	if (len != PAGE_CACHE_SIZE)
-		zero_user(page, len, PAGE_CACHE_SIZE - len);
+	if (len != PAGE_SIZE)
+		zero_user(page, len, PAGE_SIZE - len);
 
 	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
 		      inode->i_ino, page->index, len);
@@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page)
 
 	if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
 		EXOFS_DBGMSG2("index=0x%lx\n", page->index);
-		page_cache_release(page);
+		put_page(page);
 		return;
 	}
 	EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
@@ -633,7 +633,7 @@ static int write_exec(struct page_collect *pcol)
 
 	BUG_ON(pcol->ios);
 	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
-				 pcol->pg_first << PAGE_CACHE_SHIFT,
+				 pcol->pg_first << PAGE_SHIFT,
 				 pcol->length, &pcol->ios);
 	if (unlikely(ret))
 		goto err;
@@ -696,7 +696,7 @@ static int writepage_strip(struct page *page,
 	struct inode *inode = pcol->inode;
 	struct exofs_i_info *oi = exofs_i(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	size_t len;
 	int ret;
 
@@ -708,9 +708,9 @@ static int writepage_strip(struct page *page,
 
 	if (page->index < end_index)
 		/* in this case, the page is within the limits of the file */
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	else {
-		len = i_size & ~PAGE_CACHE_MASK;
+		len = i_size & ~PAGE_MASK;
 
 		if (page->index > end_index || !len) {
 			/* in this case, the page is outside the limits
@@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping,
 	long start, end, expected_pages;
 	int ret;
 
-	start = wbc->range_start >> PAGE_CACHE_SHIFT;
+	start = wbc->range_start >> PAGE_SHIFT;
 	end = (wbc->range_end == LLONG_MAX) ?
 		start + mapping->nrpages :
-		wbc->range_end >> PAGE_CACHE_SHIFT;
+		wbc->range_end >> PAGE_SHIFT;
 
 	if (start || end)
 		expected_pages = end - start + 1;
@@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
 	}
 
 	/* read modify write */
-	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
 		loff_t i_size = i_size_read(mapping->host);
-		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+		pgoff_t end_index = i_size >> PAGE_SHIFT;
 		size_t rlen;
 
 		if (page->index < end_index)
-			rlen = PAGE_CACHE_SIZE;
+			rlen = PAGE_SIZE;
 		else if (page->index == end_index)
-			rlen = i_size & ~PAGE_CACHE_MASK;
+			rlen = i_size & ~PAGE_MASK;
 		else
 			rlen = 0;
 
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index c20d77df2679..622a686bb08b 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 0c6638b40f21..7ff6fcfa685d 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
@@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode)
 static inline void ext2_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet)
 	char *kaddr = page_address(page);
 	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	ext2_dirent *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -176,7 +176,7 @@ bad_entry:
 	if (!quiet)
 		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
 			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+			dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode),
 			rec_len, p->name_len);
 	goto fail;
@@ -186,7 +186,7 @@ Eend:
 		ext2_error(sb, "ext2_check_page",
 			"entry in directory #%lu spans the page boundary"
 			"offset=%lu, inode=%lu",
-			dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+			dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode));
 	}
 fail:
@@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
 	unsigned char *types = NULL;
@@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 			ext2_error(sb, __func__,
 				   "bad page in #%lu",
 				   inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return PTR_ERR(page);
 		}
 		kaddr = page_address(page);
 		if (unlikely(need_revalidate)) {
 			if (offset) {
 				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
 		if (++n >= npages)
 			n = 0;
 		/* next page is past the blocks we've got */
-		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
 			ext2_error(dir->i_sb, __func__,
 				"dir %lu size %lld exceeds block count %llu",
 				dir->i_ino, dir->i_size,
@@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + ext2_last_byte(dir, n);
 		de = (ext2_dirent *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 	kunmap_atomic(kaddr);
 	err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 7a2be8f7f3c3..d34843925b23 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 			ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
 		else {
 			kunmap(dir_page);
-			page_cache_release(dir_page);
+			put_page(dir_page);
 		}
 		inode_dec_link_count(old_dir);
 	}
@@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index edc053a81914..6a6c27373b54 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -32,6 +32,7 @@
32#include <linux/random.h> 32#include <linux/random.h>
33#include <linux/scatterlist.h> 33#include <linux/scatterlist.h>
34#include <linux/spinlock_types.h> 34#include <linux/spinlock_types.h>
35#include <linux/namei.h>
35 36
36#include "ext4_extents.h" 37#include "ext4_extents.h"
37#include "xattr.h" 38#include "xattr.h"
@@ -91,7 +92,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
91 * Return: An allocated and initialized encryption context on success; error 92 * Return: An allocated and initialized encryption context on success; error
92 * value or NULL otherwise. 93 * value or NULL otherwise.
93 */ 94 */
94struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) 95struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
96 gfp_t gfp_flags)
95{ 97{
96 struct ext4_crypto_ctx *ctx = NULL; 98 struct ext4_crypto_ctx *ctx = NULL;
97 int res = 0; 99 int res = 0;
@@ -118,7 +120,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
118 list_del(&ctx->free_list); 120 list_del(&ctx->free_list);
119 spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); 121 spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
120 if (!ctx) { 122 if (!ctx) {
121 ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS); 123 ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
122 if (!ctx) { 124 if (!ctx) {
123 res = -ENOMEM; 125 res = -ENOMEM;
124 goto out; 126 goto out;
@@ -255,7 +257,8 @@ static int ext4_page_crypto(struct inode *inode,
255 ext4_direction_t rw, 257 ext4_direction_t rw,
256 pgoff_t index, 258 pgoff_t index,
257 struct page *src_page, 259 struct page *src_page,
258 struct page *dest_page) 260 struct page *dest_page,
261 gfp_t gfp_flags)
259 262
260{ 263{
261 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; 264 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +269,7 @@ static int ext4_page_crypto(struct inode *inode,
266 struct crypto_skcipher *tfm = ci->ci_ctfm; 269 struct crypto_skcipher *tfm = ci->ci_ctfm;
267 int res = 0; 270 int res = 0;
268 271
269 req = skcipher_request_alloc(tfm, GFP_NOFS); 272 req = skcipher_request_alloc(tfm, gfp_flags);
270 if (!req) { 273 if (!req) {
271 printk_ratelimited(KERN_ERR 274 printk_ratelimited(KERN_ERR
272 "%s: crypto_request_alloc() failed\n", 275 "%s: crypto_request_alloc() failed\n",
@@ -283,10 +286,10 @@ static int ext4_page_crypto(struct inode *inode,
283 EXT4_XTS_TWEAK_SIZE - sizeof(index)); 286 EXT4_XTS_TWEAK_SIZE - sizeof(index));
284 287
285 sg_init_table(&dst, 1); 288 sg_init_table(&dst, 1);
286 sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); 289 sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
287 sg_init_table(&src, 1); 290 sg_init_table(&src, 1);
288 sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); 291 sg_set_page(&src, src_page, PAGE_SIZE, 0);
289 skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, 292 skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
290 xts_tweak); 293 xts_tweak);
291 if (rw == EXT4_DECRYPT) 294 if (rw == EXT4_DECRYPT)
292 res = crypto_skcipher_decrypt(req); 295 res = crypto_skcipher_decrypt(req);
@@ -307,9 +310,10 @@ static int ext4_page_crypto(struct inode *inode,
307 return 0; 310 return 0;
308} 311}
309 312
310static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx) 313static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
314 gfp_t gfp_flags)
311{ 315{
312 ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT); 316 ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
313 if (ctx->w.bounce_page == NULL) 317 if (ctx->w.bounce_page == NULL)
314 return ERR_PTR(-ENOMEM); 318 return ERR_PTR(-ENOMEM);
315 ctx->flags |= EXT4_WRITE_PATH_FL; 319 ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +336,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
332 * error value or NULL. 336 * error value or NULL.
333 */ 337 */
334struct page *ext4_encrypt(struct inode *inode, 338struct page *ext4_encrypt(struct inode *inode,
335 struct page *plaintext_page) 339 struct page *plaintext_page,
340 gfp_t gfp_flags)
336{ 341{
337 struct ext4_crypto_ctx *ctx; 342 struct ext4_crypto_ctx *ctx;
338 struct page *ciphertext_page = NULL; 343 struct page *ciphertext_page = NULL;
@@ -340,17 +345,17 @@ struct page *ext4_encrypt(struct inode *inode,
340 345
341 BUG_ON(!PageLocked(plaintext_page)); 346 BUG_ON(!PageLocked(plaintext_page));
342 347
343 ctx = ext4_get_crypto_ctx(inode); 348 ctx = ext4_get_crypto_ctx(inode, gfp_flags);
344 if (IS_ERR(ctx)) 349 if (IS_ERR(ctx))
345 return (struct page *) ctx; 350 return (struct page *) ctx;
346 351
347 /* The encryption operation will require a bounce page. */ 352 /* The encryption operation will require a bounce page. */
348 ciphertext_page = alloc_bounce_page(ctx); 353 ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
349 if (IS_ERR(ciphertext_page)) 354 if (IS_ERR(ciphertext_page))
350 goto errout; 355 goto errout;
351 ctx->w.control_page = plaintext_page; 356 ctx->w.control_page = plaintext_page;
352 err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index, 357 err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
353 plaintext_page, ciphertext_page); 358 plaintext_page, ciphertext_page, gfp_flags);
354 if (err) { 359 if (err) {
355 ciphertext_page = ERR_PTR(err); 360 ciphertext_page = ERR_PTR(err);
356 errout: 361 errout:
@@ -378,8 +383,8 @@ int ext4_decrypt(struct page *page)
378{ 383{
379 BUG_ON(!PageLocked(page)); 384 BUG_ON(!PageLocked(page));
380 385
381 return ext4_page_crypto(page->mapping->host, 386 return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
382 EXT4_DECRYPT, page->index, page, page); 387 page->index, page, page, GFP_NOFS);
383} 388}
384 389
385int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 390int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -396,13 +401,13 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
396 (unsigned long) inode->i_ino, lblk, len); 401 (unsigned long) inode->i_ino, lblk, len);
397#endif 402#endif
398 403
399 BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); 404 BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
400 405
401 ctx = ext4_get_crypto_ctx(inode); 406 ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
402 if (IS_ERR(ctx)) 407 if (IS_ERR(ctx))
403 return PTR_ERR(ctx); 408 return PTR_ERR(ctx);
404 409
405 ciphertext_page = alloc_bounce_page(ctx); 410 ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
406 if (IS_ERR(ciphertext_page)) { 411 if (IS_ERR(ciphertext_page)) {
407 err = PTR_ERR(ciphertext_page); 412 err = PTR_ERR(ciphertext_page);
408 goto errout; 413 goto errout;
@@ -410,11 +415,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
410 415
411 while (len--) { 416 while (len--) {
412 err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk, 417 err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
413 ZERO_PAGE(0), ciphertext_page); 418 ZERO_PAGE(0), ciphertext_page,
419 GFP_NOFS);
414 if (err) 420 if (err)
415 goto errout; 421 goto errout;
416 422
417 bio = bio_alloc(GFP_KERNEL, 1); 423 bio = bio_alloc(GFP_NOWAIT, 1);
418 if (!bio) { 424 if (!bio) {
419 err = -ENOMEM; 425 err = -ENOMEM;
420 goto errout; 426 goto errout;
@@ -473,13 +479,19 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
473 */ 479 */
474static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags) 480static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
475{ 481{
476 struct inode *dir = d_inode(dentry->d_parent); 482 struct dentry *dir;
477 struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info; 483 struct ext4_crypt_info *ci;
478 int dir_has_key, cached_with_key; 484 int dir_has_key, cached_with_key;
479 485
480 if (!ext4_encrypted_inode(dir)) 486 if (flags & LOOKUP_RCU)
481 return 0; 487 return -ECHILD;
482 488
489 dir = dget_parent(dentry);
490 if (!ext4_encrypted_inode(d_inode(dir))) {
491 dput(dir);
492 return 0;
493 }
494 ci = EXT4_I(d_inode(dir))->i_crypt_info;
483 if (ci && ci->ci_keyring_key && 495 if (ci && ci->ci_keyring_key &&
484 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | 496 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
485 (1 << KEY_FLAG_REVOKED) | 497 (1 << KEY_FLAG_REVOKED) |
@@ -489,6 +501,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
489 /* this should eventually be an flag in d_flags */ 501 /* this should eventually be an flag in d_flags */
490 cached_with_key = dentry->d_fsdata != NULL; 502 cached_with_key = dentry->d_fsdata != NULL;
491 dir_has_key = (ci != NULL); 503 dir_has_key = (ci != NULL);
504 dput(dir);
492 505
493 /* 506 /*
494 * If the dentry was cached without the key, and it is a 507 * If the dentry was cached without the key, and it is a
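Two fixes ride together in the hunk above: under RCU-walk the function may neither sleep nor take references, so it now bails out with -ECHILD to force a ref-walk retry, and the parent is pinned via dget_parent() rather than read racily through dentry->d_parent. The resulting shape, reduced to a sketch in which check_parent() is a hypothetical stand-in for the encryption-context checks:

static int d_revalidate_sketch(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int ret;

	if (flags & LOOKUP_RCU)         /* no refs, no sleeping here */
		return -ECHILD;         /* retry the walk in ref mode */

	dir = dget_parent(dentry);      /* stable, referenced parent */
	ret = check_parent(d_inode(dir), dentry);   /* hypothetical */
	dput(dir);                      /* dropped on every path */
	return ret;
}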
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 50ba27cbed03..561d7308b393 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -155,13 +155,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
155 err = ext4_map_blocks(NULL, inode, &map, 0); 155 err = ext4_map_blocks(NULL, inode, &map, 0);
156 if (err > 0) { 156 if (err > 0) {
157 pgoff_t index = map.m_pblk >> 157 pgoff_t index = map.m_pblk >>
158 (PAGE_CACHE_SHIFT - inode->i_blkbits); 158 (PAGE_SHIFT - inode->i_blkbits);
159 if (!ra_has_index(&file->f_ra, index)) 159 if (!ra_has_index(&file->f_ra, index))
160 page_cache_sync_readahead( 160 page_cache_sync_readahead(
161 sb->s_bdev->bd_inode->i_mapping, 161 sb->s_bdev->bd_inode->i_mapping,
162 &file->f_ra, file, 162 &file->f_ra, file,
163 index, 1); 163 index, 1);
164 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 164 file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
165 bh = ext4_bread(NULL, inode, map.m_lblk, 0); 165 bh = ext4_bread(NULL, inode, map.m_lblk, 0);
166 if (IS_ERR(bh)) { 166 if (IS_ERR(bh)) {
167 err = PTR_ERR(bh); 167 err = PTR_ERR(bh);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c04743519865..349afebe21ee 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -912,6 +912,29 @@ do { \
912#include "extents_status.h" 912#include "extents_status.h"
913 913
914/* 914/*
915 * Lock subclasses for i_data_sem in the ext4_inode_info structure.
916 *
917 * These are needed to avoid lockdep false positives when we need to
918 * allocate blocks to the quota inode during ext4_map_blocks(), while
919 * holding i_data_sem for a normal (non-quota) inode. Since we don't
920 * do quota tracking for the quota inode, this avoids deadlock (as
921 * well as infinite recursion, since it isn't turtles all the way
922 * down...)
923 *
924 * I_DATA_SEM_NORMAL - Used for most inodes
 925 * I_DATA_SEM_OTHER - Used by move_extent.c for the second normal inode
 926 * where the second inode has a larger inode number
 927 * than the first
928 * I_DATA_SEM_QUOTA - Used for quota inodes only
929 */
930enum {
931 I_DATA_SEM_NORMAL = 0,
932 I_DATA_SEM_OTHER,
933 I_DATA_SEM_QUOTA,
934};
935
936
937/*
915 * fourth extended file system inode data in memory 938 * fourth extended file system inode data in memory
916 */ 939 */
917struct ext4_inode_info { 940struct ext4_inode_info {
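The enum above only takes effect at lock-acquisition sites: lockdep treats each subclass as a separate lock class, so taking i_data_sem on a second inode while already holding it on a first no longer looks like recursive locking. A minimal sketch of the annotated acquisition, assuming the rwsem API (down_write_nested()) and modelled on the move_extent.c hunk further down; the quota path would pass I_DATA_SEM_QUOTA the same way:

/*
 * Sketch only, in the style of ext4_double_down_write_data_sem():
 * lock whichever inode compares lower first with the default
 * subclass (I_DATA_SEM_NORMAL == 0), then annotate the second
 * acquisition as I_DATA_SEM_OTHER so lockdep sees a distinct
 * class instead of a self-deadlock.
 */
static void double_down_write_sketch(struct inode *first, struct inode *second)
{
	if (first > second)
		swap(first, second);
	down_write(&EXT4_I(first)->i_data_sem);
	down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
}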
@@ -1961,7 +1984,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
1961{ 1984{
1962 unsigned len = le16_to_cpu(dlen); 1985 unsigned len = le16_to_cpu(dlen);
1963 1986
1964#if (PAGE_CACHE_SIZE >= 65536) 1987#if (PAGE_SIZE >= 65536)
1965 if (len == EXT4_MAX_REC_LEN || len == 0) 1988 if (len == EXT4_MAX_REC_LEN || len == 0)
1966 return blocksize; 1989 return blocksize;
1967 return (len & 65532) | ((len & 3) << 16); 1990 return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1997,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
1974{ 1997{
1975 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) 1998 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
1976 BUG(); 1999 BUG();
1977#if (PAGE_CACHE_SIZE >= 65536) 2000#if (PAGE_SIZE >= 65536)
1978 if (len < 65536) 2001 if (len < 65536)
1979 return cpu_to_le16(len); 2002 return cpu_to_le16(len);
1980 if (len == blocksize) { 2003 if (len == blocksize) {
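With 64KiB or larger pages, a directory entry covering a whole block can exceed what the on-disk 16-bit rec_len holds, so the functions above reserve EXT4_MAX_REC_LEN (65535) for "one full block" and otherwise fold bits 16-17 of the length into the two low bits, which are always zero since rec_len is 4-byte aligned. A self-contained userspace round-trip of the encoding; this is an illustrative sketch that simplifies the len == blocksize branch the hunk cuts off, not the kernel code itself:

#include <assert.h>
#include <stdint.h>

#define EXT4_MAX_REC_LEN 65535

/* Mirror of ext4_rec_len_to_disk() for the PAGE_SIZE >= 65536 case
 * (simplified: the kernel further distinguishes blocksize == 65536). */
static uint16_t rec_len_to_disk(unsigned len, unsigned blocksize)
{
	if (len < 65536)
		return (uint16_t)len;
	if (len == blocksize)
		return EXT4_MAX_REC_LEN;
	/* Fold bits 16-17 of len into the low two bits. */
	return (uint16_t)((len & 65532) | ((len >> 16) & 3));
}

static unsigned rec_len_from_disk(uint16_t dlen, unsigned blocksize)
{
	if (dlen == EXT4_MAX_REC_LEN || dlen == 0)
		return blocksize;
	return (dlen & 65532) | ((dlen & 3) << 16);
}

int main(void)
{
	unsigned blocksize = 262144;    /* hypothetical 256KiB block */

	assert(rec_len_from_disk(rec_len_to_disk(70000, blocksize),
				 blocksize) == 70000);
	assert(rec_len_from_disk(rec_len_to_disk(blocksize, blocksize),
				 blocksize) == blocksize);
	return 0;
}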
@@ -2282,11 +2305,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep;
2282bool ext4_valid_contents_enc_mode(uint32_t mode); 2305bool ext4_valid_contents_enc_mode(uint32_t mode);
2283uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); 2306uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
2284extern struct workqueue_struct *ext4_read_workqueue; 2307extern struct workqueue_struct *ext4_read_workqueue;
2285struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode); 2308struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
2309 gfp_t gfp_flags);
2286void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); 2310void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
2287void ext4_restore_control_page(struct page *data_page); 2311void ext4_restore_control_page(struct page *data_page);
2288struct page *ext4_encrypt(struct inode *inode, 2312struct page *ext4_encrypt(struct inode *inode,
2289 struct page *plaintext_page); 2313 struct page *plaintext_page,
2314 gfp_t gfp_flags);
2290int ext4_decrypt(struct page *page); 2315int ext4_decrypt(struct page *page);
2291int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 2316int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
2292 ext4_fsblk_t pblk, ext4_lblk_t len); 2317 ext4_fsblk_t pblk, ext4_lblk_t len);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6659e216385e..fa2208bae2e1 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -329,7 +329,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
329 struct super_block *sb = inode->i_sb; 329 struct super_block *sb = inode->i_sb;
330 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 330 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
331 struct vfsmount *mnt = filp->f_path.mnt; 331 struct vfsmount *mnt = filp->f_path.mnt;
332 struct inode *dir = filp->f_path.dentry->d_parent->d_inode; 332 struct dentry *dir;
333 struct path path; 333 struct path path;
334 char buf[64], *cp; 334 char buf[64], *cp;
335 int ret; 335 int ret;
@@ -373,14 +373,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
373 if (ext4_encryption_info(inode) == NULL) 373 if (ext4_encryption_info(inode) == NULL)
374 return -ENOKEY; 374 return -ENOKEY;
375 } 375 }
376 if (ext4_encrypted_inode(dir) && 376
377 !ext4_is_child_context_consistent_with_parent(dir, inode)) { 377 dir = dget_parent(file_dentry(filp));
378 if (ext4_encrypted_inode(d_inode(dir)) &&
379 !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
378 ext4_warning(inode->i_sb, 380 ext4_warning(inode->i_sb,
379 "Inconsistent encryption contexts: %lu/%lu\n", 381 "Inconsistent encryption contexts: %lu/%lu\n",
380 (unsigned long) dir->i_ino, 382 (unsigned long) d_inode(dir)->i_ino,
381 (unsigned long) inode->i_ino); 383 (unsigned long) inode->i_ino);
384 dput(dir);
382 return -EPERM; 385 return -EPERM;
383 } 386 }
387 dput(dir);
384 /* 388 /*
385 * Set up the jbd2_inode if we are opening the inode for 389 * Set up the jbd2_inode if we are opening the inode for
386 * writing and the journal is present 390 * writing and the journal is present
@@ -428,8 +432,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
428 lastoff = startoff; 432 lastoff = startoff;
429 endoff = (loff_t)end_blk << blkbits; 433 endoff = (loff_t)end_blk << blkbits;
430 434
431 index = startoff >> PAGE_CACHE_SHIFT; 435 index = startoff >> PAGE_SHIFT;
432 end = endoff >> PAGE_CACHE_SHIFT; 436 end = endoff >> PAGE_SHIFT;
433 437
434 pagevec_init(&pvec, 0); 438 pagevec_init(&pvec, 0);
435 do { 439 do {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7cbdd3752ba5..7bc6c855cc18 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page)
482 ret = ext4_read_inline_data(inode, kaddr, len, &iloc); 482 ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
483 flush_dcache_page(page); 483 flush_dcache_page(page);
484 kunmap_atomic(kaddr); 484 kunmap_atomic(kaddr);
485 zero_user_segment(page, len, PAGE_CACHE_SIZE); 485 zero_user_segment(page, len, PAGE_SIZE);
486 SetPageUptodate(page); 486 SetPageUptodate(page);
487 brelse(iloc.bh); 487 brelse(iloc.bh);
488 488
@@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
507 if (!page->index) 507 if (!page->index)
508 ret = ext4_read_inline_page(inode, page); 508 ret = ext4_read_inline_page(inode, page);
509 else if (!PageUptodate(page)) { 509 else if (!PageUptodate(page)) {
510 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 510 zero_user_segment(page, 0, PAGE_SIZE);
511 SetPageUptodate(page); 511 SetPageUptodate(page);
512 } 512 }
513 513
@@ -595,7 +595,7 @@ retry:
595 595
596 if (ret) { 596 if (ret) {
597 unlock_page(page); 597 unlock_page(page);
598 page_cache_release(page); 598 put_page(page);
599 page = NULL; 599 page = NULL;
600 ext4_orphan_add(handle, inode); 600 ext4_orphan_add(handle, inode);
601 up_write(&EXT4_I(inode)->xattr_sem); 601 up_write(&EXT4_I(inode)->xattr_sem);
@@ -621,7 +621,7 @@ retry:
621out: 621out:
622 if (page) { 622 if (page) {
623 unlock_page(page); 623 unlock_page(page);
624 page_cache_release(page); 624 put_page(page);
625 } 625 }
626 if (sem_held) 626 if (sem_held)
627 up_write(&EXT4_I(inode)->xattr_sem); 627 up_write(&EXT4_I(inode)->xattr_sem);
@@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
690 if (!ext4_has_inline_data(inode)) { 690 if (!ext4_has_inline_data(inode)) {
691 ret = 0; 691 ret = 0;
692 unlock_page(page); 692 unlock_page(page);
693 page_cache_release(page); 693 put_page(page);
694 goto out_up_read; 694 goto out_up_read;
695 } 695 }
696 696
@@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
815 if (ret) { 815 if (ret) {
816 up_read(&EXT4_I(inode)->xattr_sem); 816 up_read(&EXT4_I(inode)->xattr_sem);
817 unlock_page(page); 817 unlock_page(page);
818 page_cache_release(page); 818 put_page(page);
819 ext4_truncate_failed_write(inode); 819 ext4_truncate_failed_write(inode);
820 return ret; 820 return ret;
821 } 821 }
@@ -829,7 +829,7 @@ out:
829 up_read(&EXT4_I(inode)->xattr_sem); 829 up_read(&EXT4_I(inode)->xattr_sem);
830 if (page) { 830 if (page) {
831 unlock_page(page); 831 unlock_page(page);
832 page_cache_release(page); 832 put_page(page);
833 } 833 }
834 return ret; 834 return ret;
835} 835}
@@ -919,7 +919,7 @@ retry_journal:
919out_release_page: 919out_release_page:
920 up_read(&EXT4_I(inode)->xattr_sem); 920 up_read(&EXT4_I(inode)->xattr_sem);
921 unlock_page(page); 921 unlock_page(page);
922 page_cache_release(page); 922 put_page(page);
923out_journal: 923out_journal:
924 ext4_journal_stop(handle); 924 ext4_journal_stop(handle);
925out: 925out:
@@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
947 i_size_changed = 1; 947 i_size_changed = 1;
948 } 948 }
949 unlock_page(page); 949 unlock_page(page);
950 page_cache_release(page); 950 put_page(page);
951 951
952 /* 952 /*
953 * Don't mark the inode dirty under page lock. First, it unnecessarily 953 * Don't mark the inode dirty under page lock. First, it unnecessarily
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dab84a2530ff..981a1fc30eaa 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -763,39 +763,47 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
763/* Maximum number of blocks we map for direct IO at once. */ 763/* Maximum number of blocks we map for direct IO at once. */
764#define DIO_MAX_BLOCKS 4096 764#define DIO_MAX_BLOCKS 4096
765 765
766static handle_t *start_dio_trans(struct inode *inode, 766/*
767 struct buffer_head *bh_result) 767 * Get blocks function for the cases that need to start a transaction -
 768 * generally different cases of direct IO and DAX IO. It also handles retries
769 * in case of ENOSPC.
770 */
771static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
772 struct buffer_head *bh_result, int flags)
768{ 773{
769 int dio_credits; 774 int dio_credits;
775 handle_t *handle;
776 int retries = 0;
777 int ret;
770 778
771 /* Trim mapping request to maximum we can map at once for DIO */ 779 /* Trim mapping request to maximum we can map at once for DIO */
772 if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS) 780 if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
773 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits; 781 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
774 dio_credits = ext4_chunk_trans_blocks(inode, 782 dio_credits = ext4_chunk_trans_blocks(inode,
775 bh_result->b_size >> inode->i_blkbits); 783 bh_result->b_size >> inode->i_blkbits);
776 return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); 784retry:
785 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
786 if (IS_ERR(handle))
787 return PTR_ERR(handle);
788
789 ret = _ext4_get_block(inode, iblock, bh_result, flags);
790 ext4_journal_stop(handle);
791
792 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
793 goto retry;
794 return ret;
777} 795}
778 796
779/* Get block function for DIO reads and writes to inodes without extents */ 797/* Get block function for DIO reads and writes to inodes without extents */
780int ext4_dio_get_block(struct inode *inode, sector_t iblock, 798int ext4_dio_get_block(struct inode *inode, sector_t iblock,
781 struct buffer_head *bh, int create) 799 struct buffer_head *bh, int create)
782{ 800{
783 handle_t *handle;
784 int ret;
785
786 /* We don't expect handle for direct IO */ 801 /* We don't expect handle for direct IO */
787 WARN_ON_ONCE(ext4_journal_current_handle()); 802 WARN_ON_ONCE(ext4_journal_current_handle());
788 803
789 if (create) { 804 if (!create)
790 handle = start_dio_trans(inode, bh); 805 return _ext4_get_block(inode, iblock, bh, 0);
791 if (IS_ERR(handle)) 806 return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
792 return PTR_ERR(handle);
793 }
794 ret = _ext4_get_block(inode, iblock, bh,
795 create ? EXT4_GET_BLOCKS_CREATE : 0);
796 if (create)
797 ext4_journal_stop(handle);
798 return ret;
799} 807}
800 808
801/* 809/*
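The helper above folds three open-coded variants into one transaction-wrapping pattern. The ordering detail worth noting is that the handle is stopped before deciding to retry: ext4_should_retry_alloc() may wait on a journal commit to free space, which cannot make progress while the caller still holds a handle. An annotated restatement of that loop; the rationale in the comments is an interpretation, not text from the patch:

retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = _ext4_get_block(inode, iblock, bh_result, flags);
	ext4_journal_stop(handle);      /* drop the handle first... */
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;             /* ...so a commit can free space */
	return ret;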
@@ -806,18 +814,13 @@ int ext4_dio_get_block(struct inode *inode, sector_t iblock,
806static int ext4_dio_get_block_unwritten_async(struct inode *inode, 814static int ext4_dio_get_block_unwritten_async(struct inode *inode,
807 sector_t iblock, struct buffer_head *bh_result, int create) 815 sector_t iblock, struct buffer_head *bh_result, int create)
808{ 816{
809 handle_t *handle;
810 int ret; 817 int ret;
811 818
812 /* We don't expect handle for direct IO */ 819 /* We don't expect handle for direct IO */
813 WARN_ON_ONCE(ext4_journal_current_handle()); 820 WARN_ON_ONCE(ext4_journal_current_handle());
814 821
815 handle = start_dio_trans(inode, bh_result); 822 ret = ext4_get_block_trans(inode, iblock, bh_result,
816 if (IS_ERR(handle)) 823 EXT4_GET_BLOCKS_IO_CREATE_EXT);
817 return PTR_ERR(handle);
818 ret = _ext4_get_block(inode, iblock, bh_result,
819 EXT4_GET_BLOCKS_IO_CREATE_EXT);
820 ext4_journal_stop(handle);
821 824
822 /* 825 /*
823 * When doing DIO using unwritten extents, we need io_end to convert 826 * When doing DIO using unwritten extents, we need io_end to convert
@@ -850,18 +853,13 @@ static int ext4_dio_get_block_unwritten_async(struct inode *inode,
850static int ext4_dio_get_block_unwritten_sync(struct inode *inode, 853static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
851 sector_t iblock, struct buffer_head *bh_result, int create) 854 sector_t iblock, struct buffer_head *bh_result, int create)
852{ 855{
853 handle_t *handle;
854 int ret; 856 int ret;
855 857
856 /* We don't expect handle for direct IO */ 858 /* We don't expect handle for direct IO */
857 WARN_ON_ONCE(ext4_journal_current_handle()); 859 WARN_ON_ONCE(ext4_journal_current_handle());
858 860
859 handle = start_dio_trans(inode, bh_result); 861 ret = ext4_get_block_trans(inode, iblock, bh_result,
860 if (IS_ERR(handle)) 862 EXT4_GET_BLOCKS_IO_CREATE_EXT);
861 return PTR_ERR(handle);
862 ret = _ext4_get_block(inode, iblock, bh_result,
863 EXT4_GET_BLOCKS_IO_CREATE_EXT);
864 ext4_journal_stop(handle);
865 863
866 /* 864 /*
867 * Mark inode as having pending DIO writes to unwritten extents. 865 * Mark inode as having pending DIO writes to unwritten extents.
@@ -1057,7 +1055,7 @@ int do_journal_get_write_access(handle_t *handle,
1057static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, 1055static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1058 get_block_t *get_block) 1056 get_block_t *get_block)
1059{ 1057{
1060 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1058 unsigned from = pos & (PAGE_SIZE - 1);
1061 unsigned to = from + len; 1059 unsigned to = from + len;
1062 struct inode *inode = page->mapping->host; 1060 struct inode *inode = page->mapping->host;
1063 unsigned block_start, block_end; 1061 unsigned block_start, block_end;
@@ -1069,15 +1067,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1069 bool decrypt = false; 1067 bool decrypt = false;
1070 1068
1071 BUG_ON(!PageLocked(page)); 1069 BUG_ON(!PageLocked(page));
1072 BUG_ON(from > PAGE_CACHE_SIZE); 1070 BUG_ON(from > PAGE_SIZE);
1073 BUG_ON(to > PAGE_CACHE_SIZE); 1071 BUG_ON(to > PAGE_SIZE);
1074 BUG_ON(from > to); 1072 BUG_ON(from > to);
1075 1073
1076 if (!page_has_buffers(page)) 1074 if (!page_has_buffers(page))
1077 create_empty_buffers(page, blocksize, 0); 1075 create_empty_buffers(page, blocksize, 0);
1078 head = page_buffers(page); 1076 head = page_buffers(page);
1079 bbits = ilog2(blocksize); 1077 bbits = ilog2(blocksize);
1080 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1078 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1081 1079
1082 for (bh = head, block_start = 0; bh != head || !block_start; 1080 for (bh = head, block_start = 0; bh != head || !block_start;
1083 block++, block_start = block_end, bh = bh->b_this_page) { 1081 block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1157,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
1159 * we allocate blocks but write fails for some reason 1157 * we allocate blocks but write fails for some reason
1160 */ 1158 */
1161 needed_blocks = ext4_writepage_trans_blocks(inode) + 1; 1159 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1162 index = pos >> PAGE_CACHE_SHIFT; 1160 index = pos >> PAGE_SHIFT;
1163 from = pos & (PAGE_CACHE_SIZE - 1); 1161 from = pos & (PAGE_SIZE - 1);
1164 to = from + len; 1162 to = from + len;
1165 1163
1166 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 1164 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1186,7 @@ retry_grab:
1188retry_journal: 1186retry_journal:
1189 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); 1187 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1190 if (IS_ERR(handle)) { 1188 if (IS_ERR(handle)) {
1191 page_cache_release(page); 1189 put_page(page);
1192 return PTR_ERR(handle); 1190 return PTR_ERR(handle);
1193 } 1191 }
1194 1192
@@ -1196,7 +1194,7 @@ retry_journal:
1196 if (page->mapping != mapping) { 1194 if (page->mapping != mapping) {
1197 /* The page got truncated from under us */ 1195 /* The page got truncated from under us */
1198 unlock_page(page); 1196 unlock_page(page);
1199 page_cache_release(page); 1197 put_page(page);
1200 ext4_journal_stop(handle); 1198 ext4_journal_stop(handle);
1201 goto retry_grab; 1199 goto retry_grab;
1202 } 1200 }
@@ -1252,7 +1250,7 @@ retry_journal:
1252 if (ret == -ENOSPC && 1250 if (ret == -ENOSPC &&
1253 ext4_should_retry_alloc(inode->i_sb, &retries)) 1251 ext4_should_retry_alloc(inode->i_sb, &retries))
1254 goto retry_journal; 1252 goto retry_journal;
1255 page_cache_release(page); 1253 put_page(page);
1256 return ret; 1254 return ret;
1257 } 1255 }
1258 *pagep = page; 1256 *pagep = page;
@@ -1295,7 +1293,7 @@ static int ext4_write_end(struct file *file,
1295 ret = ext4_jbd2_file_inode(handle, inode); 1293 ret = ext4_jbd2_file_inode(handle, inode);
1296 if (ret) { 1294 if (ret) {
1297 unlock_page(page); 1295 unlock_page(page);
1298 page_cache_release(page); 1296 put_page(page);
1299 goto errout; 1297 goto errout;
1300 } 1298 }
1301 } 1299 }
@@ -1315,7 +1313,7 @@ static int ext4_write_end(struct file *file,
1315 */ 1313 */
1316 i_size_changed = ext4_update_inode_size(inode, pos + copied); 1314 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1317 unlock_page(page); 1315 unlock_page(page);
1318 page_cache_release(page); 1316 put_page(page);
1319 1317
1320 if (old_size < pos) 1318 if (old_size < pos)
1321 pagecache_isize_extended(inode, old_size, pos); 1319 pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1397,7 @@ static int ext4_journalled_write_end(struct file *file,
1399 int size_changed = 0; 1397 int size_changed = 0;
1400 1398
1401 trace_ext4_journalled_write_end(inode, pos, len, copied); 1399 trace_ext4_journalled_write_end(inode, pos, len, copied);
1402 from = pos & (PAGE_CACHE_SIZE - 1); 1400 from = pos & (PAGE_SIZE - 1);
1403 to = from + len; 1401 to = from + len;
1404 1402
1405 BUG_ON(!ext4_handle_valid(handle)); 1403 BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
1423 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1421 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1424 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1422 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1425 unlock_page(page); 1423 unlock_page(page);
1426 page_cache_release(page); 1424 put_page(page);
1427 1425
1428 if (old_size < pos) 1426 if (old_size < pos)
1429 pagecache_isize_extended(inode, old_size, pos); 1427 pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1535,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1537 int num_clusters; 1535 int num_clusters;
1538 ext4_fsblk_t lblk; 1536 ext4_fsblk_t lblk;
1539 1537
1540 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 1538 BUG_ON(stop > PAGE_SIZE || stop < length);
1541 1539
1542 head = page_buffers(page); 1540 head = page_buffers(page);
1543 bh = head; 1541 bh = head;
@@ -1553,7 +1551,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1553 clear_buffer_delay(bh); 1551 clear_buffer_delay(bh);
1554 } else if (contiguous_blks) { 1552 } else if (contiguous_blks) {
1555 lblk = page->index << 1553 lblk = page->index <<
1556 (PAGE_CACHE_SHIFT - inode->i_blkbits); 1554 (PAGE_SHIFT - inode->i_blkbits);
1557 lblk += (curr_off >> inode->i_blkbits) - 1555 lblk += (curr_off >> inode->i_blkbits) -
1558 contiguous_blks; 1556 contiguous_blks;
1559 ext4_es_remove_extent(inode, lblk, contiguous_blks); 1557 ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1561,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1563 } while ((bh = bh->b_this_page) != head); 1561 } while ((bh = bh->b_this_page) != head);
1564 1562
1565 if (contiguous_blks) { 1563 if (contiguous_blks) {
1566 lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1564 lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
1567 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; 1565 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
1568 ext4_es_remove_extent(inode, lblk, contiguous_blks); 1566 ext4_es_remove_extent(inode, lblk, contiguous_blks);
1569 } 1567 }
@@ -1572,7 +1570,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1572 * need to release the reserved space for that cluster. */ 1570 * need to release the reserved space for that cluster. */
1573 num_clusters = EXT4_NUM_B2C(sbi, to_release); 1571 num_clusters = EXT4_NUM_B2C(sbi, to_release);
1574 while (num_clusters > 0) { 1572 while (num_clusters > 0) {
1575 lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 1573 lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
1576 ((num_clusters - 1) << sbi->s_cluster_bits); 1574 ((num_clusters - 1) << sbi->s_cluster_bits);
1577 if (sbi->s_cluster_ratio == 1 || 1575 if (sbi->s_cluster_ratio == 1 ||
1578 !ext4_find_delalloc_cluster(inode, lblk)) 1576 !ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1617,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1619 end = mpd->next_page - 1; 1617 end = mpd->next_page - 1;
1620 if (invalidate) { 1618 if (invalidate) {
1621 ext4_lblk_t start, last; 1619 ext4_lblk_t start, last;
1622 start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1620 start = index << (PAGE_SHIFT - inode->i_blkbits);
1623 last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1621 last = end << (PAGE_SHIFT - inode->i_blkbits);
1624 ext4_es_remove_extent(inode, start, last - start + 1); 1622 ext4_es_remove_extent(inode, start, last - start + 1);
1625 } 1623 }
1626 1624
@@ -1636,7 +1634,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1636 BUG_ON(!PageLocked(page)); 1634 BUG_ON(!PageLocked(page));
1637 BUG_ON(PageWriteback(page)); 1635 BUG_ON(PageWriteback(page));
1638 if (invalidate) { 1636 if (invalidate) {
1639 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1637 block_invalidatepage(page, 0, PAGE_SIZE);
1640 ClearPageUptodate(page); 1638 ClearPageUptodate(page);
1641 } 1639 }
1642 unlock_page(page); 1640 unlock_page(page);
@@ -2007,10 +2005,10 @@ static int ext4_writepage(struct page *page,
2007 2005
2008 trace_ext4_writepage(page); 2006 trace_ext4_writepage(page);
2009 size = i_size_read(inode); 2007 size = i_size_read(inode);
2010 if (page->index == size >> PAGE_CACHE_SHIFT) 2008 if (page->index == size >> PAGE_SHIFT)
2011 len = size & ~PAGE_CACHE_MASK; 2009 len = size & ~PAGE_MASK;
2012 else 2010 else
2013 len = PAGE_CACHE_SIZE; 2011 len = PAGE_SIZE;
2014 2012
2015 page_bufs = page_buffers(page); 2013 page_bufs = page_buffers(page);
2016 /* 2014 /*
@@ -2034,7 +2032,7 @@ static int ext4_writepage(struct page *page,
2034 ext4_bh_delay_or_unwritten)) { 2032 ext4_bh_delay_or_unwritten)) {
2035 redirty_page_for_writepage(wbc, page); 2033 redirty_page_for_writepage(wbc, page);
2036 if ((current->flags & PF_MEMALLOC) || 2034 if ((current->flags & PF_MEMALLOC) ||
2037 (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) { 2035 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2038 /* 2036 /*
2039 * For memory cleaning there's no point in writing only 2037 * For memory cleaning there's no point in writing only
2040 * some buffers. So just bail out. Warn if we came here 2038 * some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2074,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2076 int err; 2074 int err;
2077 2075
2078 BUG_ON(page->index != mpd->first_page); 2076 BUG_ON(page->index != mpd->first_page);
2079 if (page->index == size >> PAGE_CACHE_SHIFT) 2077 if (page->index == size >> PAGE_SHIFT)
2080 len = size & ~PAGE_CACHE_MASK; 2078 len = size & ~PAGE_MASK;
2081 else 2079 else
2082 len = PAGE_CACHE_SIZE; 2080 len = PAGE_SIZE;
2083 clear_page_dirty_for_io(page); 2081 clear_page_dirty_for_io(page);
2084 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2082 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2085 if (!err) 2083 if (!err)
@@ -2213,7 +2211,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2213 int nr_pages, i; 2211 int nr_pages, i;
2214 struct inode *inode = mpd->inode; 2212 struct inode *inode = mpd->inode;
2215 struct buffer_head *head, *bh; 2213 struct buffer_head *head, *bh;
2216 int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; 2214 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2217 pgoff_t start, end; 2215 pgoff_t start, end;
2218 ext4_lblk_t lblk; 2216 ext4_lblk_t lblk;
2219 sector_t pblock; 2217 sector_t pblock;
@@ -2274,7 +2272,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2274 * supports blocksize < pagesize as we will try to 2272 * supports blocksize < pagesize as we will try to
2275 * convert potentially unmapped parts of inode. 2273 * convert potentially unmapped parts of inode.
2276 */ 2274 */
2277 mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; 2275 mpd->io_submit.io_end->size += PAGE_SIZE;
2278 /* Page fully mapped - let IO run! */ 2276 /* Page fully mapped - let IO run! */
2279 err = mpage_submit_page(mpd, page); 2277 err = mpage_submit_page(mpd, page);
2280 if (err < 0) { 2278 if (err < 0) {
@@ -2426,7 +2424,7 @@ update_disksize:
2426 * Update on-disk size after IO is submitted. Races with 2424 * Update on-disk size after IO is submitted. Races with
2427 * truncate are avoided by checking i_size under i_data_sem. 2425 * truncate are avoided by checking i_size under i_data_sem.
2428 */ 2426 */
2429 disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; 2427 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2430 if (disksize > EXT4_I(inode)->i_disksize) { 2428 if (disksize > EXT4_I(inode)->i_disksize) {
2431 int err2; 2429 int err2;
2432 loff_t i_size; 2430 loff_t i_size;
@@ -2562,7 +2560,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2562 mpd->next_page = page->index + 1; 2560 mpd->next_page = page->index + 1;
2563 /* Add all dirty buffers to mpd */ 2561 /* Add all dirty buffers to mpd */
2564 lblk = ((ext4_lblk_t)page->index) << 2562 lblk = ((ext4_lblk_t)page->index) <<
2565 (PAGE_CACHE_SHIFT - blkbits); 2563 (PAGE_SHIFT - blkbits);
2566 head = page_buffers(page); 2564 head = page_buffers(page);
2567 err = mpage_process_page_bufs(mpd, head, head, lblk); 2565 err = mpage_process_page_bufs(mpd, head, head, lblk);
2568 if (err <= 0) 2566 if (err <= 0)
@@ -2647,7 +2645,7 @@ static int ext4_writepages(struct address_space *mapping,
2647 * We may need to convert up to one extent per block in 2645 * We may need to convert up to one extent per block in
2648 * the page and we may dirty the inode. 2646 * the page and we may dirty the inode.
2649 */ 2647 */
2650 rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); 2648 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
2651 } 2649 }
2652 2650
2653 /* 2651 /*
@@ -2678,8 +2676,8 @@ static int ext4_writepages(struct address_space *mapping,
2678 mpd.first_page = writeback_index; 2676 mpd.first_page = writeback_index;
2679 mpd.last_page = -1; 2677 mpd.last_page = -1;
2680 } else { 2678 } else {
2681 mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; 2679 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2682 mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; 2680 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2683 } 2681 }
2684 2682
2685 mpd.inode = inode; 2683 mpd.inode = inode;
@@ -2838,7 +2836,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2838 struct inode *inode = mapping->host; 2836 struct inode *inode = mapping->host;
2839 handle_t *handle; 2837 handle_t *handle;
2840 2838
2841 index = pos >> PAGE_CACHE_SHIFT; 2839 index = pos >> PAGE_SHIFT;
2842 2840
2843 if (ext4_nonda_switch(inode->i_sb)) { 2841 if (ext4_nonda_switch(inode->i_sb)) {
2844 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2842 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2879,7 @@ retry_journal:
2881 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2879 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2882 ext4_da_write_credits(inode, pos, len)); 2880 ext4_da_write_credits(inode, pos, len));
2883 if (IS_ERR(handle)) { 2881 if (IS_ERR(handle)) {
2884 page_cache_release(page); 2882 put_page(page);
2885 return PTR_ERR(handle); 2883 return PTR_ERR(handle);
2886 } 2884 }
2887 2885
@@ -2889,7 +2887,7 @@ retry_journal:
2889 if (page->mapping != mapping) { 2887 if (page->mapping != mapping) {
2890 /* The page got truncated from under us */ 2888 /* The page got truncated from under us */
2891 unlock_page(page); 2889 unlock_page(page);
2892 page_cache_release(page); 2890 put_page(page);
2893 ext4_journal_stop(handle); 2891 ext4_journal_stop(handle);
2894 goto retry_grab; 2892 goto retry_grab;
2895 } 2893 }
@@ -2917,7 +2915,7 @@ retry_journal:
2917 ext4_should_retry_alloc(inode->i_sb, &retries)) 2915 ext4_should_retry_alloc(inode->i_sb, &retries))
2918 goto retry_journal; 2916 goto retry_journal;
2919 2917
2920 page_cache_release(page); 2918 put_page(page);
2921 return ret; 2919 return ret;
2922 } 2920 }
2923 2921
@@ -2965,7 +2963,7 @@ static int ext4_da_write_end(struct file *file,
2965 len, copied, page, fsdata); 2963 len, copied, page, fsdata);
2966 2964
2967 trace_ext4_da_write_end(inode, pos, len, copied); 2965 trace_ext4_da_write_end(inode, pos, len, copied);
2968 start = pos & (PAGE_CACHE_SIZE - 1); 2966 start = pos & (PAGE_SIZE - 1);
2969 end = start + copied - 1; 2967 end = start + copied - 1;
2970 2968
2971 /* 2969 /*
@@ -3187,7 +3185,7 @@ static int __ext4_journalled_invalidatepage(struct page *page,
3187 /* 3185 /*
3188 * If it's a full truncate we just forget about the pending dirtying 3186 * If it's a full truncate we just forget about the pending dirtying
3189 */ 3187 */
3190 if (offset == 0 && length == PAGE_CACHE_SIZE) 3188 if (offset == 0 && length == PAGE_SIZE)
3191 ClearPageChecked(page); 3189 ClearPageChecked(page);
3192 3190
3193 return jbd2_journal_invalidatepage(journal, page, offset, length); 3191 return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3556,8 +3554,8 @@ void ext4_set_aops(struct inode *inode)
3556static int __ext4_block_zero_page_range(handle_t *handle, 3554static int __ext4_block_zero_page_range(handle_t *handle,
3557 struct address_space *mapping, loff_t from, loff_t length) 3555 struct address_space *mapping, loff_t from, loff_t length)
3558{ 3556{
3559 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3557 ext4_fsblk_t index = from >> PAGE_SHIFT;
3560 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3558 unsigned offset = from & (PAGE_SIZE-1);
3561 unsigned blocksize, pos; 3559 unsigned blocksize, pos;
3562 ext4_lblk_t iblock; 3560 ext4_lblk_t iblock;
3563 struct inode *inode = mapping->host; 3561 struct inode *inode = mapping->host;
@@ -3565,14 +3563,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3565 struct page *page; 3563 struct page *page;
3566 int err = 0; 3564 int err = 0;
3567 3565
3568 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3566 page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3569 mapping_gfp_constraint(mapping, ~__GFP_FS)); 3567 mapping_gfp_constraint(mapping, ~__GFP_FS));
3570 if (!page) 3568 if (!page)
3571 return -ENOMEM; 3569 return -ENOMEM;
3572 3570
3573 blocksize = inode->i_sb->s_blocksize; 3571 blocksize = inode->i_sb->s_blocksize;
3574 3572
3575 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3573 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3576 3574
3577 if (!page_has_buffers(page)) 3575 if (!page_has_buffers(page))
3578 create_empty_buffers(page, blocksize, 0); 3576 create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3612,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3614 ext4_encrypted_inode(inode)) { 3612 ext4_encrypted_inode(inode)) {
3615 /* We expect the key to be set. */ 3613 /* We expect the key to be set. */
3616 BUG_ON(!ext4_has_encryption_key(inode)); 3614 BUG_ON(!ext4_has_encryption_key(inode));
3617 BUG_ON(blocksize != PAGE_CACHE_SIZE); 3615 BUG_ON(blocksize != PAGE_SIZE);
3618 WARN_ON_ONCE(ext4_decrypt(page)); 3616 WARN_ON_ONCE(ext4_decrypt(page));
3619 } 3617 }
3620 } 3618 }
@@ -3638,7 +3636,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3638 3636
3639unlock: 3637unlock:
3640 unlock_page(page); 3638 unlock_page(page);
3641 page_cache_release(page); 3639 put_page(page);
3642 return err; 3640 return err;
3643} 3641}
3644 3642
@@ -3653,7 +3651,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
3653 struct address_space *mapping, loff_t from, loff_t length) 3651 struct address_space *mapping, loff_t from, loff_t length)
3654{ 3652{
3655 struct inode *inode = mapping->host; 3653 struct inode *inode = mapping->host;
3656 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3654 unsigned offset = from & (PAGE_SIZE-1);
3657 unsigned blocksize = inode->i_sb->s_blocksize; 3655 unsigned blocksize = inode->i_sb->s_blocksize;
3658 unsigned max = blocksize - (offset & (blocksize - 1)); 3656 unsigned max = blocksize - (offset & (blocksize - 1));
3659 3657
@@ -3678,7 +3676,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
3678static int ext4_block_truncate_page(handle_t *handle, 3676static int ext4_block_truncate_page(handle_t *handle,
3679 struct address_space *mapping, loff_t from) 3677 struct address_space *mapping, loff_t from)
3680{ 3678{
3681 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3679 unsigned offset = from & (PAGE_SIZE-1);
3682 unsigned length; 3680 unsigned length;
3683 unsigned blocksize; 3681 unsigned blocksize;
3684 struct inode *inode = mapping->host; 3682 struct inode *inode = mapping->host;
@@ -3816,7 +3814,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3816 */ 3814 */
3817 if (offset + length > inode->i_size) { 3815 if (offset + length > inode->i_size) {
3818 length = inode->i_size + 3816 length = inode->i_size +
3819 PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - 3817 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3820 offset; 3818 offset;
3821 } 3819 }
3822 3820
@@ -4891,23 +4889,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
4891 tid_t commit_tid = 0; 4889 tid_t commit_tid = 0;
4892 int ret; 4890 int ret;
4893 4891
4894 offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 4892 offset = inode->i_size & (PAGE_SIZE - 1);
4895 /* 4893 /*
4896 * All buffers in the last page remain valid? Then there's nothing to 4894 * All buffers in the last page remain valid? Then there's nothing to
4897 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 4895 * do. We do the check mainly to optimize the common PAGE_SIZE ==
4898 * blocksize case 4896 * blocksize case
4899 */ 4897 */
4900 if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) 4898 if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
4901 return; 4899 return;
4902 while (1) { 4900 while (1) {
4903 page = find_lock_page(inode->i_mapping, 4901 page = find_lock_page(inode->i_mapping,
4904 inode->i_size >> PAGE_CACHE_SHIFT); 4902 inode->i_size >> PAGE_SHIFT);
4905 if (!page) 4903 if (!page)
4906 return; 4904 return;
4907 ret = __ext4_journalled_invalidatepage(page, offset, 4905 ret = __ext4_journalled_invalidatepage(page, offset,
4908 PAGE_CACHE_SIZE - offset); 4906 PAGE_SIZE - offset);
4909 unlock_page(page); 4907 unlock_page(page);
4910 page_cache_release(page); 4908 put_page(page);
4911 if (ret != -EBUSY) 4909 if (ret != -EBUSY)
4912 return; 4910 return;
4913 commit_tid = 0; 4911 commit_tid = 0;
@@ -5546,10 +5544,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5546 goto out; 5544 goto out;
5547 } 5545 }
5548 5546
5549 if (page->index == size >> PAGE_CACHE_SHIFT) 5547 if (page->index == size >> PAGE_SHIFT)
5550 len = size & ~PAGE_CACHE_MASK; 5548 len = size & ~PAGE_MASK;
5551 else 5549 else
5552 len = PAGE_CACHE_SIZE; 5550 len = PAGE_SIZE;
5553 /* 5551 /*
5554 * Return if we have all the buffers mapped. This avoids the need to do 5552 * Return if we have all the buffers mapped. This avoids the need to do
5555 * journal_start/journal_stop which can block and take a long time 5553 * journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5578,7 @@ retry_alloc:
5580 ret = block_page_mkwrite(vma, vmf, get_block); 5578 ret = block_page_mkwrite(vma, vmf, get_block);
5581 if (!ret && ext4_should_journal_data(inode)) { 5579 if (!ret && ext4_should_journal_data(inode)) {
5582 if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 5580 if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
5583 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 5581 PAGE_SIZE, NULL, do_journal_get_write_access)) {
5584 unlock_page(page); 5582 unlock_page(page);
5585 ret = VM_FAULT_SIGBUS; 5583 ret = VM_FAULT_SIGBUS;
5586 ext4_journal_stop(handle); 5584 ext4_journal_stop(handle);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 50e05df28f66..eeeade76012e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
119 * 119 *
120 * 120 *
121 * one block each for bitmap and buddy information. So for each group we 121 * one block each for bitmap and buddy information. So for each group we
122 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / 122 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
123 * blocksize) blocks. So it can have information regarding groups_per_page 123 * blocksize) blocks. So it can have information regarding groups_per_page
124 * which is blocks_per_page/2 124 * which is blocks_per_page/2
125 * 125 *
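Concretely: with PAGE_SIZE = 4096 and blocksize = 1024, blocks_per_page = 4096 / 1024 = 4, and since each group needs one bitmap block plus one buddy block, groups_per_page = 4 / 2 = 2. When blocksize equals PAGE_SIZE the shift yields 0 and, as the ext4_mb_init_cache() hunk further down guards, groups_per_page is clamped to 1.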
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
807 * 807 *
808 * one block each for bitmap and buddy information. 808 * one block each for bitmap and buddy information.
809 * So for each group we take up 2 blocks. A page can 809 * So for each group we take up 2 blocks. A page can
810 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 810 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
811 * So it can have information regarding groups_per_page which 811 * So it can have information regarding groups_per_page which
812 * is blocks_per_page/2 812 * is blocks_per_page/2
813 * 813 *
@@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
839 sb = inode->i_sb; 839 sb = inode->i_sb;
840 ngroups = ext4_get_groups_count(sb); 840 ngroups = ext4_get_groups_count(sb);
841 blocksize = 1 << inode->i_blkbits; 841 blocksize = 1 << inode->i_blkbits;
842 blocks_per_page = PAGE_CACHE_SIZE / blocksize; 842 blocks_per_page = PAGE_SIZE / blocksize;
843 843
844 groups_per_page = blocks_per_page >> 1; 844 groups_per_page = blocks_per_page >> 1;
845 if (groups_per_page == 0) 845 if (groups_per_page == 0)
@@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
993 e4b->bd_buddy_page = NULL; 993 e4b->bd_buddy_page = NULL;
994 e4b->bd_bitmap_page = NULL; 994 e4b->bd_bitmap_page = NULL;
995 995
996 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 996 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
997 /* 997 /*
998 * the buddy cache inode stores the block bitmap 998 * the buddy cache inode stores the block bitmap
999 * and buddy information in consecutive blocks. 999 * and buddy information in consecutive blocks.
@@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1028{ 1028{
1029 if (e4b->bd_bitmap_page) { 1029 if (e4b->bd_bitmap_page) {
1030 unlock_page(e4b->bd_bitmap_page); 1030 unlock_page(e4b->bd_bitmap_page);
1031 page_cache_release(e4b->bd_bitmap_page); 1031 put_page(e4b->bd_bitmap_page);
1032 } 1032 }
1033 if (e4b->bd_buddy_page) { 1033 if (e4b->bd_buddy_page) {
1034 unlock_page(e4b->bd_buddy_page); 1034 unlock_page(e4b->bd_buddy_page);
1035 page_cache_release(e4b->bd_buddy_page); 1035 put_page(e4b->bd_buddy_page);
1036 } 1036 }
1037} 1037}
1038 1038
@@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1125 might_sleep(); 1125 might_sleep();
1126 mb_debug(1, "load group %u\n", group); 1126 mb_debug(1, "load group %u\n", group);
1127 1127
1128 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 1128 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1129 grp = ext4_get_group_info(sb, group); 1129 grp = ext4_get_group_info(sb, group);
1130 1130
1131 e4b->bd_blkbits = sb->s_blocksize_bits; 1131 e4b->bd_blkbits = sb->s_blocksize_bits;
@@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1167 * is yet to initialize the same. So 1167 * is yet to initialize the same. So
1168 * wait for it to initialize. 1168 * wait for it to initialize.
1169 */ 1169 */
1170 page_cache_release(page); 1170 put_page(page);
1171 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1171 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1172 if (page) { 1172 if (page) {
1173 BUG_ON(page->mapping != inode->i_mapping); 1173 BUG_ON(page->mapping != inode->i_mapping);
@@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1204 if (page == NULL || !PageUptodate(page)) { 1204 if (page == NULL || !PageUptodate(page)) {
1205 if (page) 1205 if (page)
1206 page_cache_release(page); 1206 put_page(page);
1207 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1207 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1208 if (page) { 1208 if (page) {
1209 BUG_ON(page->mapping != inode->i_mapping); 1209 BUG_ON(page->mapping != inode->i_mapping);
@@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1238 1238
1239err: 1239err:
1240 if (page) 1240 if (page)
1241 page_cache_release(page); 1241 put_page(page);
1242 if (e4b->bd_bitmap_page) 1242 if (e4b->bd_bitmap_page)
1243 page_cache_release(e4b->bd_bitmap_page); 1243 put_page(e4b->bd_bitmap_page);
1244 if (e4b->bd_buddy_page) 1244 if (e4b->bd_buddy_page)
1245 page_cache_release(e4b->bd_buddy_page); 1245 put_page(e4b->bd_buddy_page);
1246 e4b->bd_buddy = NULL; 1246 e4b->bd_buddy = NULL;
1247 e4b->bd_bitmap = NULL; 1247 e4b->bd_bitmap = NULL;
1248 return ret; 1248 return ret;
@@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1257static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1257static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1258{ 1258{
1259 if (e4b->bd_bitmap_page) 1259 if (e4b->bd_bitmap_page)
1260 page_cache_release(e4b->bd_bitmap_page); 1260 put_page(e4b->bd_bitmap_page);
1261 if (e4b->bd_buddy_page) 1261 if (e4b->bd_buddy_page)
1262 page_cache_release(e4b->bd_buddy_page); 1262 put_page(e4b->bd_buddy_page);
1263} 1263}
1264 1264
1265 1265
@@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb,
2833 /* No more items in the per group rb tree 2833 /* No more items in the per group rb tree
2834 * balance refcounts from ext4_mb_free_metadata() 2834 * balance refcounts from ext4_mb_free_metadata()
2835 */ 2835 */
2836 page_cache_release(e4b.bd_buddy_page); 2836 put_page(e4b.bd_buddy_page);
2837 page_cache_release(e4b.bd_bitmap_page); 2837 put_page(e4b.bd_bitmap_page);
2838 } 2838 }
2839 ext4_unlock_group(sb, entry->efd_group); 2839 ext4_unlock_group(sb, entry->efd_group);
2840 kmem_cache_free(ext4_free_data_cachep, entry); 2840 kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4385 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4385 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4386 } 4386 }
4387 if (ac->ac_bitmap_page) 4387 if (ac->ac_bitmap_page)
4388 page_cache_release(ac->ac_bitmap_page); 4388 put_page(ac->ac_bitmap_page);
4389 if (ac->ac_buddy_page) 4389 if (ac->ac_buddy_page)
4390 page_cache_release(ac->ac_buddy_page); 4390 put_page(ac->ac_buddy_page);
4391 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4391 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4392 mutex_unlock(&ac->ac_lg->lg_mutex); 4392 mutex_unlock(&ac->ac_lg->lg_mutex);
4393 ext4_mb_collect_stats(ac); 4393 ext4_mb_collect_stats(ac);
@@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4599 * otherwise we'll refresh it from 4599 * otherwise we'll refresh it from
4600 * on-disk bitmap and lose not-yet-available 4600 * on-disk bitmap and lose not-yet-available
4601 * blocks */ 4601 * blocks */
4602 page_cache_get(e4b->bd_buddy_page); 4602 get_page(e4b->bd_buddy_page);
4603 page_cache_get(e4b->bd_bitmap_page); 4603 get_page(e4b->bd_bitmap_page);
4604 } 4604 }
4605 while (*n) { 4605 while (*n) {
4606 parent = *n; 4606 parent = *n;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4098acc701c3..325cef48b39a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
60{ 60{
61 if (first < second) { 61 if (first < second) {
62 down_write(&EXT4_I(first)->i_data_sem); 62 down_write(&EXT4_I(first)->i_data_sem);
63 down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); 63 down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
64 } else { 64 } else {
65 down_write(&EXT4_I(second)->i_data_sem); 65 down_write(&EXT4_I(second)->i_data_sem);
66 down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING); 66 down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
67 67
68 } 68 }
69} 69}
@@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); 156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
157 if (!page[1]) { 157 if (!page[1]) {
158 unlock_page(page[0]); 158 unlock_page(page[0]);
159 page_cache_release(page[0]); 159 put_page(page[0]);
160 return -ENOMEM; 160 return -ENOMEM;
161 } 161 }
162 /* 162 /*
@@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
192 create_empty_buffers(page, blocksize, 0); 192 create_empty_buffers(page, blocksize, 0);
193 193
194 head = page_buffers(page); 194 head = page_buffers(page);
195 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 195 block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
196 for (bh = head, block_start = 0; bh != head || !block_start; 196 for (bh = head, block_start = 0; bh != head || !block_start;
197 block++, block_start = block_end, bh = bh->b_this_page) { 197 block++, block_start = block_end, bh = bh->b_this_page) {
198 block_end = block_start + blocksize; 198 block_end = block_start + blocksize;
@@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
268 int i, err2, jblocks, retries = 0; 268 int i, err2, jblocks, retries = 0;
269 int replaced_count = 0; 269 int replaced_count = 0;
270 int from = data_offset_in_page << orig_inode->i_blkbits; 270 int from = data_offset_in_page << orig_inode->i_blkbits;
271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 271 int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
272 struct super_block *sb = orig_inode->i_sb; 272 struct super_block *sb = orig_inode->i_sb;
273 struct buffer_head *bh = NULL; 273 struct buffer_head *bh = NULL;
274 274
@@ -404,9 +404,9 @@ data_copy:
404 404
405unlock_pages: 405unlock_pages:
406 unlock_page(pagep[0]); 406 unlock_page(pagep[0]);
407 page_cache_release(pagep[0]); 407 put_page(pagep[0]);
408 unlock_page(pagep[1]); 408 unlock_page(pagep[1]);
409 page_cache_release(pagep[1]); 409 put_page(pagep[1]);
410stop_journal: 410stop_journal:
411 ext4_journal_stop(handle); 411 ext4_journal_stop(handle);
412 if (*err == -ENOSPC && 412 if (*err == -ENOSPC &&
@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
484 return -EBUSY; 484 return -EBUSY;
485 } 485 }
486 486
487 if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
488 ext4_debug("ext4 move extent: The argument files should "
489 "not be quota files [ino:orig %lu, donor %lu]\n",
490 orig_inode->i_ino, donor_inode->i_ino);
491 return -EBUSY;
492 }
493
487 /* Ext4 move extent supports only extent based file */ 494 /* Ext4 move extent supports only extent based file */
488 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) { 495 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
489 ext4_debug("ext4 move extent: orig file is not extents " 496 ext4_debug("ext4 move extent: orig file is not extents "
@@ -554,7 +561,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
554 struct inode *orig_inode = file_inode(o_filp); 561 struct inode *orig_inode = file_inode(o_filp);
555 struct inode *donor_inode = file_inode(d_filp); 562 struct inode *donor_inode = file_inode(d_filp);
556 struct ext4_ext_path *path = NULL; 563 struct ext4_ext_path *path = NULL;
557 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 564 int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
558 ext4_lblk_t o_end, o_start = orig_blk; 565 ext4_lblk_t o_end, o_start = orig_blk;
559 ext4_lblk_t d_start = donor_blk; 566 ext4_lblk_t d_start = donor_blk;
560 int ret; 567 int ret;
@@ -648,9 +655,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
648 if (o_end - o_start < cur_len) 655 if (o_end - o_start < cur_len)
649 cur_len = o_end - o_start; 656 cur_len = o_end - o_start;
650 657
651 orig_page_index = o_start >> (PAGE_CACHE_SHIFT - 658 orig_page_index = o_start >> (PAGE_SHIFT -
652 orig_inode->i_blkbits); 659 orig_inode->i_blkbits);
653 donor_page_index = d_start >> (PAGE_CACHE_SHIFT - 660 donor_page_index = d_start >> (PAGE_SHIFT -
654 donor_inode->i_blkbits); 661 donor_inode->i_blkbits);
655 offset_in_page = o_start % blocks_per_page; 662 offset_in_page = o_start % blocks_per_page;
656 if (cur_len > blocks_per_page - offset_in_page) 663 if (cur_len > blocks_per_page - offset_in_page)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d77d15f4b674..e4fc8ea45d78 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/backing-dev.h>
26 27
27#include "ext4_jbd2.h" 28#include "ext4_jbd2.h"
28#include "xattr.h" 29#include "xattr.h"
@@ -432,8 +433,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
432 * the page size, the remaining memory is zeroed when mapped, and 433 * the page size, the remaining memory is zeroed when mapped, and
433 * writes to that region are not written out to the file." 434 * writes to that region are not written out to the file."
434 */ 435 */
435 if (len < PAGE_CACHE_SIZE) 436 if (len < PAGE_SIZE)
436 zero_user_segment(page, len, PAGE_CACHE_SIZE); 437 zero_user_segment(page, len, PAGE_SIZE);
437 /* 438 /*
438 * In the first loop we prepare and mark buffers to submit. We have to 439 * In the first loop we prepare and mark buffers to submit. We have to
439 * mark all buffers in the page before submitting so that 440 * mark all buffers in the page before submitting so that
@@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
470 471
471 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) && 472 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
472 nr_to_submit) { 473 nr_to_submit) {
473 data_page = ext4_encrypt(inode, page); 474 gfp_t gfp_flags = GFP_NOFS;
475
476 retry_encrypt:
477 data_page = ext4_encrypt(inode, page, gfp_flags);
474 if (IS_ERR(data_page)) { 478 if (IS_ERR(data_page)) {
475 ret = PTR_ERR(data_page); 479 ret = PTR_ERR(data_page);
480 if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
481 if (io->io_bio) {
482 ext4_io_submit(io);
483 congestion_wait(BLK_RW_ASYNC, HZ/50);
484 }
485 gfp_flags |= __GFP_NOFAIL;
486 goto retry_encrypt;
487 }
476 data_page = NULL; 488 data_page = NULL;
477 goto out; 489 goto out;
478 } 490 }
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 5dc5e95063de..dc54a4b60eba 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  */
 
@@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
 	struct inode *inode = mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
@@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		if (page_has_buffers(page))
 			goto confused;
 
-		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 		last_block = block_in_file + nr_pages * blocks_per_page;
 		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 		if (last_block > last_block_in_file)
@@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		set_error_page:
 			SetPageError(page);
 			zero_user_segment(page, 0,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 			unlock_page(page);
 			goto next_page;
 		}
@@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		}
 		if (first_hole != blocks_per_page) {
 			zero_user_segment(page, first_hole << blkbits,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 			if (first_hole == 0) {
 				SetPageUptodate(page);
 				unlock_page(page);
@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
 		if (ext4_encrypted_inode(inode) &&
 		    S_ISREG(inode->i_mode)) {
-			ctx = ext4_get_crypto_ctx(inode);
+			ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
 			if (IS_ERR(ctx))
 				goto set_error_page;
 		}
@@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		unlock_page(page);
 	next_page:
 		if (pages)
-			page_cache_release(page);
+			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
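Note: the readpage.c hunks, like most of what follows, are mechanical fallout of the page-cache cleanup in this merge: PAGE_CACHE_{SIZE,SHIFT,MASK} were plain aliases of PAGE_{SIZE,SHIFT,MASK} and page_cache_release() of put_page(), so no behavior changes. The arithmetic being renamed is the usual byte-offset/page-index conversion:

	pgoff_t index = pos >> PAGE_SHIFT;			/* which page */
	unsigned int off = pos & (PAGE_SIZE - 1);		/* offset inside it */
	loff_t byte = ((loff_t)index << PAGE_SHIFT) + off;	/* back to bytes; == pos */

The explicit (loff_t)/(sector_t) casts before the left shifts in these call sites are load-bearing: shifting a 32-bit index would overflow for large files.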
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 539297515896..304c712dbe12 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1113,6 +1113,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
 			     unsigned int flags);
 static int ext4_enable_quotas(struct super_block *sb);
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
 
 static struct dquot **ext4_get_dquots(struct inode *inode)
 {
@@ -1129,7 +1130,7 @@ static const struct dquot_operations ext4_quota_operations = {
 	.alloc_dquot	= dquot_alloc,
 	.destroy_dquot	= dquot_destroy,
 	.get_projid	= ext4_get_projid,
-	.get_next_id	= dquot_get_next_id,
+	.get_next_id	= ext4_get_next_id,
 };
 
 static const struct quotactl_ops ext4_qctl_operations = {
@@ -1323,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
 		return -1;
 	}
 	if (ext4_has_feature_quota(sb)) {
-		ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
-			 "when QUOTA feature is enabled");
-		return -1;
+		ext4_msg(sb, KERN_INFO, "Journaled quota options "
+			 "ignored when QUOTA feature is enabled");
+		return 1;
 	}
 	qname = match_strdup(args);
 	if (!qname) {
@@ -1688,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
 			return -1;
 		}
 		if (ext4_has_feature_quota(sb)) {
-			ext4_msg(sb, KERN_ERR,
-				 "Cannot set journaled quota options "
-				 "when QUOTA feature is enabled");
-			return -1;
+			ext4_msg(sb, KERN_INFO,
+				 "Quota format mount options ignored "
+				 "when QUOTA feature is enabled");
+			return 1;
 		}
 		sbi->s_jquota_fmt = m->mount_opt;
 #endif
@@ -1756,11 +1757,11 @@ static int parse_options(char *options, struct super_block *sb,
 #ifdef CONFIG_QUOTA
 	if (ext4_has_feature_quota(sb) &&
 	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
-		ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
-			 "feature is enabled");
-		return 0;
-	}
-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
+			 "mount options ignored.");
+		clear_opt(sb, USRQUOTA);
+		clear_opt(sb, GRPQUOTA);
+	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
 			clear_opt(sb, USRQUOTA);
 
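Note: in ext4's option parser, set_qf_name()/handle_mount_opt() return 1 for an option that was handled and -1 to fail the mount, while parse_options() itself returns 0 on failure. The three hunks above therefore change policy, not just wording: with the QUOTA feature enabled, legacy journaled-quota and usrquota/grpquota mount options are now logged at KERN_INFO and ignored rather than rejecting the mount, so existing fstab entries keep working after tune2fs -O quota. The ignore pattern in compressed form:

	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "... ignored ...");
		return 1;	/* consumed as a no-op; -1 would fail the mount */
	}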
@@ -1784,7 +1785,7 @@ static int parse_options(char *options, struct super_block *sb,
 		int blocksize =
 			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
-		if (blocksize < PAGE_CACHE_SIZE) {
+		if (blocksize < PAGE_SIZE) {
 			ext4_msg(sb, KERN_ERR, "can't mount with "
 				 "dioread_nolock if block size != PAGE_SIZE");
 			return 0;
@@ -3808,7 +3809,7 @@ no_journal:
 	}
 
 	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
-	    (blocksize != PAGE_CACHE_SIZE)) {
+	    (blocksize != PAGE_SIZE)) {
 		ext4_msg(sb, KERN_ERR,
 			 "Unsupported blocksize for fs encryption");
 		goto failed_mount_wq;
@@ -5028,6 +5029,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
 				EXT4_SB(sb)->s_jquota_fmt, type);
 }
 
+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	/* The first argument of lockdep_set_subclass has to be
+	 * *exactly* the same as the argument to init_rwsem() --- in
+	 * this case, in init_once() --- or lockdep gets unhappy
+	 * because the name of the lock is set using the
+	 * stringification of the argument to init_rwsem().
+	 */
+	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
+	lockdep_set_subclass(&ei->i_data_sem, subclass);
+}
+
 /*
  * Standard function to be called on quota_on
  */
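Note: lockdep_set_quota_inode() exists because i_data_sem on a quota file nests inside the quota locks, the opposite of the order lockdep observes on regular inodes, which produces false-positive deadlock splats. Moving the quota inode's rwsem into its own subclass tells lockdep the two orderings belong to distinct lock classes. The general shape (the enum is assumed from context; only I_DATA_SEM_NORMAL and I_DATA_SEM_QUOTA appear in this diff):

	enum {
		I_DATA_SEM_NORMAL = 0,
		I_DATA_SEM_QUOTA,
	};

	/* must name the exact rwsem expression that init_rwsem() saw */
	lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_QUOTA);

Both call sites below roll the subclass back to I_DATA_SEM_NORMAL on failure, so a later ordinary open of the same inode is classified correctly.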
@@ -5067,8 +5082,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 		if (err)
 			return err;
 	}
-
-	return dquot_quota_on(sb, type, format_id, path);
+	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
+	err = dquot_quota_on(sb, type, format_id, path);
+	if (err)
+		lockdep_set_quota_inode(path->dentry->d_inode,
+					I_DATA_SEM_NORMAL);
+	return err;
 }
 
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -5095,8 +5114,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
 
 	/* Don't account quota for quota files to avoid recursion */
 	qf_inode->i_flags |= S_NOQUOTA;
+	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
 	err = dquot_enable(qf_inode, type, format_id, flags);
 	iput(qf_inode);
+	if (err)
+		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
 
 	return err;
 }
@@ -5253,6 +5275,17 @@ out:
 	return len;
 }
 
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+	const struct quota_format_ops	*ops;
+
+	if (!sb_has_quota_loaded(sb, qid->type))
+		return -ESRCH;
+	ops = sb_dqopt(sb)->ops[qid->type];
+	if (!ops || !ops->get_next_id)
+		return -ENOSYS;
+	return dquot_get_next_id(sb, qid);
+}
 #endif
 
 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
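Note: ext4_get_next_id() is the filesystem-side hook for the Q_GETNEXTQUOTA quotactl: iterating allocated quota ids only works when a quota format implementing ->get_next_id is loaded, hence the -ESRCH and -ENOSYS guards before delegating to dquot_get_next_id(). A hedged sketch of the userspace side, assuming uapi headers new enough to provide Q_GETNEXTQUOTA and struct if_nextdqblk (both new in this cycle):

	#include <stdio.h>
	#include <sys/quota.h>
	#include <linux/quota.h>

	struct if_nextdqblk ndq;
	/* first allocated user id at or above id 0 on that device */
	if (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), "/dev/sda1",
		     0, (caddr_t)&ndq) == 0)
		printf("next id: %u\n", ndq.dqb_id);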
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 6f7ee30a89ce..75ed5c2f0c16 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
 	if (res <= plen)
 		paddr[res] = '\0';
 	if (cpage)
-		page_cache_release(cpage);
+		put_page(cpage);
 	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
 errout:
 	if (cpage)
-		page_cache_release(cpage);
+		put_page(cpage);
 	kfree(paddr);
 	return ERR_PTR(res);
 }
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 0441e055c8e8..e79bd32b9b79 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -230,6 +230,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 	return error;
 }
 
+static int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+		    void *end, const char *function, unsigned int line)
+{
+	struct ext4_xattr_entry *entry = IFIRST(header);
+	int error = -EFSCORRUPTED;
+
+	if (((void *) header >= end) ||
+	    (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+		goto errout;
+	error = ext4_xattr_check_names(entry, end, entry);
+errout:
+	if (error)
+		__ext4_error_inode(inode, function, line, 0,
+				   "corrupted in-inode xattr");
+	return error;
+}
+
+#define xattr_check_inode(inode, header, end) \
+	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
 static inline int
 ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
 {
@@ -341,7 +362,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
 	header = IHDR(inode, raw_inode);
 	entry = IFIRST(header);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(entry, end, entry);
+	error = xattr_check_inode(inode, header, end);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -477,7 +498,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+	error = xattr_check_inode(inode, header, end);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -1040,8 +1061,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
 	is->s.here = is->s.first;
 	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
-					       IFIRST(header));
+		error = xattr_check_inode(inode, header, is->s.end);
 		if (error)
 			return error;
 		/* Find the named attribute. */
@@ -1356,6 +1376,10 @@ retry:
 	last = entry;
 	total_ino = sizeof(struct ext4_xattr_ibody_header);
 
+	error = xattr_check_inode(inode, header, end);
+	if (error)
+		goto cleanup;
+
 	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
 	if (free >= new_extra_isize) {
 		entry = IFIRST(header);
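Note: the xattr.c hunks close a validation gap. The get/list/find paths previously checked only the entry names, and ext4_expand_extra_isize_ea() walked the in-inode list with no check at all; xattr_check_inode() now fronts all four paths, also verifying that the header lies inside the on-disk inode and carries the xattr magic, so a corrupted or crafted inode yields -EFSCORRUPTED plus an ext4_error_inode() report instead of an out-of-bounds walk. The shared call pattern:

	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;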
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e5c762b37239..5dafb9cef12e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	/* Allocate a new bio */
 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
 
-	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
 	}
@@ -265,8 +265,8 @@ alloc_new:
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 
-	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
-							PAGE_CACHE_SIZE) {
+	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+							PAGE_SIZE) {
 		__submit_merged_bio(io);
 		goto alloc_new;
 	}
@@ -406,7 +406,7 @@ got_it:
 	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
 	 */
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		unlock_page(page);
 		return page;
@@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode,
 		goto got_it;
 
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	} else {
 		f2fs_put_page(page, 1);
@@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode,
 	}
 got_it:
 	if (new_i_size && i_size_read(inode) <
-				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
-		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
+				((loff_t)(index + 1) << PAGE_SHIFT)) {
+		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
 		/* Only the directory inode sets new_i_size */
 		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
 	}
@@ -570,9 +570,9 @@ alloc:
 	/* update i_size */
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 							dn->ofs_in_node;
-	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
 		i_size_write(dn->inode,
-				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
+				((loff_t)(fofs + 1) << PAGE_SHIFT));
 	return 0;
 }
 
@@ -971,7 +971,7 @@ got_it:
 			goto confused;
 		}
 	} else {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		unlock_page(page);
 		goto next_page;
@@ -992,7 +992,7 @@ submit_and_realloc:
 		if (f2fs_encrypted_inode(inode) &&
 			S_ISREG(inode->i_mode)) {
 
-			ctx = fscrypt_get_ctx(inode);
+			ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 			if (IS_ERR(ctx))
 				goto set_error_page;
 
@@ -1021,7 +1021,7 @@ submit_and_realloc:
 		goto next_page;
 set_error_page:
 		SetPageError(page);
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		goto next_page;
 confused:
@@ -1032,7 +1032,7 @@ confused:
 		unlock_page(page);
 next_page:
 		if (pages)
-			page_cache_release(page);
+			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
@@ -1092,14 +1092,24 @@ int do_write_data_page(struct f2fs_io_info *fio)
 	}
 
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+		gfp_t gfp_flags = GFP_NOFS;
 
 		/* wait for GCed encrypted page writeback */
 		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
 							fio->old_blkaddr);
-
-		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
+retry_encrypt:
+		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+								gfp_flags);
 		if (IS_ERR(fio->encrypted_page)) {
 			err = PTR_ERR(fio->encrypted_page);
+			if (err == -ENOMEM) {
+				/* flush pending ios and wait for a while */
+				f2fs_flush_merged_bios(F2FS_I_SB(inode));
+				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				gfp_flags |= __GFP_NOFAIL;
+				err = 0;
+				goto retry_encrypt;
+			}
 			goto out_writepage;
 		}
 	}
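Note: f2fs adopts the same ENOMEM-retry idiom as the ext4 page-io.c hunk earlier, with two differences worth flagging: it retries every -ENOMEM rather than only WB_SYNC_ALL writeback, and it must clear err before looping. Annotated:

	if (err == -ENOMEM) {
		f2fs_flush_merged_bios(F2FS_I_SB(inode));	/* unpin queued pages */
		congestion_wait(BLK_RW_ASYNC, HZ/50);		/* brief backoff */
		gfp_flags |= __GFP_NOFAIL;			/* retry cannot fail */
		err = 0;	/* otherwise a stale -ENOMEM would escape on success */
		goto retry_encrypt;
	}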
@@ -1136,7 +1146,7 @@ static int f2fs_write_data_page(struct page *page,
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = ((unsigned long long) i_size)
-							>> PAGE_CACHE_SHIFT;
+							>> PAGE_SHIFT;
 	unsigned offset = 0;
 	bool need_balance_fs = false;
 	int err = 0;
@@ -1157,11 +1167,11 @@ static int f2fs_write_data_page(struct page *page,
 	 * If the offset is out-of-range of file size,
 	 * this page does not have to be written to disk.
 	 */
-	offset = i_size & (PAGE_CACHE_SIZE - 1);
+	offset = i_size & (PAGE_SIZE - 1);
 	if ((page->index >= end_index + 1) || !offset)
 		goto out;
 
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 write:
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
@@ -1267,8 +1277,8 @@ next:
 		cycled = 0;
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
@@ -1448,11 +1458,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 	 * the block addresses when there is no need to fill the page.
 	 */
 	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
-					len == PAGE_CACHE_SIZE)
+					len == PAGE_SIZE)
 		return 0;
 
 	if (f2fs_has_inline_data(inode) ||
-			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+			(pos & PAGE_MASK) >= i_size_read(inode)) {
 		f2fs_lock_op(sbi);
 		locked = true;
 	}
@@ -1513,7 +1523,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *page = NULL;
-	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
+	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
 	bool need_balance = false;
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
@@ -1561,22 +1571,22 @@ repeat:
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
 
-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out_update;
 	if (PageUptodate(page))
 		goto out_clear;
 
-	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+		unsigned start = pos & (PAGE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
-		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+		zero_user_segments(page, 0, start, end, PAGE_SIZE);
 		goto out_update;
 	}
 
 	if (blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 	} else {
 		struct f2fs_io_info fio = {
 			.sbi = sbi,
@@ -1688,7 +1698,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
-		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+		(offset % PAGE_SIZE || length != PAGE_SIZE))
 		return;
 
 	if (PageDirty(page)) {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 4fb6ef88a34f..f4a61a5ff79f 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 
 	/* build curseg */
 	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
-	si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
 
 	/* build dirty segmap */
 	si->base_mem += sizeof(struct dirty_seglist_info);
@@ -201,9 +201,9 @@ get_cache:
 
 	si->page_mem = 0;
 	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
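Note: the debug.c counters turn page counts into bytes; the widening cast before the shift is the important part, since nrpages is an unsigned long and shifting it by PAGE_SHIFT can overflow on 32-bit builds:

	npages = NODE_MAPPING(sbi)->nrpages;
	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;	/* pages -> bytes */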
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 80641ad82745..af819571bce7 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
 
 static unsigned long dir_blocks(struct inode *inode)
 {
-	return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
-							>> PAGE_CACHE_SHIFT;
+	return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+							>> PAGE_SHIFT;
 }
 
 static unsigned int dir_buckets(unsigned int level, int dir_level)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bbe2cd1265d0..7a4558d17f36 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b41c3579ea9e..90d1157a09f9 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 		goto mapped;
 
 	/* page is wholly or partially inside EOF */
-	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
 						i_size_read(inode)) {
 		unsigned offset;
-		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
-		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+		offset = i_size_read(inode) & ~PAGE_MASK;
+		zero_user_segment(page, offset, PAGE_SIZE);
 	}
 	set_page_dirty(page);
 	SetPageUptodate(page);
@@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 		goto found;
 	}
 
-	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 
 	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
-	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
 		if (err && err != -ENOENT) {
@@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 		/* find data/hole in dnode block */
 		for (; dn.ofs_in_node < end_offset;
 				dn.ofs_in_node++, pgofs++,
-				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 			block_t blkaddr;
 			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 
@@ -441,7 +441,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 static int f2fs_file_open(struct inode *inode, struct file *filp)
 {
 	int ret = generic_file_open(inode, filp);
-	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+	struct dentry *dir;
 
 	if (!ret && f2fs_encrypted_inode(inode)) {
 		ret = fscrypt_get_encryption_info(inode);
@@ -450,9 +450,13 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
 		if (!fscrypt_has_encryption_key(inode))
 			return -ENOKEY;
 	}
-	if (f2fs_encrypted_inode(dir) &&
-			!fscrypt_has_permitted_context(dir, inode))
+	dir = dget_parent(file_dentry(filp));
+	if (f2fs_encrypted_inode(d_inode(dir)) &&
+			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
+		dput(dir);
 		return -EPERM;
+	}
+	dput(dir);
 	return ret;
 }
 
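Note: the f2fs_file_open() change is a correctness fix riding along with the series: filp->f_path.dentry can be an overlayfs dentry and chasing ->d_parent by hand is racy, so the parent is reached via file_dentry() and pinned with dget_parent(), which must be paired with dput() on every exit path. The shape to copy, with the actual check replaced by a hypothetical helper:

	struct dentry *dir = dget_parent(file_dentry(filp));

	if (!permitted(d_inode(dir), inode)) {	/* hypothetical check */
		dput(dir);			/* drop the ref on the error path too */
		return -EPERM;
	}
	dput(dir);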
@@ -508,8 +512,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
 static int truncate_partial_data_page(struct inode *inode, u64 from,
 								bool cache_only)
 {
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE - 1);
+	pgoff_t index = from >> PAGE_SHIFT;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 
@@ -529,7 +533,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 		return 0;
truncate_out:
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
+	zero_user(page, offset, PAGE_SIZE - offset);
 	if (!cache_only || !f2fs_encrypted_inode(inode) ||
 					!S_ISREG(inode->i_mode))
 		set_page_dirty(page);
@@ -799,11 +803,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	if (ret)
 		return ret;
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	if (pg_start == pg_end) {
 		ret = fill_zero(inode, pg_start, off_start,
@@ -813,7 +817,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	} else {
 		if (off_start) {
 			ret = fill_zero(inode, pg_start++, off_start,
-						PAGE_CACHE_SIZE - off_start);
+						PAGE_SIZE - off_start);
 			if (ret)
 				return ret;
 		}
@@ -830,8 +834,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 			f2fs_balance_fs(sbi, true);
 
-			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
-			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+			blk_start = (loff_t)pg_start << PAGE_SHIFT;
+			blk_end = (loff_t)pg_end << PAGE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
 
@@ -954,8 +958,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	if (ret)
 		return ret;
 
-	pg_start = offset >> PAGE_CACHE_SHIFT;
-	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = offset >> PAGE_SHIFT;
+	pg_end = (offset + len) >> PAGE_SHIFT;
 
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1006,11 +1010,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 
 	truncate_pagecache_range(inode, offset, offset + len - 1);
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	if (pg_start == pg_end) {
 		ret = fill_zero(inode, pg_start, off_start,
@@ -1024,12 +1028,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 	} else {
 		if (off_start) {
 			ret = fill_zero(inode, pg_start++, off_start,
-						PAGE_CACHE_SIZE - off_start);
+						PAGE_SIZE - off_start);
 			if (ret)
 				return ret;
 
 			new_size = max_t(loff_t, new_size,
-					(loff_t)pg_start << PAGE_CACHE_SHIFT);
+					(loff_t)pg_start << PAGE_SHIFT);
 		}
 
 		for (index = pg_start; index < pg_end; index++) {
@@ -1060,7 +1064,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			f2fs_unlock_op(sbi);
 
 			new_size = max_t(loff_t, new_size,
-				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+				(loff_t)(index + 1) << PAGE_SHIFT);
 		}
 
 		if (off_end) {
@@ -1117,8 +1121,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
 	truncate_pagecache(inode, offset);
 
-	pg_start = offset >> PAGE_CACHE_SHIFT;
-	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = offset >> PAGE_SHIFT;
+	pg_end = (offset + len) >> PAGE_SHIFT;
 	delta = pg_end - pg_start;
 	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
@@ -1158,11 +1162,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
 	f2fs_balance_fs(sbi, true);
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	f2fs_lock_op(sbi);
 
@@ -1180,12 +1184,12 @@ noalloc:
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
-			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
+			new_size = (loff_t)(index + 1) << PAGE_SHIFT;
 		else if (index == pg_end)
-			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+			new_size = ((loff_t)index << PAGE_SHIFT) +
 					off_end;
 		else
-			new_size += PAGE_CACHE_SIZE;
+			new_size += PAGE_SIZE;
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -1652,8 +1656,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 	if (need_inplace_update(inode))
 		return -EINVAL;
 
-	pg_start = range->start >> PAGE_CACHE_SHIFT;
-	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
+	pg_start = range->start >> PAGE_SHIFT;
+	pg_end = (range->start + range->len) >> PAGE_SHIFT;
 
 	f2fs_balance_fs(sbi, true);
 
@@ -1770,7 +1774,7 @@ clear_out:
out:
 	inode_unlock(inode);
 	if (!err)
-		range->len = (u64)total << PAGE_CACHE_SHIFT;
+		range->len = (u64)total << PAGE_SHIFT;
 	return err;
 }
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 358214e9f707..a2fbe6f427d3 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage)
 
 	f2fs_bug_on(F2FS_P_SB(page), page->index);
 
-	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
 	/* Copy the whole inline data block */
 	src_addr = inline_data_addr(ipage);
@@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
 	}
 
 	if (page->index)
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 	else
 		read_inline_data(page, ipage);
 
@@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 		goto out;
 
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
 	dentry_blk = kmap_atomic(page);
 
@@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
 
-	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
-		i_size_write(dir, PAGE_CACHE_SIZE);
+	if (i_size_read(dir) < PAGE_SIZE) {
+		i_size_write(dir, PAGE_SIZE);
 		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7876f1052101..013e57932d61 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 		goto errout;
 	}
 
-	/* this is broken symlink case */
-	if (unlikely(cstr.name[0] == 0)) {
-		res = -ENOENT;
-		goto errout;
-	}
-
 	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
 		/* Symlink data on the disk is corrupted */
 		res = -EIO;
@@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 	if (res < 0)
 		goto errout;
 
+	/* this is broken symlink case */
+	if (unlikely(pstr.name[0] == 0)) {
+		res = -ENOENT;
+		goto errout;
+	}
+
 	paddr = pstr.name;
 
 	/* Null-terminate the name */
 	paddr[res] = '\0';
 
-	page_cache_release(cpage);
+	put_page(cpage);
 	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
errout:
 	fscrypt_fname_free_buffer(&pstr);
-	page_cache_release(cpage);
+	put_page(cpage);
 	return ERR_PTR(res);
 }
 
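Note: the namei.c hunk moves the broken-symlink test from the ciphertext (cstr) to the decrypted name (pstr). A leading zero byte is legal in ciphertext, so the old test could reject valid encrypted symlinks; only after decryption does an empty first byte mean the on-disk link is broken:

	/* only meaningful after the name has been decrypted into pstr */
	if (unlikely(pstr.name[0] == 0)) {
		res = -ENOENT;
		goto errout;
	}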
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 118321bd1a7f..1a33de9d84b1 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 	 */
 	if (type == FREE_NIDS) {
 		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
-							PAGE_CACHE_SHIFT;
+							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == NAT_ENTRIES) {
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
-							PAGE_CACHE_SHIFT;
+							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == DIRTY_DENTS) {
 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 
 		for (i = 0; i <= UPDATE_INO; i++)
 			mem_size += (sbi->im[i].ino_num *
-				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+				sizeof(struct ino_entry)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else if (type == EXTENT_CACHE) {
 		mem_size = (atomic_read(&sbi->total_ext_tree) *
 				sizeof(struct extent_tree) +
 				atomic_read(&sbi->total_ext_node) *
-				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+				sizeof(struct extent_node)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else {
 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
-	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+	memcpy(dst_addr, src_addr, PAGE_SIZE);
 	set_page_dirty(dst_page);
 	f2fs_put_page(src_page, 1);
 
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 0b30cd2aeebd..011942f94d64 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -591,7 +591,7 @@ out:
 
 	/* truncate meta pages to be used by the recovery */
 	truncate_inode_pages_range(META_MAPPING(sbi),
-			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
 
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6f16b39f0b52..540669d6978e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 		}
 	}
 
-	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
 	if (valid_sum_count <= sum_in_page)
 		return 1;
 	else if ((valid_sum_count - sum_in_page) <=
-		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
 		return 2;
 	return 3;
 }
@@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
 	void *dst = page_address(page);
 
 	if (src)
-		memcpy(dst, src, PAGE_CACHE_SIZE);
+		memcpy(dst, src, PAGE_SIZE);
 	else
-		memset(dst, 0, PAGE_CACHE_SIZE);
+		memset(dst, 0, PAGE_SIZE);
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
 }
@@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 			s = (struct f2fs_summary *)(kaddr + offset);
 			seg_i->sum_blk->entries[j] = *s;
 			offset += SUMMARY_SIZE;
-			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
 						SUM_FOOTER_SIZE)
 				continue;
 
@@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
 			*summary = seg_i->sum_blk->entries[j];
 			written_size += SUMMARY_SIZE;
 
-			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
 						SUM_FOOTER_SIZE)
 				continue;
 
@@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
-	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+	memcpy(dst_addr, src_addr, PAGE_SIZE);
 
 	set_page_dirty(dst_page);
 	f2fs_put_page(src_page, 1);
@@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 
 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
 		mutex_init(&array[i].curseg_mutex);
-		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!array[i].sum_blk)
 			return -ENOMEM;
 		init_rwsem(&array[i].journal_rwsem);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 15bb81f8dac2..006f87d69921 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -984,9 +984,25 @@ static loff_t max_file_blocks(void)
 	return result;
 }
 
+static int __f2fs_commit_super(struct buffer_head *bh,
+			struct f2fs_super_block *super)
+{
+	lock_buffer(bh);
+	if (super)
+		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+	set_buffer_uptodate(bh);
+	set_buffer_dirty(bh);
+	unlock_buffer(bh);
+
+	/* it's rare case, we can do fua all the time */
+	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
 static inline bool sanity_check_area_boundary(struct super_block *sb,
-					struct f2fs_super_block *raw_super)
+					struct buffer_head *bh)
 {
+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+					(bh->b_data + F2FS_SUPER_OFFSET);
 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+	u64 main_end_blkaddr = main_blkaddr +
+				(segment_count_main << log_blocks_per_seg);
+	u64 seg_end_blkaddr = segment0_blkaddr +
+				(segment_count << log_blocks_per_seg);
 
 	if (segment0_blkaddr != cp_blkaddr) {
 		f2fs_msg(sb, KERN_INFO,
@@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
 		return true;
 	}
 
-	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
-		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+	if (main_end_blkaddr > seg_end_blkaddr) {
 		f2fs_msg(sb, KERN_INFO,
-			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
 			main_blkaddr,
-			segment0_blkaddr + (segment_count << log_blocks_per_seg),
+			segment0_blkaddr +
+				(segment_count << log_blocks_per_seg),
 			segment_count_main << log_blocks_per_seg);
 		return true;
+	} else if (main_end_blkaddr < seg_end_blkaddr) {
+		int err = 0;
+		char *res;
+
+		/* fix in-memory information all the time */
+		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+				segment0_blkaddr) >> log_blocks_per_seg);
+
+		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+			res = "internally";
+		} else {
+			err = __f2fs_commit_super(bh, NULL);
+			res = err ? "failed" : "done";
+		}
+		f2fs_msg(sb, KERN_INFO,
+			"Fix alignment : %s, start(%u) end(%u) block(%u)",
+			res, main_blkaddr,
+			segment0_blkaddr +
+				(segment_count << log_blocks_per_seg),
+			segment_count_main << log_blocks_per_seg);
+		if (err)
+			return true;
 	}
-
 	return false;
 }
 
 static int sanity_check_raw_super(struct super_block *sb,
-			struct f2fs_super_block *raw_super)
+			struct buffer_head *bh)
 {
+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+					(bh->b_data + F2FS_SUPER_OFFSET);
 	unsigned int blocksize;
 
 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
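Note: sanity_check_area_boundary() now distinguishes the two mismatch directions instead of rejecting any inequality. If MAIN would run past the last segment the superblock is still refused, but if segment_count merely overstates the area, the in-memory copy is clamped and, on a writable device, committed back through the new __f2fs_commit_super(). The repair itself is one field:

	/* clamp so segment0 + segment_count * blocks_per_seg == end of MAIN */
	raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
			segment0_blkaddr) >> log_blocks_per_seg);

Passing the buffer_head around instead of a raw_super pointer is what makes the write-back possible: the fix lands in the very buffer that gets committed.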
@@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb,
 	}
 
 	/* Currently, support only 4KB page cache size */
-	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+	if (F2FS_BLKSIZE != PAGE_SIZE) {
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid page_cache_size (%lu), supports only 4KB\n",
-			PAGE_CACHE_SIZE);
+			PAGE_SIZE);
 		return 1;
 	}
 
@@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb,
 	}
 
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
-	if (sanity_check_area_boundary(sb, raw_super))
+	if (sanity_check_area_boundary(sb, bh))
 		return 1;
 
 	return 0;
@@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb,
 {
 	int block;
 	struct buffer_head *bh;
-	struct f2fs_super_block *super, *buf;
+	struct f2fs_super_block *super;
 	int err = 0;
 
 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
@@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb,
 			continue;
 		}
 
-		buf = (struct f2fs_super_block *)
-				(bh->b_data + F2FS_SUPER_OFFSET);
-
 		/* sanity checking of raw super */
-		if (sanity_check_raw_super(sb, buf)) {
+		if (sanity_check_raw_super(sb, bh)) {
 			f2fs_msg(sb, KERN_ERR,
 				"Can't find valid F2FS filesystem in %dth superblock",
 				block + 1);
@@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb,
 		}
 
 		if (!*raw_super) {
-			memcpy(super, buf, sizeof(*super));
+			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+				sizeof(*super));
 			*valid_super_block = block;
 			*raw_super = super;
 		}
@@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb,
 	return err;
 }
 
-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 {
-	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
 	struct buffer_head *bh;
 	int err;
 
-	bh = sb_getblk(sbi->sb, block);
+	/* write back-up superblock first */
+	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
 	if (!bh)
 		return -EIO;
-
-	lock_buffer(bh);
-	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
-	set_buffer_uptodate(bh);
-	set_buffer_dirty(bh);
-	unlock_buffer(bh);
-
-	/* it's rare case, we can do fua all the time */
-	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
 	brelse(bh);
 
-	return err;
-}
-
-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
-{
-	int err;
-
-	/* write back-up superblock first */
-	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
-
 	/* if we are in recovery path, skip writing valid superblock */
 	if (recover || err)
 		return err;
 
 	/* write current valid superblock */
-	return __f2fs_commit_super(sbi, sbi->valid_super_block);
+	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+	if (!bh)
+		return -EIO;
+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+	brelse(bh);
+	return err;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1442,7 +1470,7 @@ try_onemore:
1442 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); 1470 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
1443 if (__exist_node_summaries(sbi)) 1471 if (__exist_node_summaries(sbi))
1444 sbi->kbytes_written = 1472 sbi->kbytes_written =
1445 le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written); 1473 le64_to_cpu(seg_i->journal->info.kbytes_written);
1446 1474
1447 build_gc_manager(sbi); 1475 build_gc_manager(sbi);
1448 1476
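
The f2fs hunks above deduplicate the superblock write-out: both the backup and the live superblock now go through a shared __f2fs_commit_super(bh, super) helper whose definition lands earlier in this patch and is not shown in this excerpt. Pieced together from the lines removed above, the helper plausibly looks like the sketch below; treat it as a reconstruction, not the committed code.

static int __f2fs_commit_super(struct buffer_head *bh,
				struct f2fs_super_block *super)
{
	lock_buffer(bh);
	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
}

Note the ordering the caller preserves: the backup copy (block 0 or 1, whichever is not the live one) is flushed with FUA first, so a crash mid-update always leaves at least one consistent superblock on disk.
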
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index cb84f0fcc72a..bfc780c682fb 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -66,11 +66,11 @@ static int
66vxfs_immed_readpage(struct file *fp, struct page *pp) 66vxfs_immed_readpage(struct file *fp, struct page *pp)
67{ 67{
68 struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); 68 struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
69 u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT; 69 u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT;
70 caddr_t kaddr; 70 caddr_t kaddr;
71 71
72 kaddr = kmap(pp); 72 kaddr = kmap(pp);
73 memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE); 73 memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
74 kunmap(pp); 74 kunmap(pp);
75 75
76 flush_dcache_page(pp); 76 flush_dcache_page(pp);
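
Every substitution in the hunks that follow is a mechanical one-for-one replacement because the PAGE_CACHE_* names were never independent quantities: until this patch removes them they were plain aliases in include/linux/pagemap.h. Quoted from memory (verify against the pre-patch tree), the aliases were:

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)	get_page(page)
#define page_cache_release(page)	put_page(page)

So vxfs_immed_readpage above is byte-for-byte identical after preprocessing; what goes away is only the misleading suggestion that page-cache granularity might differ from the MMU page size.
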
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 1cff72df0389..a49e0cfbb686 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -45,7 +45,7 @@
45/* 45/*
46 * Number of VxFS blocks per page. 46 * Number of VxFS blocks per page.
47 */ 47 */
48#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize)) 48#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize))
49 49
50 50
51static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); 51static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
@@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
175 if (de) { 175 if (de) {
176 ino = de->d_ino; 176 ino = de->d_ino;
177 kunmap(pp); 177 kunmap(pp);
178 page_cache_release(pp); 178 put_page(pp);
179 } 179 }
180 180
181 return (ino); 181 return (ino);
@@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
255 nblocks = dir_blocks(ip); 255 nblocks = dir_blocks(ip);
256 pblocks = VXFS_BLOCK_PER_PAGE(sbp); 256 pblocks = VXFS_BLOCK_PER_PAGE(sbp);
257 257
258 page = pos >> PAGE_CACHE_SHIFT; 258 page = pos >> PAGE_SHIFT;
259 offset = pos & ~PAGE_CACHE_MASK; 259 offset = pos & ~PAGE_MASK;
260 block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks; 260 block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
261 261
262 for (; page < npages; page++, block = 0) { 262 for (; page < npages; page++, block = 0) {
@@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
289 continue; 289 continue;
290 290
291 offset = (char *)de - kaddr; 291 offset = (char *)de - kaddr;
292 ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; 292 ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
293 if (!dir_emit(ctx, de->d_name, de->d_namelen, 293 if (!dir_emit(ctx, de->d_name, de->d_namelen,
294 de->d_ino, DT_UNKNOWN)) { 294 de->d_ino, DT_UNKNOWN)) {
295 vxfs_put_page(pp); 295 vxfs_put_page(pp);
@@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
301 vxfs_put_page(pp); 301 vxfs_put_page(pp);
302 offset = 0; 302 offset = 0;
303 } 303 }
304 ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; 304 ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
305 return 0; 305 return 0;
306} 306}
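
The readdir loop above walks a directory as a sequence of pages, each holding VXFS_BLOCK_PER_PAGE filesystem blocks. Below is a standalone model of that position arithmetic; the 4 KiB page, 1 KiB block size, and pos = 5000 are all assumptions encoded as local constants, not values from the patch.

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long bsize = 1024, bshift = 10;  /* assumed 1 KiB VxFS blocks */
	unsigned long pos = 5000;                 /* arbitrary directory offset */

	unsigned long page    = pos >> PAGE_SHIFT;        /* page 1            */
	unsigned long offset  = pos & ~PAGE_MASK;         /* 904 bytes into it */
	unsigned long pblocks = PAGE_SIZE / bsize;        /* 4 blocks per page */
	unsigned long block   = (pos >> bshift) % pblocks;/* block 0 of page 1 */

	assert(page == 1 && offset == 904 && block == 0);
	return 0;
}
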
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 5d318c44f855..e806694d4145 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -50,7 +50,7 @@ inline void
50vxfs_put_page(struct page *pp) 50vxfs_put_page(struct page *pp)
51{ 51{
52 kunmap(pp); 52 kunmap(pp);
53 page_cache_release(pp); 53 put_page(pp);
54} 54}
55 55
56/** 56/**
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fee81e8768c9..592cea54cea0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -33,7 +33,7 @@
33/* 33/*
34 * 4MB minimal write chunk size 34 * 4MB minimal write chunk size
35 */ 35 */
36#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10)) 36#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
37 37
38struct wb_completion { 38struct wb_completion {
39 atomic_t cnt; 39 atomic_t cnt;
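
The MIN_WRITEBACK_PAGES change above is worth unpacking: 4096UL is 4 MB expressed in KiB, and shifting right by (PAGE_SHIFT - 10) converts KiB to pages, so the 4 MB floor stays correct on any page size. A compile-time check of the arithmetic, assuming 4 KiB pages:

#define PAGE_SHIFT 12
#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))

_Static_assert(MIN_WRITEBACK_PAGES == 1024, "4 MB / 4 KiB = 1024 pages");
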
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4860a0..3078b679fcd1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -113,7 +113,7 @@ try_again:
113 113
114 wake_up_bit(&cookie->flags, 0); 114 wake_up_bit(&cookie->flags, 0);
115 if (xpage) 115 if (xpage)
116 page_cache_release(xpage); 116 put_page(xpage);
117 __fscache_uncache_page(cookie, page); 117 __fscache_uncache_page(cookie, page);
118 return true; 118 return true;
119 119
@@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object,
164 } 164 }
165 spin_unlock(&object->lock); 165 spin_unlock(&object->lock);
166 if (xpage) 166 if (xpage)
167 page_cache_release(xpage); 167 put_page(xpage);
168} 168}
169 169
170/* 170/*
@@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
884 spin_unlock(&cookie->stores_lock); 884 spin_unlock(&cookie->stores_lock);
885 885
886 for (i = n - 1; i >= 0; i--) 886 for (i = n - 1; i >= 0; i--)
887 page_cache_release(results[i]); 887 put_page(results[i]);
888 } 888 }
889 889
890 _leave(""); 890 _leave("");
@@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
982 982
983 radix_tree_tag_set(&cookie->stores, page->index, 983 radix_tree_tag_set(&cookie->stores, page->index,
984 FSCACHE_COOKIE_PENDING_TAG); 984 FSCACHE_COOKIE_PENDING_TAG);
985 page_cache_get(page); 985 get_page(page);
986 986
987 /* we only want one writer at a time, but we do need to queue new 987 /* we only want one writer at a time, but we do need to queue new
988 * writers after exclusive ops */ 988 * writers after exclusive ops */
@@ -1026,7 +1026,7 @@ submit_failed:
1026 radix_tree_delete(&cookie->stores, page->index); 1026 radix_tree_delete(&cookie->stores, page->index);
1027 spin_unlock(&cookie->stores_lock); 1027 spin_unlock(&cookie->stores_lock);
1028 wake_cookie = __fscache_unuse_cookie(cookie); 1028 wake_cookie = __fscache_unuse_cookie(cookie);
1029 page_cache_release(page); 1029 put_page(page);
1030 ret = -ENOBUFS; 1030 ret = -ENOBUFS;
1031 goto nobufs; 1031 goto nobufs;
1032 1032
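
The fscache hunks above swap page_cache_get()/page_cache_release() for get_page()/put_page() with identical semantics: a page queued for asynchronous storage keeps an elevated reference count until the write completes or fails. A userspace model of that discipline (get_page()/put_page() stood in by a plain counter, purely illustrative):

#include <assert.h>

struct page_ref { int count; };

static void get_page(struct page_ref *p) { p->count++; }
static void put_page(struct page_ref *p) { p->count--; }

int main(void)
{
	struct page_ref page = { .count = 1 };  /* caller's reference */

	/* pin for the queued write, as in __fscache_write_page above */
	get_page(&page);
	/* ... asynchronous store runs here ... */
	/* dropped on completion (fscache_end_page_write) or on the
	 * submit_failed path, so both outcomes balance the get */
	put_page(&page);

	assert(page.count == 1);  /* caller's reference survives */
	return 0;
}
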
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ebb5e37455a0..cbece1221417 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
897 return err; 897 return err;
898 } 898 }
899 899
900 page_cache_get(newpage); 900 get_page(newpage);
901 901
902 if (!(buf->flags & PIPE_BUF_FLAG_LRU)) 902 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
903 lru_cache_add_file(newpage); 903 lru_cache_add_file(newpage);
@@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
912 912
913 if (err) { 913 if (err) {
914 unlock_page(newpage); 914 unlock_page(newpage);
915 page_cache_release(newpage); 915 put_page(newpage);
916 return err; 916 return err;
917 } 917 }
918 918
919 unlock_page(oldpage); 919 unlock_page(oldpage);
920 page_cache_release(oldpage); 920 put_page(oldpage);
921 cs->len = 0; 921 cs->len = 0;
922 922
923 return 0; 923 return 0;
@@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
951 fuse_copy_finish(cs); 951 fuse_copy_finish(cs);
952 952
953 buf = cs->pipebufs; 953 buf = cs->pipebufs;
954 page_cache_get(page); 954 get_page(page);
955 buf->page = page; 955 buf->page = page;
956 buf->offset = offset; 956 buf->offset = offset;
957 buf->len = count; 957 buf->len = count;
@@ -1435,7 +1435,7 @@ out_unlock:
1435 1435
1436out: 1436out:
1437 for (; page_nr < cs.nr_segs; page_nr++) 1437 for (; page_nr < cs.nr_segs; page_nr++)
1438 page_cache_release(bufs[page_nr].page); 1438 put_page(bufs[page_nr].page);
1439 1439
1440 kfree(bufs); 1440 kfree(bufs);
1441 return ret; 1441 return ret;
@@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1632 goto out_up_killsb; 1632 goto out_up_killsb;
1633 1633
1634 mapping = inode->i_mapping; 1634 mapping = inode->i_mapping;
1635 index = outarg.offset >> PAGE_CACHE_SHIFT; 1635 index = outarg.offset >> PAGE_SHIFT;
1636 offset = outarg.offset & ~PAGE_CACHE_MASK; 1636 offset = outarg.offset & ~PAGE_MASK;
1637 file_size = i_size_read(inode); 1637 file_size = i_size_read(inode);
1638 end = outarg.offset + outarg.size; 1638 end = outarg.offset + outarg.size;
1639 if (end > file_size) { 1639 if (end > file_size) {
@@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1652 if (!page) 1652 if (!page)
1653 goto out_iput; 1653 goto out_iput;
1654 1654
1655 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); 1655 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1656 err = fuse_copy_page(cs, &page, offset, this_num, 0); 1656 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1657 if (!err && offset == 0 && 1657 if (!err && offset == 0 &&
1658 (this_num == PAGE_CACHE_SIZE || file_size == end)) 1658 (this_num == PAGE_SIZE || file_size == end))
1659 SetPageUptodate(page); 1659 SetPageUptodate(page);
1660 unlock_page(page); 1660 unlock_page(page);
1661 page_cache_release(page); 1661 put_page(page);
1662 1662
1663 if (err) 1663 if (err)
1664 goto out_iput; 1664 goto out_iput;
@@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1697 size_t total_len = 0; 1697 size_t total_len = 0;
1698 int num_pages; 1698 int num_pages;
1699 1699
1700 offset = outarg->offset & ~PAGE_CACHE_MASK; 1700 offset = outarg->offset & ~PAGE_MASK;
1701 file_size = i_size_read(inode); 1701 file_size = i_size_read(inode);
1702 1702
1703 num = outarg->size; 1703 num = outarg->size;
@@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1720 req->page_descs[0].offset = offset; 1720 req->page_descs[0].offset = offset;
1721 req->end = fuse_retrieve_end; 1721 req->end = fuse_retrieve_end;
1722 1722
1723 index = outarg->offset >> PAGE_CACHE_SHIFT; 1723 index = outarg->offset >> PAGE_SHIFT;
1724 1724
1725 while (num && req->num_pages < num_pages) { 1725 while (num && req->num_pages < num_pages) {
1726 struct page *page; 1726 struct page *page;
@@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1730 if (!page) 1730 if (!page)
1731 break; 1731 break;
1732 1732
1733 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); 1733 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1734 req->pages[req->num_pages] = page; 1734 req->pages[req->num_pages] = page;
1735 req->page_descs[req->num_pages].length = this_num; 1735 req->page_descs[req->num_pages].length = this_num;
1736 req->num_pages++; 1736 req->num_pages++;
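
The fuse_notify_store loop above splits a byte range into per-page pieces: the first piece is bounded by the distance to the page boundary, every later piece starts at offset 0. A standalone model, assuming 4 KiB pages and arbitrary example values for pos and num:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pos = 6000, num = 9000;       /* example values only */
	unsigned long index  = pos >> PAGE_SHIFT;   /* first page: 1       */
	unsigned long offset = pos & ~PAGE_MASK;    /* 1904 bytes into it  */

	while (num) {
		unsigned long this_num = num < PAGE_SIZE - offset ?
					 num : PAGE_SIZE - offset;
		/* prints 2192, 4096 and 2712-byte pieces for pages 1..3 */
		printf("page %lu: copy %lu bytes at offset %lu\n",
		       index, this_num, offset);
		num -= this_num;
		offset = 0;     /* later pages start at their beginning */
		index++;
	}
	return 0;
}
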
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9dde38f12c07..719924d6c706 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
348 pgoff_t curr_index; 348 pgoff_t curr_index;
349 349
350 BUG_ON(req->inode != inode); 350 BUG_ON(req->inode != inode);
351 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 351 curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
352 if (idx_from < curr_index + req->num_pages && 352 if (idx_from < curr_index + req->num_pages &&
353 curr_index <= idx_to) { 353 curr_index <= idx_to) {
354 found = true; 354 found = true;
@@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
683 * present there. 683 * present there.
684 */ 684 */
685 int i; 685 int i;
686 int start_idx = num_read >> PAGE_CACHE_SHIFT; 686 int start_idx = num_read >> PAGE_SHIFT;
687 size_t off = num_read & (PAGE_CACHE_SIZE - 1); 687 size_t off = num_read & (PAGE_SIZE - 1);
688 688
689 for (i = start_idx; i < req->num_pages; i++) { 689 for (i = start_idx; i < req->num_pages; i++) {
690 zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); 690 zero_user_segment(req->pages[i], off, PAGE_SIZE);
691 off = 0; 691 off = 0;
692 } 692 }
693 } else { 693 } else {
@@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page)
704 struct fuse_req *req; 704 struct fuse_req *req;
705 size_t num_read; 705 size_t num_read;
706 loff_t pos = page_offset(page); 706 loff_t pos = page_offset(page);
707 size_t count = PAGE_CACHE_SIZE; 707 size_t count = PAGE_SIZE;
708 u64 attr_ver; 708 u64 attr_ver;
709 int err; 709 int err;
710 710
@@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
789 else 789 else
790 SetPageError(page); 790 SetPageError(page);
791 unlock_page(page); 791 unlock_page(page);
792 page_cache_release(page); 792 put_page(page);
793 } 793 }
794 if (req->ff) 794 if (req->ff)
795 fuse_file_put(req->ff, false); 795 fuse_file_put(req->ff, false);
@@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
800 struct fuse_file *ff = file->private_data; 800 struct fuse_file *ff = file->private_data;
801 struct fuse_conn *fc = ff->fc; 801 struct fuse_conn *fc = ff->fc;
802 loff_t pos = page_offset(req->pages[0]); 802 loff_t pos = page_offset(req->pages[0]);
803 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 803 size_t count = req->num_pages << PAGE_SHIFT;
804 804
805 req->out.argpages = 1; 805 req->out.argpages = 1;
806 req->out.page_zeroing = 1; 806 req->out.page_zeroing = 1;
@@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
836 836
837 if (req->num_pages && 837 if (req->num_pages &&
838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
839 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 839 (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
840 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 840 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
841 int nr_alloc = min_t(unsigned, data->nr_pages, 841 int nr_alloc = min_t(unsigned, data->nr_pages,
842 FUSE_MAX_PAGES_PER_REQ); 842 FUSE_MAX_PAGES_PER_REQ);
@@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
858 return -EIO; 858 return -EIO;
859 } 859 }
860 860
861 page_cache_get(page); 861 get_page(page);
862 req->pages[req->num_pages] = page; 862 req->pages[req->num_pages] = page;
863 req->page_descs[req->num_pages].length = PAGE_SIZE; 863 req->page_descs[req->num_pages].length = PAGE_SIZE;
864 req->num_pages++; 864 req->num_pages++;
@@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
1003 for (i = 0; i < req->num_pages; i++) { 1003 for (i = 0; i < req->num_pages; i++) {
1004 struct page *page = req->pages[i]; 1004 struct page *page = req->pages[i];
1005 1005
1006 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) 1006 if (!req->out.h.error && !offset && count >= PAGE_SIZE)
1007 SetPageUptodate(page); 1007 SetPageUptodate(page);
1008 1008
1009 if (count > PAGE_CACHE_SIZE - offset) 1009 if (count > PAGE_SIZE - offset)
1010 count -= PAGE_CACHE_SIZE - offset; 1010 count -= PAGE_SIZE - offset;
1011 else 1011 else
1012 count = 0; 1012 count = 0;
1013 offset = 0; 1013 offset = 0;
1014 1014
1015 unlock_page(page); 1015 unlock_page(page);
1016 page_cache_release(page); 1016 put_page(page);
1017 } 1017 }
1018 1018
1019 return res; 1019 return res;
@@ -1024,7 +1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1024 struct iov_iter *ii, loff_t pos) 1024 struct iov_iter *ii, loff_t pos)
1025{ 1025{
1026 struct fuse_conn *fc = get_fuse_conn(mapping->host); 1026 struct fuse_conn *fc = get_fuse_conn(mapping->host);
1027 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 1027 unsigned offset = pos & (PAGE_SIZE - 1);
1028 size_t count = 0; 1028 size_t count = 0;
1029 int err; 1029 int err;
1030 1030
@@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1034 do { 1034 do {
1035 size_t tmp; 1035 size_t tmp;
1036 struct page *page; 1036 struct page *page;
1037 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1037 pgoff_t index = pos >> PAGE_SHIFT;
1038 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, 1038 size_t bytes = min_t(size_t, PAGE_SIZE - offset,
1039 iov_iter_count(ii)); 1039 iov_iter_count(ii));
1040 1040
1041 bytes = min_t(size_t, bytes, fc->max_write - count); 1041 bytes = min_t(size_t, bytes, fc->max_write - count);
@@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1059 iov_iter_advance(ii, tmp); 1059 iov_iter_advance(ii, tmp);
1060 if (!tmp) { 1060 if (!tmp) {
1061 unlock_page(page); 1061 unlock_page(page);
1062 page_cache_release(page); 1062 put_page(page);
1063 bytes = min(bytes, iov_iter_single_seg_count(ii)); 1063 bytes = min(bytes, iov_iter_single_seg_count(ii));
1064 goto again; 1064 goto again;
1065 } 1065 }
@@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1072 count += tmp; 1072 count += tmp;
1073 pos += tmp; 1073 pos += tmp;
1074 offset += tmp; 1074 offset += tmp;
1075 if (offset == PAGE_CACHE_SIZE) 1075 if (offset == PAGE_SIZE)
1076 offset = 0; 1076 offset = 0;
1077 1077
1078 if (!fc->big_writes) 1078 if (!fc->big_writes)
@@ -1086,8 +1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1086static inline unsigned fuse_wr_pages(loff_t pos, size_t len) 1086static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
1087{ 1087{
1088 return min_t(unsigned, 1088 return min_t(unsigned,
1089 ((pos + len - 1) >> PAGE_CACHE_SHIFT) - 1089 ((pos + len - 1) >> PAGE_SHIFT) -
1090 (pos >> PAGE_CACHE_SHIFT) + 1, 1090 (pos >> PAGE_SHIFT) + 1,
1091 FUSE_MAX_PAGES_PER_REQ); 1091 FUSE_MAX_PAGES_PER_REQ);
1092} 1092}
1093 1093
@@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1205 goto out; 1205 goto out;
1206 1206
1207 invalidate_mapping_pages(file->f_mapping, 1207 invalidate_mapping_pages(file->f_mapping,
1208 pos >> PAGE_CACHE_SHIFT, 1208 pos >> PAGE_SHIFT,
1209 endbyte >> PAGE_CACHE_SHIFT); 1209 endbyte >> PAGE_SHIFT);
1210 1210
1211 written += written_buffered; 1211 written += written_buffered;
1212 iocb->ki_pos = pos + written_buffered; 1212 iocb->ki_pos = pos + written_buffered;
@@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1315 size_t nmax = write ? fc->max_write : fc->max_read; 1315 size_t nmax = write ? fc->max_write : fc->max_read;
1316 loff_t pos = *ppos; 1316 loff_t pos = *ppos;
1317 size_t count = iov_iter_count(iter); 1317 size_t count = iov_iter_count(iter);
1318 pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT; 1318 pgoff_t idx_from = pos >> PAGE_SHIFT;
1319 pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT; 1319 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
1320 ssize_t res = 0; 1320 ssize_t res = 0;
1321 struct fuse_req *req; 1321 struct fuse_req *req;
1322 int err = 0; 1322 int err = 0;
@@ -1466,7 +1466,7 @@ __acquires(fc->lock)
1466{ 1466{
1467 struct fuse_inode *fi = get_fuse_inode(req->inode); 1467 struct fuse_inode *fi = get_fuse_inode(req->inode);
1468 struct fuse_write_in *inarg = &req->misc.write.in; 1468 struct fuse_write_in *inarg = &req->misc.write.in;
1469 __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; 1469 __u64 data_size = req->num_pages * PAGE_SIZE;
1470 1470
1471 if (!fc->connected) 1471 if (!fc->connected)
1472 goto out_free; 1472 goto out_free;
@@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1727 list_del(&new_req->writepages_entry); 1727 list_del(&new_req->writepages_entry);
1728 list_for_each_entry(old_req, &fi->writepages, writepages_entry) { 1728 list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
1729 BUG_ON(old_req->inode != new_req->inode); 1729 BUG_ON(old_req->inode != new_req->inode);
1730 curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 1730 curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
1731 if (curr_index <= page->index && 1731 if (curr_index <= page->index &&
1732 page->index < curr_index + old_req->num_pages) { 1732 page->index < curr_index + old_req->num_pages) {
1733 found = true; 1733 found = true;
@@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1742 new_req->num_pages = 1; 1742 new_req->num_pages = 1;
1743 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { 1743 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
1744 BUG_ON(tmp->inode != new_req->inode); 1744 BUG_ON(tmp->inode != new_req->inode);
1745 curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; 1745 curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
1746 if (tmp->num_pages == 1 && 1746 if (tmp->num_pages == 1 &&
1747 curr_index == page->index) { 1747 curr_index == page->index) {
1748 old_req = tmp; 1748 old_req = tmp;
@@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page,
1799 1799
1800 if (req && req->num_pages && 1800 if (req && req->num_pages &&
1801 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || 1801 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
1802 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || 1802 (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
1803 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { 1803 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
1804 fuse_writepages_send(data); 1804 fuse_writepages_send(data);
1805 data->req = NULL; 1805 data->req = NULL;
@@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
1924 loff_t pos, unsigned len, unsigned flags, 1924 loff_t pos, unsigned len, unsigned flags,
1925 struct page **pagep, void **fsdata) 1925 struct page **pagep, void **fsdata)
1926{ 1926{
1927 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1927 pgoff_t index = pos >> PAGE_SHIFT;
1928 struct fuse_conn *fc = get_fuse_conn(file_inode(file)); 1928 struct fuse_conn *fc = get_fuse_conn(file_inode(file));
1929 struct page *page; 1929 struct page *page;
1930 loff_t fsize; 1930 loff_t fsize;
@@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
1938 1938
1939 fuse_wait_on_page_writeback(mapping->host, page->index); 1939 fuse_wait_on_page_writeback(mapping->host, page->index);
1940 1940
1941 if (PageUptodate(page) || len == PAGE_CACHE_SIZE) 1941 if (PageUptodate(page) || len == PAGE_SIZE)
1942 goto success; 1942 goto success;
1943 /* 1943 /*
1944 * Check if the start this page comes after the end of file, in which 1944 * Check if the start this page comes after the end of file, in which
1945 * case the readpage can be optimized away. 1945 * case the readpage can be optimized away.
1946 */ 1946 */
1947 fsize = i_size_read(mapping->host); 1947 fsize = i_size_read(mapping->host);
1948 if (fsize <= (pos & PAGE_CACHE_MASK)) { 1948 if (fsize <= (pos & PAGE_MASK)) {
1949 size_t off = pos & ~PAGE_CACHE_MASK; 1949 size_t off = pos & ~PAGE_MASK;
1950 if (off) 1950 if (off)
1951 zero_user_segment(page, 0, off); 1951 zero_user_segment(page, 0, off);
1952 goto success; 1952 goto success;
@@ -1960,7 +1960,7 @@ success:
1960 1960
1961cleanup: 1961cleanup:
1962 unlock_page(page); 1962 unlock_page(page);
1963 page_cache_release(page); 1963 put_page(page);
1964error: 1964error:
1965 return err; 1965 return err;
1966} 1966}
@@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1973 1973
1974 if (!PageUptodate(page)) { 1974 if (!PageUptodate(page)) {
1975 /* Zero any unwritten bytes at the end of the page */ 1975 /* Zero any unwritten bytes at the end of the page */
1976 size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; 1976 size_t endoff = (pos + copied) & ~PAGE_MASK;
1977 if (endoff) 1977 if (endoff)
1978 zero_user_segment(page, endoff, PAGE_CACHE_SIZE); 1978 zero_user_segment(page, endoff, PAGE_SIZE);
1979 SetPageUptodate(page); 1979 SetPageUptodate(page);
1980 } 1980 }
1981 1981
1982 fuse_write_update_size(inode, pos + copied); 1982 fuse_write_update_size(inode, pos + copied);
1983 set_page_dirty(page); 1983 set_page_dirty(page);
1984 unlock_page(page); 1984 unlock_page(page);
1985 page_cache_release(page); 1985 put_page(page);
1986 1986
1987 return copied; 1987 return copied;
1988} 1988}
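
fuse_wr_pages above computes how many pages a write of len bytes at pos touches: the page of the last byte minus the page of the first byte, plus one. Compile-time checks of the formula, assuming 4 KiB pages:

#define PAGE_SHIFT 12
#define SPAN(pos, len) \
	((((pos) + (len) - 1) >> PAGE_SHIFT) - ((pos) >> PAGE_SHIFT) + 1)

_Static_assert(SPAN(0, 4096)    == 1, "exactly one page");
_Static_assert(SPAN(4095, 2)    == 2, "two bytes straddling a boundary");
_Static_assert(SPAN(1000, 8000) == 3, "bytes 1000..8999 touch pages 0..2");
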
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4d69d5c0bedc..1ce67668a8e1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
339 339
340 fuse_invalidate_attr(inode); 340 fuse_invalidate_attr(inode);
341 if (offset >= 0) { 341 if (offset >= 0) {
342 pg_start = offset >> PAGE_CACHE_SHIFT; 342 pg_start = offset >> PAGE_SHIFT;
343 if (len <= 0) 343 if (len <= 0)
344 pg_end = -1; 344 pg_end = -1;
345 else 345 else
346 pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; 346 pg_end = (offset + len - 1) >> PAGE_SHIFT;
347 invalidate_inode_pages2_range(inode->i_mapping, 347 invalidate_inode_pages2_range(inode->i_mapping,
348 pg_start, pg_end); 348 pg_start, pg_end);
349 } 349 }
@@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
864 process_init_limits(fc, arg); 864 process_init_limits(fc, arg);
865 865
866 if (arg->minor >= 6) { 866 if (arg->minor >= 6) {
867 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; 867 ra_pages = arg->max_readahead / PAGE_SIZE;
868 if (arg->flags & FUSE_ASYNC_READ) 868 if (arg->flags & FUSE_ASYNC_READ)
869 fc->async_read = 1; 869 fc->async_read = 1;
870 if (!(arg->flags & FUSE_POSIX_LOCKS)) 870 if (!(arg->flags & FUSE_POSIX_LOCKS))
@@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
901 if (arg->time_gran && arg->time_gran <= 1000000000) 901 if (arg->time_gran && arg->time_gran <= 1000000000)
902 fc->sb->s_time_gran = arg->time_gran; 902 fc->sb->s_time_gran = arg->time_gran;
903 } else { 903 } else {
904 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 904 ra_pages = fc->max_read / PAGE_SIZE;
905 fc->no_lock = 1; 905 fc->no_lock = 1;
906 fc->no_flock = 1; 906 fc->no_flock = 1;
907 } 907 }
@@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
922 922
923 arg->major = FUSE_KERNEL_VERSION; 923 arg->major = FUSE_KERNEL_VERSION;
924 arg->minor = FUSE_KERNEL_MINOR_VERSION; 924 arg->minor = FUSE_KERNEL_MINOR_VERSION;
925 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 925 arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
926 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | 926 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
927 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | 927 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
955 int err; 955 int err;
956 956
957 fc->bdi.name = "fuse"; 957 fc->bdi.name = "fuse";
958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
959 /* fuse does it's own writeback accounting */ 959 /* fuse does it's own writeback accounting */
960 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT; 960 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
961 961
@@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1053 goto err; 1053 goto err;
1054#endif 1054#endif
1055 } else { 1055 } else {
1056 sb->s_blocksize = PAGE_CACHE_SIZE; 1056 sb->s_blocksize = PAGE_SIZE;
1057 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1057 sb->s_blocksize_bits = PAGE_SHIFT;
1058 } 1058 }
1059 sb->s_magic = FUSE_SUPER_MAGIC; 1059 sb->s_magic = FUSE_SUPER_MAGIC;
1060 sb->s_op = &fuse_super_operations; 1060 sb->s_op = &fuse_super_operations;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aa016e4b8bec..1bbbee945f46 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page,
101 struct gfs2_inode *ip = GFS2_I(inode); 101 struct gfs2_inode *ip = GFS2_I(inode);
102 struct gfs2_sbd *sdp = GFS2_SB(inode); 102 struct gfs2_sbd *sdp = GFS2_SB(inode);
103 loff_t i_size = i_size_read(inode); 103 loff_t i_size = i_size_read(inode);
104 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 104 pgoff_t end_index = i_size >> PAGE_SHIFT;
105 unsigned offset; 105 unsigned offset;
106 106
107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) 107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
@@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page,
109 if (current->journal_info) 109 if (current->journal_info)
110 goto redirty; 110 goto redirty;
111 /* Is the page fully outside i_size? (truncate in progress) */ 111 /* Is the page fully outside i_size? (truncate in progress) */
112 offset = i_size & (PAGE_CACHE_SIZE-1); 112 offset = i_size & (PAGE_SIZE-1);
113 if (page->index > end_index || (page->index == end_index && !offset)) { 113 if (page->index > end_index || (page->index == end_index && !offset)) {
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
115 goto out; 115 goto out;
116 } 116 }
117 return 1; 117 return 1;
@@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
238{ 238{
239 struct inode *inode = mapping->host; 239 struct inode *inode = mapping->host;
240 struct gfs2_sbd *sdp = GFS2_SB(inode); 240 struct gfs2_sbd *sdp = GFS2_SB(inode);
241 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 241 unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
242 int i; 242 int i;
243 int ret; 243 int ret;
244 244
@@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
366 cycled = 0; 366 cycled = 0;
367 end = -1; 367 end = -1;
368 } else { 368 } else {
369 index = wbc->range_start >> PAGE_CACHE_SHIFT; 369 index = wbc->range_start >> PAGE_SHIFT;
370 end = wbc->range_end >> PAGE_CACHE_SHIFT; 370 end = wbc->range_end >> PAGE_SHIFT;
371 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 371 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
372 range_whole = 1; 372 range_whole = 1;
373 cycled = 1; /* ignore range_cyclic tests */ 373 cycled = 1; /* ignore range_cyclic tests */
@@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
458 * so we need to supply one here. It doesn't happen often. 458 * so we need to supply one here. It doesn't happen often.
459 */ 459 */
460 if (unlikely(page->index)) { 460 if (unlikely(page->index)) {
461 zero_user(page, 0, PAGE_CACHE_SIZE); 461 zero_user(page, 0, PAGE_SIZE);
462 SetPageUptodate(page); 462 SetPageUptodate(page);
463 return 0; 463 return 0;
464 } 464 }
@@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
471 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) 471 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
472 dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); 472 dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
473 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 473 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
474 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); 474 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
475 kunmap_atomic(kaddr); 475 kunmap_atomic(kaddr);
476 flush_dcache_page(page); 476 flush_dcache_page(page);
477 brelse(dibh); 477 brelse(dibh);
@@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
560 unsigned size) 560 unsigned size)
561{ 561{
562 struct address_space *mapping = ip->i_inode.i_mapping; 562 struct address_space *mapping = ip->i_inode.i_mapping;
563 unsigned long index = *pos / PAGE_CACHE_SIZE; 563 unsigned long index = *pos / PAGE_SIZE;
564 unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); 564 unsigned offset = *pos & (PAGE_SIZE - 1);
565 unsigned copied = 0; 565 unsigned copied = 0;
566 unsigned amt; 566 unsigned amt;
567 struct page *page; 567 struct page *page;
@@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
569 569
570 do { 570 do {
571 amt = size - copied; 571 amt = size - copied;
572 if (offset + size > PAGE_CACHE_SIZE) 572 if (offset + size > PAGE_SIZE)
573 amt = PAGE_CACHE_SIZE - offset; 573 amt = PAGE_SIZE - offset;
574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL); 574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
575 if (IS_ERR(page)) 575 if (IS_ERR(page))
576 return PTR_ERR(page); 576 return PTR_ERR(page);
577 p = kmap_atomic(page); 577 p = kmap_atomic(page);
578 memcpy(buf + copied, p + offset, amt); 578 memcpy(buf + copied, p + offset, amt);
579 kunmap_atomic(p); 579 kunmap_atomic(p);
580 page_cache_release(page); 580 put_page(page);
581 copied += amt; 581 copied += amt;
582 index++; 582 index++;
583 offset = 0; 583 offset = 0;
@@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
651 unsigned requested = 0; 651 unsigned requested = 0;
652 int alloc_required; 652 int alloc_required;
653 int error = 0; 653 int error = 0;
654 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 654 pgoff_t index = pos >> PAGE_SHIFT;
655 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 655 unsigned from = pos & (PAGE_SIZE - 1);
656 struct page *page; 656 struct page *page;
657 657
658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); 658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
697 rblocks += gfs2_rg_blocks(ip, requested); 697 rblocks += gfs2_rg_blocks(ip, requested);
698 698
699 error = gfs2_trans_begin(sdp, rblocks, 699 error = gfs2_trans_begin(sdp, rblocks,
700 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 700 PAGE_SIZE/sdp->sd_sb.sb_bsize);
701 if (error) 701 if (error)
702 goto out_trans_fail; 702 goto out_trans_fail;
703 703
@@ -727,7 +727,7 @@ out:
727 return 0; 727 return 0;
728 728
729 unlock_page(page); 729 unlock_page(page);
730 page_cache_release(page); 730 put_page(page);
731 731
732 gfs2_trans_end(sdp); 732 gfs2_trans_end(sdp);
733 if (pos + len > ip->i_inode.i_size) 733 if (pos + len > ip->i_inode.i_size)
@@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
827 if (!PageUptodate(page)) 827 if (!PageUptodate(page))
828 SetPageUptodate(page); 828 SetPageUptodate(page);
829 unlock_page(page); 829 unlock_page(page);
830 page_cache_release(page); 830 put_page(page);
831 831
832 if (copied) { 832 if (copied) {
833 if (inode->i_size < to) 833 if (inode->i_size < to)
@@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
877 struct gfs2_sbd *sdp = GFS2_SB(inode); 877 struct gfs2_sbd *sdp = GFS2_SB(inode);
878 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 878 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
879 struct buffer_head *dibh; 879 struct buffer_head *dibh;
880 unsigned int from = pos & (PAGE_CACHE_SIZE - 1); 880 unsigned int from = pos & (PAGE_SIZE - 1);
881 unsigned int to = from + len; 881 unsigned int to = from + len;
882 int ret; 882 int ret;
883 struct gfs2_trans *tr = current->journal_info; 883 struct gfs2_trans *tr = current->journal_info;
@@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
888 ret = gfs2_meta_inode_buffer(ip, &dibh); 888 ret = gfs2_meta_inode_buffer(ip, &dibh);
889 if (unlikely(ret)) { 889 if (unlikely(ret)) {
890 unlock_page(page); 890 unlock_page(page);
891 page_cache_release(page); 891 put_page(page);
892 goto failed; 892 goto failed;
893 } 893 }
894 894
@@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
992{ 992{
993 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); 993 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
994 unsigned int stop = offset + length; 994 unsigned int stop = offset + length;
995 int partial_page = (offset || length < PAGE_CACHE_SIZE); 995 int partial_page = (offset || length < PAGE_SIZE);
996 struct buffer_head *bh, *head; 996 struct buffer_head *bh, *head;
997 unsigned long pos = 0; 997 unsigned long pos = 0;
998 998
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
1082 * the first place, mapping->nr_pages will always be zero. 1082 * the first place, mapping->nr_pages will always be zero.
1083 */ 1083 */
1084 if (mapping->nrpages) { 1084 if (mapping->nrpages) {
1085 loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1); 1085 loff_t lstart = offset & ~(PAGE_SIZE - 1);
1086 loff_t len = iov_iter_count(iter); 1086 loff_t len = iov_iter_count(iter);
1087 loff_t end = PAGE_ALIGN(offset + len) - 1; 1087 loff_t end = PAGE_ALIGN(offset + len) - 1;
1088 1088
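
The truncate check in gfs2_writepage_common above deserves a worked form: a page is fully past EOF when its index is beyond end_index, or equal to end_index while i_size is exactly page-aligned (offset == 0), since in that case end_index names the first page with no valid bytes. A standalone version, 4 KiB pages assumed:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int fully_outside_isize(unsigned long index, unsigned long long i_size)
{
	unsigned long long end_index = i_size >> PAGE_SHIFT;
	unsigned offset = i_size & (PAGE_SIZE - 1);

	return index > end_index || (index == end_index && !offset);
}

int main(void)
{
	assert(!fully_outside_isize(0, 100));   /* page 0 holds bytes 0..99  */
	assert( fully_outside_isize(1, 100));   /* page 1 is all past EOF    */
	assert( fully_outside_isize(1, 4096));  /* aligned EOF: page 1 empty */
	assert(!fully_outside_isize(0, 4096));
	return 0;
}
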
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 0860f0b5b3f1..24ce1cdd434a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
75 dsize = dibh->b_size - sizeof(struct gfs2_dinode); 75 dsize = dibh->b_size - sizeof(struct gfs2_dinode);
76 76
77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
78 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); 78 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
79 kunmap(page); 79 kunmap(page);
80 80
81 SetPageUptodate(page); 81 SetPageUptodate(page);
@@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
98 98
99 if (release) { 99 if (release) {
100 unlock_page(page); 100 unlock_page(page);
101 page_cache_release(page); 101 put_page(page);
102 } 102 }
103 103
104 return 0; 104 return 0;
@@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
932{ 932{
933 struct inode *inode = mapping->host; 933 struct inode *inode = mapping->host;
934 struct gfs2_inode *ip = GFS2_I(inode); 934 struct gfs2_inode *ip = GFS2_I(inode);
935 unsigned long index = from >> PAGE_CACHE_SHIFT; 935 unsigned long index = from >> PAGE_SHIFT;
936 unsigned offset = from & (PAGE_CACHE_SIZE-1); 936 unsigned offset = from & (PAGE_SIZE-1);
937 unsigned blocksize, iblock, length, pos; 937 unsigned blocksize, iblock, length, pos;
938 struct buffer_head *bh; 938 struct buffer_head *bh;
939 struct page *page; 939 struct page *page;
@@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
945 945
946 blocksize = inode->i_sb->s_blocksize; 946 blocksize = inode->i_sb->s_blocksize;
947 length = blocksize - (offset & (blocksize - 1)); 947 length = blocksize - (offset & (blocksize - 1));
948 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 948 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
949 949
950 if (!page_has_buffers(page)) 950 if (!page_has_buffers(page))
951 create_empty_buffers(page, blocksize, 0); 951 create_empty_buffers(page, blocksize, 0);
@@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
989 mark_buffer_dirty(bh); 989 mark_buffer_dirty(bh);
990unlock: 990unlock:
991 unlock_page(page); 991 unlock_page(page);
992 page_cache_release(page); 992 put_page(page);
993 return err; 993 return err;
994} 994}
995 995
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index c9384f932975..208efc70ad49 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page)
354{ 354{
355 struct inode *inode = page->mapping->host; 355 struct inode *inode = page->mapping->host;
356 struct buffer_head bh; 356 struct buffer_head bh;
357 unsigned long size = PAGE_CACHE_SIZE; 357 unsigned long size = PAGE_SIZE;
358 u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 358 u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
359 359
360 do { 360 do {
361 bh.b_state = 0; 361 bh.b_state = 0;
@@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
386 struct gfs2_sbd *sdp = GFS2_SB(inode); 386 struct gfs2_sbd *sdp = GFS2_SB(inode);
387 struct gfs2_alloc_parms ap = { .aflags = 0, }; 387 struct gfs2_alloc_parms ap = { .aflags = 0, };
388 unsigned long last_index; 388 unsigned long last_index;
389 u64 pos = page->index << PAGE_CACHE_SHIFT; 389 u64 pos = page->index << PAGE_SHIFT;
390 unsigned int data_blocks, ind_blocks, rblocks; 390 unsigned int data_blocks, ind_blocks, rblocks;
391 struct gfs2_holder gh; 391 struct gfs2_holder gh;
392 loff_t size; 392 loff_t size;
@@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
401 if (ret) 401 if (ret)
402 goto out; 402 goto out;
403 403
404 gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); 404 gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
405 405
406 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 406 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
407 ret = gfs2_glock_nq(&gh); 407 ret = gfs2_glock_nq(&gh);
@@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
411 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 411 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
412 set_bit(GIF_SW_PAGED, &ip->i_flags); 412 set_bit(GIF_SW_PAGED, &ip->i_flags);
413 413
414 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { 414 if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
415 lock_page(page); 415 lock_page(page);
416 if (!PageUptodate(page) || page->mapping != inode->i_mapping) { 416 if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
417 ret = -EAGAIN; 417 ret = -EAGAIN;
@@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
424 if (ret) 424 if (ret)
425 goto out_unlock; 425 goto out_unlock;
426 426
427 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); 427 gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
428 ap.target = data_blocks + ind_blocks; 428 ap.target = data_blocks + ind_blocks;
429 ret = gfs2_quota_lock_check(ip, &ap); 429 ret = gfs2_quota_lock_check(ip, &ap);
430 if (ret) 430 if (ret)
@@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
447 lock_page(page); 447 lock_page(page);
448 ret = -EINVAL; 448 ret = -EINVAL;
449 size = i_size_read(inode); 449 size = i_size_read(inode);
450 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 450 last_index = (size - 1) >> PAGE_SHIFT;
451 /* Check page index against inode size */ 451 /* Check page index against inode size */
452 if (size == 0 || (page->index > last_index)) 452 if (size == 0 || (page->index > last_index))
453 goto out_trans_end; 453 goto out_trans_end;
@@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
873 rblocks += data_blocks ? data_blocks : 1; 873 rblocks += data_blocks ? data_blocks : 1;
874 874
875 error = gfs2_trans_begin(sdp, rblocks, 875 error = gfs2_trans_begin(sdp, rblocks,
876 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 876 PAGE_SIZE/sdp->sd_sb.sb_bsize);
877 if (error) 877 if (error)
878 goto out_trans_fail; 878 goto out_trans_fail;
879 879
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index e137d96f1b17..0448524c11bc 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
124 if (mapping == NULL) 124 if (mapping == NULL)
125 mapping = &sdp->sd_aspace; 125 mapping = &sdp->sd_aspace;
126 126
127 shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift; 127 shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
128 index = blkno >> shift; /* convert block to page */ 128 index = blkno >> shift; /* convert block to page */
129 bufnum = blkno - (index << shift); /* block buf index within page */ 129 bufnum = blkno - (index << shift); /* block buf index within page */
130 130
@@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
154 map_bh(bh, sdp->sd_vfs, blkno); 154 map_bh(bh, sdp->sd_vfs, blkno);
155 155
156 unlock_page(page); 156 unlock_page(page);
157 page_cache_release(page); 157 put_page(page);
158 158
159 return bh; 159 return bh;
160} 160}
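
gfs2_getbuf above maps a filesystem block number to a page index plus a buffer slot within that page. With 4 KiB pages and 1 KiB blocks (the block size here is an assumption for the example), shift = 12 - 10 = 2, so each page caches four blocks:

#include <assert.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned bsize_shift = 10;               /* assumed 1 KiB blocks */
	unsigned shift = PAGE_SHIFT - bsize_shift;
	unsigned long long blkno = 4097;         /* arbitrary block       */

	unsigned long index  = blkno >> shift;            /* page 1024    */
	unsigned      bufnum = blkno - (index << shift);  /* 2nd buffer   */

	assert(index == 1024 && bufnum == 1);
	return 0;
}
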
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a39891344259..ce7d69a2fdc0 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
701 unsigned to_write = bytes, pg_off = off; 701 unsigned to_write = bytes, pg_off = off;
702 int done = 0; 702 int done = 0;
703 703
704 blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift); 704 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
705 boff = off % bsize; 705 boff = off % bsize;
706 706
707 page = find_or_create_page(mapping, index, GFP_NOFS); 707 page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
753 flush_dcache_page(page); 753 flush_dcache_page(page);
754 kunmap_atomic(kaddr); 754 kunmap_atomic(kaddr);
755 unlock_page(page); 755 unlock_page(page);
756 page_cache_release(page); 756 put_page(page);
757 757
758 return 0; 758 return 0;
759 759
760unlock_out: 760unlock_out:
761 unlock_page(page); 761 unlock_page(page);
762 page_cache_release(page); 762 put_page(page);
763 return -EIO; 763 return -EIO;
764} 764}
765 765
@@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
773 773
774 nbytes = sizeof(struct gfs2_quota); 774 nbytes = sizeof(struct gfs2_quota);
775 775
776 pg_beg = loc >> PAGE_CACHE_SHIFT; 776 pg_beg = loc >> PAGE_SHIFT;
777 pg_off = loc % PAGE_CACHE_SIZE; 777 pg_off = loc % PAGE_SIZE;
778 778
779 /* If the quota straddles a page boundary, split the write in two */ 779 /* If the quota straddles a page boundary, split the write in two */
780 if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { 780 if ((pg_off + nbytes) > PAGE_SIZE) {
781 pg_oflow = 1; 781 pg_oflow = 1;
782 overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; 782 overflow = (pg_off + nbytes) - PAGE_SIZE;
783 } 783 }
784 784
785 ptr = qp; 785 ptr = qp;
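
The straddle test in gfs2_write_disk_quota above splits a quota record across two pages when it crosses a boundary. Standalone version of the arithmetic; the 88-byte record size and the 4060-byte location are stand-ins for the example, not values taken from gfs2's on-disk format:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned nbytes = 88;                    /* assumed record size     */
	unsigned long long loc = 4060;           /* record location in file */

	unsigned long pg_beg = loc >> PAGE_SHIFT;   /* page 0           */
	unsigned long pg_off = loc % PAGE_SIZE;     /* 4060 into page 0 */
	unsigned long overflow = 0;

	if (pg_off + nbytes > PAGE_SIZE)            /* 4148 > 4096      */
		overflow = pg_off + nbytes - PAGE_SIZE;

	/* 36 bytes land in page 0, the remaining 52 in page 1 */
	assert(pg_beg == 0 && overflow == 52);
	return 0;
}
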
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 07c0265aa195..99a0bdac8796 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip)
918 goto fail; 918 goto fail;
919 919
920 rgd->rd_gl->gl_object = rgd; 920 rgd->rd_gl->gl_object = rgd;
921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK; 921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
922 rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr + 922 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
923 rgd->rd_length) * bsize) - 1;
924 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; 923 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
925 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED); 924 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
926 if (rgd->rd_data > sdp->sd_max_rg_data) 925 if (rgd->rd_data > sdp->sd_max_rg_data)
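
The glock VM span in read_rindex_entry above uses both rounding directions: the start rounds down with PAGE_MASK, the end rounds up with PAGE_ALIGN, where PAGE_ALIGN(x) is (x + PAGE_SIZE - 1) & PAGE_MASK. Compile-time checks with local stand-in macros and 4 KiB pages:

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

_Static_assert((5000UL & PAGE_MASK) == 4096, "round down to page start");
_Static_assert(PAGE_ALIGN(5000UL) == 8192,   "round up to next boundary");
_Static_assert(PAGE_ALIGN(4096UL) == 4096,   "already aligned is unchanged");
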
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719eac5de..d77d844b668b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
278 278
279 mapping = tree->inode->i_mapping; 279 mapping = tree->inode->i_mapping;
280 off = (loff_t)cnid * tree->node_size; 280 off = (loff_t)cnid * tree->node_size;
281 block = off >> PAGE_CACHE_SHIFT; 281 block = off >> PAGE_SHIFT;
282 node->page_offset = off & ~PAGE_CACHE_MASK; 282 node->page_offset = off & ~PAGE_MASK;
283 for (i = 0; i < tree->pages_per_bnode; i++) { 283 for (i = 0; i < tree->pages_per_bnode; i++) {
284 page = read_mapping_page(mapping, block++, NULL); 284 page = read_mapping_page(mapping, block++, NULL);
285 if (IS_ERR(page)) 285 if (IS_ERR(page))
286 goto fail; 286 goto fail;
287 if (PageError(page)) { 287 if (PageError(page)) {
288 page_cache_release(page); 288 put_page(page);
289 goto fail; 289 goto fail;
290 } 290 }
291 node->page[i] = page; 291 node->page[i] = page;
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
401 401
402 for (i = 0; i < node->tree->pages_per_bnode; i++) 402 for (i = 0; i < node->tree->pages_per_bnode; i++)
403 if (node->page[i]) 403 if (node->page[i])
404 page_cache_release(node->page[i]); 404 put_page(node->page[i]);
405 kfree(node); 405 kfree(node);
406} 406}
407 407
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
429 429
430 pagep = node->page; 430 pagep = node->page;
431 memset(kmap(*pagep) + node->page_offset, 0, 431 memset(kmap(*pagep) + node->page_offset, 0,
432 min((int)PAGE_CACHE_SIZE, (int)tree->node_size)); 432 min((int)PAGE_SIZE, (int)tree->node_size));
433 set_page_dirty(*pagep); 433 set_page_dirty(*pagep);
434 kunmap(*pagep); 434 kunmap(*pagep);
435 for (i = 1; i < tree->pages_per_bnode; i++) { 435 for (i = 1; i < tree->pages_per_bnode; i++) {
436 memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); 436 memset(kmap(*++pagep), 0, PAGE_SIZE);
437 set_page_dirty(*pagep); 437 set_page_dirty(*pagep);
438 kunmap(*pagep); 438 kunmap(*pagep);
439 } 439 }
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e69..37cdd955eceb 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
116 } 116 }
117 117
118 tree->node_size_shift = ffs(size) - 1; 118 tree->node_size_shift = ffs(size) - 1;
119 tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 119 tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
120 120
121 kunmap(page); 121 kunmap(page);
122 page_cache_release(page); 122 put_page(page);
123 return tree; 123 return tree;
124 124
125fail_page: 125fail_page:
126 page_cache_release(page); 126 put_page(page);
127free_inode: 127free_inode:
128 tree->inode->i_mapping->a_ops = &hfs_aops; 128 tree->inode->i_mapping->a_ops = &hfs_aops;
129 iput(tree->inode); 129 iput(tree->inode);
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
257 off = off16; 257 off = off16;
258 258
259 off += node->page_offset; 259 off += node->page_offset;
260 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 260 pagep = node->page + (off >> PAGE_SHIFT);
261 data = kmap(*pagep); 261 data = kmap(*pagep);
262 off &= ~PAGE_CACHE_MASK; 262 off &= ~PAGE_MASK;
263 idx = 0; 263 idx = 0;
264 264
265 for (;;) { 265 for (;;) {
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
279 } 279 }
280 } 280 }
281 } 281 }
282 if (++off >= PAGE_CACHE_SIZE) { 282 if (++off >= PAGE_SIZE) {
283 kunmap(*pagep); 283 kunmap(*pagep);
284 data = kmap(*++pagep); 284 data = kmap(*++pagep);
285 off = 0; 285 off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
302 len = hfs_brec_lenoff(node, 0, &off16); 302 len = hfs_brec_lenoff(node, 0, &off16);
303 off = off16; 303 off = off16;
304 off += node->page_offset; 304 off += node->page_offset;
305 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 305 pagep = node->page + (off >> PAGE_SHIFT);
306 data = kmap(*pagep); 306 data = kmap(*pagep);
307 off &= ~PAGE_CACHE_MASK; 307 off &= ~PAGE_MASK;
308 } 308 }
309} 309}
310 310
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
348 len = hfs_brec_lenoff(node, 0, &off); 348 len = hfs_brec_lenoff(node, 0, &off);
349 } 349 }
350 off += node->page_offset + nidx / 8; 350 off += node->page_offset + nidx / 8;
351 page = node->page[off >> PAGE_CACHE_SHIFT]; 351 page = node->page[off >> PAGE_SHIFT];
352 data = kmap(page); 352 data = kmap(page);
353 off &= ~PAGE_CACHE_MASK; 353 off &= ~PAGE_MASK;
354 m = 1 << (~nidx & 7); 354 m = 1 << (~nidx & 7);
355 byte = data[off]; 355 byte = data[off];
356 if (!(byte & m)) { 356 if (!(byte & m)) {
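
The pages_per_bnode computation in hfs_btree_open above is round-up division: a btree node smaller than a page still occupies one page, and anything larger spills into as many pages as needed. Compile-time checks, 4 KiB pages assumed:

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGES_PER_BNODE(node_size) \
	(((node_size) + PAGE_SIZE - 1) >> PAGE_SHIFT)

_Static_assert(PAGES_PER_BNODE(512UL)  == 1, "sub-page node still needs a page");
_Static_assert(PAGES_PER_BNODE(4096UL) == 1, "exact fit");
_Static_assert(PAGES_PER_BNODE(8192UL) == 2, "two full pages");
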
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6686bf39a5b5..cb1e5faa2fb7 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
91 if (!tree) 91 if (!tree)
92 return 0; 92 return 0;
93 93
94 if (tree->node_size >= PAGE_CACHE_SIZE) { 94 if (tree->node_size >= PAGE_SIZE) {
95 nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT); 95 nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
96 spin_lock(&tree->hash_lock); 96 spin_lock(&tree->hash_lock);
97 node = hfs_bnode_findhash(tree, nidx); 97 node = hfs_bnode_findhash(tree, nidx);
98 if (!node) 98 if (!node)
@@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
105 } 105 }
106 spin_unlock(&tree->hash_lock); 106 spin_unlock(&tree->hash_lock);
107 } else { 107 } else {
108 nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift); 108 nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
109 i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); 109 i = 1 << (PAGE_SHIFT - tree->node_size_shift);
110 spin_lock(&tree->hash_lock); 110 spin_lock(&tree->hash_lock);
111 do { 111 do {
112 node = hfs_bnode_findhash(tree, nidx++); 112 node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index d2954451519e..c0ae274c0a22 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -13,7 +13,7 @@
13#include "hfsplus_fs.h" 13#include "hfsplus_fs.h"
14#include "hfsplus_raw.h" 14#include "hfsplus_raw.h"
15 15
16#define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8) 16#define PAGE_CACHE_BITS (PAGE_SIZE * 8)
17 17
18int hfsplus_block_allocate(struct super_block *sb, u32 size, 18int hfsplus_block_allocate(struct super_block *sb, u32 size,
19 u32 offset, u32 *max) 19 u32 offset, u32 *max)
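
PAGE_CACHE_BITS above is the number of allocation blocks one page of the hfsplus bitmap can track: eight bits per byte times PAGE_SIZE bytes. With 4 KiB pages that is 32768 blocks per bitmap page:

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_CACHE_BITS (PAGE_SIZE * 8)

_Static_assert(PAGE_CACHE_BITS == 32768, "8 bits/byte * 4096 bytes/page");
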
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 63924662aaf3..ce014ceb89ef 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
24 int l; 24 int l;
25 25
26 off += node->page_offset; 26 off += node->page_offset;
27 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 27 pagep = node->page + (off >> PAGE_SHIFT);
28 off &= ~PAGE_CACHE_MASK; 28 off &= ~PAGE_MASK;
29 29
30 l = min_t(int, len, PAGE_CACHE_SIZE - off); 30 l = min_t(int, len, PAGE_SIZE - off);
31 memcpy(buf, kmap(*pagep) + off, l); 31 memcpy(buf, kmap(*pagep) + off, l);
32 kunmap(*pagep); 32 kunmap(*pagep);
33 33
34 while ((len -= l) != 0) { 34 while ((len -= l) != 0) {
35 buf += l; 35 buf += l;
36 l = min_t(int, len, PAGE_CACHE_SIZE); 36 l = min_t(int, len, PAGE_SIZE);
37 memcpy(buf, kmap(*++pagep), l); 37 memcpy(buf, kmap(*++pagep), l);
38 kunmap(*pagep); 38 kunmap(*pagep);
39 } 39 }
@@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
77 int l; 77 int l;
78 78
79 off += node->page_offset; 79 off += node->page_offset;
80 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 80 pagep = node->page + (off >> PAGE_SHIFT);
81 off &= ~PAGE_CACHE_MASK; 81 off &= ~PAGE_MASK;
82 82
83 l = min_t(int, len, PAGE_CACHE_SIZE - off); 83 l = min_t(int, len, PAGE_SIZE - off);
84 memcpy(kmap(*pagep) + off, buf, l); 84 memcpy(kmap(*pagep) + off, buf, l);
85 set_page_dirty(*pagep); 85 set_page_dirty(*pagep);
86 kunmap(*pagep); 86 kunmap(*pagep);
87 87
88 while ((len -= l) != 0) { 88 while ((len -= l) != 0) {
89 buf += l; 89 buf += l;
90 l = min_t(int, len, PAGE_CACHE_SIZE); 90 l = min_t(int, len, PAGE_SIZE);
91 memcpy(kmap(*++pagep), buf, l); 91 memcpy(kmap(*++pagep), buf, l);
92 set_page_dirty(*pagep); 92 set_page_dirty(*pagep);
93 kunmap(*pagep); 93 kunmap(*pagep);
@@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
 	int l;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min_t(int, len, PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_SIZE - off);
 	memset(kmap(*pagep) + off, 0, l);
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
-		l = min_t(int, len, PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_SIZE);
 		memset(kmap(*++pagep), 0, l);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
@@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 	tree = src_node->tree;
 	src += src_node->page_offset;
 	dst += dst_node->page_offset;
-	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
-	src &= ~PAGE_CACHE_MASK;
-	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
-	dst &= ~PAGE_CACHE_MASK;
+	src_page = src_node->page + (src >> PAGE_SHIFT);
+	src &= ~PAGE_MASK;
+	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
+	dst &= ~PAGE_MASK;
 
 	if (src == dst) {
-		l = min_t(int, len, PAGE_CACHE_SIZE - src);
+		l = min_t(int, len, PAGE_SIZE - src);
 		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
 		kunmap(*src_page);
 		set_page_dirty(*dst_page);
 		kunmap(*dst_page);
 
 		while ((len -= l) != 0) {
-			l = min_t(int, len, PAGE_CACHE_SIZE);
+			l = min_t(int, len, PAGE_SIZE);
 			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 		do {
 			src_ptr = kmap(*src_page) + src;
 			dst_ptr = kmap(*dst_page) + dst;
-			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
-				l = PAGE_CACHE_SIZE - src;
+			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
+				l = PAGE_SIZE - src;
 				src = 0;
 				dst += l;
 			} else {
-				l = PAGE_CACHE_SIZE - dst;
+				l = PAGE_SIZE - dst;
 				src += l;
 				dst = 0;
 			}
@@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 	dst += node->page_offset;
 	if (dst > src) {
 		src += len - 1;
-		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-		src = (src & ~PAGE_CACHE_MASK) + 1;
+		src_page = node->page + (src >> PAGE_SHIFT);
+		src = (src & ~PAGE_MASK) + 1;
 		dst += len - 1;
-		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-		dst = (dst & ~PAGE_CACHE_MASK) + 1;
+		dst_page = node->page + (dst >> PAGE_SHIFT);
+		dst = (dst & ~PAGE_MASK) + 1;
 
 		if (src == dst) {
 			while (src < len) {
@@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
 				len -= src;
-				src = PAGE_CACHE_SIZE;
+				src = PAGE_SIZE;
 				src_page--;
 				dst_page--;
 			}
@@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				dst_ptr = kmap(*dst_page) + dst;
 				if (src < dst) {
 					l = src;
-					src = PAGE_CACHE_SIZE;
+					src = PAGE_SIZE;
 					dst -= l;
 				} else {
 					l = dst;
 					src -= l;
-					dst = PAGE_CACHE_SIZE;
+					dst = PAGE_SIZE;
 				}
 				l = min(len, l);
 				memmove(dst_ptr - l, src_ptr - l, l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
-				if (dst == PAGE_CACHE_SIZE)
+				if (dst == PAGE_SIZE)
 					dst_page--;
 				else
 					src_page--;
 			} while ((len -= l));
 		}
 	} else {
-		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-		src &= ~PAGE_CACHE_MASK;
-		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-		dst &= ~PAGE_CACHE_MASK;
+		src_page = node->page + (src >> PAGE_SHIFT);
+		src &= ~PAGE_MASK;
+		dst_page = node->page + (dst >> PAGE_SHIFT);
+		dst &= ~PAGE_MASK;
 
 		if (src == dst) {
-			l = min_t(int, len, PAGE_CACHE_SIZE - src);
+			l = min_t(int, len, PAGE_SIZE - src);
 			memmove(kmap(*dst_page) + src,
 				kmap(*src_page) + src, l);
 			kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
-				l = min_t(int, len, PAGE_CACHE_SIZE);
+				l = min_t(int, len, PAGE_SIZE);
 				memmove(kmap(*++dst_page),
 					kmap(*++src_page), l);
 				kunmap(*src_page);
@@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src <
-						PAGE_CACHE_SIZE - dst) {
-					l = PAGE_CACHE_SIZE - src;
+				if (PAGE_SIZE - src <
+						PAGE_SIZE - dst) {
+					l = PAGE_SIZE - src;
 					src = 0;
 					dst += l;
 				} else {
-					l = PAGE_CACHE_SIZE - dst;
+					l = PAGE_SIZE - dst;
 					src += l;
 					dst = 0;
 				}
@@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 
 	mapping = tree->inode->i_mapping;
 	off = (loff_t)cnid << tree->node_size_shift;
-	block = off >> PAGE_CACHE_SHIFT;
-	node->page_offset = off & ~PAGE_CACHE_MASK;
+	block = off >> PAGE_SHIFT;
+	node->page_offset = off & ~PAGE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
 		page = read_mapping_page(mapping, block, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
-			page_cache_release(page);
+			put_page(page);
 			goto fail;
 		}
 		node->page[i] = page;
@@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
-			page_cache_release(node->page[i]);
+			put_page(node->page[i]);
 	kfree(node);
 }
 
@@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
-	       min_t(int, PAGE_CACHE_SIZE, tree->node_size));
+	       min_t(int, PAGE_SIZE, tree->node_size));
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		memset(kmap(*++pagep), 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
 	}
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 3345c7553edc..d9d1a36ba826 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 	tree->node_size_shift = ffs(size) - 1;
 
 	tree->pages_per_bnode =
-		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
-		PAGE_CACHE_SHIFT;
+		(tree->node_size + PAGE_SIZE - 1) >>
+		PAGE_SHIFT;
 
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 	return tree;
 
  fail_page:
-	page_cache_release(page);
+	put_page(page);
  free_inode:
 	tree->inode->i_mapping->a_ops = &hfsplus_aops;
 	iput(tree->inode);
@@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 	off = off16;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	pagep = node->page + (off >> PAGE_SHIFT);
 	data = kmap(*pagep);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	idx = 0;
 
 	for (;;) {
@@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 					}
 				}
 			}
-			if (++off >= PAGE_CACHE_SIZE) {
+			if (++off >= PAGE_SIZE) {
 				kunmap(*pagep);
 				data = kmap(*++pagep);
 				off = 0;
@@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		len = hfs_brec_lenoff(node, 0, &off16);
 		off = off16;
 		off += node->page_offset;
-		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		pagep = node->page + (off >> PAGE_SHIFT);
 		data = kmap(*pagep);
-		off &= ~PAGE_CACHE_MASK;
+		off &= ~PAGE_MASK;
 	}
 }
 
@@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
 		len = hfs_brec_lenoff(node, 0, &off);
 	}
 	off += node->page_offset + nidx / 8;
-	page = node->page[off >> PAGE_CACHE_SHIFT];
+	page = node->page[off >> PAGE_SHIFT];
 	data = kmap(page);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1a6394cdb54e..b28f39865c3a 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 	}
 	if (!tree)
 		return 0;
-	if (tree->node_size >= PAGE_CACHE_SIZE) {
+	if (tree->node_size >= PAGE_SIZE) {
 		nidx = page->index >>
-			(tree->node_size_shift - PAGE_CACHE_SHIFT);
+			(tree->node_size_shift - PAGE_SHIFT);
 		spin_lock(&tree->hash_lock);
 		node = hfs_bnode_findhash(tree, nidx);
 		if (!node)
@@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 		spin_unlock(&tree->hash_lock);
 	} else {
 		nidx = page->index <<
-			(PAGE_CACHE_SHIFT - tree->node_size_shift);
-		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+			(PAGE_SHIFT - tree->node_size_shift);
+		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
 		spin_lock(&tree->hash_lock);
 		do {
 			node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 5d54490a136d..c35911362ff9 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	err = -EFBIG;
 	last_fs_block = sbi->total_blocks - 1;
 	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
-			PAGE_CACHE_SHIFT;
+			PAGE_SHIFT;
 
 	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
 	    (last_fs_page > (pgoff_t)(~0ULL))) {
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index ab01530b4930..70e445ff0cff 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -220,7 +220,7 @@ check_attr_tree_state_again:
 
 	index = 0;
 	written = 0;
-	for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
+	for (; written < node_size; index++, written += PAGE_SIZE) {
 		void *kaddr;
 
 		page = read_mapping_page(mapping, index, NULL);
@@ -231,11 +231,11 @@ check_attr_tree_state_again:
 
 		kaddr = kmap_atomic(page);
 		memcpy(kaddr, buf + written,
-			min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
+			min_t(size_t, PAGE_SIZE, node_size - written));
 		kunmap_atomic(kaddr);
 
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d1abbee281d1..7016653f3e41 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode = mapping->host;
 	char *buffer;
 	loff_t base = page_offset(page);
-	int count = PAGE_CACHE_SIZE;
-	int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	int count = PAGE_SIZE;
+	int end_index = inode->i_size >> PAGE_SHIFT;
 	int err;
 
 	if (page->index >= end_index)
-		count = inode->i_size & (PAGE_CACHE_SIZE-1);
+		count = inode->i_size & (PAGE_SIZE-1);
 
 	buffer = kmap(page);
 
@@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
 
 	buffer = kmap(page);
 	bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
-			PAGE_CACHE_SIZE);
+			PAGE_SIZE);
 	if (bytes_read < 0) {
 		ClearPageUptodate(page);
 		SetPageError(page);
@@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
 		goto out;
 	}
 
-	memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);
+	memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
 
 	ClearPageError(page);
 	SetPageUptodate(page);
@@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
 			      loff_t pos, unsigned len, unsigned flags,
 			      struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 
 	*pagep = grab_cache_page_write_begin(mapping, index, flags);
 	if (!*pagep)
@@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 	void *buffer;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int err;
 
 	buffer = kmap(page);
 	err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
 	kunmap(page);
 
-	if (!PageUptodate(page) && err == PAGE_CACHE_SIZE)
+	if (!PageUptodate(page) && err == PAGE_SIZE)
 		SetPageUptodate(page);
 
 	/*
@@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
 	if (err > 0 && (pos > inode->i_size))
 		inode->i_size = pos;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return err;
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e1f465a389d5..4ea71eba40a5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
 	int i, chunksize;
 
 	/* Find which 4k chunk and offset with in that chunk */
-	i = offset >> PAGE_CACHE_SHIFT;
-	offset = offset & ~PAGE_CACHE_MASK;
+	i = offset >> PAGE_SHIFT;
+	offset = offset & ~PAGE_MASK;
 
 	while (size) {
 		size_t n;
-		chunksize = PAGE_CACHE_SIZE;
+		chunksize = PAGE_SIZE;
 		if (offset)
 			chunksize -= offset;
 		if (chunksize > size)
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
  * data. Its *very* similar to do_generic_mapping_read(), we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
  */
 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
@@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			 * We have the page, copy it to user space buffer.
 			 */
 			copied = hugetlbfs_read_actor(page, offset, to, nr);
-			page_cache_release(page);
+			put_page(page);
 		}
 		offset += copied;
 		retval += copied;
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index f311bf084015..2e4e834d1a98 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -26,7 +26,7 @@
 #include "zisofs.h"
 
 /* This should probably be global. */
-static char zisofs_sink_page[PAGE_CACHE_SIZE];
+static char zisofs_sink_page[PAGE_SIZE];
 
 /*
  * This contains the zlib memory allocation and the mutex for the
@@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 		for ( i = 0 ; i < pcount ; i++ ) {
 			if (!pages[i])
 				continue;
-			memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+			memset(page_address(pages[i]), 0, PAGE_SIZE);
 			flush_dcache_page(pages[i]);
 			SetPageUptodate(pages[i]);
 		}
-		return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
+		return ((loff_t)pcount) << PAGE_SHIFT;
 	}
 
 	/* Because zlib is not thread-safe, do all the I/O at the top. */
@@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 			if (pages[curpage]) {
 				stream.next_out = page_address(pages[curpage])
 						+ poffset;
-				stream.avail_out = PAGE_CACHE_SIZE - poffset;
+				stream.avail_out = PAGE_SIZE - poffset;
 				poffset = 0;
 			} else {
 				stream.next_out = (void *)&zisofs_sink_page;
-				stream.avail_out = PAGE_CACHE_SIZE;
+				stream.avail_out = PAGE_SIZE;
 			}
 		}
 		if (!stream.avail_in) {
131 if (!stream.avail_in) { 131 if (!stream.avail_in) {
@@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 	 * pages with the data we have anyway...
 	 */
 	start_off = page_offset(pages[full_page]);
-	end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+	end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
 
 	cstart_block = start_off >> zisofs_block_shift;
 	cend_block = (end_off + (1 << zisofs_block_shift) - 1)
 		>> zisofs_block_shift;
 
-	WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
-		((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+	WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
+		((cstart_block << zisofs_block_shift) & PAGE_MASK));
 
 	/* Find the pointer to this specific chunk */
 	/* Note: we're not using isonum_731() here because the data is known aligned */
@@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 		ret = zisofs_uncompress_block(inode, block_start, block_end,
 					      pcount, pages, poffset, &err);
 		poffset += ret;
-		pages += poffset >> PAGE_CACHE_SHIFT;
-		pcount -= poffset >> PAGE_CACHE_SHIFT;
-		full_page -= poffset >> PAGE_CACHE_SHIFT;
-		poffset &= ~PAGE_CACHE_MASK;
+		pages += poffset >> PAGE_SHIFT;
+		pcount -= poffset >> PAGE_SHIFT;
+		full_page -= poffset >> PAGE_SHIFT;
+		poffset &= ~PAGE_MASK;
 
 		if (err) {
 			brelse(bh);
@@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 
 	if (poffset && *pages) {
 		memset(page_address(*pages) + poffset, 0,
-		       PAGE_CACHE_SIZE - poffset);
+		       PAGE_SIZE - poffset);
 		flush_dcache_page(*pages);
 		SetPageUptodate(*pages);
 	}
@@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
 	int i, pcount, full_page;
 	unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
 	unsigned int zisofs_pages_per_cblock =
-		PAGE_CACHE_SHIFT <= zisofs_block_shift ?
-		(1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+		PAGE_SHIFT <= zisofs_block_shift ?
+		(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
 	struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
 	pgoff_t index = page->index, end_index;
 
-	end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	/*
 	 * If this page is wholly outside i_size we just return zero;
 	 * do_generic_file_read() will handle this for us
@@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
 		return 0;
 	}
 
-	if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+	if (PAGE_SHIFT <= zisofs_block_shift) {
 		/* We have already been given one page, this is the one
 		   we must do. */
 		full_page = index & (zisofs_pages_per_cblock - 1);
@@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
 			kunmap(pages[i]);
 			unlock_page(pages[i]);
 			if (i != full_page)
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 		}
 	}
 
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bcd2d41b318a..131dedc920d8 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
 	 * the page with useless information without generating any
 	 * I/O errors.
 	 */
-	if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
+	if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
 		printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
 			__func__, b_off,
 			(unsigned long long)inode->i_size);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 517f2de784cf..2ad98d6e19f4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh)
 	if (!trylock_page(page))
 		goto nope;
 
-	page_cache_get(page);
+	get_page(page);
 	__brelse(bh);
 	try_to_free_buffers(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return;
 
 nope:
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index de73a9516a54..435f0b26ac20 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal)
 
 int jbd2_journal_blocks_per_page(struct inode *inode)
 {
-	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+	return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 }
 
 /*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 01e4652d88f6..67c103867bf8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
 	struct buffer_head *head, *bh, *next;
 	unsigned int stop = offset + length;
 	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
+	int partial_page = (offset || length < PAGE_SIZE);
 	int may_free = 1;
 	int ret = 0;
 
@@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
 	if (!page_has_buffers(page))
 		return 0;
 
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1090eb64b90d..9d26b1b9fc01 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
 			   rather than mucking around with actually reading the node
 			   and checking the compression type, which is the real way
 			   to tell a hole node. */
-			if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
-			    && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+			if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
+			    && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
 				JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
 					    ref_offset(fn->raw));
 				bitched = 1;
 			}
 
-			if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
-			    && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+			if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
+			    && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
 				JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
 					    ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
 				bitched = 1;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index cad86bac3453..0e62dec3effc 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
 	int ret;
 
 	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
-		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
+		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);
 
 	BUG_ON(!PageLocked(pg));
 
 	pg_buf = kmap(pg);
 	/* FIXME: Can kmap fail? */
 
-	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+				     PAGE_SIZE);
 
 	if (ret) {
 		ClearPageUptodate(pg);
@@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 	struct page *pg;
 	struct inode *inode = mapping->host;
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
+	uint32_t pageofs = index << PAGE_SHIFT;
 	int ret = 0;
 
 	pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 
 out_page:
 	unlock_page(pg);
-	page_cache_release(pg);
+	put_page(pg);
 	return ret;
 }
 
@@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
 	struct jffs2_raw_inode *ri;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned end = start + copied;
 	unsigned aligned_start = start & ~3;
 	int ret = 0;
 	uint32_t writtenlen = 0;
 
 	jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
-		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+		  __func__, inode->i_ino, pg->index << PAGE_SHIFT,
 		  start, end, pg->flags);
 
 	/* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	   to re-lock it. */
 	BUG_ON(!PageUptodate(pg));
 
-	if (end == PAGE_CACHE_SIZE) {
+	if (end == PAGE_SIZE) {
 		/* When writing out the end of a page, write out the
 		   _whole_ page. This helps to reduce the number of
 		   nodes in files which have many short writes, like
@@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 			jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
 				  __func__);
 			unlock_page(pg);
-			page_cache_release(pg);
+			put_page(pg);
 			return -ENOMEM;
 		}
 
@@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	kmap(pg);
 
 	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
-				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+				      (pg->index << PAGE_SHIFT) + aligned_start,
 				      end - aligned_start, &writtenlen);
 
 	kunmap(pg);
@@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	jffs2_dbg(1, "%s() returning %d\n",
 		  __func__, writtenlen > 0 ? writtenlen : ret);
 	unlock_page(pg);
-	page_cache_release(pg);
+	put_page(pg);
 	return writtenlen > 0 ? writtenlen : ret;
 }
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bead25ae8fe4..ae2ebb26b446 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 		goto out_root;
 
 	sb->s_maxbytes = 0xFFFFFFFF;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = JFFS2_SUPER_MAGIC;
 	if (!(sb->s_flags & MS_RDONLY))
 		jffs2_start_garbage_collect_thread(c);
@@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
 	struct page *pg;
 
-	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+	pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
 			     (void *)jffs2_do_readpage_unlock, inode);
 	if (IS_ERR(pg))
 		return (void *)pg;
@@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
 	struct page *pg = (void *)*priv;
 
 	kunmap(pg);
-	page_cache_release(pg);
+	put_page(pg);
 }
 
 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 7e553f286775..9ed0f26cf023 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -552,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
 		goto upnout;
 	}
 	/* We found a datanode. Do the GC */
-	if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+	if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
 		/* It crosses a page boundary. Therefore, it must be a hole. */
 		ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
 	} else {
@@ -1192,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		struct jffs2_node_frag *frag;
 		uint32_t min, max;
 
-		min = start & ~(PAGE_CACHE_SIZE-1);
-		max = min + PAGE_CACHE_SIZE;
+		min = start & ~(PAGE_SIZE-1);
+		max = min + PAGE_SIZE;
 
 		frag = jffs2_lookup_node_frag(&f->fragtree, start);
 
@@ -1351,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
 		datalen = end - offset;
 
-		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+		writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
 
 		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
 
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9a5449bc3afb..b86c78d178c6 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list,
 
 	/* If the last fragment starts at the RAM page boundary, it is
 	 * REF_PRISTINE irrespective of its size. */
-	if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
 		dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
 			frag->ofs, frag->ofs + frag->size);
 		frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
@@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
 	   If so, both 'this' and the new node get marked REF_NORMAL so
 	   the GC can take a look.
 	*/
-	if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+	if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
 		if (this->node)
 			mark_ref_normal(this->node->raw);
 		mark_ref_normal(newfrag->node->raw);
@@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
 
 	/* If we now share a page with other nodes, mark either previous
 	   or next node REF_NORMAL, as appropriate. */
-	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+	if (newfrag->ofs & (PAGE_SIZE-1)) {
 		struct jffs2_node_frag *prev = frag_prev(newfrag);
 
 		mark_ref_normal(fn->raw);
@@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
 			mark_ref_normal(prev->node->raw);
 	}
 
-	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+	if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
 		struct jffs2_node_frag *next = frag_next(newfrag);
 
 		if (next) {
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index b634de4c8101..7fb187ab2682 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
 	   beginning of a page and runs to the end of the file, or if
 	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
 	*/
-	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
-	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+	if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
+	    ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
 	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
 		flash_ofs |= REF_PRISTINE;
 	} else {
@@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 			break;
 		}
 		mutex_lock(&f->sem);
-		datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+		datalen = min_t(uint32_t, writelen,
+				PAGE_SIZE - (offset & (PAGE_SIZE-1)));
 		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
 
 		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index a3eb316b1ac3..b60e015cc757 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp)
 static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
-#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
+#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
 
 #if MPS_PER_PAGE > 1
 
@@ -316,7 +316,7 @@ static void last_write_complete(struct page *page)
 	struct metapage *mp;
 	unsigned int offset;
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 		if (mp && test_bit(META_io, &mp->flag)) {
 			if (mp->lsn)
@@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 	int bad_blocks = 0;
 
 	page_start = (sector_t)page->index <<
-		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		     (PAGE_SHIFT - inode->i_blkbits);
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 
 		if (!mp || !test_bit(META_dirty, &mp->flag))
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 			bio = NULL;
 		} else
 			inc_io(page);
-		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
+		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
 		pblock = metapage_get_blocks(inode, lblock, &xlen);
 		if (!pblock) {
 			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 	struct inode *inode = page->mapping->host;
 	struct bio *bio = NULL;
 	int block_offset;
-	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
 	sector_t page_start;	/* address of page in fs blocks */
 	sector_t pblock;
 	int xlen;
@@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
 	BUG_ON(!PageLocked(page));
 	page_start = (sector_t)page->index <<
-		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		     (PAGE_SHIFT - inode->i_blkbits);
 
 	block_offset = 0;
 	while (block_offset < blocks_per_page) {
@@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 	int ret = 1;
 	int offset;
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 
 		if (!mp)
@@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 static void metapage_invalidatepage(struct page *page, unsigned int offset,
 				    unsigned int length)
 {
-	BUG_ON(offset || length < PAGE_CACHE_SIZE);
+	BUG_ON(offset || length < PAGE_SIZE);
 
 	BUG_ON(PageWriteback(page));
 
@@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
 		 inode->i_ino, lblock, absolute);
 
 	l2bsize = inode->i_blkbits;
-	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
 	page_index = lblock >> l2BlocksPerPage;
 	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
-	if ((page_offset + size) > PAGE_CACHE_SIZE) {
+	if ((page_offset + size) > PAGE_SIZE) {
 		jfs_err("MetaData crosses page boundary!!");
 		jfs_err("lblock = %lx, size = %d", lblock, size);
 		dump_stack();
@@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
 		mapping = inode->i_mapping;
 	}
 
-	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
+	if (new && (PSIZE == PAGE_SIZE)) {
 		page = grab_cache_page(mapping, page_index);
 		if (!page) {
 			jfs_err("grab_cache_page failed!");
@@ -693,7 +693,7 @@ unlock:
 void grab_metapage(struct metapage * mp)
 {
 	jfs_info("grab_metapage: mp = 0x%p", mp);
-	page_cache_get(mp->page);
+	get_page(mp->page);
 	lock_page(mp->page);
 	mp->count++;
 	lock_metapage(mp);
@@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp)
 	jfs_info("force_metapage: mp = 0x%p", mp);
 	set_bit(META_forcewrite, &mp->flag);
 	clear_bit(META_sync, &mp->flag);
-	page_cache_get(page);
+	get_page(page);
 	lock_page(page);
 	set_page_dirty(page);
 	write_one_page(page, 1);
 	clear_bit(META_forcewrite, &mp->flag);
-	page_cache_release(page);
+	put_page(page);
 }
 
 void hold_metapage(struct metapage *mp)
@@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp)
 		unlock_page(mp->page);
 		return;
 	}
-	page_cache_get(mp->page);
+	get_page(mp->page);
 	mp->count++;
 	lock_metapage(mp);
 	unlock_page(mp->page);
@@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp)
 	assert(mp->count);
 	if (--mp->count || mp->nohomeok) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return;
 	}
 
@@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp)
 		drop_metapage(page, mp);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 {
 	sector_t lblock;
-	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
+	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
 	int BlocksPerPage = 1 << l2BlocksPerPage;
 	/* All callers are interested in block device's mapping */
 	struct address_space *mapping =
@@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
 		if (!page)
 			continue;
-		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 			mp = page_to_mp(page, offset);
 			if (!mp)
 				continue;
@@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 				remove_from_logsync(mp);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 337e9e51ac06..a869fb4a20d6 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
 	lock_page(page);
 	if (!mp->nohomeok++) {
 		mark_metapage_dirty(mp);
-		page_cache_get(page);
+		get_page(page);
 		wait_on_page_writeback(page);
 	}
 	unlock_page(page);
@@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
 static inline void _metapage_homeok(struct metapage *mp)
 {
 	if (!--mp->nohomeok)
-		page_cache_release(mp->page);
+		put_page(mp->page);
 }
 
 static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f5d85ba8e23..78d599198bf5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
 	 * Page cache is indexed by long.
 	 * I would use MAX_LFS_FILESIZE, but it's only half as big
 	 */
-	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
+	sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
 			     (u64)sb->s_maxbytes);
 #endif
 	sb->s_time_gran = 1;
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index b67dbccdaf88..f73541fbe7af 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
 	struct dentry *root;
 
 	info->sb = sb;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = magic;
 	sb->s_op = &kernfs_sops;
 	sb->s_time_gran = 1;
diff --git a/fs/libfs.c b/fs/libfs.c
index 0ca80b2af420..f3fa82ce9b70 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
 {
 	struct inode *inode = d_inode(dentry);
 	generic_fillattr(inode, stat);
-	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
+	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
 	return 0;
 }
 EXPORT_SYMBOL(simple_getattr);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr);
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	buf->f_type = dentry->d_sb->s_magic;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	return 0;
 }
@@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
 	struct page *page;
 	pgoff_t index;
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
 
     *pagep = page;
 
-    if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
-        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+    if (!PageUptodate(page) && (len != PAGE_SIZE)) {
+        unsigned from = pos & (PAGE_SIZE - 1);
 
-        zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+        zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
     }
     return 0;
 }
@@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
     /* zero the stale part of the page if we did a short copy */
     if (copied < len) {
-        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+        unsigned from = pos & (PAGE_SIZE - 1);
 
         zero_user(page, from + copied, len - copied);
     }
@@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
     set_page_dirty(page);
     unlock_page(page);
-    page_cache_release(page);
+    put_page(page);
 
     return copied;
 }
@@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
     struct dentry *dentry;
     int i;
 
-    s->s_blocksize = PAGE_CACHE_SIZE;
-    s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+    s->s_blocksize = PAGE_SIZE;
+    s->s_blocksize_bits = PAGE_SHIFT;
     s->s_magic = magic;
     s->s_op = &simple_super_operations;
     s->s_time_gran = 1;
@@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
 {
     u64 last_fs_block = num_blocks - 1;
     u64 last_fs_page =
-        last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
+        last_fs_block >> (PAGE_SHIFT - blocksize_bits);
 
     if (unlikely(num_blocks == 0))
         return 0;
 
-    if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
+    if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
         return -EINVAL;
 
     if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
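
simple_write_begin() above zeroes the parts of a not-yet-uptodate page that the write will not cover, so stale page contents can never leak back to userspace through a later read. A userspace sketch of that zeroing, with a fixed 4 KiB buffer and a stand-in helper in place of the kernel's zero_user_segments() on a mapped struct page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096  /* assumption for the demo */

/* zero everything outside the byte range [from, from + len) */
static void zero_outside(char *page, unsigned from, unsigned len)
{
    memset(page, 0, from);                                 /* [0, from) */
    memset(page + from + len, 0, PAGE_SIZE - from - len);  /* [from+len, end) */
}

int main(void)
{
    static char page[PAGE_SIZE];

    memset(page, 'X', sizeof(page));  /* pretend stale data */
    zero_outside(page, 100, 50);      /* write covers bytes 100..149 */
    printf("page[0]=%d page[99]=%d page[100]='%c' page[150]=%d\n",
           page[0], page[99], page[100], page[150]);
    return 0;
}
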
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index a709d80c8ebc..cc26f8f215f5 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio)
 
     bio_for_each_segment_all(bvec, bio, i) {
         end_page_writeback(bvec->bv_page);
-        page_cache_release(bvec->bv_page);
+        put_page(bvec->bv_page);
     }
     bio_put(bio);
     if (atomic_dec_and_test(&super->s_pending_writes))
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 9c501449450d..b76a62b1978f 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -46,9 +46,9 @@ static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
 
     BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
     BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
-    BUG_ON(len > PAGE_CACHE_SIZE);
-    page_start = ofs & PAGE_CACHE_MASK;
-    page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
+    BUG_ON(len > PAGE_SIZE);
+    page_start = ofs & PAGE_MASK;
+    page_end = PAGE_ALIGN(ofs + len) - 1;
     ret = mtd_write(mtd, ofs, len, &retlen, buf);
     if (ret || (retlen != len))
         return -EIO;
@@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
         if (!page)
             continue;
         memset(page_address(page), 0xFF, PAGE_SIZE);
-        page_cache_release(page);
+        put_page(page);
     }
     return 0;
 }
@@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
         err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
                     page_address(page));
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
         if (err)
             return err;
     }
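
The page_start/page_end computation in loffs_mtd_write() above relies on PAGE_MASK to clear the in-page offset bits and PAGE_ALIGN to round up to the next page boundary. A standalone demo of that arithmetic with an assumed 4 KiB page:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL                     /* assumption */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    uint64_t ofs = 5000, len = 200;

    /* first byte of the page containing ofs: 4096 */
    printf("page_start = %llu\n", (unsigned long long)(ofs & PAGE_MASK));
    /* last byte of the last page touched: 8191 */
    printf("page_end   = %llu\n",
           (unsigned long long)(PAGE_ALIGN(ofs + len) - 1));
    return 0;
}
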
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 542468e9bfb4..ddbed2be5366 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
         if (name->len != be16_to_cpu(dd->namelen) ||
             memcmp(name->name, dd->name, name->len)) {
             kunmap_atomic(dd);
-            page_cache_release(page);
+            put_page(page);
             continue;
         }
 
@@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry)
         return PTR_ERR(page);
     }
     index = page->index;
-    page_cache_release(page);
+    put_page(page);
 
     mutex_lock(&super->s_dirop_mutex);
     logfs_add_transaction(dir, ta);
@@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
                 be16_to_cpu(dd->namelen),
                 be64_to_cpu(dd->ino), dd->type);
         kunmap(page);
-        page_cache_release(page);
+        put_page(page);
         if (full)
             break;
     }
@@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
     dd = kmap_atomic(page);
     ino = be64_to_cpu(dd->ino);
     kunmap_atomic(dd);
-    page_cache_release(page);
+    put_page(page);
 
     inode = logfs_iget(dir->i_sb, ino);
     if (IS_ERR(inode))
@@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
 
     err = logfs_write_buf(dir, page, WF_LOCK);
     unlock_page(page);
-    page_cache_release(page);
+    put_page(page);
     if (!err)
         grow_dir(dir, index);
     return err;
@@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
     map = kmap_atomic(page);
     memcpy(dd, map, sizeof(*dd));
     kunmap_atomic(map);
-    page_cache_release(page);
+    put_page(page);
     return 0;
 }
 
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 61eaeb1b6cac..f01ddfb1a03b 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -15,21 +15,21 @@ static int logfs_write_begin(struct file *file, struct address_space *mapping,
 {
     struct inode *inode = mapping->host;
     struct page *page;
-    pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+    pgoff_t index = pos >> PAGE_SHIFT;
 
     page = grab_cache_page_write_begin(mapping, index, flags);
     if (!page)
         return -ENOMEM;
     *pagep = page;
 
-    if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+    if ((len == PAGE_SIZE) || PageUptodate(page))
         return 0;
-    if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+    if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+        unsigned start = pos & (PAGE_SIZE - 1);
         unsigned end = start + len;
 
         /* Reading beyond i_size is simple: memset to zero */
-        zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+        zero_user_segments(page, 0, start, end, PAGE_SIZE);
         return 0;
     }
     return logfs_readpage_nolock(page);
@@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
 {
     struct inode *inode = mapping->host;
     pgoff_t index = page->index;
-    unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+    unsigned start = pos & (PAGE_SIZE - 1);
     unsigned end = start + copied;
     int ret = 0;
 
-    BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
+    BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
     BUG_ON(page->index > I3_BLOCKS);
 
     if (copied < len) {
@@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
     if (copied == 0)
         goto out; /* FIXME: do we need to update inode? */
 
-    if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
-        i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
+    if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
+        i_size_write(inode, (index << PAGE_SHIFT) + end);
         mark_inode_dirty_sync(inode);
     }
 
@@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
     }
 out:
     unlock_page(page);
-    page_cache_release(page);
+    put_page(page);
     return ret ? ret : copied;
 }
 
@@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
 {
     struct inode *inode = page->mapping->host;
     loff_t i_size = i_size_read(inode);
-    pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+    pgoff_t end_index = i_size >> PAGE_SHIFT;
     unsigned offset;
     u64 bix;
     level_t level;
@@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
         return __logfs_writepage(page);
 
     /* Is the page fully outside i_size? (truncate in progress) */
-    offset = i_size & (PAGE_CACHE_SIZE-1);
+    offset = i_size & (PAGE_SIZE-1);
     if (bix > end_index || offset == 0) {
         unlock_page(page);
         return 0; /* don't care */
@@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
      * the page size, the remaining memory is zeroed when mapped, and
      * writes to that region are not written out to the file."
      */
-    zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+    zero_user_segment(page, offset, PAGE_SIZE);
     return __logfs_writepage(page);
 }
 
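
logfs_writepage() above zeroes the tail of the page that straddles i_size, since the quoted mmap rule guarantees the mapped remainder past EOF reads as zero and is never written back. Worked numbers, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12   /* assumption matching PAGE_SIZE */

int main(void)
{
    unsigned long i_size = 10000;                      /* file is 10000 bytes */
    unsigned long end_index = i_size >> PAGE_SHIFT;    /* EOF page: index 2 */
    unsigned long offset = i_size & (PAGE_SIZE - 1);   /* 1808 valid bytes */

    printf("end_index=%lu offset=%lu zero=[%lu,%lu)\n",
           end_index, offset, offset, PAGE_SIZE);
    return 0;
}
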
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 20973c9e52f8..3fb8c6d67303 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
 static void logfs_put_read_page(struct page *page)
 {
     unlock_page(page);
-    page_cache_release(page);
+    put_page(page);
 }
 
 static void logfs_lock_write_page(struct page *page)
@@ -323,7 +323,7 @@ repeat:
             return NULL;
         err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
         if (unlikely(err)) {
-            page_cache_release(page);
+            put_page(page);
             if (err == -EEXIST)
                 goto repeat;
             return NULL;
@@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page)
 static void logfs_put_write_page(struct page *page)
 {
     logfs_unlock_write_page(page);
-    page_cache_release(page);
+    put_page(page);
 }
 
 static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
@@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb,
 
     if (PagePrivate(page)) {
         ClearPagePrivate(page);
-        page_cache_release(page);
+        put_page(page);
         set_page_private(page, 0);
     }
     __free_block(sb, block);
@@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page)
     block->page = page;
 
     SetPagePrivate(page);
-    page_cache_get(page);
+    get_page(page);
     set_page_private(page, (unsigned long) block);
 
     block->ops = &indirect_block_ops;
@@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index)
 
 static int logfs_read_empty(struct page *page)
 {
-    zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+    zero_user_segment(page, 0, PAGE_SIZE);
     return 0;
 }
 
@@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page,
     if (err)
         return err;
 
-    zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
+    zero_user_segment(page, size - pageofs, PAGE_SIZE);
     return logfs_segment_write(inode, page, shadow);
 }
 
@@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page)
     block->page = NULL;
     if (PagePrivate(page)) {
         ClearPagePrivate(page);
-        page_cache_release(page);
+        put_page(page);
         set_page_private(page, 0);
     }
 }
@@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode)
 
     if (!PagePrivate(page)) {
         SetPagePrivate(page);
-        page_cache_get(page);
+        get_page(page);
         set_page_private(page, (unsigned long) block);
     }
 
@@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode)
     logfs_disk_to_inode(di, inode);
     kunmap_atomic(di);
     move_page_to_inode(inode, page);
-    page_cache_release(page);
+    put_page(page);
     return 0;
 }
 
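
A pattern that recurs throughout the logfs hunks above: attaching a private block to a page via PagePrivate takes an extra page reference, and detaching drops it, so page->private can never point into a freed page. A sketch of the pairing, mirroring the hunks; kernel-style pseudocode for illustration, not a standalone compilable unit:

/* Pairing sketch: the reference taken here is owned by page->private. */
static void attach_block(struct page *page, struct logfs_block *block)
{
    if (!PagePrivate(page)) {
        SetPagePrivate(page);
        get_page(page);                 /* reference held by ->private */
        set_page_private(page, (unsigned long)block);
    }
}

static void detach_block(struct page *page)
{
    if (PagePrivate(page)) {
        ClearPagePrivate(page);
        put_page(page);                 /* drop the ->private reference */
        set_page_private(page, 0);
    }
}
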
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index d270e4b2ab6b..1efd6055f4b0 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
 
         if (!PagePrivate(page)) {
             SetPagePrivate(page);
-            page_cache_get(page);
+            get_page(page);
         }
-        page_cache_release(page);
+        put_page(page);
 
         buf += copylen;
         len -= copylen;
@@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area)
         memset(page_address(page) + offset, 0xff, len);
         if (!PagePrivate(page)) {
             SetPagePrivate(page);
-            page_cache_get(page);
+            get_page(page);
         }
-        page_cache_release(page);
+        put_page(page);
     }
 }
 
@@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area)
     struct logfs_super *super = logfs_super(sb);
     u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
     u32 len = super->s_segsize - area->a_used_bytes;
-    pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
-    pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+    pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
+    pgoff_t no_indizes = len >> PAGE_SHIFT;
     struct page *page;
 
     while (no_indizes) {
         page = get_mapping_page(sb, index, 0);
         BUG_ON(!page); /* FIXME: reserve a pool */
         SetPageUptodate(page);
-        memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+        memset(page_address(page), 0xff, PAGE_SIZE);
         if (!PagePrivate(page)) {
             SetPagePrivate(page);
-            page_cache_get(page);
+            get_page(page);
         }
-        page_cache_release(page);
+        put_page(page);
         index++;
         no_indizes--;
     }
@@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
         if (IS_ERR(page))
             return PTR_ERR(page);
         memcpy(buf, page_address(page) + offset, copylen);
-        page_cache_release(page);
+        put_page(page);
 
         buf += copylen;
         len -= copylen;
@@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page,
 
     if (!PagePrivate(page)) {
         SetPagePrivate(page);
-        page_cache_get(page);
+        get_page(page);
         set_page_private(page, (unsigned long) block);
     }
     block->ops = &indirect_block_ops;
@@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page)
 
     if (PagePrivate(page)) {
         ClearPagePrivate(page);
-        page_cache_release(page);
+        put_page(page);
         set_page_private(page, 0);
     }
     block->ops = &btree_block_ops;
@@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno)
             continue;
         if (PagePrivate(page)) {
             ClearPagePrivate(page);
-            page_cache_release(page);
+            put_page(page);
         }
-        page_cache_release(page);
+        put_page(page);
     }
 }
 
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 54360293bcb5..5751082dba52 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -48,7 +48,7 @@ void emergency_read_end(struct page *page)
     if (page == emergency_page)
         mutex_unlock(&emergency_mutex);
     else
-        page_cache_release(page);
+        put_page(page);
 }
 
 static void dump_segfile(struct super_block *sb)
@@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb,
     logfs_set_segment_erased(sb, segno, ec, 0);
     logfs_write_ds(sb, ds, segno, ec);
     err = super->s_devops->write_sb(sb, page);
-    page_cache_release(page);
+    put_page(page);
     return err;
 }
 
@@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb)
         return NULL;
     last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
     if (!last || IS_ERR(last)) {
-        page_cache_release(first);
+        put_page(first);
         return NULL;
     }
 
     if (!logfs_check_ds(page_address(first))) {
-        page_cache_release(last);
+        put_page(last);
         return first;
     }
 
     /* First one didn't work, try the second superblock */
     if (!logfs_check_ds(page_address(last))) {
-        page_cache_release(first);
+        put_page(first);
         return last;
     }
 
     /* Neither worked, sorry folks */
-    page_cache_release(first);
-    page_cache_release(last);
+    put_page(first);
+    put_page(last);
     return NULL;
 }
 
@@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb)
     super->s_data_levels = ds->ds_data_levels;
     super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
         + super->s_data_levels;
-    page_cache_release(page);
+    put_page(page);
     return 0;
 }
 
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index d19ac258105a..33957c07cd11 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = {
 static inline void dir_put_page(struct page *page)
 {
     kunmap(page);
-    page_cache_release(page);
+    put_page(page);
 }
 
 /*
@@ -38,10 +38,10 @@ static inline void dir_put_page(struct page *page)
 static unsigned
 minix_last_byte(struct inode *inode, unsigned long page_nr)
 {
-    unsigned last_byte = PAGE_CACHE_SIZE;
+    unsigned last_byte = PAGE_SIZE;
 
-    if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
-        last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
+    if (page_nr == (inode->i_size >> PAGE_SHIFT))
+        last_byte = inode->i_size & (PAGE_SIZE - 1);
     return last_byte;
 }
 
@@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
     if (pos >= inode->i_size)
         return 0;
 
-    offset = pos & ~PAGE_CACHE_MASK;
-    n = pos >> PAGE_CACHE_SHIFT;
+    offset = pos & ~PAGE_MASK;
+    n = pos >> PAGE_SHIFT;
 
     for ( ; n < npages; n++, offset = 0) {
         char *p, *kaddr, *limit;
@@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
         lock_page(page);
         kaddr = (char*)page_address(page);
         dir_end = kaddr + minix_last_byte(dir, n);
-        limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
+        limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
         for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
             de = (minix_dirent *)p;
             de3 = (minix3_dirent *)p;
@@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
     }
 
     kaddr = kmap_atomic(page);
-    memset(kaddr, 0, PAGE_CACHE_SIZE);
+    memset(kaddr, 0, PAGE_SIZE);
 
     if (sbi->s_version == MINIX_V3) {
         minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 
     err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
-    page_cache_release(page);
+    put_page(page);
     return err;
 }
 
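
minix_last_byte() above returns how many directory bytes in a given page are valid: a full PAGE_SIZE for every page before the one holding i_size, and the remainder for that last page. A standalone check with assumed 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12   /* assumption matching PAGE_SIZE */

static unsigned last_byte(unsigned long i_size, unsigned long page_nr)
{
    unsigned last = PAGE_SIZE;

    if (page_nr == (i_size >> PAGE_SHIFT))
        last = i_size & (PAGE_SIZE - 1);
    return last;
}

int main(void)
{
    unsigned long i_size = 9000;    /* directory of 9000 bytes */

    printf("page 0: %u, page 1: %u, page 2: %u\n",
           last_byte(i_size, 0),    /* 4096 */
           last_byte(i_size, 1),    /* 4096 */
           last_byte(i_size, 2));   /* 808  */
    return 0;
}
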
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index a795a11e50c7..2887d1d95ce2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
 out_dir:
     if (dir_de) {
         kunmap(dir_page);
-        page_cache_release(dir_page);
+        put_page(dir_page);
     }
 out_old:
     kunmap(old_page);
-    page_cache_release(old_page);
+    put_page(old_page);
 out:
     return err;
 }
diff --git a/fs/mpage.c b/fs/mpage.c
index 6bd9fd90964e..eedc644b78d7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
      * don't make any buffers if there is only one buffer on
      * the page and the page just needs to be set up to date
      */
-    if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
+    if (inode->i_blkbits == PAGE_SHIFT &&
         buffer_uptodate(bh)) {
         SetPageUptodate(page);
         return;
@@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 {
     struct inode *inode = page->mapping->host;
     const unsigned blkbits = inode->i_blkbits;
-    const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+    const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
     const unsigned blocksize = 1 << blkbits;
     sector_t block_in_file;
     sector_t last_block;
@@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
     if (page_has_buffers(page))
         goto confused;
 
-    block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+    block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
     last_block = block_in_file + nr_pages * blocks_per_page;
     last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
     if (last_block > last_block_in_file)
@@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
     }
 
     if (first_hole != blocks_per_page) {
-        zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
+        zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
         if (first_hole == 0) {
             SetPageUptodate(page);
             unlock_page(page);
@@ -331,7 +331,7 @@ confused:
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  * BH_Boundary explanation:
  *
@@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                     &first_logical_block,
                     get_block, gfp);
         }
-        page_cache_release(page);
+        put_page(page);
     }
     BUG_ON(!list_empty(pages));
     if (bio)
@@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
     struct inode *inode = page->mapping->host;
     const unsigned blkbits = inode->i_blkbits;
     unsigned long end_index;
-    const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+    const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
     sector_t last_block;
     sector_t block_in_file;
     sector_t blocks[MAX_BUF_PER_PAGE];
@@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
      * The page has no buffers: map it to disk
      */
     BUG_ON(!PageUptodate(page));
-    block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+    block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
     last_block = (i_size - 1) >> blkbits;
     map_bh.b_page = page;
     for (page_block = 0; page_block < blocks_per_page; ) {
@@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
     first_unmapped = page_block;
 
 page_is_mapped:
-    end_index = i_size >> PAGE_CACHE_SHIFT;
+    end_index = i_size >> PAGE_SHIFT;
     if (page->index >= end_index) {
         /*
          * The page straddles i_size. It must be zeroed out on each
@@ -584,11 +584,11 @@ page_is_mapped:
          * is zeroed when mapped, and writes to that region are not
          * written out to the file."
          */
-        unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
+        unsigned offset = i_size & (PAGE_SIZE - 1);
 
         if (page->index > end_index || !offset)
             goto confused;
-        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+        zero_user_segment(page, offset, PAGE_SIZE);
     }
 
     /*
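
The mpage arithmetic above assumes blocksize <= PAGE_SIZE: a page covers PAGE_SIZE >> blkbits filesystem blocks, and page index i starts at file block i << (PAGE_SHIFT - blkbits). A standalone demo with assumed 4 KiB pages and 1 KiB blocks:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned blkbits = 10;                             /* 1 KiB blocks */
    unsigned blocks_per_page = PAGE_SIZE >> blkbits;   /* 4 */
    unsigned long index = 3;                           /* page index */
    unsigned long long block_in_file =
        (unsigned long long)index << (PAGE_SHIFT - blkbits);  /* 12 */

    printf("blocks_per_page=%u, first block of page %lu = %llu\n",
           blocks_per_page, index, block_in_file);
    return 0;
}
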
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b7f8eaeea5d8..bfdad003ee56 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
             kunmap(ctl.page);
             SetPageUptodate(ctl.page);
             unlock_page(ctl.page);
-            page_cache_release(ctl.page);
+            put_page(ctl.page);
             ctl.page = NULL;
         }
         ctl.idx = 0;
@@ -520,7 +520,7 @@ invalid_cache:
     if (ctl.page) {
         kunmap(ctl.page);
         unlock_page(ctl.page);
-        page_cache_release(ctl.page);
+        put_page(ctl.page);
         ctl.page = NULL;
     }
     ctl.cache = cache;
@@ -554,14 +554,14 @@ finished:
         kunmap(ctl.page);
         SetPageUptodate(ctl.page);
         unlock_page(ctl.page);
-        page_cache_release(ctl.page);
+        put_page(ctl.page);
     }
     if (page) {
         cache->head = ctl.head;
         kunmap(page);
         SetPageUptodate(page);
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
     }
 out:
     return result;
@@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
             kunmap(ctl.page);
             SetPageUptodate(ctl.page);
             unlock_page(ctl.page);
-            page_cache_release(ctl.page);
+            put_page(ctl.page);
         }
         ctl.cache = NULL;
         ctl.idx -= NCP_DIRCACHE_SIZE;
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 5233fbc1747a..17cfb743b5bf 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -191,7 +191,7 @@ struct ncp_cache_head {
     int        eof;
 };
 
-#define NCP_DIRCACHE_SIZE    ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
+#define NCP_DIRCACHE_SIZE    ((int)(PAGE_SIZE/sizeof(struct dentry *)))
 union ncp_dir_cache {
     struct ncp_cache_head    head;
     struct dentry        *dentry[NCP_DIRCACHE_SIZE];
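
NCP_DIRCACHE_SIZE above sizes the per-page dentry cache so that one union exactly fills a page. A standalone check, assuming a 4 KiB page and 8-byte pointers (LP64):

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096;   /* assumption; PAGE_SIZE is arch-defined */
    int n = (int)(page_size / sizeof(void *));

    printf("NCP_DIRCACHE_SIZE = %d\n", n);   /* 512 on LP64 */
    return 0;
}
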
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 02e4d87d2ed3..17a42e4eb872 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
     size_t bytes_left = header->args.count;
     unsigned int pg_offset = header->args.pgbase, pg_len;
     struct page **pages = header->args.pages;
-    int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+    int pg_index = header->args.pgbase >> PAGE_SHIFT;
     const bool is_dio = (header->dreq != NULL);
     struct blk_plug plug;
     int i;
@@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header)
         }
 
         if (is_dio) {
-            if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
-                pg_len = PAGE_CACHE_SIZE - pg_offset;
+            if (pg_offset + bytes_left > PAGE_SIZE)
+                pg_len = PAGE_SIZE - pg_offset;
             else
                 pg_len = bytes_left;
         } else {
             BUG_ON(pg_offset != 0);
-            pg_len = PAGE_CACHE_SIZE;
+            pg_len = PAGE_SIZE;
         }
 
         if (is_hole(&be)) {
@@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work)
 
     if (likely(!hdr->pnfs_error)) {
         struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
-        u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
+        u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
         u64 end = (hdr->args.offset + hdr->args.count +
-            PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;
+            PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
         ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
                     (end - start) >> SECTOR_SHIFT);
@@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
     loff_t offset = header->args.offset;
     size_t count = header->args.count;
     struct page **pages = header->args.pages;
-    int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+    int pg_index = header->args.pgbase >> PAGE_SHIFT;
     unsigned int pg_len;
     struct blk_plug plug;
     int i;
@@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
     blk_start_plug(&plug);
 
     /* we always write out the whole page */
-    offset = offset & (loff_t)PAGE_CACHE_MASK;
+    offset = offset & (loff_t)PAGE_MASK;
     isect = offset >> SECTOR_SHIFT;
 
     for (i = pg_index; i < header->page_array.npages; i++) {
@@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
             extent_length = be.be_length - (isect - be.be_f_offset);
         }
 
-        pg_len = PAGE_CACHE_SIZE;
+        pg_len = PAGE_SIZE;
         bio = do_add_page_to_bio(bio, header->page_array.npages - i,
                      WRITE, isect, pages[i], &map, &be,
                      bl_end_io_write, par,
@@ -820,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
     pgoff_t end;
 
     /* Optimize common case that writes from 0 to end of file */
-    end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+    end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
     if (end != inode->i_mapping->nrpages) {
         rcu_read_lock();
         end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
@@ -828,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
     }
 
     if (!end)
-        return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
+        return i_size_read(inode) - (idx << PAGE_SHIFT);
     else
-        return (end - idx) << PAGE_CACHE_SHIFT;
+        return (end - idx) << PAGE_SHIFT;
 }
 
 static void
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index bc21205309e0..18e6fd0b9506 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -40,8 +40,8 @@
 #include "../pnfs.h"
 #include "../netns.h"
 
-#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
-#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 
 struct pnfs_block_dev;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d6d5d2a48e83..0c96528db94a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
         server->rsize = max_rpc_payload;
     if (server->rsize > NFS_MAX_FILE_IO_SIZE)
         server->rsize = NFS_MAX_FILE_IO_SIZE;
-    server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+    server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
     server->backing_dev_info.name = "nfs";
     server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
@@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
         server->wsize = max_rpc_payload;
     if (server->wsize > NFS_MAX_FILE_IO_SIZE)
         server->wsize = NFS_MAX_FILE_IO_SIZE;
-    server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+    server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
     server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
 
     server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
-    if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
-        server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
+    if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES)
+        server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES;
     if (server->dtsize > server->rsize)
         server->dtsize = server->rsize;
 
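
The rpages/wpages lines above are round-up division written with shifts: (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT. A standalone check with an assumed 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long rsize = 32768;  /* server read size in bytes */

    /* exactly 8 pages */
    printf("rpages for 32768 = %lu\n", (rsize + PAGE_SIZE - 1) >> PAGE_SHIFT);
    /* one extra byte rounds up to 9 pages */
    printf("rpages for 32769 = %lu\n", (32769UL + PAGE_SIZE - 1) >> PAGE_SHIFT);
    return 0;
}
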
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4bfa7d8bcade..33eb81738d03 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
 again:
     timestamp = jiffies;
     gencount = nfs_inc_attr_generation_counter();
-    error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
+    error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
                      NFS_SERVER(inode)->dtsize, desc->plus);
     if (error < 0) {
         /* We requested READDIRPLUS, but the server doesn't grok it */
@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
     count++;
 
     if (desc->plus != 0)
-        nfs_prime_dcache(desc->file->f_path.dentry, entry);
+        nfs_prime_dcache(file_dentry(desc->file), entry);
 
     status = nfs_readdir_add_to_array(entry, page);
     if (status != 0)
@@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
 {
     if (!desc->page->mapping)
         nfs_readdir_clear_array(desc->page);
-    page_cache_release(desc->page);
+    put_page(desc->page);
     desc->page = NULL;
 }
 
@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
  */
 static int nfs_readdir(struct file *file, struct dir_context *ctx)
 {
-    struct dentry    *dentry = file->f_path.dentry;
+    struct dentry    *dentry = file_dentry(file);
     struct inode    *inode = d_inode(dentry);
     nfs_readdir_descriptor_t my_desc,
             *desc = &my_desc;
@@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
          * add_to_page_cache_lru() grabs an extra page refcount.
          * Drop it here to avoid leaking this page later.
          */
-        page_cache_release(page);
+        put_page(page);
     } else
         __free_page(page);
 
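
Mixed into the nfs/dir.c hunks above is a second, unrelated conversion: file->f_path.dentry becomes file_dentry(file). For a file opened through overlayfs the two can differ, since f_path.dentry names the overlay dentry while file_dentry() resolves to the dentry of the underlying layer that the NFS code must operate on; for ordinary opens it returns f_path.dentry unchanged. A sketch of the intended usage (the wrapper name here is hypothetical, for illustration only):

/* Hypothetical helper: filesystem code that may sit below overlayfs should
 * go through file_dentry(), not f_path.dentry. */
static struct dentry *nfs_dentry_of(struct file *file)
{
    return file_dentry(file);   /* not file->f_path.dentry */
}
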
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 7a0cfd3266e5..c93826e4a8c6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 {
     unsigned int i;
     for (i = 0; i < npages; i++)
-        page_cache_release(pages[i]);
+        put_page(pages[i]);
 }
 
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
@@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
             iov_iter_count(iter));
 
     pos = iocb->ki_pos;
-    end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
+    end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 
     inode_lock(inode);
 
@@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
     if (mapping->nrpages) {
         result = invalidate_inode_pages2_range(mapping,
-                    pos >> PAGE_CACHE_SHIFT, end);
+                    pos >> PAGE_SHIFT, end);
         if (result)
             goto out_unlock;
     }
@@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
     if (mapping->nrpages) {
         invalidate_inode_pages2_range(mapping,
-                          pos >> PAGE_CACHE_SHIFT, end);
+                          pos >> PAGE_SHIFT, end);
     }
 
     inode_unlock(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 89bf093d342a..be01095b97ae 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -320,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page,
                 loff_t pos, unsigned len)
 {
     unsigned int pglen = nfs_page_length(page);
-    unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+    unsigned int offset = pos & (PAGE_SIZE - 1);
     unsigned int end = offset + len;
 
     if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
             struct page **pagep, void **fsdata)
 {
     int ret;
-    pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+    pgoff_t index = pos >> PAGE_SHIFT;
     struct page *page;
     int once_thru = 0;
 
@@ -380,12 +380,12 @@ start:
     ret = nfs_flush_incompatible(file, page);
     if (ret) {
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
     } else if (!once_thru &&
            nfs_want_read_modify_write(file, page, pos, len)) {
         once_thru = 1;
         ret = nfs_readpage(file, page);
-        page_cache_release(page);
+        put_page(page);
         if (!ret)
             goto start;
     }
@@ -396,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
             loff_t pos, unsigned len, unsigned copied,
             struct page *page, void *fsdata)
 {
-    unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+    unsigned offset = pos & (PAGE_SIZE - 1);
     struct nfs_open_context *ctx = nfs_file_open_context(file);
     int status;
 
@@ -413,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 
         if (pglen == 0) {
             zero_user_segments(page, 0, offset,
-                    end, PAGE_CACHE_SIZE);
+                    end, PAGE_SIZE);
             SetPageUptodate(page);
         } else if (end >= pglen) {
-            zero_user_segment(page, end, PAGE_CACHE_SIZE);
+            zero_user_segment(page, end, PAGE_SIZE);
             if (offset == 0)
                 SetPageUptodate(page);
         } else
-            zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+            zero_user_segment(page, pglen, PAGE_SIZE);
     }
 
     status = nfs_updatepage(file, page, offset, copied);
 
     unlock_page(page);
-    page_cache_release(page);
+    put_page(page);
 
     if (status < 0)
         return status;
@@ -454,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
     dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
          page, offset, length);
 
-    if (offset != 0 || length < PAGE_CACHE_SIZE)
+    if (offset != 0 || length < PAGE_SIZE)
         return;
     /* Cancel any unstarted writes on this page */
     nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 33d18c411905..738c84a42eb0 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
 {
     struct nfs_open_context *ctx;
 
-    ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+    ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
     if (IS_ERR(ctx))
         return PTR_ERR(ctx);
     nfs_file_set_open_context(filp, ctx);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 565f8135ae1f..f1d1d2c472e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -638,11 +638,11 @@ unsigned int nfs_page_length(struct page *page)
 
     if (i_size > 0) {
         pgoff_t page_index = page_file_index(page);
-        pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+        pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
         if (page_index < end_index)
-            return PAGE_CACHE_SIZE;
+            return PAGE_SIZE;
         if (page_index == end_index)
-            return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+            return ((i_size - 1) & ~PAGE_MASK) + 1;
     }
     return 0;
 }
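
nfs_page_length() above computes the number of valid bytes in a page: PAGE_SIZE for pages wholly below EOF, ((i_size - 1) & ~PAGE_MASK) + 1 for the EOF page, and 0 beyond it. A standalone check with an assumed 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long page_length(unsigned long i_size, unsigned long index)
{
    if (i_size > 0) {
        unsigned long end_index = (i_size - 1) >> PAGE_SHIFT;

        if (index < end_index)
            return PAGE_SIZE;
        if (index == end_index)
            return ((i_size - 1) & ~PAGE_MASK) + 1;
    }
    return 0;
}

int main(void)
{
    printf("%lu %lu %lu\n",
           page_length(10000, 0),    /* 4096 */
           page_length(10000, 2),    /* 1808: EOF page */
           page_length(10000, 3));   /* 0: beyond EOF */
    return 0;
}
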
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 22c35abbee9d..d0390516467c 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -26,7 +26,7 @@ static int
 nfs4_file_open(struct inode *inode, struct file *filp)
 {
     struct nfs_open_context *ctx;
-    struct dentry *dentry = filp->f_path.dentry;
+    struct dentry *dentry = file_dentry(filp);
     struct dentry *parent = NULL;
     struct inode *dir;
     unsigned openflags = filp->f_flags;
@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
     parent = dget_parent(dentry);
     dir = d_inode(parent);
 
-    ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+    ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
     err = PTR_ERR(ctx);
     if (IS_ERR(ctx))
         goto out;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e4441216804..88474a4fc669 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr,
         blocksize = be32_to_cpup(p);
         maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
     }
-    maxsize >>= PAGE_CACHE_SHIFT;
+    maxsize >>= PAGE_SHIFT;
     *pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
     return 0;
 out_overflow:
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9aebffb40505..049c1b1f2932 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page)
     dprintk("%s: index=0x%lx\n", __func__,
         (page == ZERO_PAGE(0)) ? -1UL : page->index);
     if (ZERO_PAGE(0) != page)
-        page_cache_release(page);
+        put_page(page);
     return;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8ce4f61cbaa5..1f6db4231057 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
      * update_nfs_request below if the region is not locked. */
     req->wb_page = page;
     req->wb_index = page_file_index(page);
-    page_cache_get(page);
+    get_page(page);
     req->wb_offset = offset;
     req->wb_pgbase = offset;
     req->wb_bytes = count;
@@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req)
     struct nfs_lock_context *l_ctx = req->wb_lock_context;
 
     if (page != NULL) {
-        page_cache_release(page);
+        put_page(page);
         req->wb_page = NULL;
     }
     if (l_ctx != NULL) {
@@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
             return false;
     } else {
         if (req->wb_pgbase != 0 ||
-            prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+            prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
             return false;
     }
 }
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2fa483e6dbe2..89a5ef4df08a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 
     i_size = i_size_read(ino);
 
-    lgp->args.minlength = PAGE_CACHE_SIZE;
+    lgp->args.minlength = PAGE_SIZE;
     if (lgp->args.minlength > range->length)
         lgp->args.minlength = range->length;
     if (range->iomode == IOMODE_READ) {
@@ -1618,13 +1618,13 @@ lookup_again:
         spin_unlock(&clp->cl_lock);
     }
 
-    pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+    pg_offset = arg.offset & ~PAGE_MASK;
     if (pg_offset) {
         arg.offset -= pg_offset;
         arg.length += pg_offset;
     }
     if (arg.length != NFS4_MAX_UINT64)
-        arg.length = PAGE_CACHE_ALIGN(arg.length);
+        arg.length = PAGE_ALIGN(arg.length);
 
     lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
     atomic_dec(&lo->plh_outstanding);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb31e23e7def..6776d7a7839e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
 static
 int nfs_return_empty_page(struct page *page)
 {
-	zero_user(page, 0, PAGE_CACHE_SIZE);
+	zero_user(page, 0, PAGE_SIZE);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
@@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		unlock_page(page);
 		return PTR_ERR(new);
 	}
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 
 	nfs_pageio_init_read(&pgio, inode, false,
 			     &nfs_async_read_completion_ops);
@@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
 	int error;
 
 	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
-		page, PAGE_CACHE_SIZE, page_file_index(page));
+		page, PAGE_SIZE, page_file_index(page));
 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
 	nfs_add_stats(inode, NFSIOS_READPAGES, 1);
 
@@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page)
 	if (IS_ERR(new))
 		goto out_error;
 
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 	if (!nfs_pageio_add_request(desc->pgio, new)) {
 		nfs_list_remove_request(new);
 		nfs_readpage_release(new);
@@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
 
 	pgm = &pgio.pg_mirrors[0];
 	NFS_I(inode)->read_io += pgm->pg_bytes_written;
-	npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
-		 PAGE_CACHE_SHIFT;
+	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
+		 PAGE_SHIFT;
 	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 read_complete:
 	put_nfs_open_context(desc.ctx);
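
The npages computation in the nfs_readpages() hunk is the usual round-up division written with shifts; since PAGE_SIZE is a power of two, it is equivalent to:

npages = DIV_ROUND_UP(pgm->pg_bytes_written, PAGE_SIZE);
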
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5754835a2886..5f4fd53e5764 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
 
 	spin_lock(&inode->i_lock);
 	i_size = i_size_read(inode);
-	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size - 1) >> PAGE_SHIFT;
 	if (i_size > 0 && page_file_index(page) < end_index)
 		goto out;
 	end = page_file_offset(page) + ((loff_t)offset+count);
@@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
 	loff_t range_start = page_file_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
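
For reference, the range nfs_wb_single_page() flushes above is exactly one page cache page, expressed as an inclusive byte range:

loff_t start = page_file_offset(page);	/* page index << PAGE_SHIFT */
loff_t end = start + PAGE_SIZE - 1;	/* last byte of the same page */
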
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 27f75bcbeb30..a9fb3636c142 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
 	struct buffer_head *pbh;
 	__u64 key;
 
-	key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
+	key = page_index(bh->b_page) << (PAGE_SHIFT -
 					 bmap->b_inode->i_blkbits);
 	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
 		key++;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35e6932..e0c9daf9aa22 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 		set_buffer_uptodate(bh);
 
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	return bh;
 }
 
@@ -128,7 +128,7 @@ found:
 
 out_locked:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
 	pgoff_t index = page_index(page);
 	int still_dirty;
 
-	page_cache_get(page);
+	get_page(page);
 	lock_page(page);
 	wait_on_page_writeback(page);
 
@@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
 	still_dirty = PageDirty(page);
 	mapping = page->mapping;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (!still_dirty && mapping)
 		invalidate_inode_pages2_range(mapping, index, index);
@@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 	obh = ctxt->bh;
 	ctxt->newbh = NULL;
 
-	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
+	if (inode->i_blkbits == PAGE_SHIFT) {
 		lock_page(obh->b_page);
 		/*
 		 * We cannot call radix_tree_preload for the kernels older
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 6b8b92b19cec..e08f064e4bd7 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode)
 static inline void nilfs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page)
 	unsigned chunk_size = nilfs_chunk_size(dir);
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	struct nilfs_dir_entry *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -161,7 +161,7 @@ Espan:
 bad_entry:
 	nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
 		    "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-		    dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		    dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		    (unsigned long) le64_to_cpu(p->inode),
 		    rec_len, p->name_len);
 	goto fail;
@@ -170,7 +170,7 @@ Eend:
 	nilfs_error(sb, "nilfs_check_page",
 		    "entry in directory #%lu spans the page boundary"
 		    "offset=%lu, inode=%lu",
-		    dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		    dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 		    (unsigned long) le64_to_cpu(p->inode));
 fail:
 	SetPageChecked(page);
@@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 /*	unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
 
@@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
 		if (IS_ERR(page)) {
 			nilfs_error(sb, __func__, "bad page in #%lu",
 				    inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return -EIO;
 		}
 		kaddr = page_address(page);
@@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
 		if (++n >= npages)
 			n = 0;
 		/* next page is past the blocks we've got */
-		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
 			nilfs_error(dir->i_sb, __func__,
 				    "dir %lu size %lld exceeds block count %llu",
 				    dir->i_ino, dir->i_size,
@@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 	if (de) {
 		res = le64_to_cpu(de->inode);
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return res;
 }
@@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + nilfs_last_byte(dir, n);
 		de = (struct nilfs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 	kunmap_atomic(kaddr);
 	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
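
A worked example for the nilfs_last_byte() helper changed above, assuming 4 KiB pages (a sketch mirroring the kernel code, not a drop-in replacement):

static unsigned last_byte(loff_t i_size, unsigned long page_nr)
{
	unsigned last = i_size - (page_nr << PAGE_SHIFT);

	return last > PAGE_SIZE ? PAGE_SIZE : last;
}
/* i_size = 10000: last_byte(..., 0) == 4096, last_byte(..., 2) == 1808 */
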
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 748ca238915a..0224b7826ace 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 
  failed:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	return err;
 }
 
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 21a1e2e0d92f..534631358b13 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page)
 		if (nr_dirty)
 			nilfs_set_file_dirty(inode, nr_dirty);
 	} else if (ret) {
-		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 
 		nilfs_set_file_dirty(inode, nr_dirty);
 	}
@@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 			   struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned nr_dirty;
 	int err;
 
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40233ff..f6982b9153d5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
 
  failed_bh:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	brelse(bh);
 
  failed_unlock:
@@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
 
  failed_bh:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	brelse(bh);
  failed:
 	return ret;
@@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
 int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 {
 	pgoff_t index = (pgoff_t)block >>
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
+		(PAGE_SHIFT - inode->i_blkbits);
 	struct page *page;
 	unsigned long first_block;
 	int ret = 0;
@@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 	wait_on_page_writeback(page);
 
 	first_block = (unsigned long)index <<
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
+		(PAGE_SHIFT - inode->i_blkbits);
 	if (page_has_buffers(page)) {
 		struct buffer_head *bh;
 
@@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 	}
 	still_dirty = PageDirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (still_dirty ||
 	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
@@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
@@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
 			bh_frozen = nilfs_page_get_nth_block(page, n);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return bh_frozen;
 }
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 7ccdb961eea9..151bc19d47c0 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	nilfs_transaction_abort(old_dir->i_sb);
 	return err;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index c20df77eff99..489391561cda 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, 1 << blkbits, b_state);
 
-	first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
+	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
 	bh = nilfs_page_get_nth_block(page, block - first_block);
 
 	touch_buffer(bh);
@@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 				      unsigned long b_state)
 {
 	int blkbits = inode->i_blkbits;
-	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
+	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
 	struct page *page;
 	struct buffer_head *bh;
 
@@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
 	if (unlikely(!bh)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return NULL;
 	}
 	return bh;
@@ -288,7 +288,7 @@ repeat:
 			__set_page_dirty_nobuffers(dpage);
 
 		unlock_page(dpage);
-		page_cache_release(dpage);
+		put_page(dpage);
 		unlock_page(page);
 	}
 	pagevec_release(&pvec);
@@ -333,7 +333,7 @@ repeat:
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
-			page_cache_release(dpage);
+			put_page(dpage);
 		} else {
 			struct page *page2;
 
@@ -350,7 +350,7 @@ repeat:
 			if (unlikely(err < 0)) {
 				WARN_ON(err == -EEXIST);
 				page->mapping = NULL;
-				page_cache_release(page); /* for cache */
+				put_page(page); /* for cache */
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
@@ -523,8 +523,8 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
 	if (inode->i_mapping->nrpages == 0)
 		return 0;
 
-	index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
+	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
 
 	pagevec_init(&pvec, 0);
 
@@ -537,7 +537,7 @@ repeat:
 	if (length > 0 && pvec.pages[0]->index > index)
 		goto out;
 
-	b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
 	i = 0;
 	do {
 		page = pvec.pages[i];
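
The shifts changed throughout nilfs2/page.c convert between file block numbers and page indices; for a block size of 1 << blkbits with blkbits <= PAGE_SHIFT:

pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);	/* block -> page */
unsigned long first_block =
	(unsigned long)index << (PAGE_SHIFT - blkbits);	/* page -> its first block */
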
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 9b4f205d1173..5afa77fadc11 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
 					     blocksize, page, NULL);
 
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		(*nr_salvaged_blocks)++;
 		goto next;
 
  failed_page:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
  failed_inode:
 		printk(KERN_WARNING
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65adaae7e4..4317f72568e6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		goto failed_to_write;
 
 	if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
-	    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+	    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
 		/*
 		 * At this point, we avoid double buffering
 		 * for blocksize < pagesize because page dirty
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7521e11db728..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 
 		set_buffer_uptodate(bh);
 
-		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
+		file_ofs = ((s64)page->index << PAGE_SHIFT) +
 				bh_offset(bh);
 		read_lock_irqsave(&ni->size_lock, flags);
 		init_size = ni->initialized_size;
@@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 			u32 rec_size;
 
 			rec_size = ni->itype.index.block_size;
-			recs = PAGE_CACHE_SIZE / rec_size;
+			recs = PAGE_SIZE / rec_size;
 			/* Should have been verified before we got here... */
 			BUG_ON(!recs);
 			local_irq_save(flags);
@@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)
 	 * fully truncated, truncate will throw it away as soon as we unlock
 	 * it so no need to worry what we do with it.
 	 */
-	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 	read_lock_irqsave(&ni->size_lock, flags);
 	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
 	init_size = ni->initialized_size;
@@ -412,9 +412,9 @@ retry_readpage:
 	vi = page->mapping->host;
 	i_size = i_size_read(vi);
 	/* Is the page fully outside i_size? (truncate in progress) */
-	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT)) {
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+			PAGE_SHIFT)) {
+		zero_user(page, 0, PAGE_SIZE);
 		ntfs_debug("Read outside i_size - truncated?");
 		goto done;
 	}
@@ -463,7 +463,7 @@ retry_readpage:
 	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 		goto done;
 	}
 	if (!NInoAttr(ni))
@@ -509,7 +509,7 @@ retry_readpage:
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			attr_len);
 	/* Zero the remainder of the page. */
-	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
 	flush_dcache_page(page);
 	kunmap_atomic(addr);
 put_unm_err_out:
@@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 	/* NOTE: Different naming scheme to ntfs_read_block()! */
 
 	/* The first block in the page. */
-	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 
 	read_lock_irqsave(&ni->size_lock, flags);
 	i_size = i_size_read(vi);
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 			// in the inode.
 			// Again, for each page do:
 			//	__set_page_dirty_buffers();
-			// page_cache_release()
+			// put_page()
 			// We don't need to wait on the writes.
 			// Update iblock.
 		}
@@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,
 	ntfs_volume *vol = ni->vol;
 	u8 *kaddr;
 	unsigned int rec_size = ni->itype.index.block_size;
-	ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
+	ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
 	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
 	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
 	runlist_element *rl;
@@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,
 			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
 	bh_size = vol->sb->s_blocksize;
 	bh_size_bits = vol->sb->s_blocksize_bits;
-	max_bhs = PAGE_CACHE_SIZE / bh_size;
+	max_bhs = PAGE_SIZE / bh_size;
 	BUG_ON(!max_bhs);
 	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
 
@@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,
 	BUG_ON(!bh);
 
 	rec_size_bits = ni->itype.index.block_size_bits;
-	BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
+	BUG_ON(!(PAGE_SIZE >> rec_size_bits));
 	bhs_per_rec = rec_size >> bh_size_bits;
 	BUG_ON(!bhs_per_rec);
 
 	/* The first block in the page. */
 	rec_block = block = (sector_t)page->index <<
-			(PAGE_CACHE_SHIFT - bh_size_bits);
+			(PAGE_SHIFT - bh_size_bits);
 
 	/* The first out of bounds block for the data size. */
 	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@ lock_retry_remap:
 				unsigned long mft_no;
 
 				/* Get the mft record number. */
-				mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+				mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
 						>> rec_size_bits;
 				/* Check whether to write this mft record. */
 				tni = NULL;
@@ -1249,7 +1249,7 @@ do_mirror:
 				continue;
 			ofs = bh_offset(tbh);
 			/* Get the mft record number. */
-			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
 					>> rec_size_bits;
 			if (mft_no < vol->mftmirr_size)
 				ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@ done:
 			 * Set page error if there is only one ntfs record in the page.
 			 * Otherwise we would loose per-record granularity.
 			 */
-			if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
+			if (ni->itype.index.block_size == PAGE_SIZE)
 				SetPageError(page);
 			NVolSetErrors(vol);
 		}
@@ -1308,7 +1308,7 @@ done:
 		ntfs_debug("Page still contains one or more dirty ntfs "
 				"records. Redirtying the page starting at "
 				"record 0x%lx.", page->index <<
-				(PAGE_CACHE_SHIFT - rec_size_bits));
+				(PAGE_SHIFT - rec_size_bits));
 		redirty_page_for_writepage(wbc, page);
 		unlock_page(page);
 	} else {
@@ -1365,13 +1365,13 @@ retry_writepage:
 	BUG_ON(!PageLocked(page));
 	i_size = i_size_read(vi);
 	/* Is the page fully outside i_size? (truncate in progress) */
-	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT)) {
+	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+			PAGE_SHIFT)) {
 		/*
 		 * The page may have dirty, unmapped buffers. Make them
 		 * freeable here, so the page does not leak.
 		 */
-		block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		block_invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
@@ -1414,10 +1414,10 @@ retry_writepage:
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
 		/* We have to zero every time due to mmap-at-end-of-file. */
-		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
+		if (page->index >= (i_size >> PAGE_SHIFT)) {
 			/* The page straddles i_size. */
-			unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-			zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
+			unsigned int ofs = i_size & ~PAGE_MASK;
+			zero_user_segment(page, ofs, PAGE_SIZE);
 		}
 		/* Handle mst protected attributes. */
 		if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@ retry_writepage:
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			addr, attr_len);
 	/* Zero out of bounds area in the page cache page. */
-	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
 	kunmap_atomic(addr);
 	flush_dcache_page(page);
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
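
The recurring (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT expression in the aops.c hunks computes the first page index past EOF, so the truncate race check reads (a sketch of the pattern, not a verbatim excerpt):

pgoff_t end_index = (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

if (page->index >= end_index) {
	/* page lies entirely beyond i_size: nothing on disk backs it */
	zero_user(page, 0, PAGE_SIZE);
}
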
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index caecc58f529c..820d6eabf60f 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -40,7 +40,7 @@
 static inline void ntfs_unmap_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /**
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
  * @index:	index into the page cache for @mapping of the page to map
  *
  * Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
  *
  * If the page is not in memory it is loaded from disk first using the readpage
  * method defined in the address space operations of @mapping and the page is
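
Typical pairing of ntfs_map_page() with the ntfs_unmap_page() shown above (a usage sketch; error handling trimmed):

struct page *page = ntfs_map_page(mapping, index);

if (IS_ERR(page))
	return PTR_ERR(page);
/* ... access the kmapped data via page_address(page) ... */
ntfs_unmap_page(page);	/* kunmap() + put_page() */
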
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 250ed5b20c8f..44a39a099b54 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
 		if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
 				old_ctx.base_ntfs_ino) {
 			put_this_page = old_ctx.ntfs_ino->page;
-			page_cache_get(put_this_page);
+			get_page(put_this_page);
 		}
 		/*
 		 * Reinitialize the search context so we can lookup the
@@ -275,7 +275,7 @@ retry_map:
 		 * the pieces anyway.
 		 */
 		if (put_this_page)
-			page_cache_release(put_this_page);
+			put_page(put_this_page);
 	}
 	return err;
 }
@@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 		memcpy(kaddr, (u8*)a +
 				le16_to_cpu(a->data.resident.value_offset),
 				attr_size);
-		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
+		memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
@@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 	if (page) {
 		set_page_dirty(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	ntfs_debug("Done.");
 	return 0;
@@ -1835,7 +1835,7 @@ rl_err_out:
 		ntfs_free(rl);
 page_err_out:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (err == -EINVAL)
 		err = -EIO;
@@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 	BUG_ON(NInoEncrypted(ni));
 	mapping = VFS_I(ni)->i_mapping;
 	/* Work out the starting index and page offset. */
-	idx = ofs >> PAGE_CACHE_SHIFT;
-	start_ofs = ofs & ~PAGE_CACHE_MASK;
+	idx = ofs >> PAGE_SHIFT;
+	start_ofs = ofs & ~PAGE_MASK;
 	/* Work out the ending index and page offset. */
 	end = ofs + cnt;
-	end_ofs = end & ~PAGE_CACHE_MASK;
+	end_ofs = end & ~PAGE_MASK;
 	/* If the end is outside the inode size return -ESPIPE. */
 	if (unlikely(end > i_size_read(VFS_I(ni)))) {
 		ntfs_error(vol->sb, "Request exceeds end of attribute.");
 		return -ESPIPE;
 	}
-	end >>= PAGE_CACHE_SHIFT;
+	end >>= PAGE_SHIFT;
 	/* If there is a first partial page, need to do it the slow way. */
 	if (start_ofs) {
 		page = read_mapping_page(mapping, idx, NULL);
@@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		 * If the last page is the same as the first page, need to
 		 * limit the write to the end offset.
 		 */
-		size = PAGE_CACHE_SIZE;
+		size = PAGE_SIZE;
 		if (idx == end)
 			size = end_ofs;
 		kaddr = kmap_atomic(page);
@@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr);
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 		balance_dirty_pages_ratelimited(mapping);
 		cond_resched();
 		if (idx == end)
@@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 			return -ENOMEM;
 		}
 		kaddr = kmap_atomic(page);
-		memset(kaddr, val, PAGE_CACHE_SIZE);
+		memset(kaddr, val, PAGE_SIZE);
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr);
 		/*
@@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		set_page_dirty(page);
 		/* Finally unlock and release the page. */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		balance_dirty_pages_ratelimited(mapping);
 		cond_resched();
 	}
@@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr);
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 		balance_dirty_pages_ratelimited(mapping);
 		cond_resched();
 	}
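
The index arithmetic at the top of ntfs_attr_set() above sets up the classic three-part page walk: a leading partial page, then whole pages, then a trailing partial page. Restated with the same variables:

idx = ofs >> PAGE_SHIFT;		/* first page touched */
start_ofs = ofs & ~PAGE_MASK;		/* byte offset within it */
end = (ofs + cnt) >> PAGE_SHIFT;	/* page holding the end */
end_ofs = (ofs + cnt) & ~PAGE_MASK;	/* bytes used in that page */
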
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 0809cf876098..ec130c588d2b 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
 	 * Calculate the indices for the pages containing the first and last
 	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
 	 */
-	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
-	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
+	index = start_bit >> (3 + PAGE_SHIFT);
+	end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
 
 	/* Get the page containing the first bit (@start_bit). */
 	mapping = vi->i_mapping;
@@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
 	kaddr = page_address(page);
 
 	/* Set @pos to the position of the byte containing @start_bit. */
-	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
+	pos = (start_bit >> 3) & ~PAGE_MASK;
 
 	/* Calculate the position of @start_bit in the first byte. */
 	bit = start_bit & 7;
@@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
 	 * Depending on @value, modify all remaining whole bytes in the page up
 	 * to @cnt.
 	 */
-	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
+	len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
 	memset(kaddr + pos, value ? 0xff : 0, len);
 	cnt -= len << 3;
 
@@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
 		 * Depending on @value, modify all remaining whole bytes in the
 		 * page up to @cnt.
 		 */
-		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
+		len = min_t(s64, cnt >> 3, PAGE_SIZE);
 		memset(kaddr, value ? 0xff : 0, len);
 		cnt -= len << 3;
 	}
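
The extra shift by 3 in __ntfs_bitmap_set_bits_in_run() accounts for the 8 bits per byte, so each page covers PAGE_SIZE * 8 bits of the bitmap:

pgoff_t index = start_bit >> (3 + PAGE_SHIFT);		/* page holding the bit */
unsigned int pos = (start_bit >> 3) & ~PAGE_MASK;	/* byte within the page */
unsigned int bit = start_bit & 7;			/* bit within the byte */
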
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c35e78..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
104 unsigned int kp_ofs; 104 unsigned int kp_ofs;
105 105
106 ntfs_debug("Zeroing page region outside initialized size."); 106 ntfs_debug("Zeroing page region outside initialized size.");
107 if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { 107 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
108 /*
109 * FIXME: Using clear_page() will become wrong when we get
110 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
111 */
112 clear_page(kp); 108 clear_page(kp);
113 return; 109 return;
114 } 110 }
115 kp_ofs = initialized_size & ~PAGE_CACHE_MASK; 111 kp_ofs = initialized_size & ~PAGE_MASK;
116 memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); 112 memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
117 return; 113 return;
118} 114}
119 115
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
123static inline void handle_bounds_compressed_page(struct page *page, 119static inline void handle_bounds_compressed_page(struct page *page,
124 const loff_t i_size, const s64 initialized_size) 120 const loff_t i_size, const s64 initialized_size)
125{ 121{
126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && 122 if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
127 (initialized_size < i_size)) 123 (initialized_size < i_size))
128 zero_partial_compressed_page(page, initialized_size); 124 zero_partial_compressed_page(page, initialized_size);
129 return; 125 return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
160 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was 156 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
161 * completed during the decompression of the compression block (@cb_start). 157 * completed during the decompression of the compression block (@cb_start).
162 * 158 *
163 * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up 159 * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
164 * unpredicatbly! You have been warned! 160 * unpredicatbly! You have been warned!
165 * 161 *
166 * Note to hackers: This function may not sleep until it has finished accessing 162 * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
241 if (di == xpage) 237 if (di == xpage)
242 *xpage_done = 1; 238 *xpage_done = 1;
243 else 239 else
244 page_cache_release(dp); 240 put_page(dp);
245 dest_pages[di] = NULL; 241 dest_pages[di] = NULL;
246 } 242 }
247 } 243 }
@@ -274,7 +270,7 @@ return_error:
274 cb = cb_sb_end; 270 cb = cb_sb_end;
275 271
276 /* Advance destination position to next sub-block. */ 272 /* Advance destination position to next sub-block. */
277 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK; 273 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
278 if (!*dest_ofs && (++*dest_index > dest_max_index)) 274 if (!*dest_ofs && (++*dest_index > dest_max_index))
279 goto return_overflow; 275 goto return_overflow;
280 goto do_next_sb; 276 goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
301 297
302 /* Advance destination position to next sub-block. */ 298 /* Advance destination position to next sub-block. */
303 *dest_ofs += NTFS_SB_SIZE; 299 *dest_ofs += NTFS_SB_SIZE;
304 if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) { 300 if (!(*dest_ofs &= ~PAGE_MASK)) {
305finalize_page: 301finalize_page:
306 /* 302 /*
307 * First stage: add current page index to array of 303 * First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
335 *dest_ofs += nr_bytes; 331 *dest_ofs += nr_bytes;
336 } 332 }
337 /* We have finished the current sub-block. */ 333 /* We have finished the current sub-block. */
338 if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) 334 if (!(*dest_ofs &= ~PAGE_MASK))
339 goto finalize_page; 335 goto finalize_page;
340 goto do_next_sb; 336 goto do_next_sb;
341 } 337 }
@@ -462,7 +458,7 @@ return_overflow:
462 * have been written to so that we would lose data if we were to just overwrite 458 * have been written to so that we would lose data if we were to just overwrite
463 * them with the out-of-date uncompressed data. 459 * them with the out-of-date uncompressed data.
464 * 460 *
465 * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at 461 * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
466 * the end of the file I think. We need to detect this case and zero the out 462 * the end of the file I think. We need to detect this case and zero the out
467 * of bounds remainder of the page in question and mark it as handled. At the 463 * of bounds remainder of the page in question and mark it as handled. At the
468 * moment we would just return -EIO on such a page. This bug will only become 464 * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
470 * clusters so is probably not going to be seen by anyone. Still this should 466 * clusters so is probably not going to be seen by anyone. Still this should
471 * be fixed. (AIA) 467 * be fixed. (AIA)
472 * 468 *
473 * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in 469 * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
474 * handling sparse and compressed cbs. (AIA) 470 * handling sparse and compressed cbs. (AIA)
475 * 471 *
476 * FIXME: At the moment we don't do any zeroing out in the case that 472 * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
497 u64 cb_size_mask = cb_size - 1UL; 493 u64 cb_size_mask = cb_size - 1UL;
498 VCN vcn; 494 VCN vcn;
499 LCN lcn; 495 LCN lcn;
500 /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ 496 /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
501 VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> 497 VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
502 vol->cluster_size_bits; 498 vol->cluster_size_bits;
503 /* 499 /*
504 * The first vcn after the last wanted vcn (minimum alignment is again 500 * The first vcn after the last wanted vcn (minimum alignment is again
505 * PAGE_CACHE_SIZE. 501 * PAGE_SIZE.
506 */ 502 */
507 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) 503 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
508 & ~cb_size_mask) >> vol->cluster_size_bits; 504 & ~cb_size_mask) >> vol->cluster_size_bits;
509 /* Number of compression blocks (cbs) in the wanted vcn range. */ 505 /* Number of compression blocks (cbs) in the wanted vcn range. */
510 unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits 506 unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
515 * guarantees of start_vcn and end_vcn, no need to round up here. 511 * guarantees of start_vcn and end_vcn, no need to round up here.
516 */ 512 */
517 unsigned int nr_pages = (end_vcn - start_vcn) << 513 unsigned int nr_pages = (end_vcn - start_vcn) <<
518 vol->cluster_size_bits >> PAGE_CACHE_SHIFT; 514 vol->cluster_size_bits >> PAGE_SHIFT;
519 unsigned int xpage, max_page, cur_page, cur_ofs, i; 515 unsigned int xpage, max_page, cur_page, cur_ofs, i;
520 unsigned int cb_clusters, cb_max_ofs; 516 unsigned int cb_clusters, cb_max_ofs;
521 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0; 517 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
549 * We have already been given one page, this is the one we must do. 545 * We have already been given one page, this is the one we must do.
550 * Once again, the alignment guarantees keep it simple. 546 * Once again, the alignment guarantees keep it simple.
551 */ 547 */
552 offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; 548 offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
553 xpage = index - offset; 549 xpage = index - offset;
554 pages[xpage] = page; 550 pages[xpage] = page;
555 /* 551 /*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
560 i_size = i_size_read(VFS_I(ni)); 556 i_size = i_size_read(VFS_I(ni));
561 initialized_size = ni->initialized_size; 557 initialized_size = ni->initialized_size;
562 read_unlock_irqrestore(&ni->size_lock, flags); 558 read_unlock_irqrestore(&ni->size_lock, flags);
563 max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 559 max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
564 offset; 560 offset;
565 /* Is the page fully outside i_size? (truncate in progress) */ 561 /* Is the page fully outside i_size? (truncate in progress) */
566 if (xpage >= max_page) { 562 if (xpage >= max_page) {
567 kfree(bhs); 563 kfree(bhs);
568 kfree(pages); 564 kfree(pages);
569 zero_user(page, 0, PAGE_CACHE_SIZE); 565 zero_user(page, 0, PAGE_SIZE);
570 ntfs_debug("Compressed read outside i_size - truncated?"); 566 ntfs_debug("Compressed read outside i_size - truncated?");
571 SetPageUptodate(page); 567 SetPageUptodate(page);
572 unlock_page(page); 568 unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
591 continue; 587 continue;
592 } 588 }
593 unlock_page(page); 589 unlock_page(page);
594 page_cache_release(page); 590 put_page(page);
595 pages[i] = NULL; 591 pages[i] = NULL;
596 } 592 }
597 } 593 }
@@ -735,9 +731,9 @@ lock_retry_remap:
735 ntfs_debug("Successfully read the compression block."); 731 ntfs_debug("Successfully read the compression block.");
736 732
737 /* The last page and maximum offset within it for the current cb. */ 733 /* The last page and maximum offset within it for the current cb. */
738 cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size; 734 cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
739 cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK; 735 cb_max_ofs = cb_max_page & ~PAGE_MASK;
740 cb_max_page >>= PAGE_CACHE_SHIFT; 736 cb_max_page >>= PAGE_SHIFT;
741 737
742 /* Catch end of file inside a compression block. */ 738 /* Catch end of file inside a compression block. */
743 if (cb_max_page > max_page) 739 if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
753 for (; cur_page < cb_max_page; cur_page++) { 749 for (; cur_page < cb_max_page; cur_page++) {
754 page = pages[cur_page]; 750 page = pages[cur_page];
755 if (page) { 751 if (page) {
756 /*
757 * FIXME: Using clear_page() will become wrong
758 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
759 * for now there is no problem.
760 */
761 if (likely(!cur_ofs)) 752 if (likely(!cur_ofs))
762 clear_page(page_address(page)); 753 clear_page(page_address(page));
763 else 754 else
764 memset(page_address(page) + cur_ofs, 0, 755 memset(page_address(page) + cur_ofs, 0,
765 PAGE_CACHE_SIZE - 756 PAGE_SIZE -
766 cur_ofs); 757 cur_ofs);
767 flush_dcache_page(page); 758 flush_dcache_page(page);
768 kunmap(page); 759 kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
771 if (cur_page == xpage) 762 if (cur_page == xpage)
772 xpage_done = 1; 763 xpage_done = 1;
773 else 764 else
774 page_cache_release(page); 765 put_page(page);
775 pages[cur_page] = NULL; 766 pages[cur_page] = NULL;
776 } 767 }
777 cb_pos += PAGE_CACHE_SIZE - cur_ofs; 768 cb_pos += PAGE_SIZE - cur_ofs;
778 cur_ofs = 0; 769 cur_ofs = 0;
779 if (cb_pos >= cb_end) 770 if (cb_pos >= cb_end)
780 break; 771 break;
@@ -807,7 +798,7 @@ lock_retry_remap:
807 * synchronous io for the majority of pages. 798 * synchronous io for the majority of pages.
808 * Or if we choose not to do the read-ahead/-behind stuff, we 799 * Or if we choose not to do the read-ahead/-behind stuff, we
809 * could just return block_read_full_page(pages[xpage]) as long 800 * could just return block_read_full_page(pages[xpage]) as long
810 * as PAGE_CACHE_SIZE <= cb_size. 801 * as PAGE_SIZE <= cb_size.
811 */ 802 */
812 if (cb_max_ofs) 803 if (cb_max_ofs)
813 cb_max_page--; 804 cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
816 page = pages[cur_page]; 807 page = pages[cur_page];
817 if (page) 808 if (page)
818 memcpy(page_address(page) + cur_ofs, cb_pos, 809 memcpy(page_address(page) + cur_ofs, cb_pos,
819 PAGE_CACHE_SIZE - cur_ofs); 810 PAGE_SIZE - cur_ofs);
820 cb_pos += PAGE_CACHE_SIZE - cur_ofs; 811 cb_pos += PAGE_SIZE - cur_ofs;
821 cur_ofs = 0; 812 cur_ofs = 0;
822 if (cb_pos >= cb_end) 813 if (cb_pos >= cb_end)
823 break; 814 break;
@@ -850,10 +841,10 @@ lock_retry_remap:
850 if (cur2_page == xpage) 841 if (cur2_page == xpage)
851 xpage_done = 1; 842 xpage_done = 1;
852 else 843 else
853 page_cache_release(page); 844 put_page(page);
854 pages[cur2_page] = NULL; 845 pages[cur2_page] = NULL;
855 } 846 }
856 cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; 847 cb_pos2 += PAGE_SIZE - cur_ofs2;
857 cur_ofs2 = 0; 848 cur_ofs2 = 0;
858 if (cb_pos2 >= cb_end) 849 if (cb_pos2 >= cb_end)
859 break; 850 break;
@@ -884,7 +875,7 @@ lock_retry_remap:
884 kunmap(page); 875 kunmap(page);
885 unlock_page(page); 876 unlock_page(page);
886 if (prev_cur_page != xpage) 877 if (prev_cur_page != xpage)
887 page_cache_release(page); 878 put_page(page);
888 pages[prev_cur_page] = NULL; 879 pages[prev_cur_page] = NULL;
889 } 880 }
890 } 881 }
@@ -914,7 +905,7 @@ lock_retry_remap:
914 kunmap(page); 905 kunmap(page);
915 unlock_page(page); 906 unlock_page(page);
916 if (cur_page != xpage) 907 if (cur_page != xpage)
917 page_cache_release(page); 908 put_page(page);
918 pages[cur_page] = NULL; 909 pages[cur_page] = NULL;
919 } 910 }
920 } 911 }
@@ -961,7 +952,7 @@ err_out:
961 kunmap(page); 952 kunmap(page);
962 unlock_page(page); 953 unlock_page(page);
963 if (i != xpage) 954 if (i != xpage)
964 page_cache_release(page); 955 put_page(page);
965 } 956 }
966 } 957 }
967 kfree(pages); 958 kfree(pages);
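The hunks above are mechanical renames: PAGE_CACHE_SIZE/SHIFT/MASK become PAGE_SIZE/SHIFT/MASK, and page_cache_release() becomes put_page(), which it was already an alias for. The arithmetic they touch is worth spelling out once. A standalone sketch, assuming 4 KiB pages; the PAGE_* macros are mirrored locally so it runs in userspace, and pos_to_page() is an illustrative helper, not a kernel function:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Split a file byte offset into a page cache index and an offset
 * within that page -- the pattern this patch renames throughout. */
static void pos_to_page(uint64_t pos, uint64_t *index, unsigned *ofs)
{
        *index = pos >> PAGE_SHIFT;      /* which page cache page */
        *ofs = pos & ~PAGE_MASK;         /* byte offset inside it */
}

int main(void)
{
        uint64_t index;
        unsigned ofs;

        pos_to_page(5 * 4096 + 100, &index, &ofs);
        printf("index=%llu ofs=%u\n", (unsigned long long)index, ofs);
        return 0;                        /* prints index=5 ofs=100 */
}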
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index b2eff5816adc..a18613579001 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,11 +315,11 @@ found_it:
315 descend_into_child_node: 315 descend_into_child_node:
316 /* 316 /*
317 * Convert vcn to index into the index allocation attribute in units 317 * Convert vcn to index into the index allocation attribute in units
318 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 318 * of PAGE_SIZE and map the page cache page, reading it from
319 * disk if necessary. 319 * disk if necessary.
320 */ 320 */
321 page = ntfs_map_page(ia_mapping, vcn << 321 page = ntfs_map_page(ia_mapping, vcn <<
322 dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 322 dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
323 if (IS_ERR(page)) { 323 if (IS_ERR(page)) {
324 ntfs_error(sb, "Failed to map directory index page, error %ld.", 324 ntfs_error(sb, "Failed to map directory index page, error %ld.",
325 -PTR_ERR(page)); 325 -PTR_ERR(page));
@@ -331,9 +331,9 @@ descend_into_child_node:
331 fast_descend_into_child_node: 331 fast_descend_into_child_node:
332 /* Get to the index allocation block. */ 332 /* Get to the index allocation block. */
333 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 333 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
334 dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 334 dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
335 /* Bounds checks. */ 335 /* Bounds checks. */
336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
337 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 337 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
338 "inode 0x%lx or driver bug.", dir_ni->mft_no); 338 "inode 0x%lx or driver bug.", dir_ni->mft_no);
339 goto unm_err_out; 339 goto unm_err_out;
@@ -366,7 +366,7 @@ fast_descend_into_child_node:
366 goto unm_err_out; 366 goto unm_err_out;
367 } 367 }
368 index_end = (u8*)ia + dir_ni->itype.index.block_size; 368 index_end = (u8*)ia + dir_ni->itype.index.block_size;
369 if (index_end > kaddr + PAGE_CACHE_SIZE) { 369 if (index_end > kaddr + PAGE_SIZE) {
370 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 370 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
371 "0x%lx crosses page boundary. Impossible! " 371 "0x%lx crosses page boundary. Impossible! "
372 "Cannot access! This is probably a bug in the " 372 "Cannot access! This is probably a bug in the "
@@ -559,9 +559,9 @@ found_it2:
559 /* If vcn is in the same page cache page as old_vcn we 559 /* If vcn is in the same page cache page as old_vcn we
560 * recycle the mapped page. */ 560 * recycle the mapped page. */
561 if (old_vcn << vol->cluster_size_bits >> 561 if (old_vcn << vol->cluster_size_bits >>
562 PAGE_CACHE_SHIFT == vcn << 562 PAGE_SHIFT == vcn <<
563 vol->cluster_size_bits >> 563 vol->cluster_size_bits >>
564 PAGE_CACHE_SHIFT) 564 PAGE_SHIFT)
565 goto fast_descend_into_child_node; 565 goto fast_descend_into_child_node;
566 unlock_page(page); 566 unlock_page(page);
567 ntfs_unmap_page(page); 567 ntfs_unmap_page(page);
@@ -793,11 +793,11 @@ found_it:
793 descend_into_child_node: 793 descend_into_child_node:
794 /* 794 /*
795 * Convert vcn to index into the index allocation attribute in units 795 * Convert vcn to index into the index allocation attribute in units
796 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 796 * of PAGE_SIZE and map the page cache page, reading it from
797 * disk if necessary. 797 * disk if necessary.
798 */ 798 */
799 page = ntfs_map_page(ia_mapping, vcn << 799 page = ntfs_map_page(ia_mapping, vcn <<
800 dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 800 dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
801 if (IS_ERR(page)) { 801 if (IS_ERR(page)) {
802 ntfs_error(sb, "Failed to map directory index page, error %ld.", 802 ntfs_error(sb, "Failed to map directory index page, error %ld.",
803 -PTR_ERR(page)); 803 -PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
809 fast_descend_into_child_node: 809 fast_descend_into_child_node:
810 /* Get to the index allocation block. */ 810 /* Get to the index allocation block. */
811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
812 dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 812 dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
813 /* Bounds checks. */ 813 /* Bounds checks. */
814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
816 "inode 0x%lx or driver bug.", dir_ni->mft_no); 816 "inode 0x%lx or driver bug.", dir_ni->mft_no);
817 goto unm_err_out; 817 goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
844 goto unm_err_out; 844 goto unm_err_out;
845 } 845 }
846 index_end = (u8*)ia + dir_ni->itype.index.block_size; 846 index_end = (u8*)ia + dir_ni->itype.index.block_size;
847 if (index_end > kaddr + PAGE_CACHE_SIZE) { 847 if (index_end > kaddr + PAGE_SIZE) {
848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
849 "0x%lx crosses page boundary. Impossible! " 849 "0x%lx crosses page boundary. Impossible! "
850 "Cannot access! This is probably a bug in the " 850 "Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
968 /* If vcn is in the same page cache page as old_vcn we 968 /* If vcn is in the same page cache page as old_vcn we
969 * recycle the mapped page. */ 969 * recycle the mapped page. */
970 if (old_vcn << vol->cluster_size_bits >> 970 if (old_vcn << vol->cluster_size_bits >>
971 PAGE_CACHE_SHIFT == vcn << 971 PAGE_SHIFT == vcn <<
972 vol->cluster_size_bits >> 972 vol->cluster_size_bits >>
973 PAGE_CACHE_SHIFT) 973 PAGE_SHIFT)
974 goto fast_descend_into_child_node; 974 goto fast_descend_into_child_node;
975 unlock_page(page); 975 unlock_page(page);
976 ntfs_unmap_page(page); 976 ntfs_unmap_page(page);
@@ -1246,15 +1246,15 @@ skip_index_root:
1246 goto iput_err_out; 1246 goto iput_err_out;
1247 } 1247 }
1248 /* Get the starting bit position in the current bitmap page. */ 1248 /* Get the starting bit position in the current bitmap page. */
1249 cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); 1249 cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
1250 bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); 1250 bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
1251 get_next_bmp_page: 1251 get_next_bmp_page:
1252 ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx", 1252 ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
1253 (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT), 1253 (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
1254 (unsigned long long)bmp_pos & 1254 (unsigned long long)bmp_pos &
1255 (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); 1255 (unsigned long long)((PAGE_SIZE * 8) - 1));
1256 bmp_page = ntfs_map_page(bmp_mapping, 1256 bmp_page = ntfs_map_page(bmp_mapping,
1257 bmp_pos >> (3 + PAGE_CACHE_SHIFT)); 1257 bmp_pos >> (3 + PAGE_SHIFT));
1258 if (IS_ERR(bmp_page)) { 1258 if (IS_ERR(bmp_page)) {
1259 ntfs_error(sb, "Reading index bitmap failed."); 1259 ntfs_error(sb, "Reading index bitmap failed.");
1260 err = PTR_ERR(bmp_page); 1260 err = PTR_ERR(bmp_page);
@@ -1270,9 +1270,9 @@ find_next_index_buffer:
1270 * If we have reached the end of the bitmap page, get the next 1270 * If we have reached the end of the bitmap page, get the next
1271 * page, and put away the old one. 1271 * page, and put away the old one.
1272 */ 1272 */
1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { 1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
1274 ntfs_unmap_page(bmp_page); 1274 ntfs_unmap_page(bmp_page);
1275 bmp_pos += PAGE_CACHE_SIZE * 8; 1275 bmp_pos += PAGE_SIZE * 8;
1276 cur_bmp_pos = 0; 1276 cur_bmp_pos = 0;
1277 goto get_next_bmp_page; 1277 goto get_next_bmp_page;
1278 } 1278 }
@@ -1285,8 +1285,8 @@ find_next_index_buffer:
1285 ntfs_debug("Handling index buffer 0x%llx.", 1285 ntfs_debug("Handling index buffer 0x%llx.",
1286 (unsigned long long)bmp_pos + cur_bmp_pos); 1286 (unsigned long long)bmp_pos + cur_bmp_pos);
1287 /* If the current index buffer is in the same page we reuse the page. */ 1287 /* If the current index buffer is in the same page we reuse the page. */
1288 if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) != 1288 if ((prev_ia_pos & (s64)PAGE_MASK) !=
1289 (ia_pos & (s64)PAGE_CACHE_MASK)) { 1289 (ia_pos & (s64)PAGE_MASK)) {
1290 prev_ia_pos = ia_pos; 1290 prev_ia_pos = ia_pos;
1291 if (likely(ia_page != NULL)) { 1291 if (likely(ia_page != NULL)) {
1292 unlock_page(ia_page); 1292 unlock_page(ia_page);
@@ -1296,7 +1296,7 @@ find_next_index_buffer:
1296 * Map the page cache page containing the current ia_pos, 1296 * Map the page cache page containing the current ia_pos,
1297 * reading it from disk if necessary. 1297 * reading it from disk if necessary.
1298 */ 1298 */
1299 ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT); 1299 ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
1300 if (IS_ERR(ia_page)) { 1300 if (IS_ERR(ia_page)) {
1301 ntfs_error(sb, "Reading index allocation data failed."); 1301 ntfs_error(sb, "Reading index allocation data failed.");
1302 err = PTR_ERR(ia_page); 1302 err = PTR_ERR(ia_page);
@@ -1307,10 +1307,10 @@ find_next_index_buffer:
1307 kaddr = (u8*)page_address(ia_page); 1307 kaddr = (u8*)page_address(ia_page);
1308 } 1308 }
1309 /* Get the current index buffer. */ 1309 /* Get the current index buffer. */
1310 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & 1310 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
1311 ~(s64)(ndir->itype.index.block_size - 1))); 1311 ~(s64)(ndir->itype.index.block_size - 1)));
1312 /* Bounds checks. */ 1312 /* Bounds checks. */
1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { 1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
1314 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 1314 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
1315 "inode 0x%lx or driver bug.", vdir->i_ino); 1315 "inode 0x%lx or driver bug.", vdir->i_ino);
1316 goto err_out; 1316 goto err_out;
@@ -1348,7 +1348,7 @@ find_next_index_buffer:
1348 goto err_out; 1348 goto err_out;
1349 } 1349 }
1350 index_end = (u8*)ia + ndir->itype.index.block_size; 1350 index_end = (u8*)ia + ndir->itype.index.block_size;
1351 if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) { 1351 if (unlikely(index_end > kaddr + PAGE_SIZE)) {
1352 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 1352 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
1353 "0x%lx crosses page boundary. Impossible! " 1353 "0x%lx crosses page boundary. Impossible! "
1354 "Cannot access! This is probably a bug in the " 1354 "Cannot access! This is probably a bug in the "
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index bed4d427dfae..91117ada8528 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -220,8 +220,8 @@ do_non_resident_extend:
220 m = NULL; 220 m = NULL;
221 } 221 }
222 mapping = vi->i_mapping; 222 mapping = vi->i_mapping;
223 index = old_init_size >> PAGE_CACHE_SHIFT; 223 index = old_init_size >> PAGE_SHIFT;
224 end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 224 end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
225 do { 225 do {
226 /* 226 /*
227 * Read the page. If the page is not present, this will zero 227 * Read the page. If the page is not present, this will zero
@@ -233,7 +233,7 @@ do_non_resident_extend:
233 goto init_err_out; 233 goto init_err_out;
234 } 234 }
235 if (unlikely(PageError(page))) { 235 if (unlikely(PageError(page))) {
236 page_cache_release(page); 236 put_page(page);
237 err = -EIO; 237 err = -EIO;
238 goto init_err_out; 238 goto init_err_out;
239 } 239 }
@@ -242,13 +242,13 @@ do_non_resident_extend:
242 * enough to make ntfs_writepage() work. 242 * enough to make ntfs_writepage() work.
243 */ 243 */
244 write_lock_irqsave(&ni->size_lock, flags); 244 write_lock_irqsave(&ni->size_lock, flags);
245 ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT; 245 ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
246 if (ni->initialized_size > new_init_size) 246 if (ni->initialized_size > new_init_size)
247 ni->initialized_size = new_init_size; 247 ni->initialized_size = new_init_size;
248 write_unlock_irqrestore(&ni->size_lock, flags); 248 write_unlock_irqrestore(&ni->size_lock, flags);
249 /* Set the page dirty so it gets written out. */ 249 /* Set the page dirty so it gets written out. */
250 set_page_dirty(page); 250 set_page_dirty(page);
251 page_cache_release(page); 251 put_page(page);
252 /* 252 /*
253 * Play nice with the vm and the rest of the system. This is 253 * Play nice with the vm and the rest of the system. This is
254 * very much needed as we can potentially be modifying the 254 * very much needed as we can potentially be modifying the
@@ -543,7 +543,7 @@ out:
543 err_out: 543 err_out:
544 while (nr > 0) { 544 while (nr > 0) {
545 unlock_page(pages[--nr]); 545 unlock_page(pages[--nr]);
546 page_cache_release(pages[nr]); 546 put_page(pages[nr]);
547 } 547 }
548 goto out; 548 goto out;
549 } 549 }
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
573 * only partially being written to. 573 * only partially being written to.
574 * 574 *
575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is 575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
576 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside 576 * greater than PAGE_SIZE, that all pages in @pages are entirely inside
577 * the same cluster and that they are the entirety of that cluster, and that 577 * the same cluster and that they are the entirety of that cluster, and that
578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
579 * 579 *
@@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
653 u = 0; 653 u = 0;
654 do_next_page: 654 do_next_page:
655 page = pages[u]; 655 page = pages[u];
656 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 656 bh_pos = (s64)page->index << PAGE_SHIFT;
657 bh = head = page_buffers(page); 657 bh = head = page_buffers(page);
658 do { 658 do {
659 VCN cdelta; 659 VCN cdelta;
@@ -810,11 +810,11 @@ map_buffer_cached:
810 810
811 kaddr = kmap_atomic(page); 811 kaddr = kmap_atomic(page);
812 if (bh_pos < pos) { 812 if (bh_pos < pos) {
813 pofs = bh_pos & ~PAGE_CACHE_MASK; 813 pofs = bh_pos & ~PAGE_MASK;
814 memset(kaddr + pofs, 0, pos - bh_pos); 814 memset(kaddr + pofs, 0, pos - bh_pos);
815 } 815 }
816 if (bh_end > end) { 816 if (bh_end > end) {
817 pofs = end & ~PAGE_CACHE_MASK; 817 pofs = end & ~PAGE_MASK;
818 memset(kaddr + pofs, 0, bh_end - end); 818 memset(kaddr + pofs, 0, bh_end - end);
819 } 819 }
820 kunmap_atomic(kaddr); 820 kunmap_atomic(kaddr);
@@ -942,7 +942,7 @@ rl_not_mapped_enoent:
942 * unmapped. This can only happen when the cluster size is 942 * unmapped. This can only happen when the cluster size is
943 * less than the page cache size. 943 * less than the page cache size.
944 */ 944 */
945 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { 945 if (unlikely(vol->cluster_size < PAGE_SIZE)) {
946 bh_cend = (bh_end + vol->cluster_size - 1) >> 946 bh_cend = (bh_end + vol->cluster_size - 1) >>
947 vol->cluster_size_bits; 947 vol->cluster_size_bits;
948 if ((bh_cend <= cpos || bh_cpos >= cend)) { 948 if ((bh_cend <= cpos || bh_cpos >= cend)) {
@@ -1208,7 +1208,7 @@ rl_not_mapped_enoent:
1208 wait_on_buffer(bh); 1208 wait_on_buffer(bh);
1209 if (likely(buffer_uptodate(bh))) { 1209 if (likely(buffer_uptodate(bh))) {
1210 page = bh->b_page; 1210 page = bh->b_page;
1211 bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + 1211 bh_pos = ((s64)page->index << PAGE_SHIFT) +
1212 bh_offset(bh); 1212 bh_offset(bh);
1213 /* 1213 /*
1214 * If the buffer overflows the initialized size, need 1214 * If the buffer overflows the initialized size, need
@@ -1350,7 +1350,7 @@ rl_not_mapped_enoent:
1350 bh = head = page_buffers(page); 1350 bh = head = page_buffers(page);
1351 do { 1351 do {
1352 if (u == nr_pages && 1352 if (u == nr_pages &&
1353 ((s64)page->index << PAGE_CACHE_SHIFT) + 1353 ((s64)page->index << PAGE_SHIFT) +
1354 bh_offset(bh) >= end) 1354 bh_offset(bh) >= end)
1355 break; 1355 break;
1356 if (!buffer_new(bh)) 1356 if (!buffer_new(bh))
@@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
1422 bool partial; 1422 bool partial;
1423 1423
1424 page = pages[u]; 1424 page = pages[u];
1425 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 1425 bh_pos = (s64)page->index << PAGE_SHIFT;
1426 bh = head = page_buffers(page); 1426 bh = head = page_buffers(page);
1427 partial = false; 1427 partial = false;
1428 do { 1428 do {
@@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
1639 if (end < attr_len) 1639 if (end < attr_len)
1640 memcpy(kaddr + end, kattr + end, attr_len - end); 1640 memcpy(kaddr + end, kattr + end, attr_len - end);
1641 /* Zero the region outside the end of the attribute value. */ 1641 /* Zero the region outside the end of the attribute value. */
1642 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1642 memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
1643 flush_dcache_page(page); 1643 flush_dcache_page(page);
1644 SetPageUptodate(page); 1644 SetPageUptodate(page);
1645 } 1645 }
@@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
1706 unsigned len, copied; 1706 unsigned len, copied;
1707 1707
1708 do { 1708 do {
1709 len = PAGE_CACHE_SIZE - ofs; 1709 len = PAGE_SIZE - ofs;
1710 if (len > bytes) 1710 if (len > bytes)
1711 len = bytes; 1711 len = bytes;
1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, 1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
@@ -1724,14 +1724,14 @@ out:
1724 return total; 1724 return total;
1725 err: 1725 err:
1726 /* Zero the rest of the target like __copy_from_user(). */ 1726 /* Zero the rest of the target like __copy_from_user(). */
1727 len = PAGE_CACHE_SIZE - copied; 1727 len = PAGE_SIZE - copied;
1728 do { 1728 do {
1729 if (len > bytes) 1729 if (len > bytes)
1730 len = bytes; 1730 len = bytes;
1731 zero_user(*pages, copied, len); 1731 zero_user(*pages, copied, len);
1732 bytes -= len; 1732 bytes -= len;
1733 copied = 0; 1733 copied = 0;
1734 len = PAGE_CACHE_SIZE; 1734 len = PAGE_SIZE;
1735 } while (++pages < last_page); 1735 } while (++pages < last_page);
1736 goto out; 1736 goto out;
1737 } 1737 }
@@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1787 * attributes. 1787 * attributes.
1788 */ 1788 */
1789 nr_pages = 1; 1789 nr_pages = 1;
1790 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) 1790 if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
1791 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; 1791 nr_pages = vol->cluster_size >> PAGE_SHIFT;
1792 last_vcn = -1; 1792 last_vcn = -1;
1793 do { 1793 do {
1794 VCN vcn; 1794 VCN vcn;
@@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1796 unsigned ofs, do_pages, u; 1796 unsigned ofs, do_pages, u;
1797 size_t copied; 1797 size_t copied;
1798 1798
1799 start_idx = idx = pos >> PAGE_CACHE_SHIFT; 1799 start_idx = idx = pos >> PAGE_SHIFT;
1800 ofs = pos & ~PAGE_CACHE_MASK; 1800 ofs = pos & ~PAGE_MASK;
1801 bytes = PAGE_CACHE_SIZE - ofs; 1801 bytes = PAGE_SIZE - ofs;
1802 do_pages = 1; 1802 do_pages = 1;
1803 if (nr_pages > 1) { 1803 if (nr_pages > 1) {
1804 vcn = pos >> vol->cluster_size_bits; 1804 vcn = pos >> vol->cluster_size_bits;
@@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1832 if (lcn == LCN_HOLE) { 1832 if (lcn == LCN_HOLE) {
1833 start_idx = (pos & ~(s64) 1833 start_idx = (pos & ~(s64)
1834 vol->cluster_size_mask) 1834 vol->cluster_size_mask)
1835 >> PAGE_CACHE_SHIFT; 1835 >> PAGE_SHIFT;
1836 bytes = vol->cluster_size - (pos & 1836 bytes = vol->cluster_size - (pos &
1837 vol->cluster_size_mask); 1837 vol->cluster_size_mask);
1838 do_pages = nr_pages; 1838 do_pages = nr_pages;
@@ -1871,12 +1871,12 @@ again:
1871 if (unlikely(status)) { 1871 if (unlikely(status)) {
1872 do { 1872 do {
1873 unlock_page(pages[--do_pages]); 1873 unlock_page(pages[--do_pages]);
1874 page_cache_release(pages[do_pages]); 1874 put_page(pages[do_pages]);
1875 } while (do_pages); 1875 } while (do_pages);
1876 break; 1876 break;
1877 } 1877 }
1878 } 1878 }
1879 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; 1879 u = (pos >> PAGE_SHIFT) - pages[0]->index;
1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, 1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
1881 i, bytes); 1881 i, bytes);
1882 ntfs_flush_dcache_pages(pages + u, do_pages - u); 1882 ntfs_flush_dcache_pages(pages + u, do_pages - u);
@@ -1889,7 +1889,7 @@ again:
1889 } 1889 }
1890 do { 1890 do {
1891 unlock_page(pages[--do_pages]); 1891 unlock_page(pages[--do_pages]);
1892 page_cache_release(pages[do_pages]); 1892 put_page(pages[do_pages]);
1893 } while (do_pages); 1893 } while (do_pages);
1894 if (unlikely(status < 0)) 1894 if (unlikely(status < 0))
1895 break; 1895 break;
@@ -1921,7 +1921,7 @@ again:
1921 } 1921 }
1922 } while (iov_iter_count(i)); 1922 } while (iov_iter_count(i));
1923 if (cached_page) 1923 if (cached_page)
1924 page_cache_release(cached_page); 1924 put_page(cached_page);
1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", 1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
1926 written ? "written" : "status", (unsigned long)written, 1926 written ? "written" : "status", (unsigned long)written,
1927 (long)status); 1927 (long)status);
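ntfs_perform_write() above clamps every iteration to the end of the current page: ofs = pos & ~PAGE_MASK and bytes = PAGE_SIZE - ofs, so no single copy crosses a page boundary. A quick userspace check of the clamp, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t pos = 7 * 4096 + 4000;      /* 96 bytes before a boundary */
        unsigned ofs = pos & ~PAGE_MASK;     /* 4000 */
        unsigned bytes = PAGE_SIZE - ofs;    /* 96 */

        printf("ofs=%u bytes=%u\n", ofs, bytes);
        return 0;
}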
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 096c135691ae..0d645f357930 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,11 +272,11 @@ done:
272 descend_into_child_node: 272 descend_into_child_node:
273 /* 273 /*
274 * Convert vcn to index into the index allocation attribute in units 274 * Convert vcn to index into the index allocation attribute in units
275 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 275 * of PAGE_SIZE and map the page cache page, reading it from
276 * disk if necessary. 276 * disk if necessary.
277 */ 277 */
278 page = ntfs_map_page(ia_mapping, vcn << 278 page = ntfs_map_page(ia_mapping, vcn <<
279 idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 279 idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
280 if (IS_ERR(page)) { 280 if (IS_ERR(page)) {
281 ntfs_error(sb, "Failed to map index page, error %ld.", 281 ntfs_error(sb, "Failed to map index page, error %ld.",
282 -PTR_ERR(page)); 282 -PTR_ERR(page));
@@ -288,9 +288,9 @@ descend_into_child_node:
288 fast_descend_into_child_node: 288 fast_descend_into_child_node:
289 /* Get to the index allocation block. */ 289 /* Get to the index allocation block. */
290 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 290 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
291 idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 291 idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
292 /* Bounds checks. */ 292 /* Bounds checks. */
293 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 293 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
294 ntfs_error(sb, "Out of bounds check failed. Corrupt inode " 294 ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
295 "0x%lx or driver bug.", idx_ni->mft_no); 295 "0x%lx or driver bug.", idx_ni->mft_no);
296 goto unm_err_out; 296 goto unm_err_out;
@@ -323,7 +323,7 @@ fast_descend_into_child_node:
323 goto unm_err_out; 323 goto unm_err_out;
324 } 324 }
325 index_end = (u8*)ia + idx_ni->itype.index.block_size; 325 index_end = (u8*)ia + idx_ni->itype.index.block_size;
326 if (index_end > kaddr + PAGE_CACHE_SIZE) { 326 if (index_end > kaddr + PAGE_SIZE) {
327 ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx " 327 ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
328 "crosses page boundary. Impossible! Cannot " 328 "crosses page boundary. Impossible! Cannot "
329 "access! This is probably a bug in the " 329 "access! This is probably a bug in the "
@@ -427,9 +427,9 @@ ia_done:
427 * the mapped page. 427 * the mapped page.
428 */ 428 */
429 if (old_vcn << vol->cluster_size_bits >> 429 if (old_vcn << vol->cluster_size_bits >>
430 PAGE_CACHE_SHIFT == vcn << 430 PAGE_SHIFT == vcn <<
431 vol->cluster_size_bits >> 431 vol->cluster_size_bits >>
432 PAGE_CACHE_SHIFT) 432 PAGE_SHIFT)
433 goto fast_descend_into_child_node; 433 goto fast_descend_into_child_node;
434 unlock_page(page); 434 unlock_page(page);
435 ntfs_unmap_page(page); 435 ntfs_unmap_page(page);
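Both the directory and index lookups convert a VCN to a page cache index with vcn << vcn_size_bits >> PAGE_SHIFT and locate the block inside the page with (vcn << vcn_size_bits) & ~PAGE_MASK. Worked numbers in userspace; the 512-byte VCN size (vcn_size_bits = 9) and 4 KiB pages are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned vcn_size_bits = 9;     /* 512-byte VCNs, an assumption */
        uint64_t vcn = 10;              /* byte offset 5120 */

        uint64_t page = vcn << vcn_size_bits >> PAGE_SHIFT;     /* 1 */
        uint64_t ofs  = (vcn << vcn_size_bits) & ~PAGE_MASK;    /* 1024 */

        printf("page=%llu ofs=%llu\n",
               (unsigned long long)page, (unsigned long long)ofs);
        return 0;
}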
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d284f07eda77..f40972d6df90 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -868,12 +868,12 @@ skip_attr_list_load:
868 ni->itype.index.block_size); 868 ni->itype.index.block_size);
869 goto unm_err_out; 869 goto unm_err_out;
870 } 870 }
871 if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { 871 if (ni->itype.index.block_size > PAGE_SIZE) {
872 ntfs_error(vi->i_sb, "Index block size (%u) > " 872 ntfs_error(vi->i_sb, "Index block size (%u) > "
873 "PAGE_CACHE_SIZE (%ld) is not " 873 "PAGE_SIZE (%ld) is not "
874 "supported. Sorry.", 874 "supported. Sorry.",
875 ni->itype.index.block_size, 875 ni->itype.index.block_size,
876 PAGE_CACHE_SIZE); 876 PAGE_SIZE);
877 err = -EOPNOTSUPP; 877 err = -EOPNOTSUPP;
878 goto unm_err_out; 878 goto unm_err_out;
879 } 879 }
@@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
1585 "two.", ni->itype.index.block_size); 1585 "two.", ni->itype.index.block_size);
1586 goto unm_err_out; 1586 goto unm_err_out;
1587 } 1587 }
1588 if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { 1588 if (ni->itype.index.block_size > PAGE_SIZE) {
1589 ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE " 1589 ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
1590 "(%ld) is not supported. Sorry.", 1590 "(%ld) is not supported. Sorry.",
1591 ni->itype.index.block_size, PAGE_CACHE_SIZE); 1591 ni->itype.index.block_size, PAGE_SIZE);
1592 err = -EOPNOTSUPP; 1592 err = -EOPNOTSUPP;
1593 goto unm_err_out; 1593 goto unm_err_out;
1594 } 1594 }
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 1711b710b641..27a24a42f712 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
283 ntfs_unmap_page(page); 283 ntfs_unmap_page(page);
284 } 284 }
285 page = ntfs_map_page(mapping, last_read_pos >> 285 page = ntfs_map_page(mapping, last_read_pos >>
286 PAGE_CACHE_SHIFT); 286 PAGE_SHIFT);
287 if (IS_ERR(page)) { 287 if (IS_ERR(page)) {
288 err = PTR_ERR(page); 288 err = PTR_ERR(page);
289 ntfs_error(vol->sb, "Failed to map page."); 289 ntfs_error(vol->sb, "Failed to map page.");
290 goto out; 290 goto out;
291 } 291 }
292 buf_size = last_read_pos & ~PAGE_CACHE_MASK; 292 buf_size = last_read_pos & ~PAGE_MASK;
293 buf = page_address(page) + buf_size; 293 buf = page_address(page) + buf_size;
294 buf_size = PAGE_CACHE_SIZE - buf_size; 294 buf_size = PAGE_SIZE - buf_size;
295 if (unlikely(last_read_pos + buf_size > i_size)) 295 if (unlikely(last_read_pos + buf_size > i_size))
296 buf_size = i_size - last_read_pos; 296 buf_size = i_size - last_read_pos;
297 buf_size <<= 3; 297 buf_size <<= 3;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index c71de292c5ad..9d71213ca81e 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
381 * completely inside @rp, just copy it from there. Otherwise map all 381 * completely inside @rp, just copy it from there. Otherwise map all
382 * the required pages and copy the data from them. 382 * the required pages and copy the data from them.
383 */ 383 */
384 size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK); 384 size = PAGE_SIZE - (pos & ~PAGE_MASK);
385 if (size >= le32_to_cpu(rp->system_page_size)) { 385 if (size >= le32_to_cpu(rp->system_page_size)) {
386 memcpy(trp, rp, le32_to_cpu(rp->system_page_size)); 386 memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
387 } else { 387 } else {
@@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
394 /* Copy the remaining data one page at a time. */ 394 /* Copy the remaining data one page at a time. */
395 have_read = size; 395 have_read = size;
396 to_read = le32_to_cpu(rp->system_page_size) - size; 396 to_read = le32_to_cpu(rp->system_page_size) - size;
397 idx = (pos + size) >> PAGE_CACHE_SHIFT; 397 idx = (pos + size) >> PAGE_SHIFT;
398 BUG_ON((pos + size) & ~PAGE_CACHE_MASK); 398 BUG_ON((pos + size) & ~PAGE_MASK);
399 do { 399 do {
400 page = ntfs_map_page(vi->i_mapping, idx); 400 page = ntfs_map_page(vi->i_mapping, idx);
401 if (IS_ERR(page)) { 401 if (IS_ERR(page)) {
@@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
406 err = -EIO; 406 err = -EIO;
407 goto err_out; 407 goto err_out;
408 } 408 }
409 size = min_t(int, to_read, PAGE_CACHE_SIZE); 409 size = min_t(int, to_read, PAGE_SIZE);
410 memcpy((u8*)trp + have_read, page_address(page), size); 410 memcpy((u8*)trp + have_read, page_address(page), size);
411 ntfs_unmap_page(page); 411 ntfs_unmap_page(page);
412 have_read += size; 412 have_read += size;
@@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
509 * log page size if the page cache size is between the default log page 509 * log page size if the page cache size is between the default log page
510 * size and twice that. 510 * size and twice that.
511 */ 511 */
512 if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <= 512 if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
513 DefaultLogPageSize * 2) 513 DefaultLogPageSize * 2)
514 log_page_size = DefaultLogPageSize; 514 log_page_size = DefaultLogPageSize;
515 else 515 else
516 log_page_size = PAGE_CACHE_SIZE; 516 log_page_size = PAGE_SIZE;
517 log_page_mask = log_page_size - 1; 517 log_page_mask = log_page_size - 1;
518 /* 518 /*
519 * Use ntfs_ffs() instead of ffs() to enable the compiler to 519 * Use ntfs_ffs() instead of ffs() to enable the compiler to
@@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
539 * to be empty. 539 * to be empty.
540 */ 540 */
541 for (pos = 0; pos < size; pos <<= 1) { 541 for (pos = 0; pos < size; pos <<= 1) {
542 pgoff_t idx = pos >> PAGE_CACHE_SHIFT; 542 pgoff_t idx = pos >> PAGE_SHIFT;
543 if (!page || page->index != idx) { 543 if (!page || page->index != idx) {
544 if (page) 544 if (page)
545 ntfs_unmap_page(page); 545 ntfs_unmap_page(page);
@@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
550 goto err_out; 550 goto err_out;
551 } 551 }
552 } 552 }
553 kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK); 553 kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
554 /* 554 /*
555 * A non-empty block means the logfile is not empty while an 555 * A non-empty block means the logfile is not empty while an
556 * empty block after a non-empty block has been encountered 556 * empty block after a non-empty block has been encountered
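ntfs_check_and_load_restart_page() above may have to reassemble a restart page that straddles page cache pages: the first chunk is PAGE_SIZE - (pos & ~PAGE_MASK) bytes, the remainder is copied one page at a time. A minimal userspace sketch of that loop shape, assuming 4 KiB pages; map_page() is an illustrative stand-in for ntfs_map_page(), and the backing array stands in for the page cache:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned char backing[8 * 4096];    /* stands in for the page cache */

/* Illustrative stand-in for ntfs_map_page(): returns one whole page. */
static unsigned char *map_page(uint64_t idx)
{
        return backing + (idx << PAGE_SHIFT);
}

int main(void)
{
        uint64_t pos = 4096 + 3000;       /* restart page starts mid-page */
        unsigned sys_page = 4096;         /* rp->system_page_size, say */
        unsigned char out[4096];
        unsigned size, have, to_read;
        uint64_t idx;

        size = PAGE_SIZE - (pos & ~PAGE_MASK);    /* 1096: rest of this page */
        if (size > sys_page)
                size = sys_page;
        memcpy(out, map_page(pos >> PAGE_SHIFT) + (pos & ~PAGE_MASK), size);

        have = size;
        to_read = sys_page - size;
        idx = (pos + size) >> PAGE_SHIFT;         /* now page aligned */
        while (to_read) {
                size = to_read < PAGE_SIZE ? to_read : PAGE_SIZE;
                memcpy(out + have, map_page(idx++), size);
                have += size;
                to_read -= size;
        }
        printf("copied %u bytes\n", have);        /* copied 4096 bytes */
        return 0;
}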
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 3014a36a255b..37b2501caaa4 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
61 * here if the volume was that big... 61 * here if the volume was that big...
62 */ 62 */
63 index = (u64)ni->mft_no << vol->mft_record_size_bits >> 63 index = (u64)ni->mft_no << vol->mft_record_size_bits >>
64 PAGE_CACHE_SHIFT; 64 PAGE_SHIFT;
65 ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 65 ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
66 66
67 i_size = i_size_read(mft_vi); 67 i_size = i_size_read(mft_vi);
68 /* The maximum valid index into the page cache for $MFT's data. */ 68 /* The maximum valid index into the page cache for $MFT's data. */
69 end_index = i_size >> PAGE_CACHE_SHIFT; 69 end_index = i_size >> PAGE_SHIFT;
70 70
71 /* If the wanted index is out of bounds the mft record doesn't exist. */ 71 /* If the wanted index is out of bounds the mft record doesn't exist. */
72 if (unlikely(index >= end_index)) { 72 if (unlikely(index >= end_index)) {
73 if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs + 73 if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
74 vol->mft_record_size) { 74 vol->mft_record_size) {
75 page = ERR_PTR(-ENOENT); 75 page = ERR_PTR(-ENOENT);
76 ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, " 76 ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
@@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
487 } 487 }
488 /* Get the page containing the mirror copy of the mft record @m. */ 488 /* Get the page containing the mirror copy of the mft record @m. */
489 page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >> 489 page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
490 (PAGE_CACHE_SHIFT - vol->mft_record_size_bits)); 490 (PAGE_SHIFT - vol->mft_record_size_bits));
491 if (IS_ERR(page)) { 491 if (IS_ERR(page)) {
492 ntfs_error(vol->sb, "Failed to map mft mirror page."); 492 ntfs_error(vol->sb, "Failed to map mft mirror page.");
493 err = PTR_ERR(page); 493 err = PTR_ERR(page);
@@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
497 BUG_ON(!PageUptodate(page)); 497 BUG_ON(!PageUptodate(page));
498 ClearPageUptodate(page); 498 ClearPageUptodate(page);
499 /* Offset of the mft mirror record inside the page. */ 499 /* Offset of the mft mirror record inside the page. */
500 page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 500 page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
501 /* The address in the page of the mirror copy of the mft record @m. */ 501 /* The address in the page of the mirror copy of the mft record @m. */
502 kmirr = page_address(page) + page_ofs; 502 kmirr = page_address(page) + page_ofs;
503 /* Copy the mst protected mft record to the mirror. */ 503 /* Copy the mst protected mft record to the mirror. */
@@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
1178 for (; pass <= 2;) { 1178 for (; pass <= 2;) {
1179 /* Cap size to pass_end. */ 1179 /* Cap size to pass_end. */
1180 ofs = data_pos >> 3; 1180 ofs = data_pos >> 3;
1181 page_ofs = ofs & ~PAGE_CACHE_MASK; 1181 page_ofs = ofs & ~PAGE_MASK;
1182 size = PAGE_CACHE_SIZE - page_ofs; 1182 size = PAGE_SIZE - page_ofs;
1183 ll = ((pass_end + 7) >> 3) - ofs; 1183 ll = ((pass_end + 7) >> 3) - ofs;
1184 if (size > ll) 1184 if (size > ll)
1185 size = ll; 1185 size = ll;
@@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
1190 */ 1190 */
1191 if (size) { 1191 if (size) {
1192 page = ntfs_map_page(mftbmp_mapping, 1192 page = ntfs_map_page(mftbmp_mapping,
1193 ofs >> PAGE_CACHE_SHIFT); 1193 ofs >> PAGE_SHIFT);
1194 if (IS_ERR(page)) { 1194 if (IS_ERR(page)) {
1195 ntfs_error(vol->sb, "Failed to read mft " 1195 ntfs_error(vol->sb, "Failed to read mft "
1196 "bitmap, aborting."); 1196 "bitmap, aborting.");
@@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
1328 */ 1328 */
1329 ll = lcn >> 3; 1329 ll = lcn >> 3;
1330 page = ntfs_map_page(vol->lcnbmp_ino->i_mapping, 1330 page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
1331 ll >> PAGE_CACHE_SHIFT); 1331 ll >> PAGE_SHIFT);
1332 if (IS_ERR(page)) { 1332 if (IS_ERR(page)) {
1333 up_write(&mftbmp_ni->runlist.lock); 1333 up_write(&mftbmp_ni->runlist.lock);
1334 ntfs_error(vol->sb, "Failed to read from lcn bitmap."); 1334 ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
1335 return PTR_ERR(page); 1335 return PTR_ERR(page);
1336 } 1336 }
1337 b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK); 1337 b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
1338 tb = 1 << (lcn & 7ull); 1338 tb = 1 << (lcn & 7ull);
1339 down_write(&vol->lcnbmp_lock); 1339 down_write(&vol->lcnbmp_lock);
1340 if (*b != 0xff && !(*b & tb)) { 1340 if (*b != 0xff && !(*b & tb)) {
@@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
2103 * The index into the page cache and the offset within the page cache 2103 * The index into the page cache and the offset within the page cache
2104 * page of the wanted mft record. 2104 * page of the wanted mft record.
2105 */ 2105 */
2106 index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; 2106 index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
2107 ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 2107 ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
2108 /* The maximum valid index into the page cache for $MFT's data. */ 2108 /* The maximum valid index into the page cache for $MFT's data. */
2109 i_size = i_size_read(mft_vi); 2109 i_size = i_size_read(mft_vi);
2110 end_index = i_size >> PAGE_CACHE_SHIFT; 2110 end_index = i_size >> PAGE_SHIFT;
2111 if (unlikely(index >= end_index)) { 2111 if (unlikely(index >= end_index)) {
2112 if (unlikely(index > end_index || ofs + vol->mft_record_size >= 2112 if (unlikely(index > end_index || ofs + vol->mft_record_size >=
2113 (i_size & ~PAGE_CACHE_MASK))) { 2113 (i_size & ~PAGE_MASK))) {
2114 ntfs_error(vol->sb, "Tried to format non-existing mft " 2114 ntfs_error(vol->sb, "Tried to format non-existing mft "
2115 "record 0x%llx.", (long long)mft_no); 2115 "record 0x%llx.", (long long)mft_no);
2116 return -ENOENT; 2116 return -ENOENT;
@@ -2515,8 +2515,8 @@ mft_rec_already_initialized:
2515 * We now have allocated and initialized the mft record. Calculate the 2515 * We now have allocated and initialized the mft record. Calculate the
2516 * index of and the offset within the page cache page the record is in. 2516 * index of and the offset within the page cache page the record is in.
2517 */ 2517 */
2518 index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; 2518 index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
2519 ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 2519 ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
2520 /* Read, map, and pin the page containing the mft record. */ 2520 /* Read, map, and pin the page containing the mft record. */
2521 page = ntfs_map_page(vol->mft_ino->i_mapping, index); 2521 page = ntfs_map_page(vol->mft_ino->i_mapping, index);
2522 if (IS_ERR(page)) { 2522 if (IS_ERR(page)) {
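map_mft_record_page() and ntfs_mft_record_format() above derive the page cache index and in-page offset of an mft record from the record size. Worked numbers, assuming the common 1 KiB records (mft_record_size_bits = 10) and 4 KiB pages, i.e. four records per page:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned mft_record_size_bits = 10;    /* 1 KiB records assumed */
        uint64_t mft_no = 10;

        uint64_t index = mft_no << mft_record_size_bits >> PAGE_SHIFT;
        unsigned ofs = (mft_no << mft_record_size_bits) & ~PAGE_MASK;

        /* record 10 lives in page 2 at byte 2048 */
        printf("index=%llu ofs=%u\n", (unsigned long long)index, ofs);
        return 0;
}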
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index c581e26a350d..12de47b96ca9 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -43,7 +43,7 @@ typedef enum {
43 NTFS_MAX_NAME_LEN = 255, 43 NTFS_MAX_NAME_LEN = 255,
44 NTFS_MAX_ATTR_NAME_LEN = 255, 44 NTFS_MAX_ATTR_NAME_LEN = 255,
45 NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */ 45 NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */
46 NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE, 46 NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE,
47} NTFS_CONSTANTS; 47} NTFS_CONSTANTS;
48 48
49/* Global variables. */ 49/* Global variables. */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1b38abdaa3ed..ecb49870a680 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
823 ntfs_debug("vol->mft_record_size_bits = %i (0x%x)", 823 ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
824 vol->mft_record_size_bits, vol->mft_record_size_bits); 824 vol->mft_record_size_bits, vol->mft_record_size_bits);
825 /* 825 /*
826 * We cannot support mft record sizes above the PAGE_CACHE_SIZE since 826 * We cannot support mft record sizes above the PAGE_SIZE since
827 * we store $MFT/$DATA, the table of mft records in the page cache. 827 * we store $MFT/$DATA, the table of mft records in the page cache.
828 */ 828 */
829 if (vol->mft_record_size > PAGE_CACHE_SIZE) { 829 if (vol->mft_record_size > PAGE_SIZE) {
830 ntfs_error(vol->sb, "Mft record size (%i) exceeds the " 830 ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
831 "PAGE_CACHE_SIZE on your system (%lu). " 831 "PAGE_SIZE on your system (%lu). "
832 "This is not supported. Sorry.", 832 "This is not supported. Sorry.",
833 vol->mft_record_size, PAGE_CACHE_SIZE); 833 vol->mft_record_size, PAGE_SIZE);
834 return false; 834 return false;
835 } 835 }
836 /* We cannot support mft record sizes below the sector size. */ 836 /* We cannot support mft record sizes below the sector size. */
@@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol)
1096 1096
1097 ntfs_debug("Entering."); 1097 ntfs_debug("Entering.");
1098 /* Compare contents of $MFT and $MFTMirr. */ 1098 /* Compare contents of $MFT and $MFTMirr. */
1099 mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size; 1099 mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
1100 BUG_ON(!mrecs_per_page); 1100 BUG_ON(!mrecs_per_page);
1101 BUG_ON(!vol->mftmirr_size); 1101 BUG_ON(!vol->mftmirr_size);
1102 mft_page = mirr_page = NULL; 1102 mft_page = mirr_page = NULL;
@@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol)
1615 if (!vol->attrdef) 1615 if (!vol->attrdef)
1616 goto iput_failed; 1616 goto iput_failed;
1617 index = 0; 1617 index = 0;
1618 max_index = i_size >> PAGE_CACHE_SHIFT; 1618 max_index = i_size >> PAGE_SHIFT;
1619 size = PAGE_CACHE_SIZE; 1619 size = PAGE_SIZE;
1620 while (index < max_index) { 1620 while (index < max_index) {
1621 /* Read the attrdef table and copy it into the linear buffer. */ 1621 /* Read the attrdef table and copy it into the linear buffer. */
1622 read_partial_attrdef_page: 1622 read_partial_attrdef_page:
1623 page = ntfs_map_page(ino->i_mapping, index); 1623 page = ntfs_map_page(ino->i_mapping, index);
1624 if (IS_ERR(page)) 1624 if (IS_ERR(page))
1625 goto free_iput_failed; 1625 goto free_iput_failed;
1626 memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT), 1626 memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
1627 page_address(page), size); 1627 page_address(page), size);
1628 ntfs_unmap_page(page); 1628 ntfs_unmap_page(page);
1629 }; 1629 };
1630 if (size == PAGE_CACHE_SIZE) { 1630 if (size == PAGE_SIZE) {
1631 size = i_size & ~PAGE_CACHE_MASK; 1631 size = i_size & ~PAGE_MASK;
1632 if (size) 1632 if (size)
1633 goto read_partial_attrdef_page; 1633 goto read_partial_attrdef_page;
1634 } 1634 }
@@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol)
1684 if (!vol->upcase) 1684 if (!vol->upcase)
1685 goto iput_upcase_failed; 1685 goto iput_upcase_failed;
1686 index = 0; 1686 index = 0;
1687 max_index = i_size >> PAGE_CACHE_SHIFT; 1687 max_index = i_size >> PAGE_SHIFT;
1688 size = PAGE_CACHE_SIZE; 1688 size = PAGE_SIZE;
1689 while (index < max_index) { 1689 while (index < max_index) {
1690 /* Read the upcase table and copy it into the linear buffer. */ 1690 /* Read the upcase table and copy it into the linear buffer. */
1691 read_partial_upcase_page: 1691 read_partial_upcase_page:
1692 page = ntfs_map_page(ino->i_mapping, index); 1692 page = ntfs_map_page(ino->i_mapping, index);
1693 if (IS_ERR(page)) 1693 if (IS_ERR(page))
1694 goto iput_upcase_failed; 1694 goto iput_upcase_failed;
1695 memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT), 1695 memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
1696 page_address(page), size); 1696 page_address(page), size);
1697 ntfs_unmap_page(page); 1697 ntfs_unmap_page(page);
1698 }; 1698 };
1699 if (size == PAGE_CACHE_SIZE) { 1699 if (size == PAGE_SIZE) {
1700 size = i_size & ~PAGE_CACHE_MASK; 1700 size = i_size & ~PAGE_MASK;
1701 if (size) 1701 if (size)
1702 goto read_partial_upcase_page; 1702 goto read_partial_upcase_page;
1703 } 1703 }
@@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2471 down_read(&vol->lcnbmp_lock); 2471 down_read(&vol->lcnbmp_lock);
2472 /* 2472 /*
2473 * Convert the number of bits into bytes rounded up, then convert into 2473 * Convert the number of bits into bytes rounded up, then convert into
2474 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one 2474 * multiples of PAGE_SIZE, rounding up so that if we have one
2475 * full and one partial page max_index = 2. 2475 * full and one partial page max_index = 2.
2476 */ 2476 */
2477 max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> 2477 max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
2478 PAGE_CACHE_SHIFT; 2478 PAGE_SHIFT;
2479 /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ 2479 /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
2480 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", 2480 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
2481 max_index, PAGE_CACHE_SIZE / 4); 2481 max_index, PAGE_SIZE / 4);
2482 for (index = 0; index < max_index; index++) { 2482 for (index = 0; index < max_index; index++) {
2483 unsigned long *kaddr; 2483 unsigned long *kaddr;
2484 2484
@@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2491 if (IS_ERR(page)) { 2491 if (IS_ERR(page)) {
2492 ntfs_debug("read_mapping_page() error. Skipping " 2492 ntfs_debug("read_mapping_page() error. Skipping "
2493 "page (index 0x%lx).", index); 2493 "page (index 0x%lx).", index);
2494 nr_free -= PAGE_CACHE_SIZE * 8; 2494 nr_free -= PAGE_SIZE * 8;
2495 continue; 2495 continue;
2496 } 2496 }
2497 kaddr = kmap_atomic(page); 2497 kaddr = kmap_atomic(page);
@@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2503 * ntfs_readpage(). 2503 * ntfs_readpage().
2504 */ 2504 */
2505 nr_free -= bitmap_weight(kaddr, 2505 nr_free -= bitmap_weight(kaddr,
2506 PAGE_CACHE_SIZE * BITS_PER_BYTE); 2506 PAGE_SIZE * BITS_PER_BYTE);
2507 kunmap_atomic(kaddr); 2507 kunmap_atomic(kaddr);
2508 page_cache_release(page); 2508 put_page(page);
2509 } 2509 }
2510 ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1); 2510 ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
2511 /* 2511 /*
@@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2547 pgoff_t index; 2547 pgoff_t index;
2548 2548
2549 ntfs_debug("Entering."); 2549 ntfs_debug("Entering.");
2550 /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ 2550 /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
2551 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " 2551 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
2552 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); 2552 "0x%lx.", max_index, PAGE_SIZE / 4);
2553 for (index = 0; index < max_index; index++) { 2553 for (index = 0; index < max_index; index++) {
2554 unsigned long *kaddr; 2554 unsigned long *kaddr;
2555 2555
@@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2562 if (IS_ERR(page)) { 2562 if (IS_ERR(page)) {
2563 ntfs_debug("read_mapping_page() error. Skipping " 2563 ntfs_debug("read_mapping_page() error. Skipping "
2564 "page (index 0x%lx).", index); 2564 "page (index 0x%lx).", index);
2565 nr_free -= PAGE_CACHE_SIZE * 8; 2565 nr_free -= PAGE_SIZE * 8;
2566 continue; 2566 continue;
2567 } 2567 }
2568 kaddr = kmap_atomic(page); 2568 kaddr = kmap_atomic(page);
@@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2574 * ntfs_readpage(). 2574 * ntfs_readpage().
2575 */ 2575 */
2576 nr_free -= bitmap_weight(kaddr, 2576 nr_free -= bitmap_weight(kaddr,
2577 PAGE_CACHE_SIZE * BITS_PER_BYTE); 2577 PAGE_SIZE * BITS_PER_BYTE);
2578 kunmap_atomic(kaddr); 2578 kunmap_atomic(kaddr);
2579 page_cache_release(page); 2579 put_page(page);
2580 } 2580 }
2581 ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.", 2581 ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
2582 index - 1); 2582 index - 1);
@@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
2618 /* Type of filesystem. */ 2618 /* Type of filesystem. */
2619 sfs->f_type = NTFS_SB_MAGIC; 2619 sfs->f_type = NTFS_SB_MAGIC;
2620 /* Optimal transfer block size. */ 2620 /* Optimal transfer block size. */
2621 sfs->f_bsize = PAGE_CACHE_SIZE; 2621 sfs->f_bsize = PAGE_SIZE;
2622 /* 2622 /*
2623 * Total data blocks in filesystem in units of f_bsize and since 2623 * Total data blocks in filesystem in units of f_bsize and since
2624 * inodes are also stored in data blocs ($MFT is a file) this is just 2624 * inodes are also stored in data blocs ($MFT is a file) this is just
2625 * the total clusters. 2625 * the total clusters.
2626 */ 2626 */
2627 sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >> 2627 sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
2628 PAGE_CACHE_SHIFT; 2628 PAGE_SHIFT;
2629 /* Free data blocks in filesystem in units of f_bsize. */ 2629 /* Free data blocks in filesystem in units of f_bsize. */
2630 size = get_nr_free_clusters(vol) << vol->cluster_size_bits >> 2630 size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
2631 PAGE_CACHE_SHIFT; 2631 PAGE_SHIFT;
2632 if (size < 0LL) 2632 if (size < 0LL)
2633 size = 0LL; 2633 size = 0LL;
2634 /* Free blocks avail to non-superuser, same as above on NTFS. */ 2634 /* Free blocks avail to non-superuser, same as above on NTFS. */
@@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
2639 size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits; 2639 size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
2640 /* 2640 /*
2641 * Convert the maximum number of set bits into bytes rounded up, then 2641 * Convert the maximum number of set bits into bytes rounded up, then
2642 * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we 2642 * convert into multiples of PAGE_SIZE, rounding up so that if we
2643 * have one full and one partial page max_index = 2. 2643 * have one full and one partial page max_index = 2.
2644 */ 2644 */
2645 max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits) 2645 max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
2646 + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 2646 + 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
2647 read_unlock_irqrestore(&mft_ni->size_lock, flags); 2647 read_unlock_irqrestore(&mft_ni->size_lock, flags);
2648 /* Number of inodes in filesystem (at this point in time). */ 2648 /* Number of inodes in filesystem (at this point in time). */
2649 sfs->f_files = size; 2649 sfs->f_files = size;
@@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2765 if (!parse_options(vol, (char*)opt)) 2765 if (!parse_options(vol, (char*)opt))
2766 goto err_out_now; 2766 goto err_out_now;
2767 2767
2768 /* We support sector sizes up to the PAGE_CACHE_SIZE. */ 2768 /* We support sector sizes up to the PAGE_SIZE. */
2769 if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2769 if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
2770 if (!silent) 2770 if (!silent)
2771 ntfs_error(sb, "Device has unsupported sector size " 2771 ntfs_error(sb, "Device has unsupported sector size "
2772 "(%i). The maximum supported sector " 2772 "(%i). The maximum supported sector "
2773 "size on this architecture is %lu " 2773 "size on this architecture is %lu "
2774 "bytes.", 2774 "bytes.",
2775 bdev_logical_block_size(sb->s_bdev), 2775 bdev_logical_block_size(sb->s_bdev),
2776 PAGE_CACHE_SIZE); 2776 PAGE_SIZE);
2777 goto err_out_now; 2777 goto err_out_now;
2778 } 2778 }
2779 /* 2779 /*
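get_nr_free_clusters() above rounds the cluster bitmap up to whole page cache pages and then subtracts the set bits page by page with bitmap_weight(). The rounding, checked in userspace for assumed 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t nr_clusters = 1000000;    /* example volume size */

        /* bits -> bytes rounded up -> pages rounded up */
        uint64_t max_index = (((nr_clusters + 7) >> 3) + PAGE_SIZE - 1)
                        >> PAGE_SHIFT;

        /* 125000 bytes of bitmap need 31 pages (31 * 4096 = 126976) */
        printf("max_index=%llu\n", (unsigned long long)max_index);
        return 0;
}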
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 70907d638b60..e361d1a0ca09 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6671,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
6671 { 6671 {
6672 int i; 6672 int i;
6673 struct page *page; 6673 struct page *page;
6674 unsigned int from, to = PAGE_CACHE_SIZE; 6674 unsigned int from, to = PAGE_SIZE;
6675 struct super_block *sb = inode->i_sb; 6675 struct super_block *sb = inode->i_sb;
6676 6676
6677 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); 6677 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
@@ -6679,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
6679 if (numpages == 0) 6679 if (numpages == 0)
6680 goto out; 6680 goto out;
6681 6681
6682 to = PAGE_CACHE_SIZE; 6682 to = PAGE_SIZE;
6683 for(i = 0; i < numpages; i++) { 6683 for(i = 0; i < numpages; i++) {
6684 page = pages[i]; 6684 page = pages[i];
6685 6685
6686 from = start & (PAGE_CACHE_SIZE - 1); 6686 from = start & (PAGE_SIZE - 1);
6687 if ((end >> PAGE_CACHE_SHIFT) == page->index) 6687 if ((end >> PAGE_SHIFT) == page->index)
6688 to = end & (PAGE_CACHE_SIZE - 1); 6688 to = end & (PAGE_SIZE - 1);
6689 6689
6690 BUG_ON(from > PAGE_CACHE_SIZE); 6690 BUG_ON(from > PAGE_SIZE);
6691 BUG_ON(to > PAGE_CACHE_SIZE); 6691 BUG_ON(to > PAGE_SIZE);
6692 6692
6693 ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, 6693 ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
6694 &phys); 6694 &phys);
6695 6695
6696 start = (page->index + 1) << PAGE_CACHE_SHIFT; 6696 start = (page->index + 1) << PAGE_SHIFT;
6697 } 6697 }
6698 out: 6698 out:
6699 if (pages) 6699 if (pages)
@@ -6712,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
6712 6712
6713 numpages = 0; 6713 numpages = 0;
6714 last_page_bytes = PAGE_ALIGN(end); 6714 last_page_bytes = PAGE_ALIGN(end);
6715 index = start >> PAGE_CACHE_SHIFT; 6715 index = start >> PAGE_SHIFT;
6716 do { 6716 do {
6717 pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); 6717 pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
6718 if (!pages[numpages]) { 6718 if (!pages[numpages]) {
@@ -6723,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
6723 6723
6724 numpages++; 6724 numpages++;
6725 index++; 6725 index++;
6726 } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); 6726 } while (index < (last_page_bytes >> PAGE_SHIFT));
6727 6727
6728out: 6728out:
6729 if (ret != 0) { 6729 if (ret != 0) {
@@ -6950,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6950 * to do that now. 6950 * to do that now.
6951 */ 6951 */
6952 if (!ocfs2_sparse_alloc(osb) && 6952 if (!ocfs2_sparse_alloc(osb) &&
6953 PAGE_CACHE_SIZE < osb->s_clustersize) 6953 PAGE_SIZE < osb->s_clustersize)
6954 end = PAGE_CACHE_SIZE; 6954 end = PAGE_SIZE;
6955 6955
6956 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); 6956 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
6957 if (ret) { 6957 if (ret) {
@@ -6971,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6971 goto out_unlock; 6971 goto out_unlock;
6972 } 6972 }
6973 6973
6974 page_end = PAGE_CACHE_SIZE; 6974 page_end = PAGE_SIZE;
6975 if (PAGE_CACHE_SIZE > osb->s_clustersize) 6975 if (PAGE_SIZE > osb->s_clustersize)
6976 page_end = osb->s_clustersize; 6976 page_end = osb->s_clustersize;
6977 6977
6978 for (i = 0; i < num_pages; i++) 6978 for (i = 0; i < num_pages; i++)
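ocfs2_zero_cluster_pages() above zeroes a byte range one page at a time: 'from' is the start offset within the first page, and 'to' stays at PAGE_SIZE until the page containing 'end'. A userspace sketch of the per-page bounds, assuming 4 KiB pages; the loop condition is a simplification of the kernel's numpages-driven loop:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t start = 5000, end = 13000;    /* zero [5000, 13000) */
        unsigned to = PAGE_SIZE;
        uint64_t idx;

        for (idx = start >> PAGE_SHIFT; idx <= (end - 1) >> PAGE_SHIFT; idx++) {
                unsigned from = start & (PAGE_SIZE - 1);

                if ((end >> PAGE_SHIFT) == idx)
                        to = end & (PAGE_SIZE - 1);
                /* pages 1..3: [904,4096) [0,4096) [0,712) */
                printf("page %llu: zero [%u, %u)\n",
                       (unsigned long long)idx, from, to);
                start = (idx + 1) << PAGE_SHIFT;
        }
        return 0;
}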
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1581240a7ca0..ad1577348a92 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
234 234
235 size = i_size_read(inode); 235 size = i_size_read(inode);
236 236
237 if (size > PAGE_CACHE_SIZE || 237 if (size > PAGE_SIZE ||
238 size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { 238 size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
239 ocfs2_error(inode->i_sb, 239 ocfs2_error(inode->i_sb,
240 "Inode %llu has with inline data has bad size: %Lu\n", 240 "Inode %llu has with inline data has bad size: %Lu\n",
@@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
247 if (size) 247 if (size)
248 memcpy(kaddr, di->id2.i_data.id_data, size); 248 memcpy(kaddr, di->id2.i_data.id_data, size);
249 /* Clear the remaining part of the page */ 249 /* Clear the remaining part of the page */
250 memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); 250 memset(kaddr + size, 0, PAGE_SIZE - size);
251 flush_dcache_page(page); 251 flush_dcache_page(page);
252 kunmap_atomic(kaddr); 252 kunmap_atomic(kaddr);
253 253
@@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
282{ 282{
283 struct inode *inode = page->mapping->host; 283 struct inode *inode = page->mapping->host;
284 struct ocfs2_inode_info *oi = OCFS2_I(inode); 284 struct ocfs2_inode_info *oi = OCFS2_I(inode);
285 loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; 285 loff_t start = (loff_t)page->index << PAGE_SHIFT;
286 int ret, unlock = 1; 286 int ret, unlock = 1;
287 287
288 trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, 288 trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
@@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
385 * drop out in that case as it's not worth handling here. 385 * drop out in that case as it's not worth handling here.
386 */ 386 */
387 last = list_entry(pages->prev, struct page, lru); 387 last = list_entry(pages->prev, struct page, lru);
388 start = (loff_t)last->index << PAGE_CACHE_SHIFT; 388 start = (loff_t)last->index << PAGE_SHIFT;
389 if (start >= i_size_read(inode)) 389 if (start >= i_size_read(inode))
390 goto out_unlock; 390 goto out_unlock;
391 391
@@ -511,12 +511,12 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
511 unsigned int *start, 511 unsigned int *start,
512 unsigned int *end) 512 unsigned int *end)
513{ 513{
514 unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE; 514 unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
515 515
516 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) { 516 if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
517 unsigned int cpp; 517 unsigned int cpp;
518 518
519 cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits); 519 cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
520 520
521 cluster_start = cpos % cpp; 521 cluster_start = cpos % cpp;
522 cluster_start = cluster_start << osb->s_clustersize_bits; 522 cluster_start = cluster_start << osb->s_clustersize_bits;
@@ -684,13 +684,13 @@ next_bh:
684 return ret; 684 return ret;
685} 685}
686 686
687#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE) 687#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
688#define OCFS2_MAX_CTXT_PAGES 1 688#define OCFS2_MAX_CTXT_PAGES 1
689#else 689#else
690#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE) 690#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
691#endif 691#endif
692 692
693#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE) 693#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
694 694
695struct ocfs2_unwritten_extent { 695struct ocfs2_unwritten_extent {
696 struct list_head ue_node; 696 struct list_head ue_node;
@@ -785,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
785 if (pages[i]) { 785 if (pages[i]) {
786 unlock_page(pages[i]); 786 unlock_page(pages[i]);
787 mark_page_accessed(pages[i]); 787 mark_page_accessed(pages[i]);
788 page_cache_release(pages[i]); 788 put_page(pages[i]);
789 } 789 }
790 } 790 }
791} 791}
@@ -808,7 +808,7 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
808 } 808 }
809 } 809 }
810 mark_page_accessed(wc->w_target_page); 810 mark_page_accessed(wc->w_target_page);
811 page_cache_release(wc->w_target_page); 811 put_page(wc->w_target_page);
812 } 812 }
813 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); 813 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
814} 814}
@@ -857,7 +857,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
857 wc->w_di_bh = di_bh; 857 wc->w_di_bh = di_bh;
858 wc->w_type = type; 858 wc->w_type = type;
859 859
860 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) 860 if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
861 wc->w_large_pages = 1; 861 wc->w_large_pages = 1;
862 else 862 else
863 wc->w_large_pages = 0; 863 wc->w_large_pages = 0;
@@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode,
920 loff_t user_pos, unsigned user_len) 920 loff_t user_pos, unsigned user_len)
921{ 921{
922 int i; 922 int i;
923 unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), 923 unsigned from = user_pos & (PAGE_SIZE - 1),
924 to = user_pos + user_len; 924 to = user_pos + user_len;
925 struct page *tmppage; 925 struct page *tmppage;
926 926
@@ -960,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
960 (page_offset(page) <= user_pos)); 960 (page_offset(page) <= user_pos));
961 961
962 if (page == wc->w_target_page) { 962 if (page == wc->w_target_page) {
963 map_from = user_pos & (PAGE_CACHE_SIZE - 1); 963 map_from = user_pos & (PAGE_SIZE - 1);
964 map_to = map_from + user_len; 964 map_to = map_from + user_len;
965 965
966 if (new) 966 if (new)
@@ -1034,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1034 struct inode *inode = mapping->host; 1034 struct inode *inode = mapping->host;
1035 loff_t last_byte; 1035 loff_t last_byte;
1036 1036
1037 target_index = user_pos >> PAGE_CACHE_SHIFT; 1037 target_index = user_pos >> PAGE_SHIFT;
1038 1038
1039 /* 1039 /*
1040 * Figure out how many pages we'll be manipulating here. For 1040 * Figure out how many pages we'll be manipulating here. For
@@ -1053,14 +1053,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1053 */ 1053 */
1054 last_byte = max(user_pos + user_len, i_size_read(inode)); 1054 last_byte = max(user_pos + user_len, i_size_read(inode));
1055 BUG_ON(last_byte < 1); 1055 BUG_ON(last_byte < 1);
1056 end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1; 1056 end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
1057 if ((start + wc->w_num_pages) > end_index) 1057 if ((start + wc->w_num_pages) > end_index)
1058 wc->w_num_pages = end_index - start; 1058 wc->w_num_pages = end_index - start;
1059 } else { 1059 } else {
1060 wc->w_num_pages = 1; 1060 wc->w_num_pages = 1;
1061 start = target_index; 1061 start = target_index;
1062 } 1062 }
1063 end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT; 1063 end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
1064 1064
1065 for(i = 0; i < wc->w_num_pages; i++) { 1065 for(i = 0; i < wc->w_num_pages; i++) {
1066 index = start + i; 1066 index = start + i;
@@ -1082,7 +1082,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1082 goto out; 1082 goto out;
1083 } 1083 }
1084 1084
1085 page_cache_get(mmap_page); 1085 get_page(mmap_page);
1086 wc->w_pages[i] = mmap_page; 1086 wc->w_pages[i] = mmap_page;
1087 wc->w_target_locked = true; 1087 wc->w_target_locked = true;
1088 } else if (index >= target_index && index <= end_index && 1088 } else if (index >= target_index && index <= end_index &&
@@ -1272,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1272{ 1272{
1273 struct ocfs2_write_cluster_desc *desc; 1273 struct ocfs2_write_cluster_desc *desc;
1274 1274
1275 wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1); 1275 wc->w_target_from = pos & (PAGE_SIZE - 1);
1276 wc->w_target_to = wc->w_target_from + len; 1276 wc->w_target_to = wc->w_target_from + len;
1277 1277
1278 if (alloc == 0) 1278 if (alloc == 0)
@@ -1309,7 +1309,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1309 &wc->w_target_to); 1309 &wc->w_target_to);
1310 } else { 1310 } else {
1311 wc->w_target_from = 0; 1311 wc->w_target_from = 0;
1312 wc->w_target_to = PAGE_CACHE_SIZE; 1312 wc->w_target_to = PAGE_SIZE;
1313 } 1313 }
1314} 1314}
1315 1315
@@ -1981,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
1981 struct page *page, void *fsdata) 1981 struct page *page, void *fsdata)
1982{ 1982{
1983 int i, ret; 1983 int i, ret;
1984 unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1); 1984 unsigned from, to, start = pos & (PAGE_SIZE - 1);
1985 struct inode *inode = mapping->host; 1985 struct inode *inode = mapping->host;
1986 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1986 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1987 struct ocfs2_write_ctxt *wc = fsdata; 1987 struct ocfs2_write_ctxt *wc = fsdata;
@@ -2027,8 +2027,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
2027 from = wc->w_target_from; 2027 from = wc->w_target_from;
2028 to = wc->w_target_to; 2028 to = wc->w_target_to;
2029 2029
2030 BUG_ON(from > PAGE_CACHE_SIZE || 2030 BUG_ON(from > PAGE_SIZE ||
2031 to > PAGE_CACHE_SIZE || 2031 to > PAGE_SIZE ||
2032 to < from); 2032 to < from);
2033 } else { 2033 } else {
2034 /* 2034 /*
@@ -2037,7 +2037,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
2037 * to flush their entire range. 2037 * to flush their entire range.
2038 */ 2038 */
2039 from = 0; 2039 from = 0;
2040 to = PAGE_CACHE_SIZE; 2040 to = PAGE_SIZE;
2041 } 2041 }
2042 2042
2043 if (page_has_buffers(tmppage)) { 2043 if (page_has_buffers(tmppage)) {
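Alongside the size macros, the aops.c hunks retire page_cache_get()/page_cache_release() in favour of get_page()/put_page(). Before this series the page-cache names were thin aliases, roughly as follows (a sketch of the old pagemap definitions, not quoted verbatim):

        #define page_cache_get(page)      get_page(page)
        #define page_cache_release(page)  put_page(page)

So the substitution changes no behaviour; it only drops an indirection that stopped paying its way once the page cache could no longer use a larger granule than the base page.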
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index bd15929b5f92..1934abb6b680 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
417 bio->bi_private = wc; 417 bio->bi_private = wc;
418 bio->bi_end_io = o2hb_bio_end_io; 418 bio->bi_end_io = o2hb_bio_end_io;
419 419
420 vec_start = (cs << bits) % PAGE_CACHE_SIZE; 420 vec_start = (cs << bits) % PAGE_SIZE;
421 while(cs < max_slots) { 421 while(cs < max_slots) {
422 current_page = cs / spp; 422 current_page = cs / spp;
423 page = reg->hr_slot_data[current_page]; 423 page = reg->hr_slot_data[current_page];
424 424
425 vec_len = min(PAGE_CACHE_SIZE - vec_start, 425 vec_len = min(PAGE_SIZE - vec_start,
426 (max_slots-cs) * (PAGE_CACHE_SIZE/spp) ); 426 (max_slots-cs) * (PAGE_SIZE/spp) );
427 427
428 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", 428 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
429 current_page, vec_len, vec_start); 429 current_page, vec_len, vec_start);
@@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
431 len = bio_add_page(bio, page, vec_len, vec_start); 431 len = bio_add_page(bio, page, vec_len, vec_start);
432 if (len != vec_len) break; 432 if (len != vec_len) break;
433 433
434 cs += vec_len / (PAGE_CACHE_SIZE/spp); 434 cs += vec_len / (PAGE_SIZE/spp);
435 vec_start = 0; 435 vec_start = 0;
436 } 436 }
437 437
@@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
1576 1576
1577static void o2hb_init_region_params(struct o2hb_region *reg) 1577static void o2hb_init_region_params(struct o2hb_region *reg)
1578{ 1578{
1579 reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits; 1579 reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
1580 reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; 1580 reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
1581 1581
1582 mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n", 1582 mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
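The heartbeat region packs fixed-size slots into pages, so hr_slots_per_page is simply the page size divided by the heartbeat block size. A worked example, assuming 4 KiB pages and 512-byte heartbeat blocks (both assumptions, not taken from the patch):

        #include <stdio.h>

        #define PAGE_SHIFT 12                 /* assumed: 4 KiB pages */
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        int main(void)
        {
                unsigned block_bits = 9;                     /* assumed: 512-byte slots */
                unsigned long spp = PAGE_SIZE >> block_bits; /* slots per page */

                printf("%lu heartbeat slots per page\n", spp); /* prints 8 */
                return 0;
        }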
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9aed6e202201..13719d3f35f8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
2455 2455
2456 spin_unlock(&dlm->spinlock); 2456 spin_unlock(&dlm->spinlock);
2457 2457
2458 ret = 0;
2459
2458done: 2460done:
2459 dlm_put(dlm); 2461 dlm_put(dlm);
2460 return ret; 2462 return ret;
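The dlmmaster.c hunk is a correctness fix rather than a rename: on the success path, 'ret' could still hold a value assigned during earlier validation when control reached the 'done' label, so the handler now reports success explicitly. The shape of the bug, reduced to a sketch with hypothetical names:

        int handler(int bad_input)
        {
                int ret = -EINVAL;      /* set while validating the message */

                if (bad_input)
                        goto done;      /* error path: keep -EINVAL */

                /* ... real work elided ... */
                ret = 0;                /* the added line: success returns 0 */
        done:
                return ret;
        }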
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 03768bb3aab1..47b3b2d4e775 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb,
571 int silent) 571 int silent)
572{ 572{
573 sb->s_maxbytes = MAX_LFS_FILESIZE; 573 sb->s_maxbytes = MAX_LFS_FILESIZE;
574 sb->s_blocksize = PAGE_CACHE_SIZE; 574 sb->s_blocksize = PAGE_SIZE;
575 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 575 sb->s_blocksize_bits = PAGE_SHIFT;
576 sb->s_magic = DLMFS_MAGIC; 576 sb->s_magic = DLMFS_MAGIC;
577 sb->s_op = &dlmfs_ops; 577 sb->s_op = &dlmfs_ops;
578 sb->s_root = d_make_root(dlmfs_get_root_inode(sb)); 578 sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c18ab45f8d21..5308841756be 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
770{ 770{
771 struct address_space *mapping = inode->i_mapping; 771 struct address_space *mapping = inode->i_mapping;
772 struct page *page; 772 struct page *page;
773 unsigned long index = abs_from >> PAGE_CACHE_SHIFT; 773 unsigned long index = abs_from >> PAGE_SHIFT;
774 handle_t *handle; 774 handle_t *handle;
775 int ret = 0; 775 int ret = 0;
776 unsigned zero_from, zero_to, block_start, block_end; 776 unsigned zero_from, zero_to, block_start, block_end;
777 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 777 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
778 778
779 BUG_ON(abs_from >= abs_to); 779 BUG_ON(abs_from >= abs_to);
780 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); 780 BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
781 BUG_ON(abs_from & (inode->i_blkbits - 1)); 781 BUG_ON(abs_from & (inode->i_blkbits - 1));
782 782
783 handle = ocfs2_zero_start_ordered_transaction(inode, di_bh); 783 handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
@@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
794 } 794 }
795 795
796 /* Get the offsets within the page that we want to zero */ 796 /* Get the offsets within the page that we want to zero */
797 zero_from = abs_from & (PAGE_CACHE_SIZE - 1); 797 zero_from = abs_from & (PAGE_SIZE - 1);
798 zero_to = abs_to & (PAGE_CACHE_SIZE - 1); 798 zero_to = abs_to & (PAGE_SIZE - 1);
799 if (!zero_to) 799 if (!zero_to)
800 zero_to = PAGE_CACHE_SIZE; 800 zero_to = PAGE_SIZE;
801 801
802 trace_ocfs2_write_zero_page( 802 trace_ocfs2_write_zero_page(
803 (unsigned long long)OCFS2_I(inode)->ip_blkno, 803 (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
851 851
852out_unlock: 852out_unlock:
853 unlock_page(page); 853 unlock_page(page);
854 page_cache_release(page); 854 put_page(page);
855out_commit_trans: 855out_commit_trans:
856 if (handle) 856 if (handle)
857 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 857 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -959,7 +959,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
959 BUG_ON(range_start >= range_end); 959 BUG_ON(range_start >= range_end);
960 960
961 while (zero_pos < range_end) { 961 while (zero_pos < range_end) {
962 next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; 962 next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
963 if (next_pos > range_end) 963 if (next_pos > range_end)
964 next_pos = range_end; 964 next_pos = range_end;
965 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh); 965 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9ea081f4e6e4..71545ad4628c 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
65 struct inode *inode = file_inode(file); 65 struct inode *inode = file_inode(file);
66 struct address_space *mapping = inode->i_mapping; 66 struct address_space *mapping = inode->i_mapping;
67 loff_t pos = page_offset(page); 67 loff_t pos = page_offset(page);
68 unsigned int len = PAGE_CACHE_SIZE; 68 unsigned int len = PAGE_SIZE;
69 pgoff_t last_index; 69 pgoff_t last_index;
70 struct page *locked_page = NULL; 70 struct page *locked_page = NULL;
71 void *fsdata; 71 void *fsdata;
72 loff_t size = i_size_read(inode); 72 loff_t size = i_size_read(inode);
73 73
74 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 74 last_index = (size - 1) >> PAGE_SHIFT;
75 75
76 /* 76 /*
77 * There are cases that lead to the page no longer bebongs to the 77 * There are cases that lead to the page no longer bebongs to the
@@ -102,7 +102,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
102 * because the "write" would invalidate their data. 102 * because the "write" would invalidate their data.
103 */ 103 */
104 if (page->index == last_index) 104 if (page->index == last_index)
105 len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; 105 len = ((size - 1) & ~PAGE_MASK) + 1;
106 106
107 ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, 107 ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
108 &locked_page, &fsdata, di_bh, page); 108 &locked_page, &fsdata, di_bh, page);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6cf6538a0651..e63af7ddfe68 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -822,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
822 u32 clusters = pg_index; 822 u32 clusters = pg_index;
823 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; 823 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
824 824
825 if (unlikely(PAGE_CACHE_SHIFT > cbits)) 825 if (unlikely(PAGE_SHIFT > cbits))
826 clusters = pg_index << (PAGE_CACHE_SHIFT - cbits); 826 clusters = pg_index << (PAGE_SHIFT - cbits);
827 else if (PAGE_CACHE_SHIFT < cbits) 827 else if (PAGE_SHIFT < cbits)
828 clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT); 828 clusters = pg_index >> (cbits - PAGE_SHIFT);
829 829
830 return clusters; 830 return clusters;
831} 831}
@@ -839,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
839 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; 839 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
840 pgoff_t index = clusters; 840 pgoff_t index = clusters;
841 841
842 if (PAGE_CACHE_SHIFT > cbits) { 842 if (PAGE_SHIFT > cbits) {
843 index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits); 843 index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
844 } else if (PAGE_CACHE_SHIFT < cbits) { 844 } else if (PAGE_SHIFT < cbits) {
845 index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT); 845 index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
846 } 846 }
847 847
848 return index; 848 return index;
@@ -853,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
853 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; 853 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
854 unsigned int pages_per_cluster = 1; 854 unsigned int pages_per_cluster = 1;
855 855
856 if (PAGE_CACHE_SHIFT < cbits) 856 if (PAGE_SHIFT < cbits)
857 pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT); 857 pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
858 858
859 return pages_per_cluster; 859 return pages_per_cluster;
860} 860}
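The ocfs2.h helpers convert between page indexes and cluster numbers by shifting by the difference of the two bit widths, so the rename leaves the math intact. A worked example, assuming 4 KiB pages and 8 KiB clusters (cbits = 13): each cluster spans two pages, so page index 6 lands in cluster 3.

        #include <stdio.h>

        #define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */

        static unsigned page_index_to_clusters(unsigned pg_index, unsigned cbits)
        {
                if (PAGE_SHIFT > cbits)
                        return pg_index << (PAGE_SHIFT - cbits);
                if (PAGE_SHIFT < cbits)
                        return pg_index >> (cbits - PAGE_SHIFT);
                return pg_index;
        }

        int main(void)
        {
                printf("page 6 -> cluster %u\n", page_index_to_clusters(6, 13));
                return 0;
        }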
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3892f3c079ca..ab6a6cdcf91c 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
867 int status = 0; 867 int status = 0;
868 868
869 trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type); 869 trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
870 if (!sb_has_quota_loaded(sb, type)) {
871 status = -ESRCH;
872 goto out;
873 }
870 status = ocfs2_lock_global_qf(info, 0); 874 status = ocfs2_lock_global_qf(info, 0);
871 if (status < 0) 875 if (status < 0)
872 goto out; 876 goto out;
@@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
878out_global: 882out_global:
879 ocfs2_unlock_global_qf(info, 0); 883 ocfs2_unlock_global_qf(info, 0);
880out: 884out:
881 /* Avoid logging ENOENT since it just means there isn't next ID */ 885 /*
882 if (status && status != -ENOENT) 886 * Avoid logging ENOENT since it just means there isn't next ID and
887 * ESRCH which means quota isn't enabled for the filesystem.
888 */
889 if (status && status != -ENOENT && status != -ESRCH)
883 mlog_errno(status); 890 mlog_errno(status);
884 return status; 891 return status;
885} 892}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3eff031aaf26..744d5d90c363 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2937 end = i_size_read(inode); 2937 end = i_size_read(inode);
2938 2938
2939 while (offset < end) { 2939 while (offset < end) {
2940 page_index = offset >> PAGE_CACHE_SHIFT; 2940 page_index = offset >> PAGE_SHIFT;
2941 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; 2941 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
2942 if (map_end > end) 2942 if (map_end > end)
2943 map_end = end; 2943 map_end = end;
2944 2944
2945 /* from, to is the offset within the page. */ 2945 /* from, to is the offset within the page. */
2946 from = offset & (PAGE_CACHE_SIZE - 1); 2946 from = offset & (PAGE_SIZE - 1);
2947 to = PAGE_CACHE_SIZE; 2947 to = PAGE_SIZE;
2948 if (map_end & (PAGE_CACHE_SIZE - 1)) 2948 if (map_end & (PAGE_SIZE - 1))
2949 to = map_end & (PAGE_CACHE_SIZE - 1); 2949 to = map_end & (PAGE_SIZE - 1);
2950 2950
2951 page = find_or_create_page(mapping, page_index, GFP_NOFS); 2951 page = find_or_create_page(mapping, page_index, GFP_NOFS);
2952 if (!page) { 2952 if (!page) {
@@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2956 } 2956 }
2957 2957
2958 /* 2958 /*
2959 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page 2959 * In case PAGE_SIZE <= CLUSTER_SIZE, This page
2960 * can't be dirtied before we CoW it out. 2960 * can't be dirtied before we CoW it out.
2961 */ 2961 */
2962 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2962 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2963 BUG_ON(PageDirty(page)); 2963 BUG_ON(PageDirty(page));
2964 2964
2965 if (!PageUptodate(page)) { 2965 if (!PageUptodate(page)) {
@@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2987 mark_page_accessed(page); 2987 mark_page_accessed(page);
2988unlock: 2988unlock:
2989 unlock_page(page); 2989 unlock_page(page);
2990 page_cache_release(page); 2990 put_page(page);
2991 page = NULL; 2991 page = NULL;
2992 offset = map_end; 2992 offset = map_end;
2993 if (ret) 2993 if (ret)
@@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
3165 } 3165 }
3166 3166
3167 while (offset < end) { 3167 while (offset < end) {
3168 page_index = offset >> PAGE_CACHE_SHIFT; 3168 page_index = offset >> PAGE_SHIFT;
3169 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; 3169 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
3170 if (map_end > end) 3170 if (map_end > end)
3171 map_end = end; 3171 map_end = end;
3172 3172
@@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
3182 mark_page_accessed(page); 3182 mark_page_accessed(page);
3183 3183
3184 unlock_page(page); 3184 unlock_page(page);
3185 page_cache_release(page); 3185 put_page(page);
3186 page = NULL; 3186 page = NULL;
3187 offset = map_end; 3187 offset = map_end;
3188 if (ret) 3188 if (ret)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7db631e1c8b0..d7cae3327de5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -605,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
605 /* 605 /*
606 * We might be limited by page cache size. 606 * We might be limited by page cache size.
607 */ 607 */
608 if (bytes > PAGE_CACHE_SIZE) { 608 if (bytes > PAGE_SIZE) {
609 bytes = PAGE_CACHE_SIZE; 609 bytes = PAGE_SIZE;
610 trim = 1; 610 trim = 1;
611 /* 611 /*
612 * Shift by 31 here so that we don't get larger than 612 * Shift by 31 here so that we don't get larger than
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index ba7dec40771e..324f0af40d7b 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -153,7 +153,6 @@ static int orangefs_readdir(struct file *file, struct dir_context *ctx)
153 struct dentry *dentry = file->f_path.dentry; 153 struct dentry *dentry = file->f_path.dentry;
154 struct orangefs_kernel_op_s *new_op = NULL; 154 struct orangefs_kernel_op_s *new_op = NULL;
155 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode); 155 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
156 int buffer_full = 0;
157 struct orangefs_readdir_response_s readdir_response; 156 struct orangefs_readdir_response_s readdir_response;
158 void *dents_buf; 157 void *dents_buf;
159 int i = 0; 158 int i = 0;
@@ -350,8 +349,7 @@ get_new_buffer_index:
350 /* 349 /*
351 * Did we hit the end of the directory? 350 * Did we hit the end of the directory?
352 */ 351 */
353 if (readdir_response.token == ORANGEFS_READDIR_END && 352 if (readdir_response.token == ORANGEFS_READDIR_END) {
354 !buffer_full) {
355 gossip_debug(GOSSIP_DIR_DEBUG, 353 gossip_debug(GOSSIP_DIR_DEBUG,
356 "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n"); 354 "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
357 ctx->pos = ORANGEFS_READDIR_END; 355 ctx->pos = ORANGEFS_READDIR_END;
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 2382e267b49e..85640e955cde 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -18,8 +18,8 @@ static int read_one_page(struct page *page)
18 int max_block; 18 int max_block;
19 ssize_t bytes_read = 0; 19 ssize_t bytes_read = 0;
20 struct inode *inode = page->mapping->host; 20 struct inode *inode = page->mapping->host;
21 const __u32 blocksize = PAGE_CACHE_SIZE; /* inode->i_blksize */ 21 const __u32 blocksize = PAGE_SIZE; /* inode->i_blksize */
22 const __u32 blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */ 22 const __u32 blockbits = PAGE_SHIFT; /* inode->i_blkbits */
23 struct iov_iter to; 23 struct iov_iter to;
24 struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE}; 24 struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
25 25
@@ -86,7 +86,7 @@ static int orangefs_readpages(struct file *file,
86 "failure adding page to cache, read_one_page returned: %d\n", 86 "failure adding page to cache, read_one_page returned: %d\n",
87 ret); 87 ret);
88 } else { 88 } else {
89 page_cache_release(page); 89 put_page(page);
90 } 90 }
91 } 91 }
92 BUG_ON(!list_empty(pages)); 92 BUG_ON(!list_empty(pages));
@@ -204,22 +204,8 @@ static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
204 if (ret != 0) 204 if (ret != 0)
205 return ret; 205 return ret;
206 206
207 /* 207 if (orig_size != i_size_read(inode))
208 * Only change the c/mtime if we are changing the size or we are
209 * explicitly asked to change it. This handles the semantic difference
210 * between truncate() and ftruncate() as implemented in the VFS.
211 *
212 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
213 * special case where we need to update the times despite not having
214 * these flags set. For all other operations the VFS set these flags
215 * explicitly if it wants a timestamp update.
216 */
217 if (orig_size != i_size_read(inode) &&
218 !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
219 iattr->ia_ctime = iattr->ia_mtime =
220 current_fs_time(inode->i_sb);
221 iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; 208 iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
222 }
223 209
224 return ret; 210 return ret;
225} 211}
@@ -328,7 +314,7 @@ static int orangefs_init_iops(struct inode *inode)
328 case S_IFREG: 314 case S_IFREG:
329 inode->i_op = &orangefs_file_inode_operations; 315 inode->i_op = &orangefs_file_inode_operations;
330 inode->i_fop = &orangefs_file_operations; 316 inode->i_fop = &orangefs_file_operations;
331 inode->i_blkbits = PAGE_CACHE_SHIFT; 317 inode->i_blkbits = PAGE_SHIFT;
332 break; 318 break;
333 case S_IFLNK: 319 case S_IFLNK:
334 inode->i_op = &orangefs_symlink_inode_operations; 320 inode->i_op = &orangefs_symlink_inode_operations;
@@ -456,7 +442,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
456 inode->i_uid = current_fsuid(); 442 inode->i_uid = current_fsuid();
457 inode->i_gid = current_fsgid(); 443 inode->i_gid = current_fsgid();
458 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 444 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
459 inode->i_size = PAGE_CACHE_SIZE; 445 inode->i_size = PAGE_SIZE;
460 inode->i_rdev = dev; 446 inode->i_rdev = dev;
461 447
462 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); 448 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 1f8acc9f9a88..75375e90a63f 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -170,7 +170,7 @@ orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
170 int i; 170 int i;
171 171
172 for (i = 0; i < bufmap->page_count; i++) 172 for (i = 0; i < bufmap->page_count; i++)
173 page_cache_release(bufmap->page_array[i]); 173 put_page(bufmap->page_array[i]);
174} 174}
175 175
176static void 176static void
@@ -299,7 +299,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
299 299
300 for (i = 0; i < ret; i++) { 300 for (i = 0; i < ret; i++) {
301 SetPageError(bufmap->page_array[i]); 301 SetPageError(bufmap->page_array[i]);
302 page_cache_release(bufmap->page_array[i]); 302 put_page(bufmap->page_array[i]);
303 } 303 }
304 return -ENOMEM; 304 return -ENOMEM;
305 } 305 }
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 19670b8b4053..1714a737d556 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -126,8 +126,7 @@ out:
126 126
127void orangefs_debugfs_cleanup(void) 127void orangefs_debugfs_cleanup(void)
128{ 128{
129 if (debug_dir) 129 debugfs_remove_recursive(debug_dir);
130 debugfs_remove_recursive(debug_dir);
131} 130}
132 131
133/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ 132/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 40f5163b56aa..2d129b5886ee 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -303,7 +303,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
303 } 303 }
304 break; 304 break;
305 case S_IFDIR: 305 case S_IFDIR:
306 inode->i_size = PAGE_CACHE_SIZE; 306 inode->i_size = PAGE_SIZE;
307 orangefs_inode->blksize = (1 << inode->i_blkbits); 307 orangefs_inode->blksize = (1 << inode->i_blkbits);
308 spin_lock(&inode->i_lock); 308 spin_lock(&inode->i_lock);
309 inode_set_bytes(inode, inode->i_size); 309 inode_set_bytes(inode, inode->i_size);
@@ -315,9 +315,13 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
315 inode->i_size = (loff_t)strlen(new_op-> 315 inode->i_size = (loff_t)strlen(new_op->
316 downcall.resp.getattr.link_target); 316 downcall.resp.getattr.link_target);
317 orangefs_inode->blksize = (1 << inode->i_blkbits); 317 orangefs_inode->blksize = (1 << inode->i_blkbits);
318 strlcpy(orangefs_inode->link_target, 318 ret = strscpy(orangefs_inode->link_target,
319 new_op->downcall.resp.getattr.link_target, 319 new_op->downcall.resp.getattr.link_target,
320 ORANGEFS_NAME_MAX); 320 ORANGEFS_NAME_MAX);
321 if (ret == -E2BIG) {
322 ret = -EIO;
323 goto out;
324 }
321 inode->i_link = orangefs_inode->link_target; 325 inode->i_link = orangefs_inode->link_target;
322 } 326 }
323 break; 327 break;
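The switch from strlcpy() to strscpy() in orangefs-utils.c is more than style: strscpy() returns the number of bytes copied, or -E2BIG when the source did not fit, so the caller can turn a silently truncated link target into a hard -EIO instead of serving a corrupt symlink. The pattern in miniature (a sketch with hypothetical names, not the patched function):

        /* Reject a link target that does not fit, instead of silently
         * truncating it the way strlcpy() would. */
        static int copy_link_target(char *dst, size_t dst_len, const char *src)
        {
                ssize_t n = strscpy(dst, src, dst_len);

                if (n == -E2BIG)
                        return -EIO;    /* truncated target treated as corruption */
                return 0;
        }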
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 50578a28bd9e..1efc6f8a5224 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -1,3 +1,4 @@
1#include <linux/kernel.h>
1#include <linux/types.h> 2#include <linux/types.h>
2#include <linux/spinlock_types.h> 3#include <linux/spinlock_types.h>
3#include <linux/slab.h> 4#include <linux/slab.h>
@@ -74,8 +75,8 @@ static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
74 void *p, int size) 75 void *p, int size)
75{ 76{
76 77
77 memset(p, 0, size);
78 memcpy(p, kh->u, 16); 78 memcpy(p, kh->u, 16);
79 memset(p + 16, 0, size - 16);
79 80
80} 81}
81 82
@@ -427,26 +428,28 @@ struct ORANGEFS_dev_map_desc {
427/* gossip.h *****************************************************************/ 428/* gossip.h *****************************************************************/
428 429
429#ifdef GOSSIP_DISABLE_DEBUG 430#ifdef GOSSIP_DISABLE_DEBUG
430#define gossip_debug(mask, format, f...) do {} while (0) 431#define gossip_debug(mask, fmt, ...) \
432do { \
433 if (0) \
434 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
435} while (0)
431#else 436#else
432extern __u64 gossip_debug_mask; 437extern __u64 gossip_debug_mask;
433extern struct client_debug_mask client_debug_mask; 438extern struct client_debug_mask client_debug_mask;
434 439
435/* try to avoid function call overhead by checking masks in macro */ 440/* try to avoid function call overhead by checking masks in macro */
436#define gossip_debug(mask, format, f...) \ 441#define gossip_debug(mask, fmt, ...) \
437do { \ 442do { \
438 if (gossip_debug_mask & mask) \ 443 if (gossip_debug_mask & (mask)) \
439 printk(format, ##f); \ 444 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
440} while (0) 445} while (0)
441#endif /* GOSSIP_DISABLE_DEBUG */ 446#endif /* GOSSIP_DISABLE_DEBUG */
442 447
443/* do file and line number printouts w/ the GNU preprocessor */ 448/* do file and line number printouts w/ the GNU preprocessor */
444#define gossip_ldebug(mask, format, f...) \ 449#define gossip_ldebug(mask, fmt, ...) \
445 gossip_debug(mask, "%s: " format, __func__, ##f) 450 gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)
446 451
447#define gossip_err printk 452#define gossip_err pr_err
448#define gossip_lerr(format, f...) \ 453#define gossip_lerr(fmt, ...) \
449 gossip_err("%s line %d: " format, \ 454 gossip_err("%s line %d: " fmt, \
450 __FILE__, \ 455 __FILE__, __LINE__, ##__VA_ARGS__)
451 __LINE__, \
452 ##f)
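The gossip macro rework trades the GNU-only named variadic form ('f...') for C99 __VA_ARGS__, parenthesizes the mask argument, and routes output through printk(KERN_DEBUG ...) and pr_err(). Note the disabled build: an 'if (0) printk(...)' still lets the compiler type-check format strings and arguments, so a debug-only format bug cannot hide until someone enables debugging. The idiom in isolation (hypothetical macro name):

        /* Disabled-debug stub that keeps compile-time format checking */
        #define my_debug(fmt, ...)                                \
        do {                                                      \
                if (0)                                            \
                        printk(KERN_DEBUG fmt, ##__VA_ARGS__);    \
        } while (0)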
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index ef5da7538cd5..63a6280d8c3a 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -73,10 +73,6 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
73 "%s: prefix %s name %s, buffer_size %zd\n", 73 "%s: prefix %s name %s, buffer_size %zd\n",
74 __func__, prefix, name, size); 74 __func__, prefix, name, size);
75 75
76 if (name == NULL || (size > 0 && buffer == NULL)) {
77 gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
78 return -EINVAL;
79 }
80 if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) { 76 if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
81 gossip_err("Invalid key length (%d)\n", 77 gossip_err("Invalid key length (%d)\n",
82 (int)(strlen(name) + strlen(prefix))); 78 (int)(strlen(name) + strlen(prefix)));
@@ -146,8 +142,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
146 goto out_release_op; 142 goto out_release_op;
147 } 143 }
148 144
149 memset(buffer, 0, size);
150 memcpy(buffer, new_op->downcall.resp.getxattr.val, length); 145 memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
146 memset(buffer + length, 0, size - length);
151 gossip_debug(GOSSIP_XATTR_DEBUG, 147 gossip_debug(GOSSIP_XATTR_DEBUG,
152 "orangefs_inode_getxattr: inode %pU " 148 "orangefs_inode_getxattr: inode %pU "
153 "key %s key_sz %d, val_len %d\n", 149 "key %s key_sz %d, val_len %d\n",
@@ -239,8 +235,7 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
239 "%s: prefix %s, name %s, buffer_size %zd\n", 235 "%s: prefix %s, name %s, buffer_size %zd\n",
240 __func__, prefix, name, size); 236 __func__, prefix, name, size);
241 237
242 if (size < 0 || 238 if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
243 size >= ORANGEFS_MAX_XATTR_VALUELEN ||
244 flags < 0) { 239 flags < 0) {
245 gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", 240 gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
246 (int)size, 241 (int)size,
@@ -248,12 +243,6 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
248 return -EINVAL; 243 return -EINVAL;
249 } 244 }
250 245
251 if (name == NULL ||
252 (size > 0 && value == NULL)) {
253 gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
254 return -EINVAL;
255 }
256
257 internal_flag = convert_to_internal_xattr_flags(flags); 246 internal_flag = convert_to_internal_xattr_flags(flags);
258 247
259 if (prefix) { 248 if (prefix) {
@@ -353,10 +342,6 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
353 gossip_err("%s: bogus NULL pointers\n", __func__); 342 gossip_err("%s: bogus NULL pointers\n", __func__);
354 return -EINVAL; 343 return -EINVAL;
355 } 344 }
356 if (size < 0) {
357 gossip_err("Invalid size (%d)\n", (int)size);
358 return -EINVAL;
359 }
360 345
361 down_read(&orangefs_inode->xattr_sem); 346 down_read(&orangefs_inode->xattr_sem);
362 new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR); 347 new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
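The xattr.c deletions remove checks that can never fire: the VFS already validates these arguments before the filesystem sees them, and 'size' has an unsigned type, so a 'size < 0' test is compile-time dead code. The deadness in miniature:

        #include <stddef.h>

        static int validate(size_t size)
        {
                if (size < 0)           /* always false: size_t is unsigned */
                        return -1;
                return 0;
        }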
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ef64984c9bbc..5d972e6cd3fe 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
295 } 295 }
296} 296}
297 297
298static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
299{
300 struct dentry *real;
301
302 if (d_is_dir(dentry)) {
303 if (!inode || inode == d_inode(dentry))
304 return dentry;
305 goto bug;
306 }
307
308 real = ovl_dentry_upper(dentry);
309 if (real && (!inode || inode == d_inode(real)))
310 return real;
311
312 real = ovl_dentry_lower(dentry);
313 if (!real)
314 goto bug;
315
316 if (!inode || inode == d_inode(real))
317 return real;
318
319 /* Handle recursion */
320 if (real->d_flags & DCACHE_OP_REAL)
321 return real->d_op->d_real(real, inode);
322
323bug:
324 WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
325 inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
326 return dentry;
327}
328
298static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags) 329static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
299{ 330{
300 struct ovl_entry *oe = dentry->d_fsdata; 331 struct ovl_entry *oe = dentry->d_fsdata;
@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
339static const struct dentry_operations ovl_dentry_operations = { 370static const struct dentry_operations ovl_dentry_operations = {
340 .d_release = ovl_dentry_release, 371 .d_release = ovl_dentry_release,
341 .d_select_inode = ovl_d_select_inode, 372 .d_select_inode = ovl_d_select_inode,
373 .d_real = ovl_d_real,
342}; 374};
343 375
344static const struct dentry_operations ovl_reval_dentry_operations = { 376static const struct dentry_operations ovl_reval_dentry_operations = {
345 .d_release = ovl_dentry_release, 377 .d_release = ovl_dentry_release,
346 .d_select_inode = ovl_d_select_inode, 378 .d_select_inode = ovl_d_select_inode,
379 .d_real = ovl_d_real,
347 .d_revalidate = ovl_dentry_revalidate, 380 .d_revalidate = ovl_dentry_revalidate,
348 .d_weak_revalidate = ovl_dentry_weak_revalidate, 381 .d_weak_revalidate = ovl_dentry_weak_revalidate,
349}; 382};
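ovl_d_real() gives callers a way to reach the dentry that actually backs an overlay object: the upper-layer dentry if one exists, otherwise the lower one, recursing when the underlying filesystem is itself stacked. A hedged caller-side sketch of the dispatch (simplified; real callers go through VFS helpers):

        static struct dentry *real_dentry(struct dentry *dentry,
                                          struct inode *inode)
        {
                if (dentry->d_flags & DCACHE_OP_REAL)
                        return dentry->d_op->d_real(dentry, inode);
                return dentry;          /* not stacked: the dentry is real */
        }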
diff --git a/fs/pipe.c b/fs/pipe.c
index ab8dad3ccb6a..0d3f5165cb0b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
134 if (page_count(page) == 1 && !pipe->tmp_page) 134 if (page_count(page) == 1 && !pipe->tmp_page)
135 pipe->tmp_page = page; 135 pipe->tmp_page = page;
136 else 136 else
137 page_cache_release(page); 137 put_page(page);
138} 138}
139 139
140/** 140/**
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
180 */ 180 */
181void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) 181void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
182{ 182{
183 page_cache_get(buf->page); 183 get_page(buf->page);
184} 184}
185EXPORT_SYMBOL(generic_pipe_buf_get); 185EXPORT_SYMBOL(generic_pipe_buf_get);
186 186
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm);
211void generic_pipe_buf_release(struct pipe_inode_info *pipe, 211void generic_pipe_buf_release(struct pipe_inode_info *pipe,
212 struct pipe_buffer *buf) 212 struct pipe_buffer *buf)
213{ 213{
214 page_cache_release(buf->page); 214 put_page(buf->page);
215} 215}
216EXPORT_SYMBOL(generic_pipe_buf_release); 216EXPORT_SYMBOL(generic_pipe_buf_release);
217 217
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9df431642042..541583510cfb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
553 if (radix_tree_exceptional_entry(page)) 553 if (radix_tree_exceptional_entry(page))
554 mss->swap += PAGE_SIZE; 554 mss->swap += PAGE_SIZE;
555 else 555 else
556 page_cache_release(page); 556 put_page(page);
557 557
558 return; 558 return;
559 } 559 }
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1518 return page; 1518 return page;
1519} 1519}
1520 1520
1521#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1522static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1523 struct vm_area_struct *vma,
1524 unsigned long addr)
1525{
1526 struct page *page;
1527 int nid;
1528
1529 if (!pmd_present(pmd))
1530 return NULL;
1531
1532 page = vm_normal_page_pmd(vma, addr, pmd);
1533 if (!page)
1534 return NULL;
1535
1536 if (PageReserved(page))
1537 return NULL;
1538
1539 nid = page_to_nid(page);
1540 if (!node_isset(nid, node_states[N_MEMORY]))
1541 return NULL;
1542
1543 return page;
1544}
1545#endif
1546
1521static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 1547static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1522 unsigned long end, struct mm_walk *walk) 1548 unsigned long end, struct mm_walk *walk)
1523{ 1549{
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1527 pte_t *orig_pte; 1553 pte_t *orig_pte;
1528 pte_t *pte; 1554 pte_t *pte;
1529 1555
1556#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1530 ptl = pmd_trans_huge_lock(pmd, vma); 1557 ptl = pmd_trans_huge_lock(pmd, vma);
1531 if (ptl) { 1558 if (ptl) {
1532 pte_t huge_pte = *(pte_t *)pmd;
1533 struct page *page; 1559 struct page *page;
1534 1560
1535 page = can_gather_numa_stats(huge_pte, vma, addr); 1561 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1536 if (page) 1562 if (page)
1537 gather_stats(page, md, pte_dirty(huge_pte), 1563 gather_stats(page, md, pmd_dirty(*pmd),
1538 HPAGE_PMD_SIZE/PAGE_SIZE); 1564 HPAGE_PMD_SIZE/PAGE_SIZE);
1539 spin_unlock(ptl); 1565 spin_unlock(ptl);
1540 return 0; 1566 return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1542 1568
1543 if (pmd_trans_unstable(pmd)) 1569 if (pmd_trans_unstable(pmd))
1544 return 0; 1570 return 0;
1571#endif
1545 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 1572 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1546 do { 1573 do {
1547 struct page *page = can_gather_numa_stats(*pte, vma, addr); 1574 struct page *page = can_gather_numa_stats(*pte, vma, addr);
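The task_mmu.c change is a portability fix: the old code read a huge pmd through a pte pointer, which happens to work where the two entry formats coincide but misreads the entry on architectures where they differ. The new can_gather_numa_stats_pmd() stays in pmd terms throughout (pmd_present(), vm_normal_page_pmd(), pmd_dirty()), and the THP branch is now compiled only under CONFIG_TRANSPARENT_HUGEPAGE. The hazard in miniature (hypothetical fragment):

        /* Nothing guarantees a pmd and a pte share a bit layout, so this
         * type pun is unportable: */
        pte_t huge_pte = *(pte_t *)pmd;                    /* removed */
        page = can_gather_numa_stats(huge_pte, vma, addr);

        /* pmd-native replacement keeps the architecture's accessors in play */
        page = can_gather_numa_stats_pmd(*pmd, vma, addr);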
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 55bb57e6a30d..8afe10cf7df8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
279 if (!page) 279 if (!page)
280 return VM_FAULT_OOM; 280 return VM_FAULT_OOM;
281 if (!PageUptodate(page)) { 281 if (!PageUptodate(page)) {
282 offset = (loff_t) index << PAGE_CACHE_SHIFT; 282 offset = (loff_t) index << PAGE_SHIFT;
283 buf = __va((page_to_pfn(page) << PAGE_SHIFT)); 283 buf = __va((page_to_pfn(page) << PAGE_SHIFT));
284 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); 284 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
285 if (rc < 0) { 285 if (rc < 0) {
286 unlock_page(page); 286 unlock_page(page);
287 page_cache_release(page); 287 put_page(page);
288 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 288 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
289 } 289 }
290 SetPageUptodate(page); 290 SetPageUptodate(page);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index dc645b66cd79..45d6110744cb 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
420 pstore_sb = sb; 420 pstore_sb = sb;
421 421
422 sb->s_maxbytes = MAX_LFS_FILESIZE; 422 sb->s_maxbytes = MAX_LFS_FILESIZE;
423 sb->s_blocksize = PAGE_CACHE_SIZE; 423 sb->s_blocksize = PAGE_SIZE;
424 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 424 sb->s_blocksize_bits = PAGE_SHIFT;
425 sb->s_magic = PSTOREFS_MAGIC; 425 sb->s_magic = PSTOREFS_MAGIC;
426 sb->s_op = &pstore_ops; 426 sb->s_op = &pstore_ops;
427 sb->s_time_gran = 1; 427 sb->s_time_gran = 1;
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index e1f37278cf97..144ceda4948e 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
35static unsigned last_entry(struct inode *inode, unsigned long page_nr) 35static unsigned last_entry(struct inode *inode, unsigned long page_nr)
36{ 36{
37 unsigned long last_byte = inode->i_size; 37 unsigned long last_byte = inode->i_size;
38 last_byte -= page_nr << PAGE_CACHE_SHIFT; 38 last_byte -= page_nr << PAGE_SHIFT;
39 if (last_byte > PAGE_CACHE_SIZE) 39 if (last_byte > PAGE_SIZE)
40 last_byte = PAGE_CACHE_SIZE; 40 last_byte = PAGE_SIZE;
41 return last_byte / QNX6_DIR_ENTRY_SIZE; 41 return last_byte / QNX6_DIR_ENTRY_SIZE;
42} 42}
43 43
@@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
47{ 47{
48 struct qnx6_sb_info *sbi = QNX6_SB(sb); 48 struct qnx6_sb_info *sbi = QNX6_SB(sb);
49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ 49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
50 u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */ 50 u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
51 /* within page */ 51 /* within page */
52 u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK; 52 u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
53 struct address_space *mapping = sbi->longfile->i_mapping; 53 struct address_space *mapping = sbi->longfile->i_mapping;
54 struct page *page = read_mapping_page(mapping, n, NULL); 54 struct page *page = read_mapping_page(mapping, n, NULL);
55 if (IS_ERR(page)) 55 if (IS_ERR(page))
@@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
115 struct qnx6_sb_info *sbi = QNX6_SB(s); 115 struct qnx6_sb_info *sbi = QNX6_SB(s);
116 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); 116 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
117 unsigned long npages = dir_pages(inode); 117 unsigned long npages = dir_pages(inode);
118 unsigned long n = pos >> PAGE_CACHE_SHIFT; 118 unsigned long n = pos >> PAGE_SHIFT;
119 unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; 119 unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
120 bool done = false; 120 bool done = false;
121 121
122 ctx->pos = pos; 122 ctx->pos = pos;
@@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
131 131
132 if (IS_ERR(page)) { 132 if (IS_ERR(page)) {
133 pr_err("%s(): read failed\n", __func__); 133 pr_err("%s(): read failed\n", __func__);
134 ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; 134 ctx->pos = (n + 1) << PAGE_SHIFT;
135 return PTR_ERR(page); 135 return PTR_ERR(page);
136 } 136 }
137 de = ((struct qnx6_dir_entry *)page_address(page)) + start; 137 de = ((struct qnx6_dir_entry *)page_address(page)) + start;
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 47bb1de07155..1192422a1c56 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
542 iget_failed(inode); 542 iget_failed(inode);
543 return ERR_PTR(-EIO); 543 return ERR_PTR(-EIO);
544 } 544 }
545 n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); 545 n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
546 offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); 546 offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
547 mapping = sbi->inodes->i_mapping; 547 mapping = sbi->inodes->i_mapping;
548 page = read_mapping_page(mapping, n, NULL); 548 page = read_mapping_page(mapping, n, NULL);
549 if (IS_ERR(page)) { 549 if (IS_ERR(page)) {
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index d3fb2b698800..f23b5c4a66ad 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
128static inline void qnx6_put_page(struct page *page) 128static inline void qnx6_put_page(struct page *page)
129{ 129{
130 kunmap(page); 130 kunmap(page);
131 page_cache_release(page); 131 put_page(page);
132} 132}
133 133
134extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, 134extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
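qnx6 keeps many on-disk inodes per page, so locating inode 'ino' is a shift for the page index and a mask for the slot within that page. A worked example, assuming 4 KiB pages and 128-byte on-disk inodes (QNX6_INODE_SIZE_BITS = 7, an assumption here): 32 inodes fit per page, and inode 40 is slot 7 on page 1.

        #include <stdio.h>

        #define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)
        #define PAGE_MASK  (~(PAGE_SIZE - 1))
        #define INODE_BITS 7                    /* assumed: 128-byte inodes */

        int main(void)
        {
                unsigned long ino = 40;
                unsigned long n    = (ino - 1) >> (PAGE_SHIFT - INODE_BITS);
                unsigned long offs = (ino - 1) & (~PAGE_MASK >> INODE_BITS);

                printf("inode %lu: page %lu, slot %lu\n", ino, n, offs);
                return 0;
        }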
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ba827daea5a0..ff21980d0119 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2047 struct quota_info *dqopt = sb_dqopt(sb); 2047 struct quota_info *dqopt = sb_dqopt(sb);
2048 int err; 2048 int err;
2049 2049
2050 if (!dqopt->ops[qid->type]->get_next_id) 2050 mutex_lock(&dqopt->dqonoff_mutex);
2051 return -ENOSYS; 2051 if (!sb_has_quota_active(sb, qid->type)) {
2052 err = -ESRCH;
2053 goto out;
2054 }
2055 if (!dqopt->ops[qid->type]->get_next_id) {
2056 err = -ENOSYS;
2057 goto out;
2058 }
2052 mutex_lock(&dqopt->dqio_mutex); 2059 mutex_lock(&dqopt->dqio_mutex);
2053 err = dqopt->ops[qid->type]->get_next_id(sb, qid); 2060 err = dqopt->ops[qid->type]->get_next_id(sb, qid);
2054 mutex_unlock(&dqopt->dqio_mutex); 2061 mutex_unlock(&dqopt->dqio_mutex);
2062out:
2063 mutex_unlock(&dqopt->dqonoff_mutex);
2055 2064
2056 return err; 2065 return err;
2057} 2066}
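dquot_get_next_id() now holds dqonoff_mutex across the whole lookup so quotas cannot be switched off mid-call, and it distinguishes "quota not active" (-ESRCH) from "format has no next-ID method" (-ENOSYS). Every early exit unwinds through one label so the mutex is dropped exactly once. The patched function assembled from the hunk above, for readability (surrounding detail elided):

        int get_next_id_skeleton(struct super_block *sb, struct kqid *qid)
        {
                struct quota_info *dqopt = sb_dqopt(sb);
                int err;

                mutex_lock(&dqopt->dqonoff_mutex);
                if (!sb_has_quota_active(sb, qid->type)) {
                        err = -ESRCH;           /* quota not running */
                        goto out;
                }
                if (!dqopt->ops[qid->type]->get_next_id) {
                        err = -ENOSYS;          /* no next-ID support */
                        goto out;
                }
                mutex_lock(&dqopt->dqio_mutex);
                err = dqopt->ops[qid->type]->get_next_id(sb, qid);
                mutex_unlock(&dqopt->dqio_mutex);
        out:
                mutex_unlock(&dqopt->dqonoff_mutex);
                return err;
        }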
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 38981b037524..1ab6e6c2e60e 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
223 return err; 223 return err;
224 224
225 sb->s_maxbytes = MAX_LFS_FILESIZE; 225 sb->s_maxbytes = MAX_LFS_FILESIZE;
226 sb->s_blocksize = PAGE_CACHE_SIZE; 226 sb->s_blocksize = PAGE_SIZE;
227 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 227 sb->s_blocksize_bits = PAGE_SHIFT;
228 sb->s_magic = RAMFS_MAGIC; 228 sb->s_magic = RAMFS_MAGIC;
229 sb->s_op = &ramfs_ops; 229 sb->s_op = &ramfs_ops;
230 sb->s_time_gran = 1; 230 sb->s_time_gran = 1;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9424a4ba93a9..389773711de4 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
180 int partial = 0; 180 int partial = 0;
181 unsigned blocksize; 181 unsigned blocksize;
182 struct buffer_head *bh, *head; 182 struct buffer_head *bh, *head;
183 unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT; 183 unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
184 int new; 184 int new;
185 int logit = reiserfs_file_data_log(inode); 185 int logit = reiserfs_file_data_log(inode);
186 struct super_block *s = inode->i_sb; 186 struct super_block *s = inode->i_sb;
187 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; 187 int bh_per_page = PAGE_SIZE / s->s_blocksize;
188 struct reiserfs_transaction_handle th; 188 struct reiserfs_transaction_handle th;
189 int ret = 0; 189 int ret = 0;
190 190
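reiserfs walks a page's buffer ring, so bh_per_page is the page size over the filesystem block size; the same hunks apply the now-familiar PAGE_SIZE rename to the end-index and tail-offset math. A worked example, assuming 4 KiB pages and a hypothetical 1 KiB block size:

        #include <stdio.h>

        #define PAGE_SHIFT 12                 /* assumed: 4 KiB pages */
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        int main(void)
        {
                unsigned long blocksize = 1024;                /* hypothetical */
                unsigned long bh_per_page = PAGE_SIZE / blocksize;

                printf("%lu buffers per page\n", bh_per_page); /* prints 4 */
                return 0;
        }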
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae9e5b308cf9..d5c2e9c865de 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
386 goto finished; 386 goto finished;
387 } 387 }
388 /* read file tail into part of page */ 388 /* read file tail into part of page */
389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); 389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
390 copy_item_head(&tmp_ih, ih); 390 copy_item_head(&tmp_ih, ih);
391 391
392 /* 392 /*
@@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode,
587 return -EIO; 587 return -EIO;
588 588
589 /* always try to read until the end of the block */ 589 /* always try to read until the end of the block */
590 tail_start = tail_offset & (PAGE_CACHE_SIZE - 1); 590 tail_start = tail_offset & (PAGE_SIZE - 1);
591 tail_end = (tail_start | (bh_result->b_size - 1)) + 1; 591 tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
592 592
593 index = tail_offset >> PAGE_CACHE_SHIFT; 593 index = tail_offset >> PAGE_SHIFT;
594 /* 594 /*
595 * hole_page can be zero in case of direct_io, we are sure 595 * hole_page can be zero in case of direct_io, we are sure
596 * that we cannot get here if we write with O_DIRECT into tail page 596 * that we cannot get here if we write with O_DIRECT into tail page
@@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode,
629unlock: 629unlock:
630 if (tail_page != hole_page) { 630 if (tail_page != hole_page) {
631 unlock_page(tail_page); 631 unlock_page(tail_page);
632 page_cache_release(tail_page); 632 put_page(tail_page);
633 } 633 }
634out: 634out:
635 return retval; 635 return retval;
@@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode,
2189 * we want the page with the last byte in the file, 2189 * we want the page with the last byte in the file,
2190 * not the page that will hold the next byte for appending 2190 * not the page that will hold the next byte for appending
2191 */ 2191 */
2192 unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; 2192 unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
2193 unsigned long pos = 0; 2193 unsigned long pos = 0;
2194 unsigned long start = 0; 2194 unsigned long start = 0;
2195 unsigned long blocksize = inode->i_sb->s_blocksize; 2195 unsigned long blocksize = inode->i_sb->s_blocksize;
2196 unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); 2196 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
2197 struct buffer_head *bh; 2197 struct buffer_head *bh;
2198 struct buffer_head *head; 2198 struct buffer_head *head;
2199 struct page *page; 2199 struct page *page;
@@ -2251,7 +2251,7 @@ out:
2251 2251
2252unlock: 2252unlock:
2253 unlock_page(page); 2253 unlock_page(page);
2254 page_cache_release(page); 2254 put_page(page);
2255 return error; 2255 return error;
2256} 2256}
2257 2257
@@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2265{ 2265{
2266 struct reiserfs_transaction_handle th; 2266 struct reiserfs_transaction_handle th;
2267 /* we want the offset for the first byte after the end of the file */ 2267 /* we want the offset for the first byte after the end of the file */
2268 unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2268 unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
2269 unsigned blocksize = inode->i_sb->s_blocksize; 2269 unsigned blocksize = inode->i_sb->s_blocksize;
2270 unsigned length; 2270 unsigned length;
2271 struct page *page = NULL; 2271 struct page *page = NULL;
@@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2345 } 2345 }
2346 } 2346 }
2347 unlock_page(page); 2347 unlock_page(page);
2348 page_cache_release(page); 2348 put_page(page);
2349 } 2349 }
2350 2350
2351 reiserfs_write_unlock(inode->i_sb); 2351 reiserfs_write_unlock(inode->i_sb);
@@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2354out: 2354out:
2355 if (page) { 2355 if (page) {
2356 unlock_page(page); 2356 unlock_page(page);
2357 page_cache_release(page); 2357 put_page(page);
2358 } 2358 }
2359 2359
2360 reiserfs_write_unlock(inode->i_sb); 2360 reiserfs_write_unlock(inode->i_sb);
@@ -2426,7 +2426,7 @@ research:
2426 } else if (is_direct_le_ih(ih)) { 2426 } else if (is_direct_le_ih(ih)) {
2427 char *p; 2427 char *p;
2428 p = page_address(bh_result->b_page); 2428 p = page_address(bh_result->b_page);
2429 p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1); 2429 p += (byte_offset - 1) & (PAGE_SIZE - 1);
2430 copy_size = ih_item_len(ih) - pos_in_item; 2430 copy_size = ih_item_len(ih) - pos_in_item;
2431 2431
2432 fs_gen = get_generation(inode->i_sb); 2432 fs_gen = get_generation(inode->i_sb);
@@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page,
2525 struct writeback_control *wbc) 2525 struct writeback_control *wbc)
2526{ 2526{
2527 struct inode *inode = page->mapping->host; 2527 struct inode *inode = page->mapping->host;
2528 unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; 2528 unsigned long end_index = inode->i_size >> PAGE_SHIFT;
2529 int error = 0; 2529 int error = 0;
2530 unsigned long block; 2530 unsigned long block;
2531 sector_t last_block; 2531 sector_t last_block;
@@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page,
2535 int checked = PageChecked(page); 2535 int checked = PageChecked(page);
2536 struct reiserfs_transaction_handle th; 2536 struct reiserfs_transaction_handle th;
2537 struct super_block *s = inode->i_sb; 2537 struct super_block *s = inode->i_sb;
2538 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; 2538 int bh_per_page = PAGE_SIZE / s->s_blocksize;
2539 th.t_trans_id = 0; 2539 th.t_trans_id = 0;
2540 2540
2541 /* no logging allowed when nonblocking or from PF_MEMALLOC */ 2541 /* no logging allowed when nonblocking or from PF_MEMALLOC */
@@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page,
2564 if (page->index >= end_index) { 2564 if (page->index >= end_index) {
2565 unsigned last_offset; 2565 unsigned last_offset;
2566 2566
2567 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2567 last_offset = inode->i_size & (PAGE_SIZE - 1);
2568 /* no file contents in this page */ 2568 /* no file contents in this page */
2569 if (page->index >= end_index + 1 || !last_offset) { 2569 if (page->index >= end_index + 1 || !last_offset) {
2570 unlock_page(page); 2570 unlock_page(page);
2571 return 0; 2571 return 0;
2572 } 2572 }
2573 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE); 2573 zero_user_segment(page, last_offset, PAGE_SIZE);
2574 } 2574 }
2575 bh = head; 2575 bh = head;
2576 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2576 block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
2577 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 2577 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2578 /* first map all the buffers, logging any direct items we find */ 2578 /* first map all the buffers, logging any direct items we find */
2579 do { 2579 do {
@@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file,
2774 *fsdata = (void *)(unsigned long)flags; 2774 *fsdata = (void *)(unsigned long)flags;
2775 } 2775 }
2776 2776
2777 index = pos >> PAGE_CACHE_SHIFT; 2777 index = pos >> PAGE_SHIFT;
2778 page = grab_cache_page_write_begin(mapping, index, flags); 2778 page = grab_cache_page_write_begin(mapping, index, flags);
2779 if (!page) 2779 if (!page)
2780 return -ENOMEM; 2780 return -ENOMEM;
@@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file,
2822 } 2822 }
2823 if (ret) { 2823 if (ret) {
2824 unlock_page(page); 2824 unlock_page(page);
2825 page_cache_release(page); 2825 put_page(page);
2826 /* Truncate allocated blocks */ 2826 /* Truncate allocated blocks */
2827 reiserfs_truncate_failed_write(inode); 2827 reiserfs_truncate_failed_write(inode);
2828 } 2828 }
@@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2909 else 2909 else
2910 th = NULL; 2910 th = NULL;
2911 2911
2912 start = pos & (PAGE_CACHE_SIZE - 1); 2912 start = pos & (PAGE_SIZE - 1);
2913 if (unlikely(copied < len)) { 2913 if (unlikely(copied < len)) {
2914 if (!PageUptodate(page)) 2914 if (!PageUptodate(page))
2915 copied = 0; 2915 copied = 0;
@@ -2974,7 +2974,7 @@ out:
2974 if (locked) 2974 if (locked)
2975 reiserfs_write_unlock(inode->i_sb); 2975 reiserfs_write_unlock(inode->i_sb);
2976 unlock_page(page); 2976 unlock_page(page);
2977 page_cache_release(page); 2977 put_page(page);
2978 2978
2979 if (pos + len > inode->i_size) 2979 if (pos + len > inode->i_size)
2980 reiserfs_truncate_failed_write(inode); 2980 reiserfs_truncate_failed_write(inode);
@@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2996 unsigned from, unsigned to) 2996 unsigned from, unsigned to)
2997{ 2997{
2998 struct inode *inode = page->mapping->host; 2998 struct inode *inode = page->mapping->host;
2999 loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to; 2999 loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
3000 int ret = 0; 3000 int ret = 0;
3001 int update_sd = 0; 3001 int update_sd = 0;
3002 struct reiserfs_transaction_handle *th = NULL; 3002 struct reiserfs_transaction_handle *th = NULL;
@@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
3181 struct inode *inode = page->mapping->host; 3181 struct inode *inode = page->mapping->host;
3182 unsigned int curr_off = 0; 3182 unsigned int curr_off = 0;
3183 unsigned int stop = offset + length; 3183 unsigned int stop = offset + length;
3184 int partial_page = (offset || length < PAGE_CACHE_SIZE); 3184 int partial_page = (offset || length < PAGE_SIZE);
3185 int ret = 1; 3185 int ret = 1;
3186 3186
3187 BUG_ON(!PageLocked(page)); 3187 BUG_ON(!PageLocked(page));
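The conversions in this file are mechanical, but the arithmetic they touch is worth spelling out: with the PAGE_CACHE_* aliases gone, the byte-offset/page-index split is written directly in PAGE_SIZE/PAGE_SHIFT terms. A minimal user-space sketch of the grab_tail_page()/reiserfs_truncate_file() computations, assuming 4 KiB pages and a hypothetical 10000-byte file:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t i_size = 10000;        /* hypothetical file size */

        /* page holding the last byte (as in grab_tail_page) */
        uint64_t index  = (i_size - 1) >> PAGE_SHIFT;
        /* offset of the first byte past EOF within its page
         * (as in reiserfs_truncate_file) */
        uint64_t offset = i_size & (PAGE_SIZE - 1);

        printf("index=%llu offset=%llu\n",
               (unsigned long long)index, (unsigned long long)offset);
        /* prints: index=2 offset=1808 */
        return 0;
}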
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 036a1fc0a8c3..57045f423893 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
203 * __reiserfs_write_begin on that page. This will force a 203 * __reiserfs_write_begin on that page. This will force a
204 * reiserfs_get_block to unpack the tail for us. 204 * reiserfs_get_block to unpack the tail for us.
205 */ 205 */
206 index = inode->i_size >> PAGE_CACHE_SHIFT; 206 index = inode->i_size >> PAGE_SHIFT;
207 mapping = inode->i_mapping; 207 mapping = inode->i_mapping;
208 page = grab_cache_page(mapping, index); 208 page = grab_cache_page(mapping, index);
209 retval = -ENOMEM; 209 retval = -ENOMEM;
@@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
221 221
222out_unlock: 222out_unlock:
223 unlock_page(page); 223 unlock_page(page);
224 page_cache_release(page); 224 put_page(page);
225 225
226out: 226out:
227 inode_unlock(inode); 227 inode_unlock(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 44c2bdced1c8..2ace90e981f0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s,
599 * This does a check to see if the buffer belongs to one of these 599 * This does a check to see if the buffer belongs to one of these
600 * lost pages before doing the final put_bh. If page->mapping was 600 * lost pages before doing the final put_bh. If page->mapping was
601 * null, it tries to free buffers on the page, which should make the 601 * null, it tries to free buffers on the page, which should make the
602 * final page_cache_release drop the page from the lru. 602 * final put_page drop the page from the lru.
603 */ 603 */
604static void release_buffer_page(struct buffer_head *bh) 604static void release_buffer_page(struct buffer_head *bh)
605{ 605{
606 struct page *page = bh->b_page; 606 struct page *page = bh->b_page;
607 if (!page->mapping && trylock_page(page)) { 607 if (!page->mapping && trylock_page(page)) {
608 page_cache_get(page); 608 get_page(page);
609 put_bh(bh); 609 put_bh(bh);
610 if (!page->mapping) 610 if (!page->mapping)
611 try_to_free_buffers(page); 611 try_to_free_buffers(page);
612 unlock_page(page); 612 unlock_page(page);
613 page_cache_release(page); 613 put_page(page);
614 } else { 614 } else {
615 put_bh(bh); 615 put_bh(bh);
616 } 616 }
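The release_buffer_page() comment above describes the reference dance that keeps the page alive across put_bh(). The renamed helpers follow the usual pairing rule: whoever takes a page reference must drop it. A kernel-style sketch (peek_at_page() is illustrative, not a real kernel function):

#include <linux/pagemap.h>

/* Illustrative only -- peek_at_page() is not a kernel function.
 * find_get_page() takes a reference on success; after this series
 * the old page_cache_get()/page_cache_release() pair is spelled
 * get_page()/put_page(). */
static void peek_at_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (!page)
                return;
        /* ... inspect the page while the reference pins it ... */
        put_page(page);         /* balances find_get_page() */
}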
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 24cbe013240f..5feacd689241 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1342 */ 1342 */
1343 1343
1344 data = kmap_atomic(un_bh->b_page); 1344 data = kmap_atomic(un_bh->b_page);
1345 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); 1345 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1));
1346 memcpy(data + off, 1346 memcpy(data + off,
1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih), 1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
1348 ret_value); 1348 ret_value);
@@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos)
1511 1511
1512 if (page) { 1512 if (page) {
1513 if (page_has_buffers(page)) { 1513 if (page_has_buffers(page)) {
1514 tail_index = pos & (PAGE_CACHE_SIZE - 1); 1514 tail_index = pos & (PAGE_SIZE - 1);
1515 cur_index = 0; 1515 cur_index = 0;
1516 head = page_buffers(page); 1516 head = page_buffers(page);
1517 bh = head; 1517 bh = head;
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index f41e19b4bb42..2d5489b0a269 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
151 */ 151 */
152 if (up_to_date_bh) { 152 if (up_to_date_bh) {
153 unsigned pgoff = 153 unsigned pgoff =
154 (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); 154 (tail_offset + total_tail - 1) & (PAGE_SIZE - 1);
155 char *kaddr = kmap_atomic(up_to_date_bh->b_page); 155 char *kaddr = kmap_atomic(up_to_date_bh->b_page);
156 memset(kaddr + pgoff, 0, blk_size - total_tail); 156 memset(kaddr + pgoff, 0, blk_size - total_tail);
157 kunmap_atomic(kaddr); 157 kunmap_atomic(kaddr);
@@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
271 * the page was locked and this part of the page was up to date when 271 * the page was locked and this part of the page was up to date when
272 * indirect2direct was called, so we know the bytes are still valid 272 * indirect2direct was called, so we know the bytes are still valid
273 */ 273 */
274 tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); 274 tail = tail + (pos & (PAGE_SIZE - 1));
275 275
276 PATH_LAST_POSITION(path)++; 276 PATH_LAST_POSITION(path)++;
277 277
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 57e0b2310532..28f5f8b11370 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -415,7 +415,7 @@ out:
415static inline void reiserfs_put_page(struct page *page) 415static inline void reiserfs_put_page(struct page *page)
416{ 416{
417 kunmap(page); 417 kunmap(page);
418 page_cache_release(page); 418 put_page(page);
419} 419}
420 420
421static struct page *reiserfs_get_page(struct inode *dir, size_t n) 421static struct page *reiserfs_get_page(struct inode *dir, size_t n)
@@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
427 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this 427 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
428 */ 428 */
429 mapping_set_gfp_mask(mapping, GFP_NOFS); 429 mapping_set_gfp_mask(mapping, GFP_NOFS);
430 page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL); 430 page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
431 if (!IS_ERR(page)) { 431 if (!IS_ERR(page)) {
432 kmap(page); 432 kmap(page);
433 if (PageError(page)) 433 if (PageError(page))
@@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
526 while (buffer_pos < buffer_size || buffer_pos == 0) { 526 while (buffer_pos < buffer_size || buffer_pos == 0) {
527 size_t chunk; 527 size_t chunk;
528 size_t skip = 0; 528 size_t skip = 0;
529 size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); 529 size_t page_offset = (file_pos & (PAGE_SIZE - 1));
530 530
531 if (buffer_size - buffer_pos > PAGE_CACHE_SIZE) 531 if (buffer_size - buffer_pos > PAGE_SIZE)
532 chunk = PAGE_CACHE_SIZE; 532 chunk = PAGE_SIZE;
533 else 533 else
534 chunk = buffer_size - buffer_pos; 534 chunk = buffer_size - buffer_pos;
535 535
@@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
546 struct reiserfs_xattr_header *rxh; 546 struct reiserfs_xattr_header *rxh;
547 547
548 skip = file_pos = sizeof(struct reiserfs_xattr_header); 548 skip = file_pos = sizeof(struct reiserfs_xattr_header);
549 if (chunk + skip > PAGE_CACHE_SIZE) 549 if (chunk + skip > PAGE_SIZE)
550 chunk = PAGE_CACHE_SIZE - skip; 550 chunk = PAGE_SIZE - skip;
551 rxh = (struct reiserfs_xattr_header *)data; 551 rxh = (struct reiserfs_xattr_header *)data;
552 rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC); 552 rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
553 rxh->h_hash = cpu_to_le32(xahash); 553 rxh->h_hash = cpu_to_le32(xahash);
@@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
675 char *data; 675 char *data;
676 size_t skip = 0; 676 size_t skip = 0;
677 677
678 if (isize - file_pos > PAGE_CACHE_SIZE) 678 if (isize - file_pos > PAGE_SIZE)
679 chunk = PAGE_CACHE_SIZE; 679 chunk = PAGE_SIZE;
680 else 680 else
681 chunk = isize - file_pos; 681 chunk = isize - file_pos;
682 682
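The reiserfs_xattr_set_handle() loop above writes an xattr value page by page, with the first page also carrying the on-disk header, so its data chunk is shortened by the header size. A user-space sketch of that chunking, assuming 4 KiB pages and a hypothetical 8-byte header:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL        /* assumption: 4 KiB pages */
#define HDR_SIZE  8UL           /* hypothetical header size */

int main(void)
{
        size_t buffer_size = 10000;     /* hypothetical value length */
        size_t buffer_pos = 0, file_pos = 0;

        while (buffer_pos < buffer_size || buffer_pos == 0) {
                size_t skip = 0;
                size_t chunk = buffer_size - buffer_pos;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;
                if (file_pos == 0) {
                        /* first page: reserve room for the header */
                        skip = file_pos = HDR_SIZE;
                        if (chunk + skip > PAGE_SIZE)
                                chunk = PAGE_SIZE - skip;
                }
                printf("copy %zu bytes at file offset %zu (skip %zu)\n",
                       chunk, file_pos, skip);
                file_pos += chunk;
                buffer_pos += chunk;
        }
        return 0;
}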
diff --git a/fs/seq_file.c b/fs/seq_file.c
index e85664b7c7d9..19f532e7d35e 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -72,9 +72,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
72 72
73 mutex_init(&p->lock); 73 mutex_init(&p->lock);
74 p->op = op; 74 p->op = op;
75#ifdef CONFIG_USER_NS 75
76 p->user_ns = file->f_cred->user_ns; 76 // No refcounting: the lifetime of 'p' is constrained
77#endif 77 // to the lifetime of the file.
78 p->file = file;
78 79
79 /* 80 /*
80 * Wrappers around seq_open(e.g. swaps_open) need to be 81 * Wrappers around seq_open(e.g. swaps_open) need to be
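With the file pointer stored in struct seq_file, state the old #ifdef cached can be derived on demand; the removed lines above already show the opener's namespace living at file->f_cred->user_ns. A sketch of an accessor built on that (seq_file_user_ns() is a hypothetical name; it assumes CONFIG_USER_NS):

#include <linux/seq_file.h>
#include <linux/cred.h>

/* Sketch: derive the opener's user namespace through the stored
 * file instead of caching it at seq_open() time (hypothetical
 * helper; assumes CONFIG_USER_NS). */
static inline struct user_namespace *seq_file_user_ns(struct seq_file *seq)
{
        return seq->file->f_cred->user_ns;
}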
diff --git a/fs/splice.c b/fs/splice.c
index 9947b5c69664..b018eb485019 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -88,7 +88,7 @@ out_unlock:
88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, 88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
89 struct pipe_buffer *buf) 89 struct pipe_buffer *buf)
90{ 90{
91 page_cache_release(buf->page); 91 put_page(buf->page);
92 buf->flags &= ~PIPE_BUF_FLAG_LRU; 92 buf->flags &= ~PIPE_BUF_FLAG_LRU;
93} 93}
94 94
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
268 268
269void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) 269void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
270{ 270{
271 page_cache_release(spd->pages[i]); 271 put_page(spd->pages[i]);
272} 272}
273 273
274/* 274/*
@@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
328 if (splice_grow_spd(pipe, &spd)) 328 if (splice_grow_spd(pipe, &spd))
329 return -ENOMEM; 329 return -ENOMEM;
330 330
331 index = *ppos >> PAGE_CACHE_SHIFT; 331 index = *ppos >> PAGE_SHIFT;
332 loff = *ppos & ~PAGE_CACHE_MASK; 332 loff = *ppos & ~PAGE_MASK;
333 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 333 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
334 nr_pages = min(req_pages, spd.nr_pages_max); 334 nr_pages = min(req_pages, spd.nr_pages_max);
335 335
336 /* 336 /*
@@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
365 error = add_to_page_cache_lru(page, mapping, index, 365 error = add_to_page_cache_lru(page, mapping, index,
366 mapping_gfp_constraint(mapping, GFP_KERNEL)); 366 mapping_gfp_constraint(mapping, GFP_KERNEL));
367 if (unlikely(error)) { 367 if (unlikely(error)) {
368 page_cache_release(page); 368 put_page(page);
369 if (error == -EEXIST) 369 if (error == -EEXIST)
370 continue; 370 continue;
371 break; 371 break;
@@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
385 * Now loop over the map and see if we need to start IO on any 385 * Now loop over the map and see if we need to start IO on any
386 * pages, fill in the partial map, etc. 386 * pages, fill in the partial map, etc.
387 */ 387 */
388 index = *ppos >> PAGE_CACHE_SHIFT; 388 index = *ppos >> PAGE_SHIFT;
389 nr_pages = spd.nr_pages; 389 nr_pages = spd.nr_pages;
390 spd.nr_pages = 0; 390 spd.nr_pages = 0;
391 for (page_nr = 0; page_nr < nr_pages; page_nr++) { 391 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
@@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
397 /* 397 /*
398 * this_len is the max we'll use from this page 398 * this_len is the max we'll use from this page
399 */ 399 */
400 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 400 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
401 page = spd.pages[page_nr]; 401 page = spd.pages[page_nr];
402 402
403 if (PageReadahead(page)) 403 if (PageReadahead(page))
@@ -426,7 +426,7 @@ retry_lookup:
426 error = -ENOMEM; 426 error = -ENOMEM;
427 break; 427 break;
428 } 428 }
429 page_cache_release(spd.pages[page_nr]); 429 put_page(spd.pages[page_nr]);
430 spd.pages[page_nr] = page; 430 spd.pages[page_nr] = page;
431 } 431 }
432 /* 432 /*
@@ -456,7 +456,7 @@ fill_it:
456 * i_size must be checked after PageUptodate. 456 * i_size must be checked after PageUptodate.
457 */ 457 */
458 isize = i_size_read(mapping->host); 458 isize = i_size_read(mapping->host);
459 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 459 end_index = (isize - 1) >> PAGE_SHIFT;
460 if (unlikely(!isize || index > end_index)) 460 if (unlikely(!isize || index > end_index))
461 break; 461 break;
462 462
@@ -470,7 +470,7 @@ fill_it:
470 /* 470 /*
471 * max good bytes in this page 471 * max good bytes in this page
472 */ 472 */
473 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 473 plen = ((isize - 1) & ~PAGE_MASK) + 1;
474 if (plen <= loff) 474 if (plen <= loff)
475 break; 475 break;
476 476
@@ -494,8 +494,8 @@ fill_it:
494 * we got, 'nr_pages' is how many pages are in the map. 494 * we got, 'nr_pages' is how many pages are in the map.
495 */ 495 */
496 while (page_nr < nr_pages) 496 while (page_nr < nr_pages)
497 page_cache_release(spd.pages[page_nr++]); 497 put_page(spd.pages[page_nr++]);
498 in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 498 in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
499 499
500 if (spd.nr_pages) 500 if (spd.nr_pages)
501 error = splice_to_pipe(pipe, &spd); 501 error = splice_to_pipe(pipe, &spd);
@@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
636 goto shrink_ret; 636 goto shrink_ret;
637 } 637 }
638 638
639 offset = *ppos & ~PAGE_CACHE_MASK; 639 offset = *ppos & ~PAGE_MASK;
640 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 640 nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
641 641
642 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { 642 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
643 struct page *page; 643 struct page *page;
@@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
647 if (!page) 647 if (!page)
648 goto err; 648 goto err;
649 649
650 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); 650 this_len = min_t(size_t, len, PAGE_SIZE - offset);
651 vec[i].iov_base = (void __user *) page_address(page); 651 vec[i].iov_base = (void __user *) page_address(page);
652 vec[i].iov_len = this_len; 652 vec[i].iov_len = this_len;
653 spd.pages[i] = page; 653 spd.pages[i] = page;
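__generic_file_splice_read() above sizes its page map with the usual span computation: the first page index, the offset into that page, and the count of pages covering [*ppos, *ppos + len). A user-space sketch with 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t ppos = 5000, len = 10000;      /* hypothetical request */

        uint64_t index = ppos >> PAGE_SHIFT;    /* first page index   */
        uint64_t loff  = ppos & ~PAGE_MASK;     /* offset inside it   */
        uint64_t pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("index=%llu loff=%llu pages=%llu\n",
               (unsigned long long)index, (unsigned long long)loff,
               (unsigned long long)pages);
        /* prints: index=1 loff=904 pages=3 */
        return 0;
}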
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9236d0..2c2618410d51 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
181 in = min(bytes, msblk->devblksize - offset); 181 in = min(bytes, msblk->devblksize - offset);
182 bytes -= in; 182 bytes -= in;
183 while (in) { 183 while (in) {
184 if (pg_offset == PAGE_CACHE_SIZE) { 184 if (pg_offset == PAGE_SIZE) {
185 data = squashfs_next_page(output); 185 data = squashfs_next_page(output);
186 pg_offset = 0; 186 pg_offset = 0;
187 } 187 }
188 avail = min_t(int, in, PAGE_CACHE_SIZE - 188 avail = min_t(int, in, PAGE_SIZE -
189 pg_offset); 189 pg_offset);
190 memcpy(data + pg_offset, bh[k]->b_data + offset, 190 memcpy(data + pg_offset, bh[k]->b_data + offset,
191 avail); 191 avail);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0b2168..23813c078cc9 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
30 * access the metadata and fragment caches. 30 * access the metadata and fragment caches.
31 * 31 *
32 * To avoid out of memory and fragmentation issues with vmalloc the cache 32 * To avoid out of memory and fragmentation issues with vmalloc the cache
33 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. 33 * uses sequences of kmalloced PAGE_SIZE buffers.
34 * 34 *
35 * It should be noted that the cache is not used for file datablocks, these 35 * It should be noted that the cache is not used for file datablocks, these
36 * are decompressed and cached in the page-cache in the normal way. The 36 * are decompressed and cached in the page-cache in the normal way. The
@@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
231/* 231/*
232 * Initialise cache allocating the specified number of entries, each of 232 * Initialise cache allocating the specified number of entries, each of
233 * size block_size. To avoid vmalloc fragmentation issues each entry 233 * size block_size. To avoid vmalloc fragmentation issues each entry
234 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. 234 * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
235 */ 235 */
236struct squashfs_cache *squashfs_cache_init(char *name, int entries, 236struct squashfs_cache *squashfs_cache_init(char *name, int entries,
237 int block_size) 237 int block_size)
@@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
255 cache->unused = entries; 255 cache->unused = entries;
256 cache->entries = entries; 256 cache->entries = entries;
257 cache->block_size = block_size; 257 cache->block_size = block_size;
258 cache->pages = block_size >> PAGE_CACHE_SHIFT; 258 cache->pages = block_size >> PAGE_SHIFT;
259 cache->pages = cache->pages ? cache->pages : 1; 259 cache->pages = cache->pages ? cache->pages : 1;
260 cache->name = name; 260 cache->name = name;
261 cache->num_waiters = 0; 261 cache->num_waiters = 0;
@@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
275 } 275 }
276 276
277 for (j = 0; j < cache->pages; j++) { 277 for (j = 0; j < cache->pages; j++) {
278 entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 278 entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
279 if (entry->data[j] == NULL) { 279 if (entry->data[j] == NULL) {
280 ERROR("Failed to allocate %s buffer\n", name); 280 ERROR("Failed to allocate %s buffer\n", name);
281 goto cleanup; 281 goto cleanup;
@@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
314 return min(length, entry->length - offset); 314 return min(length, entry->length - offset);
315 315
316 while (offset < entry->length) { 316 while (offset < entry->length) {
317 void *buff = entry->data[offset / PAGE_CACHE_SIZE] 317 void *buff = entry->data[offset / PAGE_SIZE]
318 + (offset % PAGE_CACHE_SIZE); 318 + (offset % PAGE_SIZE);
319 int bytes = min_t(int, entry->length - offset, 319 int bytes = min_t(int, entry->length - offset,
320 PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); 320 PAGE_SIZE - (offset % PAGE_SIZE));
321 321
322 if (bytes >= remaining) { 322 if (bytes >= remaining) {
323 memcpy(buffer, buff, remaining); 323 memcpy(buffer, buff, remaining);
@@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
415 */ 415 */
416void *squashfs_read_table(struct super_block *sb, u64 block, int length) 416void *squashfs_read_table(struct super_block *sb, u64 block, int length)
417{ 417{
418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 418 int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
419 int i, res; 419 int i, res;
420 void *table, *buffer, **data; 420 void *table, *buffer, **data;
421 struct squashfs_page_actor *actor; 421 struct squashfs_page_actor *actor;
@@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
436 goto failed2; 436 goto failed2;
437 } 437 }
438 438
439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) 439 for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
440 data[i] = buffer; 440 data[i] = buffer;
441 441
442 res = squashfs_read_data(sb, block, length | 442 res = squashfs_read_data(sb, block, length |
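As the comments above note, a cache entry is a sequence of kmalloced PAGE_SIZE buffers rather than one contiguous allocation, so squashfs_copy_data() addresses byte 'offset' as buffer offset/PAGE_SIZE, remainder offset%PAGE_SIZE. A user-space sketch of that addressing, assuming 4 KiB pages:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL        /* assumption: 4 KiB pages */

int main(void)
{
        size_t npages = 3;              /* hypothetical 3-page block */
        char *data[3];
        size_t offset = 5000;           /* byte offset within the block */

        for (size_t i = 0; i < npages; i++)
                data[i] = malloc(PAGE_SIZE);    /* one buffer per page */

        /* byte 'offset' lives offset%PAGE_SIZE bytes into buffer
         * offset/PAGE_SIZE -- the addressing squashfs_copy_data() uses */
        char *p = data[offset / PAGE_SIZE] + (offset % PAGE_SIZE);

        printf("buffer %zu, offset-in-buffer %zu\n",
               offset / PAGE_SIZE, offset % PAGE_SIZE);
        /* prints: buffer 1, offset-in-buffer 904 */

        (void)p;
        for (size_t i = 0; i < npages; i++)
                free(data[i]);
        return 0;
}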
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e9034bf6e5ae..d2bc13636f79 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags)
102 * Read decompressor specific options from file system if present 102 * Read decompressor specific options from file system if present
103 */ 103 */
104 if (SQUASHFS_COMP_OPTS(flags)) { 104 if (SQUASHFS_COMP_OPTS(flags)) {
105 buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 105 buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
106 if (buffer == NULL) { 106 if (buffer == NULL) {
107 comp_opts = ERR_PTR(-ENOMEM); 107 comp_opts = ERR_PTR(-ENOMEM);
108 goto out; 108 goto out;
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689062ba..13d80947bf9e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n,
175{ 175{
176 int err, i; 176 int err, i;
177 long long block = 0; 177 long long block = 0;
178 __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 178 __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
179 179
180 if (blist == NULL) { 180 if (blist == NULL) {
181 ERROR("read_indexes: Failed to allocate block_list\n"); 181 ERROR("read_indexes: Failed to allocate block_list\n");
@@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n,
183 } 183 }
184 184
185 while (n) { 185 while (n) {
186 int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); 186 int blocks = min_t(int, n, PAGE_SIZE >> 2);
187 187
188 err = squashfs_read_metadata(sb, blist, start_block, 188 err = squashfs_read_metadata(sb, blist, start_block,
189 offset, blocks << 2); 189 offset, blocks << 2);
@@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
377 struct inode *inode = page->mapping->host; 377 struct inode *inode = page->mapping->host;
378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
379 void *pageaddr; 379 void *pageaddr;
380 int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 380 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
381 int start_index = page->index & ~mask, end_index = start_index | mask; 381 int start_index = page->index & ~mask, end_index = start_index | mask;
382 382
383 /* 383 /*
384 * Loop copying datablock into pages. As the datablock likely covers 384 * Loop copying datablock into pages. As the datablock likely covers
385 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly 385 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
386 * grab the pages from the page cache, except for the page that we've 386 * grab the pages from the page cache, except for the page that we've
387 * been called to fill. 387 * been called to fill.
388 */ 388 */
389 for (i = start_index; i <= end_index && bytes > 0; i++, 389 for (i = start_index; i <= end_index && bytes > 0; i++,
390 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 390 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
391 struct page *push_page; 391 struct page *push_page;
392 int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; 392 int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
393 393
394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); 394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
395 395
@@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
404 404
405 pageaddr = kmap_atomic(push_page); 405 pageaddr = kmap_atomic(push_page);
406 squashfs_copy_data(pageaddr, buffer, offset, avail); 406 squashfs_copy_data(pageaddr, buffer, offset, avail);
407 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 407 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
408 kunmap_atomic(pageaddr); 408 kunmap_atomic(pageaddr);
409 flush_dcache_page(push_page); 409 flush_dcache_page(push_page);
410 SetPageUptodate(push_page); 410 SetPageUptodate(push_page);
411skip_page: 411skip_page:
412 unlock_page(push_page); 412 unlock_page(push_page);
413 if (i != page->index) 413 if (i != page->index)
414 page_cache_release(push_page); 414 put_page(push_page);
415 } 415 }
416} 416}
417 417
@@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
454{ 454{
455 struct inode *inode = page->mapping->host; 455 struct inode *inode = page->mapping->host;
456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
457 int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); 457 int index = page->index >> (msblk->block_log - PAGE_SHIFT);
458 int file_end = i_size_read(inode) >> msblk->block_log; 458 int file_end = i_size_read(inode) >> msblk->block_log;
459 int res; 459 int res;
460 void *pageaddr; 460 void *pageaddr;
@@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page)
462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", 462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
463 page->index, squashfs_i(inode)->start); 463 page->index, squashfs_i(inode)->start);
464 464
465 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 465 if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
466 PAGE_CACHE_SHIFT)) 466 PAGE_SHIFT))
467 goto out; 467 goto out;
468 468
469 if (index < file_end || squashfs_i(inode)->fragment_block == 469 if (index < file_end || squashfs_i(inode)->fragment_block ==
@@ -487,7 +487,7 @@ error_out:
487 SetPageError(page); 487 SetPageError(page);
488out: 488out:
489 pageaddr = kmap_atomic(page); 489 pageaddr = kmap_atomic(page);
490 memset(pageaddr, 0, PAGE_CACHE_SIZE); 490 memset(pageaddr, 0, PAGE_SIZE);
491 kunmap_atomic(pageaddr); 491 kunmap_atomic(pageaddr);
492 flush_dcache_page(page); 492 flush_dcache_page(page);
493 if (!PageError(page)) 493 if (!PageError(page))
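squashfs_copy_cache() above groups page indexes by datablock with a mask of (pages per block - 1), derived from block_log and PAGE_SHIFT. A sketch with the default 128 KiB block and 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12           /* assumption: 4 KiB pages        */
#define BLOCK_LOG  17           /* default 128 KiB squashfs block */

int main(void)
{
        int mask = (1 << (BLOCK_LOG - PAGE_SHIFT)) - 1; /* 31 */
        long page_index = 100;                          /* hypothetical */

        long start = page_index & ~mask;        /* first page of block */
        long end   = start | mask;              /* last page of block  */
        long block = page_index >> (BLOCK_LOG - PAGE_SHIFT);

        printf("mask=%d block=%ld pages %ld..%ld\n",
               mask, block, start, end);
        /* prints: mask=31 block=3 pages 96..127 */
        return 0;
}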
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 43e7a7eddac0..cb485d8e0e91 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
30 struct inode *inode = target_page->mapping->host; 30 struct inode *inode = target_page->mapping->host;
31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
32 32
33 int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 33 int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
34 int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 34 int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
35 int start_index = target_page->index & ~mask; 35 int start_index = target_page->index & ~mask;
36 int end_index = start_index | mask; 36 int end_index = start_index | mask;
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM; 37 int i, n, pages, missing_pages, bytes, res = -ENOMEM;
@@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
68 68
69 if (PageUptodate(page[i])) { 69 if (PageUptodate(page[i])) {
70 unlock_page(page[i]); 70 unlock_page(page[i]);
71 page_cache_release(page[i]); 71 put_page(page[i]);
72 page[i] = NULL; 72 page[i] = NULL;
73 missing_pages++; 73 missing_pages++;
74 } 74 }
@@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
96 goto mark_errored; 96 goto mark_errored;
97 97
98 /* Last page may have trailing bytes not filled */ 98 /* Last page may have trailing bytes not filled */
99 bytes = res % PAGE_CACHE_SIZE; 99 bytes = res % PAGE_SIZE;
100 if (bytes) { 100 if (bytes) {
101 pageaddr = kmap_atomic(page[pages - 1]); 101 pageaddr = kmap_atomic(page[pages - 1]);
102 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); 102 memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
103 kunmap_atomic(pageaddr); 103 kunmap_atomic(pageaddr);
104 } 104 }
105 105
@@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
109 SetPageUptodate(page[i]); 109 SetPageUptodate(page[i]);
110 unlock_page(page[i]); 110 unlock_page(page[i]);
111 if (page[i] != target_page) 111 if (page[i] != target_page)
112 page_cache_release(page[i]); 112 put_page(page[i]);
113 } 113 }
114 114
115 kfree(actor); 115 kfree(actor);
@@ -127,7 +127,7 @@ mark_errored:
127 flush_dcache_page(page[i]); 127 flush_dcache_page(page[i]);
128 SetPageError(page[i]); 128 SetPageError(page[i]);
129 unlock_page(page[i]); 129 unlock_page(page[i]);
130 page_cache_release(page[i]); 130 put_page(page[i]);
131 } 131 }
132 132
133out: 133out:
@@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
153 } 153 }
154 154
155 for (n = 0; n < pages && bytes > 0; n++, 155 for (n = 0; n < pages && bytes > 0; n++,
156 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 156 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
157 int avail = min_t(int, bytes, PAGE_CACHE_SIZE); 157 int avail = min_t(int, bytes, PAGE_SIZE);
158 158
159 if (page[n] == NULL) 159 if (page[n] == NULL)
160 continue; 160 continue;
161 161
162 pageaddr = kmap_atomic(page[n]); 162 pageaddr = kmap_atomic(page[n]);
163 squashfs_copy_data(pageaddr, buffer, offset, avail); 163 squashfs_copy_data(pageaddr, buffer, offset, avail);
164 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 164 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
165 kunmap_atomic(pageaddr); 165 kunmap_atomic(pageaddr);
166 flush_dcache_page(page[n]); 166 flush_dcache_page(page[n]);
167 SetPageUptodate(page[n]); 167 SetPageUptodate(page[n]);
168 unlock_page(page[n]); 168 unlock_page(page[n]);
169 if (page[n] != target_page) 169 if (page[n] != target_page)
170 page_cache_release(page[n]); 170 put_page(page[n]);
171 } 171 }
172 172
173out: 173out:
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index c31e2bc9c081..ff4468bd18b0 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
117 data = squashfs_first_page(output); 117 data = squashfs_first_page(output);
118 buff = stream->output; 118 buff = stream->output;
119 while (data) { 119 while (data) {
120 if (bytes <= PAGE_CACHE_SIZE) { 120 if (bytes <= PAGE_SIZE) {
121 memcpy(data, buff, bytes); 121 memcpy(data, buff, bytes);
122 break; 122 break;
123 } 123 }
124 memcpy(data, buff, PAGE_CACHE_SIZE); 124 memcpy(data, buff, PAGE_SIZE);
125 buff += PAGE_CACHE_SIZE; 125 buff += PAGE_SIZE;
126 bytes -= PAGE_CACHE_SIZE; 126 bytes -= PAGE_SIZE;
127 data = squashfs_next_page(output); 127 data = squashfs_next_page(output);
128 } 128 }
129 squashfs_finish_page(output); 129 squashfs_finish_page(output);
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 244b9fbfff7b..934c17e96590 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
102 data = squashfs_first_page(output); 102 data = squashfs_first_page(output);
103 buff = stream->output; 103 buff = stream->output;
104 while (data) { 104 while (data) {
105 if (bytes <= PAGE_CACHE_SIZE) { 105 if (bytes <= PAGE_SIZE) {
106 memcpy(data, buff, bytes); 106 memcpy(data, buff, bytes);
107 break; 107 break;
108 } else { 108 } else {
109 memcpy(data, buff, PAGE_CACHE_SIZE); 109 memcpy(data, buff, PAGE_SIZE);
110 buff += PAGE_CACHE_SIZE; 110 buff += PAGE_SIZE;
111 bytes -= PAGE_CACHE_SIZE; 111 bytes -= PAGE_SIZE;
112 data = squashfs_next_page(output); 112 data = squashfs_next_page(output);
113 } 113 }
114 } 114 }
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 5a1c11f56441..9b7b1b6a7892 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
48 if (actor == NULL) 48 if (actor == NULL)
49 return NULL; 49 return NULL;
50 50
51 actor->length = length ? : pages * PAGE_CACHE_SIZE; 51 actor->length = length ? : pages * PAGE_SIZE;
52 actor->buffer = buffer; 52 actor->buffer = buffer;
53 actor->pages = pages; 53 actor->pages = pages;
54 actor->next_page = 0; 54 actor->next_page = 0;
@@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
88 if (actor == NULL) 88 if (actor == NULL)
89 return NULL; 89 return NULL;
90 90
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; 91 actor->length = length ? : pages * PAGE_SIZE;
92 actor->page = page; 92 actor->page = page;
93 actor->pages = pages; 93 actor->pages = pages;
94 actor->next_page = 0; 94 actor->next_page = 0;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 26dd82008b82..98537eab27e2 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
24 if (actor == NULL) 24 if (actor == NULL)
25 return NULL; 25 return NULL;
26 26
27 actor->length = length ? : pages * PAGE_CACHE_SIZE; 27 actor->length = length ? : pages * PAGE_SIZE;
28 actor->page = page; 28 actor->page = page;
29 actor->pages = pages; 29 actor->pages = pages;
30 actor->next_page = 0; 30 actor->next_page = 0;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5e79bfa4f260..cf01e15a7b16 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
152 * Check the system page size is not larger than the filesystem 152 * Check the system page size is not larger than the filesystem
153 * block size (by default 128K). This is currently not supported. 153 * block size (by default 128K). This is currently not supported.
154 */ 154 */
155 if (PAGE_CACHE_SIZE > msblk->block_size) { 155 if (PAGE_SIZE > msblk->block_size) {
156 ERROR("Page size > filesystem block size (%d). This is " 156 ERROR("Page size > filesystem block size (%d). This is "
157 "currently not supported!\n", msblk->block_size); 157 "currently not supported!\n", msblk->block_size);
158 goto failed_mount; 158 goto failed_mount;
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index dbcc2f54bad4..d688ef42a6a1 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
48 struct inode *inode = page->mapping->host; 48 struct inode *inode = page->mapping->host;
49 struct super_block *sb = inode->i_sb; 49 struct super_block *sb = inode->i_sb;
50 struct squashfs_sb_info *msblk = sb->s_fs_info; 50 struct squashfs_sb_info *msblk = sb->s_fs_info;
51 int index = page->index << PAGE_CACHE_SHIFT; 51 int index = page->index << PAGE_SHIFT;
52 u64 block = squashfs_i(inode)->start; 52 u64 block = squashfs_i(inode)->start;
53 int offset = squashfs_i(inode)->offset; 53 int offset = squashfs_i(inode)->offset;
54 int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE); 54 int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
55 int bytes, copied; 55 int bytes, copied;
56 void *pageaddr; 56 void *pageaddr;
57 struct squashfs_cache_entry *entry; 57 struct squashfs_cache_entry *entry;
@@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
94 copied = squashfs_copy_data(pageaddr + bytes, entry, offset, 94 copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
95 length - bytes); 95 length - bytes);
96 if (copied == length - bytes) 96 if (copied == length - bytes)
97 memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); 97 memset(pageaddr + length, 0, PAGE_SIZE - length);
98 else 98 else
99 block = entry->next_index; 99 block = entry->next_index;
100 kunmap_atomic(pageaddr); 100 kunmap_atomic(pageaddr);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c609624e4b8a..6bfaef73d065 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
141 stream->buf.in_pos = 0; 141 stream->buf.in_pos = 0;
142 stream->buf.in_size = 0; 142 stream->buf.in_size = 0;
143 stream->buf.out_pos = 0; 143 stream->buf.out_pos = 0;
144 stream->buf.out_size = PAGE_CACHE_SIZE; 144 stream->buf.out_size = PAGE_SIZE;
145 stream->buf.out = squashfs_first_page(output); 145 stream->buf.out = squashfs_first_page(output);
146 146
147 do { 147 do {
@@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
158 stream->buf.out = squashfs_next_page(output); 158 stream->buf.out = squashfs_next_page(output);
159 if (stream->buf.out != NULL) { 159 if (stream->buf.out != NULL) {
160 stream->buf.out_pos = 0; 160 stream->buf.out_pos = 0;
161 total += PAGE_CACHE_SIZE; 161 total += PAGE_SIZE;
162 } 162 }
163 } 163 }
164 164
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 8727caba6882..2ec24d128bce 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
69 int zlib_err, zlib_init = 0, k = 0; 69 int zlib_err, zlib_init = 0, k = 0;
70 z_stream *stream = strm; 70 z_stream *stream = strm;
71 71
72 stream->avail_out = PAGE_CACHE_SIZE; 72 stream->avail_out = PAGE_SIZE;
73 stream->next_out = squashfs_first_page(output); 73 stream->next_out = squashfs_first_page(output);
74 stream->avail_in = 0; 74 stream->avail_in = 0;
75 75
@@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
85 if (stream->avail_out == 0) { 85 if (stream->avail_out == 0) {
86 stream->next_out = squashfs_next_page(output); 86 stream->next_out = squashfs_next_page(output);
87 if (stream->next_out != NULL) 87 if (stream->next_out != NULL)
88 stream->avail_out = PAGE_CACHE_SIZE; 88 stream->avail_out = PAGE_SIZE;
89 } 89 }
90 90
91 if (!zlib_init) { 91 if (!zlib_init) {
diff --git a/fs/sync.c b/fs/sync.c
index dd5d1711c7ac..2a54c1f22035 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
302 goto out; 302 goto out;
303 303
304 if (sizeof(pgoff_t) == 4) { 304 if (sizeof(pgoff_t) == 4) {
305 if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { 305 if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
306 /* 306 /*
307 * The range starts outside a 32 bit machine's 307 * The range starts outside a 32 bit machine's
308 * pagecache addressing capabilities. Let it "succeed" 308 * pagecache addressing capabilities. Let it "succeed"
@@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
310 ret = 0; 310 ret = 0;
311 goto out; 311 goto out;
312 } 312 }
313 if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { 313 if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
314 /* 314 /*
315 * Out to EOF 315 * Out to EOF
316 */ 316 */
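The sync_file_range() checks above cap what a 32-bit pgoff_t can address: page indexes stop at 2^32, so the pagecache reaches only byte offsets below 2^32 pages. A sketch of the limit, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
        /* first byte offset a 32-bit pgoff_t cannot reach */
        unsigned long long limit = 0x100000000ULL << PAGE_SHIFT;

        printf("limit = %llu bytes (%llu TiB)\n", limit, limit >> 40);
        /* prints: limit = 17592186044416 bytes (16 TiB) */
        return 0;
}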
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 63c1bcb224ee..c0f0a3e643eb 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = {
30static inline void dir_put_page(struct page *page) 30static inline void dir_put_page(struct page *page)
31{ 31{
32 kunmap(page); 32 kunmap(page);
33 page_cache_release(page); 33 put_page(page);
34} 34}
35 35
36static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) 36static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
@@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
73 if (pos >= inode->i_size) 73 if (pos >= inode->i_size)
74 return 0; 74 return 0;
75 75
76 offset = pos & ~PAGE_CACHE_MASK; 76 offset = pos & ~PAGE_MASK;
77 n = pos >> PAGE_CACHE_SHIFT; 77 n = pos >> PAGE_SHIFT;
78 78
79 for ( ; n < npages; n++, offset = 0) { 79 for ( ; n < npages; n++, offset = 0) {
80 char *kaddr, *limit; 80 char *kaddr, *limit;
@@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
85 continue; 85 continue;
86 kaddr = (char *)page_address(page); 86 kaddr = (char *)page_address(page);
87 de = (struct sysv_dir_entry *)(kaddr+offset); 87 de = (struct sysv_dir_entry *)(kaddr+offset);
88 limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; 88 limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
89 for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) { 89 for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
90 char *name = de->name; 90 char *name = de->name;
91 91
@@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
146 if (!IS_ERR(page)) { 146 if (!IS_ERR(page)) {
147 kaddr = (char*)page_address(page); 147 kaddr = (char*)page_address(page);
148 de = (struct sysv_dir_entry *) kaddr; 148 de = (struct sysv_dir_entry *) kaddr;
149 kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; 149 kaddr += PAGE_SIZE - SYSV_DIRSIZE;
150 for ( ; (char *) de <= kaddr ; de++) { 150 for ( ; (char *) de <= kaddr ; de++) {
151 if (!de->inode) 151 if (!de->inode)
152 continue; 152 continue;
@@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
190 goto out; 190 goto out;
191 kaddr = (char*)page_address(page); 191 kaddr = (char*)page_address(page);
192 de = (struct sysv_dir_entry *)kaddr; 192 de = (struct sysv_dir_entry *)kaddr;
193 kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; 193 kaddr += PAGE_SIZE - SYSV_DIRSIZE;
194 while ((char *)de <= kaddr) { 194 while ((char *)de <= kaddr) {
195 if (!de->inode) 195 if (!de->inode)
196 goto got_it; 196 goto got_it;
@@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
261 kmap(page); 261 kmap(page);
262 262
263 base = (char*)page_address(page); 263 base = (char*)page_address(page);
264 memset(base, 0, PAGE_CACHE_SIZE); 264 memset(base, 0, PAGE_SIZE);
265 265
266 de = (struct sysv_dir_entry *) base; 266 de = (struct sysv_dir_entry *) base;
267 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 267 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
@@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
273 kunmap(page); 273 kunmap(page);
274 err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); 274 err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
275fail: 275fail:
276 page_cache_release(page); 276 put_page(page);
277 return err; 277 return err;
278} 278}
279 279
@@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode)
296 296
297 kaddr = (char *)page_address(page); 297 kaddr = (char *)page_address(page);
298 de = (struct sysv_dir_entry *)kaddr; 298 de = (struct sysv_dir_entry *)kaddr;
299 kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE; 299 kaddr += PAGE_SIZE-SYSV_DIRSIZE;
300 300
301 for ( ;(char *)de <= kaddr; de++) { 301 for ( ;(char *)de <= kaddr; de++) {
302 if (!de->inode) 302 if (!de->inode)
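The sysv directory walks above iterate fixed-size entries up to PAGE_SIZE - SYSV_DIRSIZE, the last offset at which a whole entry still fits in the page. A sketch, assuming 4 KiB pages and the classic 16-byte sysv entry (the 16 is an assumption here):

#include <stdio.h>

#define PAGE_SIZE    4096UL     /* assumption: 4 KiB pages        */
#define SYSV_DIRSIZE 16UL       /* assumed 16-byte directory entry */

int main(void)
{
        /* last slot where a whole entry still fits in the page */
        unsigned long limit = PAGE_SIZE - SYSV_DIRSIZE;
        unsigned long slots = PAGE_SIZE / SYSV_DIRSIZE;

        printf("limit offset=%lu, %lu entries per page\n", limit, slots);
        /* prints: limit offset=4080, 256 entries per page */
        return 0;
}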
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e83ed0b4bf..90b60c03b588 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
264out_dir: 264out_dir:
265 if (dir_de) { 265 if (dir_de) {
266 kunmap(dir_page); 266 kunmap(dir_page);
267 page_cache_release(dir_page); 267 put_page(dir_page);
268 } 268 }
269out_old: 269out_old:
270 kunmap(old_page); 270 kunmap(old_page);
271 page_cache_release(old_page); 271 put_page(old_page);
272out: 272out:
273 return err; 273 return err;
274} 274}
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 065c88f8e4b8..446753d8ac34 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -121,7 +121,7 @@ static int do_readpage(struct page *page)
121 if (block >= beyond) { 121 if (block >= beyond) {
122 /* Reading beyond inode */ 122 /* Reading beyond inode */
123 SetPageChecked(page); 123 SetPageChecked(page);
124 memset(addr, 0, PAGE_CACHE_SIZE); 124 memset(addr, 0, PAGE_SIZE);
125 goto out; 125 goto out;
126 } 126 }
127 127
@@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping,
223{ 223{
224 struct inode *inode = mapping->host; 224 struct inode *inode = mapping->host;
225 struct ubifs_info *c = inode->i_sb->s_fs_info; 225 struct ubifs_info *c = inode->i_sb->s_fs_info;
226 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 226 pgoff_t index = pos >> PAGE_SHIFT;
227 struct ubifs_budget_req req = { .new_page = 1 }; 227 struct ubifs_budget_req req = { .new_page = 1 };
228 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); 228 int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
229 struct page *page; 229 struct page *page;
@@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping,
254 } 254 }
255 255
256 if (!PageUptodate(page)) { 256 if (!PageUptodate(page)) {
257 if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) 257 if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
258 SetPageChecked(page); 258 SetPageChecked(page);
259 else { 259 else {
260 err = do_readpage(page); 260 err = do_readpage(page);
261 if (err) { 261 if (err) {
262 unlock_page(page); 262 unlock_page(page);
263 page_cache_release(page); 263 put_page(page);
264 ubifs_release_budget(c, &req); 264 ubifs_release_budget(c, &req);
265 return err; 265 return err;
266 } 266 }
@@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
428 struct inode *inode = mapping->host; 428 struct inode *inode = mapping->host;
429 struct ubifs_info *c = inode->i_sb->s_fs_info; 429 struct ubifs_info *c = inode->i_sb->s_fs_info;
430 struct ubifs_inode *ui = ubifs_inode(inode); 430 struct ubifs_inode *ui = ubifs_inode(inode);
431 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 431 pgoff_t index = pos >> PAGE_SHIFT;
432 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); 432 int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
433 int skipped_read = 0; 433 int skipped_read = 0;
434 struct page *page; 434 struct page *page;
@@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
446 446
447 if (!PageUptodate(page)) { 447 if (!PageUptodate(page)) {
448 /* The page is not loaded from the flash */ 448 /* The page is not loaded from the flash */
449 if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { 449 if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
450 /* 450 /*
451 * We change whole page so no need to load it. But we 451 * We change whole page so no need to load it. But we
452 * do not know whether this page exists on the media or 452 * do not know whether this page exists on the media or
@@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
462 err = do_readpage(page); 462 err = do_readpage(page);
463 if (err) { 463 if (err) {
464 unlock_page(page); 464 unlock_page(page);
465 page_cache_release(page); 465 put_page(page);
466 return err; 466 return err;
467 } 467 }
468 } 468 }
@@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
494 mutex_unlock(&ui->ui_mutex); 494 mutex_unlock(&ui->ui_mutex);
495 } 495 }
496 unlock_page(page); 496 unlock_page(page);
497 page_cache_release(page); 497 put_page(page);
498 498
499 return write_begin_slow(mapping, pos, len, pagep, flags); 499 return write_begin_slow(mapping, pos, len, pagep, flags);
500 } 500 }
@@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
549 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld", 549 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
550 inode->i_ino, pos, page->index, len, copied, inode->i_size); 550 inode->i_ino, pos, page->index, len, copied, inode->i_size);
551 551
552 if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) { 552 if (unlikely(copied < len && len == PAGE_SIZE)) {
553 /* 553 /*
554 * VFS copied less data to the page than it intended and 554 * VFS copied less data to the page than it intended and
555 * declared in its '->write_begin()' call via the @len 555 * declared in its '->write_begin()' call via the @len
556 * argument. If the page was not up-to-date, and @len was 556 * argument. If the page was not up-to-date, and @len was
557 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did 557 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
	 * not load it from the media (for optimization reasons). This
	 * means that part of the page contains garbage. So read the
	 * page now.
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return copied;
 }
 
@@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 	addr = zaddr = kmap(page);
 
-	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size - 1) >> PAGE_SHIFT;
 	if (!i_size || page->index > end_index) {
 		hole = 1;
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		memset(addr, 0, PAGE_SIZE);
 		goto out_hole;
 	}
 
@@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 	}
 
 	if (end_index == page->index) {
-		int len = i_size & (PAGE_CACHE_SIZE - 1);
+		int len = i_size & (PAGE_SIZE - 1);
 
 		if (len && len < read)
 			memset(zaddr + len, 0, read - len);
@@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 	isize = i_size_read(inode);
 	if (isize == 0)
 		goto out_free;
-	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+	end_index = ((isize - 1) >> PAGE_SHIFT);
 
 	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
 		pgoff_t page_offset = offset + page_idx;
@@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 		if (!PageUptodate(page))
 			err = populate_page(c, page, bu, &n);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (err)
 			break;
 	}
@@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len)
 #ifdef UBIFS_DEBUG
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	spin_lock(&ui->ui_lock);
-	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
+	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
 	spin_unlock(&ui->ui_lock);
 #endif
 
@@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode = page->mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	loff_t i_size = i_size_read(inode), synced_i_size;
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
-	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
+	int err, len = i_size & (PAGE_SIZE - 1);
 	void *kaddr;
 
 	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
@@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 
 	/* Is the page fully inside @i_size? */
 	if (page->index < end_index) {
-		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
+		if (page->index >= synced_i_size >> PAGE_SHIFT) {
 			err = inode->i_sb->s_op->write_inode(inode, NULL);
 			if (err)
 				goto out_unlock;
@@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 		 * with this.
 		 */
 	}
-	return do_writepage(page, PAGE_CACHE_SIZE);
+	return do_writepage(page, PAGE_SIZE);
 }
 
 	/*
@@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	 * writes to that region are not written out to the file."
	 */
 	kaddr = kmap_atomic(page);
-	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
+	memset(kaddr + len, 0, PAGE_SIZE - len);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
 
@@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 	truncate_setsize(inode, new_size);
 
 	if (offset) {
-		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
+		pgoff_t index = new_size >> PAGE_SHIFT;
 		struct page *page;
 
 		page = find_lock_page(inode->i_mapping, index);
@@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 				clear_page_dirty_for_io(page);
 				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
 					offset = new_size &
-						 (PAGE_CACHE_SIZE - 1);
+						 (PAGE_SIZE - 1);
 				err = do_writepage(page, offset);
-				page_cache_release(page);
+				put_page(page);
 				if (err)
 					goto out_budg;
 				/*
@@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 				 * having to read it.
 				 */
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 			}
 		}
 	}
@@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 	ubifs_assert(PagePrivate(page));
-	if (offset || length < PAGE_CACHE_SIZE)
+	if (offset || length < PAGE_SIZE)
 		/* Partial page remains dirty */
 		return;
 
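
Context for hunks like the ones above: the PAGE_CACHE_* names and page_cache_get/release() were straight aliases of the PAGE_* constants and get_page()/put_page(), so every replacement in this series is a spelling change with no behavioral effect. A sketch of the old aliases, paraphrased from include/linux/pagemap.h as it stood before their removal:

	/* include/linux/pagemap.h (pre-removal, paraphrased) */
	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

	#define page_cache_get(page)		get_page(page)
	#define page_cache_release(page)	put_page(page)

The aliases dated from a time when the page cache was expected to grow units larger than a hardware page; that never happened, and the duplicate names only invited confusion about which spelling to use where.
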
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index a233ba913be4..e98c24ee25a1 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2237,12 +2237,12 @@ static int __init ubifs_init(void)
 	BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
 
 	/*
-	 * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+	 * We require that PAGE_SIZE is greater-than-or-equal-to
 	 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
 	 */
-	if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
+	if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
 		pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
-		       current->pid, (unsigned int)PAGE_CACHE_SIZE);
+		       current->pid, (unsigned int)PAGE_SIZE);
 		return -EINVAL;
 	}
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c2a57e193a81..4cd7e569cd00 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -46,8 +46,8 @@
 #define UBIFS_SUPER_MAGIC 0x24051905
 
 /* Number of UBIFS blocks per VFS page */
-#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE)
-#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT)
+#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
+#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)
 
 /* "File system end of life" sequence number watermark */
 #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1af98963d860..877ba1c9b461 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page)
 
 	kaddr = kmap(page);
 	memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
-	memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+	memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
 	flush_dcache_page(page);
 	SetPageUptodate(page);
 	kunmap(page);
@@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file,
 {
 	struct page *page;
 
-	if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
 		return -EIO;
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
-	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+	if (!PageUptodate(page) && len != PAGE_SIZE)
 		__udf_adinicb_readpage(page);
 	return 0;
 }
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 166d3ed32c39..2dc461eeb415 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode)
 	if (!PageUptodate(page)) {
 		kaddr = kmap(page);
 		memset(kaddr + iinfo->i_lenAlloc, 0x00,
-		       PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
+		       PAGE_SIZE - iinfo->i_lenAlloc);
 		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
 		       iinfo->i_lenAlloc);
 		flush_dcache_page(page);
@@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode)
 		inode->i_data.a_ops = &udf_adinicb_aops;
 		up_write(&iinfo->i_data_sem);
 	}
-	page_cache_release(page);
+	put_page(page);
 	mark_inode_dirty(inode);
 
 	return err;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae601c24..0447b949c7f5 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 			       sector_t newb, struct page *locked_page)
 {
 	const unsigned blks_per_page =
-		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		1 << (PAGE_SHIFT - inode->i_blkbits);
 	const unsigned mask = blks_per_page - 1;
 	struct address_space * const mapping = inode->i_mapping;
 	pgoff_t index, cur_index, last_index;
@@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 
 	cur_index = locked_page->index;
 	end = count + beg;
-	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
 	for (i = beg; i < end; i = (i | mask) + 1) {
-		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		index = i >> (PAGE_SHIFT - inode->i_blkbits);
 
 		if (likely(cur_index != index)) {
 			page = ufs_get_locked_page(mapping, index);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 74f2e80288bf..0b1457292734 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
 static inline void ufs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page)
 	struct super_block *sb = dir->i_sb;
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
 	struct ufs_dir_entry *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & chunk_mask)
 			goto Ebadsize;
 		if (!limit)
@@ -170,7 +170,7 @@ Einumber:
 bad_entry:
 	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
 		   "offset=%lu, rec_len=%d, name_len=%d",
-		   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		   rec_len, ufs_get_de_namlen(sb, p));
 	goto fail;
 Eend:
@@ -178,7 +178,7 @@ Eend:
 	ufs_error(sb, __func__,
 		  "entry in directory #%lu spans the page boundary"
 		  "offset=%lu",
-		  dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
+		  dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
 fail:
 	SetPageChecked(page);
 	SetPageError(page);
@@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
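
For reference, ufs_last_byte() above answers "how many directory bytes live in page page_nr". A worked check with assumed values (4 KiB pages, an 18000-byte directory):

	/* PAGE_SHIFT = 12, i_size = 18000, both assumed for illustration */
	/* pages 0..3: 18000 - page_nr*4096 > 4096, so clamped to 4096 (full) */
	/* page 4:     18000 - 4*4096 = 18000 - 16384 = 1616 valid bytes */

Callers such as ufs_add_link() in the next hunk use this to stop scanning at i_size instead of walking into stale bytes at the end of the last page.
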
@@ -341,7 +341,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + ufs_last_byte(dir, n);
 		de = (struct ufs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
 	int need_revalidate = file->f_version != inode->i_version;
@@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
 			ufs_error(sb, __func__,
 				  "bad page in #%lu",
 				  inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return -EIO;
 		}
 		kaddr = page_address(page);
 		if (unlikely(need_revalidate)) {
 			if (offset) {
 				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 
 	kmap(page);
 	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
+	memset(base, 0, PAGE_SIZE);
 
 	de = (struct ufs_dir_entry *) base;
 
@@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 
 	err = ufs_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index d897e169ab9c..9f49431e798d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
 		lastfrag--;
 
 	lastpage = ufs_get_locked_page(mapping, lastfrag >>
-				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
+				       (PAGE_SHIFT - inode->i_blkbits));
 	if (IS_ERR(lastpage)) {
 		err = -EIO;
 		goto out;
 	}
 
-	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
 	bh = page_buffers(lastpage);
 	for (i = 0; i < end; ++i)
 		bh = bh->b_this_page;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index acf4a3b61b81..a1559f762805 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
 		else {
 			kunmap(dir_page);
-			page_cache_release(dir_page);
+			put_page(dir_page);
 		}
 		inode_dec_link_count(old_dir);
 	}
@@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index b6c2f94e041e..a409e3e7827a 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 	if (unlikely(page->mapping == NULL)) {
 		/* Truncate got there first */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		goto out;
 	}
 
 	if (!PageUptodate(page) || PageError(page)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		printk(KERN_ERR "ufs_change_blocknr: "
 		       "can not read page: ino %lu, index: %lu\n",
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 954175928240..b7fbf53dbc81 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping,
 static inline void ufs_put_locked_page(struct page *page)
 {
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 041b6948aecc..ce41d7fe753c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3742,11 +3742,11 @@ xfs_bmap_btalloc(
 		args.prod = align;
 		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
-	} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
+	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
 		args.prod = 1;
 		args.mod = 0;
 	} else {
-		args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
+		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
 		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
 	}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d445a64b979e..e49b2406d15d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -704,7 +704,7 @@ next_buffer:
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
 	return;
 }
 
@@ -925,9 +925,9 @@ xfs_do_writepage(
 	 * ---------------------------------^------------------|
 	 */
 	offset = i_size_read(inode);
-	end_index = offset >> PAGE_CACHE_SHIFT;
+	end_index = offset >> PAGE_SHIFT;
 	if (page->index < end_index)
-		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
+		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
 	else {
 		/*
 		 * Check whether the page to write out is beyond or straddles
@@ -940,7 +940,7 @@ xfs_do_writepage(
 		 * |				|      Straddles     |
 		 * ---------------------------------^-----------|--------|
 		 */
-		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
 
 		/*
 		 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -971,7 +971,7 @@ xfs_do_writepage(
 		 * memory is zeroed when mapped, and writes to that region are
 		 * not written out to the file."
 		 */
-		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
+		zero_user_segment(page, offset_into_page, PAGE_SIZE);
 
 		/* Adjust the end_offset to the end of file */
 		end_offset = offset;
@@ -1475,7 +1475,7 @@ xfs_vm_write_failed(
 	loff_t			block_offset;
 	loff_t			block_start;
 	loff_t			block_end;
-	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
+	loff_t			from = pos & (PAGE_SIZE - 1);
 	loff_t			to = from + len;
 	struct buffer_head	*bh, *head;
 	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
@@ -1491,7 +1491,7 @@ xfs_vm_write_failed(
 	 * start of the page by using shifts rather than masks the mismatch
 	 * problem.
 	 */
-	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+	block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
 
 	ASSERT(block_offset + from == pos);
 
@@ -1558,12 +1558,12 @@ xfs_vm_write_begin(
 	struct page		**pagep,
 	void			**fsdata)
 {
-	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t			index = pos >> PAGE_SHIFT;
 	struct page		*page;
 	int			status;
 	struct xfs_mount	*mp = XFS_I(mapping->host)->i_mount;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -1592,7 +1592,7 @@ xfs_vm_write_begin(
 			truncate_pagecache_range(inode, start, pos + len);
 		}
 
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -1620,7 +1620,7 @@ xfs_vm_write_end(
 {
 	int			ret;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 	if (unlikely(ret < len)) {
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a32c1dcae2ff..3b6309865c65 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1237,7 +1237,7 @@ xfs_free_file_space(
 	/* wait for the completion of any pending DIOs */
 	inode_dio_wait(VFS_I(ip));
 
-	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
 	ioffset = round_down(offset, rounding);
 	iendoffset = round_up(offset + len, rounding) - 1;
 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
@@ -1466,7 +1466,7 @@ xfs_shift_file_space(
 	if (error)
 		return error;
 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-					offset >> PAGE_CACHE_SHIFT, -1);
+					offset >> PAGE_SHIFT, -1);
 	if (error)
 		return error;
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ac0fd32de31e..569938a4a357 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -106,8 +106,8 @@ xfs_iozero(
 		unsigned offset, bytes;
 		void *fsdata;
 
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		bytes = PAGE_CACHE_SIZE - offset;
+		offset = (pos & (PAGE_SIZE -1)); /* Within page */
+		bytes = PAGE_SIZE - offset;
 		if (bytes > count)
 			bytes = count;
 
@@ -799,8 +799,8 @@ xfs_file_dio_aio_write(
 	/* see generic_file_direct_write() for why this is necessary */
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_CACHE_SHIFT,
-					      end >> PAGE_CACHE_SHIFT);
+					      pos >> PAGE_SHIFT,
+					      end >> PAGE_SHIFT);
 	}
 
 	if (ret > 0) {
@@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff(
 
 	pagevec_init(&pvec, 0);
 
-	index = startoff >> PAGE_CACHE_SHIFT;
+	index = startoff >> PAGE_SHIFT;
 	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-	end = endoff >> PAGE_CACHE_SHIFT;
+	end = endoff >> PAGE_SHIFT;
 	do {
 		int		want;
 		unsigned	nr_pages;
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239a0fa9..a8192dc797dc 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -135,7 +135,7 @@ typedef __u32 xfs_nlink_t;
  * Size of block device i/o is parameterized here.
  * Currently the system supports page-sized i/o.
  */
-#define	BLKDEV_IOSHIFT		PAGE_CACHE_SHIFT
+#define	BLKDEV_IOSHIFT		PAGE_SHIFT
 #define	BLKDEV_IOSIZE		(1<<BLKDEV_IOSHIFT)
 /* number of BB's per block device block */
 #define	BLKDEV_BB		BTOBB(BLKDEV_IOSIZE)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536a0ee9cd5a..cfd4210dd015 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count(
 	ASSERT(sbp->sb_blocklog >= BBSHIFT);
 
 	/* Limited by ULONG_MAX of page cache index */
-	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
 		return -EFBIG;
 	return 0;
 }
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bac6b3435591..eafe257b357a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -231,12 +231,12 @@ static inline unsigned long
 xfs_preferred_iosize(xfs_mount_t *mp)
 {
 	if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
-		return PAGE_CACHE_SIZE;
+		return PAGE_SIZE;
 	return (mp->m_swidth ?
 		(mp->m_swidth << mp->m_sb.sb_blocklog) :
 		((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
 		 (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
-		 PAGE_CACHE_SIZE));
+		 PAGE_SIZE));
 }
 
 #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp)	\
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index ade236e90bb3..51ddaf2c2b8c 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -293,8 +293,8 @@ xfs_fs_commit_blocks(
 	 * Make sure reads through the pagecache see the new data.
 	 */
 	error = invalidate_inode_pages2_range(inode->i_mapping,
-					start >> PAGE_CACHE_SHIFT,
-					(end - 1) >> PAGE_CACHE_SHIFT);
+					start >> PAGE_SHIFT,
+					(end - 1) >> PAGE_SHIFT);
 	WARN_ON_ONCE(error);
 
 	error = xfs_iomap_write_unwritten(ip, start, length);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d760934109b5..187e14b696c2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@ xfs_max_file_offset(
 	/* Figure out maximum filesize, on Linux this can depend on
 	 * the filesystem blocksize (on 32 bit platforms).
 	 * __block_write_begin does this in an [unsigned] long...
-	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
+	 *      page->index << (PAGE_SHIFT - bbits)
 	 * So, for page sized blocks (4K on 32 bit platforms),
 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
 	 * Note1: get_block_t takes a long (implicit cast from above)
 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
@@ -570,10 +570,10 @@ xfs_max_file_offset(
 #if BITS_PER_LONG == 32
 # if defined(CONFIG_LBDAF)
 	ASSERT(sizeof(sector_t) == 8);
-	pagefactor = PAGE_CACHE_SIZE;
+	pagefactor = PAGE_SIZE;
 	bitshift = BITS_PER_LONG;
 # else
-	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+	pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
 # endif
 #endif
 
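
The 8 Tb in the comment above is easy to verify (assuming 4 KiB pages and BITS_PER_LONG == 32):

	/* MAX_LFS_FILESIZE on 32-bit: */
	loff_t max = ((loff_t)4096 << 31) - 1;	/* 2^43 - 1, i.e. 8 TiB - 1 */
	/* with 512-byte blocks: pagefactor = 4096 >> (12 - 9) = 512,
	 * so the cap drops to (512 << 31) - 1 = 1 TiB - 1 */

In the second hunk, pagefactor is simply the block size whenever blocks are smaller than a page, which is why smaller block sizes lower the limit.
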
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c919b5..bf2d34c9d804 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	u32 val;
 
 	preempt_disable();
-	if (unlikely(get_user(val, uaddr) != 0))
+	if (unlikely(get_user(val, uaddr) != 0)) {
+		preempt_enable();
 		return -EFAULT;
+	}
 
-	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+		preempt_enable();
 		return -EFAULT;
+	}
 
 	*uval = val;
 	preempt_enable();
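
This futex change is a bugfix rather than rename churn: both early returns previously left the function with preemption still disabled, leaking a preempt count on the -EFAULT paths. The invariant being restored is the standard one for early exits, sketched generically below (do_something_that_may_fail is a made-up placeholder, not a kernel API):

	extern int do_something_that_may_fail(void);	/* hypothetical */

	int guarded_op(void)
	{
		int err;

		preempt_disable();
		err = do_something_that_may_fail();
		if (err) {
			preempt_enable();	/* balance before *every* return */
			return err;
		}
		preempt_enable();
		return 0;
	}

An unbalanced preempt_disable() is nasty precisely because the damage shows up later, as a CPU that never reschedules, far from the function at fault.
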
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 461a0558bca4..cebecff536a3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
 {
 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+	return false;
 #else
 	return true;
 #endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index afae2316bd43..055a08ddac02 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -92,7 +92,7 @@ struct ttm_placement {
  */
 struct ttm_bus_placement {
 	void		*addr;
-	unsigned long	base;
+	phys_addr_t	base;
 	unsigned long	size;
 	unsigned long	offset;
 	bool		is_iomem;
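
The likely motivation for widening base from unsigned long to phys_addr_t is 32-bit configurations where physical addresses are wider than the word size (CONFIG_PHYS_ADDR_T_64BIT, e.g. ARM LPAE): a bus address above 4 GiB would previously truncate. A minimal illustration with an assumed 36-bit address:

	phys_addr_t base    = 0x9ffff0000ULL;		/* bit 35 set, preserved */
	unsigned long trunc = (unsigned long)base;	/* 0xffff0000 on 32-bit: wrong */
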
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
 	struct list_head bdi_list;
-	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned int capabilities; /* Device capabilities */
 	congested_fn *congested_fn; /* Function pointer if device is md/dm */
 	void *congested_data;	/* Pointer to aux data for congested func */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..6b7481f62218 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
 #endif
 
 #define BIO_MAX_PAGES		256
-#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_SHIFT)
 #define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e018bea..669e419d6234 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
 
 static inline void put_dev_sector(Sector p)
 {
-	page_cache_release(p.v);
+	put_page(p.v);
 }
 
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052cc5e5..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
  */
 };
 
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
 
 struct page;
 struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
 static inline void attach_page_buffers(struct page *page,
 		struct buffer_head *head)
 {
-	page_cache_get(page);
+	get_page(page);
 	SetPagePrivate(page);
 	set_page_private(page, (unsigned long)head);
 }
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
  */
 
 struct ceph_auth_client;
-struct ceph_authorizer;
 struct ceph_msg;
 
+struct ceph_authorizer {
+	void (*destroy)(struct ceph_authorizer *);
+};
+
 struct ceph_auth_handshake {
 	struct ceph_authorizer *authorizer;
 	void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
 					 struct ceph_auth_handshake *auth);
 	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
 				       struct ceph_authorizer *a, size_t len);
-	void (*destroy_authorizer)(struct ceph_auth_client *ac,
-				   struct ceph_authorizer *a);
 	void (*invalidate_authorizer)(struct ceph_auth_client *ac,
 				      int peer_type);
 
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
 extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
 				       int peer_type,
 				       struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
-					 struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
 extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
 					int peer_type,
 					struct ceph_auth_handshake *a);
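
This ceph change moves authorizer destruction from a ceph_auth_client method into a destructor stored in the authorizer itself, so an authorizer can be freed without dereferencing an auth client that may already be gone. The shape of the pattern, as a sketch with a hypothetical backend type:

	struct my_authorizer {			/* hypothetical backend */
		struct ceph_authorizer base;
		/* ... private state ... */
	};

	static void my_destroy(struct ceph_authorizer *a)
	{
		kfree(container_of(a, struct my_authorizer, base));
	}

	/* at creation:  au->base.destroy = my_destroy;
	 * at teardown:  ceph_auth_destroy_authorizer(&au->base),
	 * which can now be implemented as a->destroy(a). */
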
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4681e1..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
  */
 static inline int calc_pages_for(u64 off, u64 len)
 {
-	return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
-		(off >> PAGE_CACHE_SHIFT);
+	return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+		(off >> PAGE_SHIFT);
 }
 
 extern struct kmem_cache *ceph_inode_cachep;
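
calc_pages_for() counts the pages touched by the extent [off, off+len), which is not the same as DIV_ROUND_UP(len, PAGE_SIZE) once the extent straddles a page boundary. A worked case with 4 KiB pages (values assumed):

	/* off = 4000, len = 200: data spans bytes 4000..4199 */
	int pages = ((4000 + 200 + 4096 - 1) >> 12) - (4000 >> 12);
	/*        = (8295 >> 12) - (4000 >> 12) = 2 - 0 = 2 pages touched,
	 * though 200 bytes alone would round up to only 1 */
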
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
 struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
-struct ceph_authorizer;
 
 /*
  * completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
 	int (*can_attach)(struct cgroup_taskset *tset);
 	void (*cancel_attach)(struct cgroup_taskset *tset);
 	void (*attach)(struct cgroup_taskset *tset);
+	void (*post_attach)(void);
 	int (*can_fork)(struct task_struct *task);
 	void (*cancel_fork)(struct task_struct *task);
 	void (*fork)(struct task_struct *task);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..eeae401a2412 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
 #define unreachable() __builtin_unreachable()
 
 /* Mark a function definition as prohibited from being cloned. */
-#define __noclone	__attribute__((__noclone__))
+#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
 
 #endif /* GCC_VERSION >= 40500 */
 
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 	task_unlock(current);
 }
 
-extern void cpuset_post_attach_flush(void);
-
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
 	return false;
 }
 
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7cb043d8f4e8..4bb4de8d95ea 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -161,6 +161,7 @@ struct dentry_operations {
 	struct vfsmount *(*d_automount)(struct path *);
 	int (*d_manage)(struct dentry *, bool);
 	struct inode *(*d_select_inode)(struct dentry *, unsigned);
+	struct dentry *(*d_real)(struct dentry *, struct inode *);
 } ____cacheline_aligned;
 
 /*
@@ -229,6 +230,7 @@ struct dentry_operations {
 #define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
 
 #define DCACHE_ENCRYPTED_WITH_KEY	0x04000000 /* dir is encrypted with a valid key */
+#define DCACHE_OP_REAL			0x08000000
 
 extern seqlock_t rename_lock;
 
@@ -555,4 +557,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
 	return upper;
 }
 
+static inline struct dentry *d_real(struct dentry *dentry)
+{
+	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+		return dentry->d_op->d_real(dentry, NULL);
+	else
+		return dentry;
+}
+
 #endif /* __LINUX_DCACHE_H */
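
d_real() is the hook that lets a stacking filesystem hand back the dentry of the underlying layer, while every other filesystem pays only a single flag test. A sketch of how a stacking filesystem might wire it up (the ovl_* names are illustrative, modeled on overlayfs rather than copied from it):

	static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
	{
		/* return the real (e.g. upper-layer) dentry behind this one */
		return ovl_real_dentry_of(dentry);	/* assumed helper */
	}

	static const struct dentry_operations ovl_dentry_operations = {
		.d_real = ovl_d_real,
	};

A dentry whose operations provide .d_real gets DCACHE_OP_REAL set when the operations are installed, which is what keeps the inline fast path above branch-cheap.
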
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3000b2..5871f292b596 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,37 +15,23 @@
 
 #include <linux/errno.h>
 
+struct pts_fs_info;
+
 #ifdef CONFIG_UNIX98_PTYS
 
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
 /* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-		void *priv);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
 /* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
 /* unlink */
-void devpts_pty_kill(struct inode *inode);
-
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
-					   dev_t device, int index, void *priv)
-{
-	return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
-	return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
+void devpts_pty_kill(struct dentry *);
 
 #endif
 
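
The devpts interface now threads an explicit pts_fs_info through every call instead of re-deriving the filesystem instance from an inode each time; note also that the non-CONFIG_UNIX98_PTYS stubs are removed entirely rather than updated. A caller-side sketch of the intended sequence, with error handling elided and ptmx_inode/filp/priv standing in for the real arguments:

	struct pts_fs_info *fsi;
	struct dentry *dentry;
	int index;

	fsi = devpts_get_ref(ptmx_inode, filp);		/* pin the pts instance */
	index = devpts_new_index(fsi);			/* allocate a pty number */
	dentry = devpts_pty_new(fsi, index, priv);	/* creates /dev/pts/<index> */
	/* ... pty in use ... */
	devpts_pty_kill(dentry);
	devpts_kill_index(fsi, index);
	devpts_put_ref(fsi);
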
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a155e0..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@ struct f2fs_node {
 /*
  * For NAT entries
  */
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
  * Not allow to change this.
 */
 #define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
 
 /*
  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a97194b34b..70e61b58baaf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f)
 /* Page cache limit. The filesystems should put that into their s_maxbytes
    limits, otherwise bad things can happen in VM. */
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE	(((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE	((loff_t)0x7fffffffffffffffLL)
 #endif
@@ -1241,6 +1241,16 @@ static inline struct inode *file_inode(const struct file *f)
 	return f->f_inode;
 }
 
+static inline struct dentry *file_dentry(const struct file *file)
+{
+	struct dentry *dentry = file->f_path.dentry;
+
+	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+		return dentry->d_op->d_real(dentry, file_inode(file));
+	else
+		return dentry;
+}
+
 static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
 {
 	return locks_lock_inode_wait(file_inode(filp), fl);
@@ -2067,7 +2077,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
 #ifdef CONFIG_MANDATORY_FILE_LOCKING
 extern int locks_mandatory_locked(struct file *);
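
file_dentry() complements d_real(): a file opened through an overlay carries the overlay dentry in f_path.dentry while file_inode() already points at the underlying inode, so code that compares the two would spuriously mismatch. A sketch of the safe pattern for a filesystem that may be stacked upon (my_check is hypothetical):

	static int my_check(struct file *file)
	{
		struct dentry *dentry = file_dentry(file); /* not f_path.dentry */

		/* d_inode(dentry) now agrees with file_inode(file) even for
		 * files opened through an overlay */
		return d_inode(dentry) == file_inode(file) ? 0 : -EXDEV;
	}
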
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cd91f75de49b..6027f6bbb061 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -263,9 +263,9 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
 extern struct kmem_cache *fscrypt_info_cachep;
 int fscrypt_initialize(void);
 
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
 extern int fscrypt_decrypt_page(struct page *);
 extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
 extern void fscrypt_pullback_bio_page(struct page **, bool);
@@ -299,7 +299,8 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
 #endif
 
 /* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+							   gfp_t f)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
@@ -310,7 +311,7 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
 }
 
 static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
-						struct page *p)
+						struct page *p, gfp_t f)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623e24b1..d7b9e5346fba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
 }
 
 struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
 	return false;
 }
 
+static inline void put_huge_zero_page(void)
+{
+	BUILD_BUG();
+}
 
 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a5c539fa5d2b..ef7a6ecd8584 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -195,9 +195,7 @@ struct iommu_ops {
 	/* Get the number of windows per domain */
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
 
-#ifdef CONFIG_OF_IOMMU
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
-#endif
 
 	unsigned long pgsize_bitmap;
 	void *priv;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..d10ef06971b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
  * We record lock dependency chains, so that we can cache them:
  */
 struct lock_chain {
-	u8				irq_context;
-	u8				depth;
-	u16				base;
+	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
+	unsigned int			irq_context :  2,
+					depth       :  6,
+					base        : 24;
+	/* 4 byte hole */
 	struct hlist_node		entry;
 	u64				chain_key;
 };
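
The lock_chain repack trades nothing for a much larger base field: lockdep's shared chain-hlocks array is indexed through base, and 16 bits had become too small on large configurations. The layout cost is zero, which is quick to confirm with kernel-style types:

	struct old_chain { u8 irq_context; u8 depth; u16 base; };	/* 4 bytes */
	struct new_chain { unsigned int irq_context : 2,
				depth : 6, base : 24; };		/* still 4 bytes */
	/* both are followed by the same 4-byte hole before the 8-byte-aligned
	 * hlist_node, so sizeof(struct lock_chain) is unchanged, while base
	 * grows from 2^16 to 2^24 addressable entries */
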
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8541a913f6a3..d1f904c8b2cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
 	u8			n_ports;
 };
 
+enum mlx4_pci_status {
+	MLX4_PCI_STATUS_DISABLED,
+	MLX4_PCI_STATUS_ENABLED,
+};
+
 struct mlx4_dev_persistent {
 	struct pci_dev	       *pdev;
 	struct mlx4_dev	       *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
 	u8		state;
 	struct mutex	interface_state_mutex; /* protect SW state */
 	u8	interface_state;
+	struct mutex		pci_status_mutex; /* sync pci state */
+	enum mlx4_pci_status	pci_status;
 };
 
 struct mlx4_dev {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8156e3c9239c..b3575f392492 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -392,6 +392,17 @@ enum {
 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
 };
 
+enum {
+	/*
+	 * Max wqe size for rdma read is 512 bytes, so this
+	 * limits our max_sge_rd as the wqe needs to fit:
+	 * - ctrl segment (16 bytes)
+	 * - rdma segment (16 bytes)
+	 * - scatter elements (16 bytes each)
+	 */
+	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
+};
+
 struct mlx5_inbox_hdr {
 	__be16		opcode;
 	u8		rsvd[4];
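
MLX5_MAX_SGE_RD is pure WQE geometry, straight from the comment above: a 512-byte maximum read WQE must hold one 16-byte control segment, one 16-byte RDMA segment, and then 16-byte scatter entries, so:

	/* (512 - 16 - 16) / 16 = 480 / 16 = 30 scatter/gather entries */
	BUILD_BUG_ON(MLX5_MAX_SGE_RD != 30);	/* illustrative check only */

The driver can then report a fixed max_sge_rd for RDMA reads instead of deriving it ad hoc at each call site.
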
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index dcd5ac8d3b14..369c837d40f5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -519,8 +519,9 @@ enum mlx5_device_state {
 };
 
 enum mlx5_interface_state {
-	MLX5_INTERFACE_STATE_DOWN,
-	MLX5_INTERFACE_STATE_UP,
+	MLX5_INTERFACE_STATE_DOWN = BIT(0),
+	MLX5_INTERFACE_STATE_UP = BIT(1),
+	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
 };
 
 enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
 	enum mlx5_device_state	state;
 	/* sync interface state */
 	struct mutex		intf_state_mutex;
-	enum mlx5_interface_state interface_state;
+	unsigned long		intf_state;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
 					  unsigned long param);
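
Switching intf_state to an unsigned long of BIT() flags lets UP, DOWN, and SHUTDOWN be tracked as independent conditions rather than one exclusive enum value, which is what a PCI shutdown path needs when it can race with a normal unload. A sketch of the style of use; the exact call sites are driver internals not shown here, and mlx5_unload_one is an assumed teardown helper:

	/* under dev->intf_state_mutex (illustrative): */
	dev->intf_state |= MLX5_INTERFACE_STATE_SHUTDOWN;
	if (dev->intf_state & MLX5_INTERFACE_STATE_UP)
		mlx5_unload_one(dev, priv);	/* tear down before PCI shutdown */
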
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index a1d145abd4eb..b30250ab7604 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 				 enum mlx5_port_status *status);
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port);
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index bd93e6323603..301da4a5e6bf 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
47 u16 vport, u8 *addr); 47 u16 vport, u8 *addr);
48int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
49int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
48int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
49 u64 *system_image_guid); 51 u64 *system_image_guid);
50int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..864d7221de84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
623 * 623 *
624 * A page may belong to an inode's memory mapping. In this case, page->mapping 624 * A page may belong to an inode's memory mapping. In this case, page->mapping
625 * is the pointer to the inode, and page->index is the file offset of the page, 625 * is the pointer to the inode, and page->index is the file offset of the page,
626 * in units of PAGE_CACHE_SIZE. 626 * in units of PAGE_SIZE.
627 * 627 *
628 * If pagecache pages are not associated with an inode, they are said to be 628 * If pagecache pages are not associated with an inode, they are said to be
629 * anonymous pages. These may become associated with the swapcache, and in that 629 * anonymous pages. These may become associated with the swapcache, and in that
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page)
1031 page = compound_head(page); 1031 page = compound_head(page);
1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0) 1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
1033 return true; 1033 return true;
1034 if (PageHuge(page))
1035 return false;
1034 for (i = 0; i < hpage_nr_pages(page); i++) { 1036 for (i = 0; i < hpage_nr_pages(page); i++) {
1035 if (atomic_read(&page[i]._mapcount) >= 0) 1037 if (atomic_read(&page[i]._mapcount) >= 0)
1036 return true; 1038 return true;
@@ -1138,6 +1140,8 @@ struct zap_details {
1138 1140
1139struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1141struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1140 pte_t pte); 1142 pte_t pte);
1143struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1144 pmd_t pmd);
1141 1145
1142int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1146int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1143 unsigned long size); 1147 unsigned long size);
@@ -1250,78 +1254,20 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1250 unsigned long start, unsigned long nr_pages, 1254 unsigned long start, unsigned long nr_pages,
1251 int write, int force, struct page **pages, 1255 int write, int force, struct page **pages,
1252 struct vm_area_struct **vmas); 1256 struct vm_area_struct **vmas);
1253long get_user_pages6(unsigned long start, unsigned long nr_pages, 1257long get_user_pages(unsigned long start, unsigned long nr_pages,
1254 int write, int force, struct page **pages, 1258 int write, int force, struct page **pages,
1255 struct vm_area_struct **vmas); 1259 struct vm_area_struct **vmas);
1256long get_user_pages_locked6(unsigned long start, unsigned long nr_pages, 1260long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1257 int write, int force, struct page **pages, int *locked); 1261 int write, int force, struct page **pages, int *locked);
1258long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1262long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1259 unsigned long start, unsigned long nr_pages, 1263 unsigned long start, unsigned long nr_pages,
1260 int write, int force, struct page **pages, 1264 int write, int force, struct page **pages,
1261 unsigned int gup_flags); 1265 unsigned int gup_flags);
1262long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages, 1266long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1263 int write, int force, struct page **pages); 1267 int write, int force, struct page **pages);
1264int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1268int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1265 struct page **pages); 1269 struct page **pages);
1266 1270
1267/* suppress warnings from use in EXPORT_SYMBOL() */
1268#ifndef __DISABLE_GUP_DEPRECATED
1269#define __gup_deprecated __deprecated
1270#else
1271#define __gup_deprecated
1272#endif
1273/*
1274 * These macros provide backward-compatibility with the old
1275 * get_user_pages() variants which took tsk/mm. These
1276 * functions/macros provide both compile-time __deprecated so we
1277 * can catch old-style use and not break the build. The actual
1278 * functions also have WARN_ON()s to let us know at runtime if
1279 * the get_user_pages() should have been the "remote" variant.
1280 *
1281 * These are hideous, but temporary.
1282 *
1283 * If you run into one of these __deprecated warnings, look
1284 * at how you are calling get_user_pages(). If you are calling
1285 * it with current/current->mm as the first two arguments,
1286 * simply remove those arguments. The behavior will be the same
1287 * as it is now. If you are calling it on another task, use
1288 * get_user_pages_remote() instead.
1289 *
1290 * Any questions? Ask Dave Hansen <dave@sr71.net>
1291 */
1292long
1293__gup_deprecated
1294get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
1295 unsigned long start, unsigned long nr_pages,
1296 int write, int force, struct page **pages,
1297 struct vm_area_struct **vmas);
1298#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
1299 get_user_pages
1300#define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \
1301 get_user_pages8, x, \
1302 get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
1303
1304__gup_deprecated
1305long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
1306 unsigned long start, unsigned long nr_pages,
1307 int write, int force, struct page **pages,
1308 int *locked);
1309#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
1310 get_user_pages_locked
1311#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \
1312 get_user_pages_locked8, x, \
1313 get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
1314
1315__gup_deprecated
1316long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
1317 unsigned long start, unsigned long nr_pages,
1318 int write, int force, struct page **pages);
1319#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) \
1320 get_user_pages_unlocked
1321#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \
1322 get_user_pages_unlocked7, x, \
1323 get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
1324
1325/* Container for pinned pfns / pages */ 1271/* Container for pinned pfns / pages */
1326struct frame_vector { 1272struct frame_vector {
1327 unsigned int nr_allocated; /* Number of frames we have space for */ 1273 unsigned int nr_allocated; /* Number of frames we have space for */
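
With the deprecated eight-argument wrappers removed above, the short get_user_pages() variants always act on current->mm, and other tasks go through get_user_pages_remote(). A minimal kernel-context sketch of the post-patch calling convention (the helper is hypothetical; pinned pages are later released with put_page()):

#include <linux/mm.h>
#include <linux/sched.h>

/* Pin one page of the current task at uaddr for writing; the caller
 * releases it with put_page() when done. Illustrative only. */
static int example_pin_one_page(unsigned long uaddr, struct page **page)
{
	long got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(uaddr & PAGE_MASK, 1,
			     1 /* write */, 0 /* force */, page, NULL);
	up_read(&current->mm->mmap_sem);

	return got == 1 ? 0 : -EFAULT;
}
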
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
341 341
342 /* Information about our backing store: */ 342 /* Information about our backing store: */
343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE 343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
344 units, *not* PAGE_CACHE_SIZE */ 344 units */
345 struct file * vm_file; /* File we map to (can be NULL). */ 345 struct file * vm_file; /* File we map to (can be NULL). */
346 void * vm_private_data; /* was vm_pte (shared mem) */ 346 void * vm_private_data; /* was vm_pte (shared mem) */
347 347
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb0d5d09c2e4..8395308a2445 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2120,7 +2120,10 @@ struct napi_gro_cb {
2120 /* Used in foo-over-udp, set in udp[46]_gro_receive */ 2120 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2121 u8 is_ipv6:1; 2121 u8 is_ipv6:1;
2122 2122
2123 /* 7 bit hole */ 2123 /* Used in GRE, set in fou/gue_gro_receive */
2124 u8 is_fou:1;
2125
2126 /* 6 bit hole */
2124 2127
2125 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 2128 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2126 __wsum csum; 2129 __wsum csum;
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
41 struct page *wb_page; /* page to read in/write out */ 41 struct page *wb_page; /* page to read in/write out */
42 struct nfs_open_context *wb_context; /* File state context info */ 42 struct nfs_open_context *wb_context; /* File state context info */
43 struct nfs_lock_context *wb_lock_context; /* lock context info */ 43 struct nfs_lock_context *wb_lock_context; /* lock context info */
44 pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ 44 pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
45 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 45 unsigned int wb_offset, /* Offset & ~PAGE_MASK */
46 wb_pgbase, /* Start of page data */ 46 wb_pgbase, /* Start of page data */
47 wb_bytes; /* Length of request */ 47 wb_bytes; /* Length of request */
48 struct kref wb_kref; /* reference count */ 48 struct kref wb_kref; /* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
184static inline 184static inline
185loff_t req_offset(struct nfs_page *req) 185loff_t req_offset(struct nfs_page *req)
186{ 186{
187 return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset; 187 return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
188} 188}
189 189
190#endif /* _LINUX_NFS_PAGE_H */ 190#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
331{ 331{
332 unsigned len = le16_to_cpu(dlen); 332 unsigned len = le16_to_cpu(dlen);
333 333
334#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 334#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
335 if (len == NILFS_MAX_REC_LEN) 335 if (len == NILFS_MAX_REC_LEN)
336 return 1 << 16; 336 return 1 << 16;
337#endif 337#endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
340 340
341static inline __le16 nilfs_rec_len_to_disk(unsigned len) 341static inline __le16 nilfs_rec_len_to_disk(unsigned len)
342{ 342{
343#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 343#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
344 if (len == (1 << 16)) 344 if (len == (1 << 16))
345 return cpu_to_le16(NILFS_MAX_REC_LEN); 345 return cpu_to_le16(NILFS_MAX_REC_LEN);
346 else if (len > (1 << 16)) 346 else if (len > (1 << 16))
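
The 64K-page special case exists because a 16-bit on-disk record length cannot represent 65536, so NILFS_MAX_REC_LEN stands in for it. A host-endian round-trip check, assuming NILFS_MAX_REC_LEN is ((1 << 16) - 1) as defined elsewhere in nilfs2_fs.h (the le16 conversion is omitted here):

#include <assert.h>
#include <stdint.h>

#define NILFS_MAX_REC_LEN ((1 << 16) - 1)	/* assumed, from nilfs2_fs.h */

/* Userspace mirrors of the two helpers for the PAGE_SIZE >= 64K case. */
static unsigned rec_len_from_disk(uint16_t dlen)
{
	return dlen == NILFS_MAX_REC_LEN ? 1U << 16 : dlen;
}

static uint16_t rec_len_to_disk(unsigned len)
{
	return len == (1U << 16) ? NILFS_MAX_REC_LEN : (uint16_t)len;
}

int main(void)
{
	assert(rec_len_from_disk(rec_len_to_disk(1U << 16)) == 1U << 16);
	assert(rec_len_from_disk(rec_len_to_disk(4096)) == 4096);
	return 0;
}
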
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
86 (__force unsigned long)mask; 86 (__force unsigned long)mask;
87} 87}
88 88
89/*
90 * The page cache can be done in larger chunks than
91 * one page, because it allows for more efficient
92 * throughput (it can then be mapped into user
93 * space in smaller chunks for same flexibility).
94 *
95 * Or rather, it _will_ be done in larger chunks.
96 */
97#define PAGE_CACHE_SHIFT PAGE_SHIFT
98#define PAGE_CACHE_SIZE PAGE_SIZE
99#define PAGE_CACHE_MASK PAGE_MASK
100#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
101
102#define page_cache_get(page) get_page(page)
103#define page_cache_release(page) put_page(page)
104void release_pages(struct page **pages, int nr, bool cold); 89void release_pages(struct page **pages, int nr, bool cold);
105 90
106/* 91/*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
390 return page->index << compound_order(page); 375 return page->index << compound_order(page);
391 376
392 if (likely(!PageTransTail(page))) 377 if (likely(!PageTransTail(page)))
393 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 378 return page->index;
394 379
395 /* 380 /*
396 * We don't initialize ->index for tail pages: calculate based on 381 * We don't initialize ->index for tail pages: calculate based on
397 * head page 382 * head page
398 */ 383 */
399 pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 384 pgoff = compound_head(page)->index;
400 pgoff += page - compound_head(page); 385 pgoff += page - compound_head(page);
401 return pgoff; 386 return pgoff;
402} 387}
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
406 */ 391 */
407static inline loff_t page_offset(struct page *page) 392static inline loff_t page_offset(struct page *page)
408{ 393{
409 return ((loff_t)page->index) << PAGE_CACHE_SHIFT; 394 return ((loff_t)page->index) << PAGE_SHIFT;
410} 395}
411 396
412static inline loff_t page_file_offset(struct page *page) 397static inline loff_t page_file_offset(struct page *page)
413{ 398{
414 return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; 399 return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
415} 400}
416 401
417extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, 402extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
425 return linear_hugepage_index(vma, address); 410 return linear_hugepage_index(vma, address);
426 pgoff = (address - vma->vm_start) >> PAGE_SHIFT; 411 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
427 pgoff += vma->vm_pgoff; 412 pgoff += vma->vm_pgoff;
428 return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); 413 return pgoff;
429} 414}
430 415
431extern void __lock_page(struct page *page); 416extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
535/* 520/*
536 * Fault a userspace page into pagetables. Return non-zero on a fault. 521 * Fault a userspace page into pagetables. Return non-zero on a fault.
537 * 522 *
538 * This assumes that two userspace pages are always sufficient. That's 523 * This assumes that two userspace pages are always sufficient.
539 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
540 */ 524 */
541static inline int fault_in_pages_writeable(char __user *uaddr, int size) 525static inline int fault_in_pages_writeable(char __user *uaddr, int size)
542{ 526{
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
671 655
672static inline unsigned long dir_pages(struct inode *inode) 656static inline unsigned long dir_pages(struct inode *inode)
673{ 657{
674 return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> 658 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
675 PAGE_CACHE_SHIFT; 659 PAGE_SHIFT;
676} 660}
677 661
678#endif /* _LINUX_PAGEMAP_H */ 662#endif /* _LINUX_PAGEMAP_H */
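
After the PAGE_CACHE_* removal, dir_pages() is a plain ceiling division by the page size: on 4K pages a 4097-byte directory needs (4097 + 4095) >> 12 = 2 pages. A userspace restatement of the formula, assuming 4K pages:

#include <assert.h>

#define PAGE_SHIFT 12			/* assume 4K pages for the example */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long dir_pages(unsigned long i_size)
{
	/* ceil(i_size / PAGE_SIZE) */
	return (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	assert(dir_pages(0) == 0);
	assert(dir_pages(1) == 1);
	assert(dir_pages(4096) == 1);
	assert(dir_pages(4097) == 2);
	return 0;
}
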
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 004b8133417d..932ec74909c6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1111,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
1111/* Vital product data routines */ 1111/* Vital product data routines */
1112ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1112ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1113ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1113ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1114int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1114 1115
1115/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1116/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1116resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); 1117resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index ac6d872ce067..57d146fe44dd 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -72,6 +72,18 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
72} 72}
73#endif 73#endif
74 74
75static inline bool arch_has_pmem_api(void)
76{
77 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
78}
79
80static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
81 size_t size)
82{
83 memcpy(dst, (void __force *) src, size);
84 return 0;
85}
86
75/* 87/*
76 * memcpy_from_pmem - read from persistent memory with error handling 88 * memcpy_from_pmem - read from persistent memory with error handling
77 * @dst: destination buffer 89 * @dst: destination buffer
@@ -83,12 +95,10 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
83static inline int memcpy_from_pmem(void *dst, void __pmem const *src, 95static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
84 size_t size) 96 size_t size)
85{ 97{
86 return arch_memcpy_from_pmem(dst, src, size); 98 if (arch_has_pmem_api())
87} 99 return arch_memcpy_from_pmem(dst, src, size);
88 100 else
89static inline bool arch_has_pmem_api(void) 101 return default_memcpy_from_pmem(dst, src, size);
90{
91 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
92} 102}
93 103
94/** 104/**
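
Hoisting arch_has_pmem_api() and default_memcpy_from_pmem() above memcpy_from_pmem() lets the copy helper dispatch on IS_ENABLED(), which folds to a compile-time constant so the untaken branch is discarded. A userspace sketch of the same shape, with a stand-in for the Kconfig symbol:

#include <stdio.h>
#include <string.h>

#define CONFIG_ARCH_HAS_PMEM_API 0	/* stand-in for the Kconfig symbol */

static int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);		/* would be the arch-specific copy */
	return 0;
}

static int default_memcpy_from_pmem(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int memcpy_from_pmem(void *dst, const void *src, size_t n)
{
	/* constant condition: the dead branch drops out at compile time */
	if (CONFIG_ARCH_HAS_PMEM_API)
		return arch_memcpy_from_pmem(dst, src, n);
	else
		return default_memcpy_from_pmem(dst, src, n);
}

int main(void)
{
	char src[4] = "abc", dst[4];

	return memcpy_from_pmem(dst, src, sizeof(src)) || strcmp(dst, "abc");
}
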
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7da4a7..4ae95f7e8597 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
98 if (!is_a_nulls(first)) 98 if (!is_a_nulls(first))
99 first->pprev = &n->next; 99 first->pprev = &n->next;
100} 100}
101
102/**
103 * hlist_nulls_add_tail_rcu
104 * @n: the element to add to the hash list.
105 * @h: the list to add to.
106 *
107 * Description:
108 * Adds the specified element to the end of the specified hlist_nulls,
109 * while permitting racing traversals. NOTE: tail insertion requires
110 * list traversal.
111 *
112 * The caller must take whatever precautions are necessary
113 * (such as holding appropriate locks) to avoid racing
114 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
115 * or hlist_nulls_del_rcu(), running on this same list.
116 * However, it is perfectly legal to run concurrently with
117 * the _rcu list-traversal primitives, such as
118 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
119 * problems on Alpha CPUs. Regardless of the type of CPU, the
120 * list-traversal primitive must be guarded by rcu_read_lock().
121 */
122static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
123 struct hlist_nulls_head *h)
124{
125 struct hlist_nulls_node *i, *last = NULL;
126
127 for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
128 i = hlist_nulls_next_rcu(i))
129 last = i;
130
131 if (last) {
132 n->next = last->next;
133 n->pprev = &last->next;
134 rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
135 } else {
136 hlist_nulls_add_head_rcu(n, h);
137 }
138}
139
101/** 140/**
102 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 141 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
103 * @tpos: the type * to use as a loop cursor. 142 * @tpos: the type * to use as a loop cursor.
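
As the kerneldoc above says, hlist_nulls_add_tail_rcu() walks the list, so insertion is O(n) and writers must still be serialized while readers run lock-free. A kernel-context sketch of the intended pairing (the lock and item type are illustrative):

#include <linux/rculist_nulls.h>
#include <linux/spinlock.h>

struct item {
	struct hlist_nulls_node node;
	int val;
};

static DEFINE_SPINLOCK(list_lock);

/* Writer: tail insertion, serialized by the list lock. */
static void example_add(struct hlist_nulls_head *head, struct item *it)
{
	spin_lock(&list_lock);
	hlist_nulls_add_tail_rcu(&it->node, head);
	spin_unlock(&list_lock);
}

/* Reader: lock-free traversal under rcu_read_lock(). */
static bool example_find(struct hlist_nulls_head *head, int val)
{
	struct item *it;
	struct hlist_nulls_node *pos;
	bool found = false;

	rcu_read_lock();
	hlist_nulls_for_each_entry_rcu(it, pos, head, node)
		if (it->val == val) {
			found = true;
			break;
		}
	rcu_read_unlock();
	return found;
}
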
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00defbaa5..f3d45dd42695 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
7#include <linux/mutex.h> 7#include <linux/mutex.h>
8#include <linux/cpumask.h> 8#include <linux/cpumask.h>
9#include <linux/nodemask.h> 9#include <linux/nodemask.h>
10#include <linux/fs.h>
11#include <linux/cred.h>
10 12
11struct seq_operations; 13struct seq_operations;
12struct file;
13struct path;
14struct inode;
15struct dentry;
16struct user_namespace;
17 14
18struct seq_file { 15struct seq_file {
19 char *buf; 16 char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
27 struct mutex lock; 24 struct mutex lock;
28 const struct seq_operations *op; 25 const struct seq_operations *op;
29 int poll_event; 26 int poll_event;
30#ifdef CONFIG_USER_NS 27 const struct file *file;
31 struct user_namespace *user_ns;
32#endif
33 void *private; 28 void *private;
34}; 29};
35 30
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
147static inline struct user_namespace *seq_user_ns(struct seq_file *seq) 142static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
148{ 143{
149#ifdef CONFIG_USER_NS 144#ifdef CONFIG_USER_NS
150 return seq->user_ns; 145 return seq->file->f_cred->user_ns;
151#else 146#else
152 extern struct user_namespace init_user_ns; 147 extern struct user_namespace init_user_ns;
153 return &init_user_ns; 148 return &init_user_ns;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
129 * 129 *
130 * These happen to all be powers of 2, which is not strictly 130 * These happen to all be powers of 2, which is not strictly
131 * necessary but helps enforce the real limitation, which is 131 * necessary but helps enforce the real limitation, which is
132 * that they should be multiples of PAGE_CACHE_SIZE. 132 * that they should be multiples of PAGE_SIZE.
133 * 133 *
134 * For UDP transports, a block plus NFS,RPC, and UDP headers 134 * For UDP transports, a block plus NFS,RPC, and UDP headers
135 * has to fit into the IP datagram limit of 64K. The largest 135 * has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..2b83359c19ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,9 +433,9 @@ struct backing_dev_info;
433#define si_swapinfo(val) \ 433#define si_swapinfo(val) \
434 do { (val)->freeswap = (val)->totalswap = 0; } while (0) 434 do { (val)->freeswap = (val)->totalswap = 0; } while (0)
435/* only sparc cannot include linux/pagemap.h in this file 435/* only sparc cannot include linux/pagemap.h in this file
436 * so leave page_cache_release and release_pages undeclared... */ 436 * so leave put_page and release_pages undeclared... */
437#define free_page_and_swap_cache(page) \ 437#define free_page_and_swap_cache(page) \
438 page_cache_release(page) 438 put_page(page)
439#define free_pages_and_swap_cache(pages, nr) \ 439#define free_pages_and_swap_cache(pages, nr) \
440 release_pages((pages), (nr), false); 440 release_pages((pages), (nr), false);
441 441
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a55d0523f75d..1b8a5a7876ce 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -352,8 +352,8 @@ struct thermal_zone_of_device_ops {
352 352
353struct thermal_trip { 353struct thermal_trip {
354 struct device_node *np; 354 struct device_node *np;
355 unsigned long int temperature; 355 int temperature;
356 unsigned long int hysteresis; 356 int hysteresis;
357 enum thermal_trip_type type; 357 enum thermal_trip_type type;
358}; 358};
359 359
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 161052477f77..b742b5e47cc2 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
7 * defined; unless noted otherwise, they are optional, and can be 7 * defined; unless noted otherwise, they are optional, and can be
8 * filled in with a null pointer. 8 * filled in with a null pointer.
9 * 9 *
10 * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) 10 * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
11 * 11 *
12 * Return the tty device corresponding to idx, NULL if there is not 12 * Return the tty device corresponding to idx, NULL if there is not
13 * one currently in use and an ERR_PTR value on error. Called under 13 * one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
250 250
251struct tty_operations { 251struct tty_operations {
252 struct tty_struct * (*lookup)(struct tty_driver *driver, 252 struct tty_struct * (*lookup)(struct tty_driver *driver,
253 struct inode *inode, int idx); 253 struct file *filp, int idx);
254 int (*install)(struct tty_driver *driver, struct tty_struct *tty); 254 int (*install)(struct tty_driver *driver, struct tty_struct *tty);
255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty); 255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
256 int (*open)(struct tty_struct * tty, struct file * filp); 256 int (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78bd15ad..245f57dbbb61 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
79 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ 79 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
80 US_FLAG(MAX_SECTORS_240, 0x08000000) \ 80 US_FLAG(MAX_SECTORS_240, 0x08000000) \
81 /* Sets max_sectors to 240 */ \ 81 /* Sets max_sectors to 240 */ \
82 US_FLAG(NO_REPORT_LUNS, 0x10000000) \
83 /* Cannot handle REPORT_LUNS */ \
82 84
83#define US_FLAG(name, value) US_FL_##name = value , 85#define US_FLAG(name, value) US_FL_##name = value ,
84enum { US_DO_ALL_FLAGS }; 86enum { US_DO_ALL_FLAGS };
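
US_FLAG() is an x-macro: every entry in the table, including the new NO_REPORT_LUNS line, expands into a US_FL_* enum constant. A compilable miniature of the pattern (names here are illustrative):

#include <assert.h>

#define EXAMPLE_FLAGS \
	EX_FLAG(MAX_SECTORS_240, 0x08000000) \
	EX_FLAG(NO_REPORT_LUNS,  0x10000000)

/* Each table entry becomes an enumerator when the list is expanded. */
#define EX_FLAG(name, value) EX_FL_##name = value,
enum { EXAMPLE_FLAGS };
#undef EX_FLAG

int main(void)
{
	assert(EX_FL_NO_REPORT_LUNS == 0x10000000);
	return 0;
}
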
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
375/** 375/**
376 * struct vb2_ops - driver-specific callbacks 376 * struct vb2_ops - driver-specific callbacks
377 * 377 *
378 * @verify_planes_array: Verify that a given user space structure contains
379 * enough planes for the buffer. This is called
380 * for each dequeued buffer.
378 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. 381 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
379 * For V4L2 this is a struct v4l2_buffer. 382 * For V4L2 this is a struct v4l2_buffer.
380 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. 383 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
384 * the vb2_buffer struct. 387 * the vb2_buffer struct.
385 */ 388 */
386struct vb2_buf_ops { 389struct vb2_buf_ops {
390 int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
387 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); 391 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
388 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, 392 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
389 struct vb2_plane *planes); 393 struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
400 * @fileio_read_once: report EOF after reading the first buffer 404 * @fileio_read_once: report EOF after reading the first buffer
401 * @fileio_write_immediately: queue buffer after each write() call 405 * @fileio_write_immediately: queue buffer after each write() call
402 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver 406 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
407 * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
408 * has not been called. This is a vb1 idiom that has also been
409 * adopted by vb2.
403 * @lock: pointer to a mutex that protects the vb2_queue struct. The 410 * @lock: pointer to a mutex that protects the vb2_queue struct. The
404 * driver can set this to a mutex to let the v4l2 core serialize 411 * driver can set this to a mutex to let the v4l2 core serialize
405 * the queuing ioctls. If the driver wants to handle locking 412 * the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
463 unsigned fileio_read_once:1; 470 unsigned fileio_read_once:1;
464 unsigned fileio_write_immediately:1; 471 unsigned fileio_write_immediately:1;
465 unsigned allow_zero_bytesused:1; 472 unsigned allow_zero_bytesused:1;
473 unsigned quirk_poll_must_check_waiting_for_buffers:1;
466 474
467 struct mutex *lock; 475 struct mutex *lock;
468 void *owner; 476 void *owner;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2a19fe111c78..03e322b30218 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -135,6 +135,7 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
135static inline void tc_action_net_exit(struct tc_action_net *tn) 135static inline void tc_action_net_exit(struct tc_action_net *tn)
136{ 136{
137 tcf_hashinfo_destroy(tn->ops, tn->hinfo); 137 tcf_hashinfo_destroy(tn->ops, tn->hinfo);
138 kfree(tn->hinfo);
138} 139}
139 140
140int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, 141int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c0a92e2c286d..74c9693d4941 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/inet_sock.h>
20 21
21#ifdef CONFIG_CGROUP_NET_CLASSID 22#ifdef CONFIG_CGROUP_NET_CLASSID
22struct cgroup_cls_state { 23struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
63 * softirqs always disables bh. 64 * softirqs always disables bh.
64 */ 65 */
65 if (in_serving_softirq()) { 66 if (in_serving_softirq()) {
67 struct sock *sk = skb_to_full_sk(skb);
68
66 /* If there is an sock_cgroup_classid we'll use that. */ 69 /* If there is an sock_cgroup_classid we'll use that. */
67 if (!skb->sk) 70 if (!sk || !sk_fullsock(sk))
68 return 0; 71 return 0;
69 72
70 classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data); 73 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
71 } 74 }
72 75
73 return classid; 76 return classid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 295d291269e2..54c779416eec 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
101struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 101struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
102 const struct in6_addr *addr, bool anycast); 102 const struct in6_addr *addr, bool anycast);
103 103
104struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
105 int flags);
106
104/* 107/*
105 * support functions for ND 108 * support functions for ND
106 * 109 *
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d0aeb97aec5d..1be050ada8c5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
959int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 959int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
960int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 960int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
961 int addr_len); 961 int addr_len);
962int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
963void ip6_datagram_release_cb(struct sock *sk);
962 964
963int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, 965int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
964 int *addr_len); 966 int *addr_len);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0c09da34b67a..e385eb3076a1 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1001,6 +1001,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
1001 * flag indicates that the PN was verified for replay protection. 1001 * flag indicates that the PN was verified for replay protection.
1002 * Note that this flag is also currently only supported when a frame 1002 * Note that this flag is also currently only supported when a frame
1003 * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set) 1003 * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
1004 * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
1005 * de-duplication by itself.
1004 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on 1006 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
1005 * the frame. 1007 * the frame.
1006 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on 1008 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523bb428..6de665bf1750 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
209void ip_rt_multicast_event(struct in_device *); 209void ip_rt_multicast_event(struct in_device *);
210int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 210int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
211void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 211void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
212struct rtable *rt_dst_alloc(struct net_device *dev,
213 unsigned int flags, u16 type,
214 bool nopolicy, bool noxfrm, bool will_cache);
212 215
213struct in_ifaddr; 216struct in_ifaddr;
214void fib_add_ifaddr(struct in_ifaddr *); 217void fib_add_ifaddr(struct in_ifaddr *);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 65521cfdcade..03fb33efcae2 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -386,11 +386,9 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list)
386{ 386{
387 struct list_head *result = NULL; 387 struct list_head *result = NULL;
388 388
389 if (list->next != list) { 389 if (!list_empty(list)) {
390 result = list->next; 390 result = list->next;
391 list->next = result->next; 391 list_del_init(result);
392 list->next->prev = list;
393 INIT_LIST_HEAD(result);
394 } 392 }
395 return result; 393 return result;
396} 394}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6df1ce7a411c..5a404c354f4c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -847,6 +847,11 @@ struct sctp_transport {
847 */ 847 */
848 ktime_t last_time_heard; 848 ktime_t last_time_heard;
849 849
850 /* When was the last time that we sent a chunk using this
851 * transport? We use this to check for idle transports
852 */
853 unsigned long last_time_sent;
854
850 /* Last time(in jiffies) when cwnd is reduced due to the congestion 855 /* Last time(in jiffies) when cwnd is reduced due to the congestion
851 * indication based on ECNE chunk. 856 * indication based on ECNE chunk.
852 */ 857 */
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
952 struct sctp_sock *); 957 struct sctp_sock *);
953void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); 958void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
954void sctp_transport_free(struct sctp_transport *); 959void sctp_transport_free(struct sctp_transport *);
955void sctp_transport_reset_timers(struct sctp_transport *); 960void sctp_transport_reset_t3_rtx(struct sctp_transport *);
961void sctp_transport_reset_hb_timer(struct sctp_transport *);
956int sctp_transport_hold(struct sctp_transport *); 962int sctp_transport_hold(struct sctp_transport *);
957void sctp_transport_put(struct sctp_transport *); 963void sctp_transport_put(struct sctp_transport *);
958void sctp_transport_update_rto(struct sctp_transport *, __u32); 964void sctp_transport_update_rto(struct sctp_transport *, __u32);
diff --git a/include/net/sock.h b/include/net/sock.h
index 255d3e03727b..121ffc115c4f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
630 630
631static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 631static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
632{ 632{
633 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 633 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
634 sk->sk_family == AF_INET6)
635 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
636 else
637 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
634} 638}
635 639
636static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 640static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d451122e8404..51d77b2ce2b2 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@ struct switchdev_attr {
54 struct net_device *orig_dev; 54 struct net_device *orig_dev;
55 enum switchdev_attr_id id; 55 enum switchdev_attr_id id;
56 u32 flags; 56 u32 flags;
57 void *complete_priv;
58 void (*complete)(struct net_device *dev, int err, void *priv);
57 union { 59 union {
58 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
59 u8 stp_state; /* PORT_STP_STATE */ 61 u8 stp_state; /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
75 struct net_device *orig_dev; 77 struct net_device *orig_dev;
76 enum switchdev_obj_id id; 78 enum switchdev_obj_id id;
77 u32 flags; 79 u32 flags;
80 void *complete_priv;
81 void (*complete)(struct net_device *dev, int err, void *priv);
78}; 82};
79 83
80/* SWITCHDEV_OBJ_ID_PORT_VLAN */ 84/* SWITCHDEV_OBJ_ID_PORT_VLAN */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b91370f61be6..6db10228113f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
552void tcp_send_delayed_ack(struct sock *sk); 552void tcp_send_delayed_ack(struct sock *sk);
553void tcp_send_loss_probe(struct sock *sk); 553void tcp_send_loss_probe(struct sock *sk);
554bool tcp_schedule_loss_probe(struct sock *sk); 554bool tcp_schedule_loss_probe(struct sock *sk);
555void tcp_skb_collapse_tstamp(struct sk_buff *skb,
556 const struct sk_buff *next_skb);
555 557
556/* tcp_input.c */ 558/* tcp_input.c */
557void tcp_resume_early_retransmit(struct sock *sk); 559void tcp_resume_early_retransmit(struct sock *sk);
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
34#define _RDMA_IB_H 34#define _RDMA_IB_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/sched.h>
37 38
38struct ib_addr { 39struct ib_addr {
39 union { 40 union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
86 __u64 sib_scope_id; 87 __u64 sib_scope_id;
87}; 88};
88 89
90/*
91 * The IB interfaces that use write() as bi-directional ioctl() are
92 * fundamentally unsafe, since there are lots of ways to trigger "write()"
93 * calls from various contexts with elevated privileges. That includes the
94 * traditional suid executable error message writes, but also various kernel
95 * interfaces that can write to file descriptors.
96 *
97 * This function provides protection for the legacy API by restricting the
98 * calling context.
99 */
100static inline bool ib_safe_file_access(struct file *filp)
101{
102 return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
103}
104
89#endif /* _RDMA_IB_H */ 105#endif /* _RDMA_IB_H */
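
ib_safe_file_access() is meant to gate the legacy write()-as-ioctl entry points described in the comment above. A sketch of a guarded handler (the handler itself is hypothetical, not from this patch):

#include <linux/fs.h>
#include <linux/printk.h>
#include <rdma/ib.h>

/* Hypothetical legacy write() handler, gated by the new check. */
static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	if (!ib_safe_file_access(filp)) {
		pr_err_once("example: write() from unsafe context rejected\n");
		return -EACCES;
	}

	/* ... parse the command from buf as before ... */
	return count;
}
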
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c067019ed12a..74d79bde7075 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -516,6 +516,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
516 return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0; 516 return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
517} 517}
518 518
519/**
520 * scsi_device_supports_vpd - test if a device supports VPD pages
521 * @sdev: the &struct scsi_device to test
522 *
523 * If the 'try_vpd_pages' flag is set, it takes precedence.
524 * Otherwise we will assume VPD pages are supported if the
525 * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
526 */
527static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
528{
529 /* Attempt VPD inquiry if the device blacklist explicitly calls
530 * for it.
531 */
532 if (sdev->try_vpd_pages)
533 return 1;
534 /*
535 * Although VPD inquiries can go to SCSI-2 type devices,
536 * some USB ones crash on receiving them, and the pages
537 * we currently ask for are for SPC-3 and beyond
538 */
539 if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
540 return 1;
541 return 0;
542}
543
519#define MODULE_ALIAS_SCSI_DEVICE(type) \ 544#define MODULE_ALIAS_SCSI_DEVICE(type) \
520 MODULE_ALIAS("scsi:t-" __stringify(type) "*") 545 MODULE_ALIAS("scsi:t-" __stringify(type) "*")
521#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" 546#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
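
scsi_device_supports_vpd() centralizes the blacklist/SPC-3 policy spelled out in its comment. A sketch of a call site (scsi_attach_vpd() is assumed here for illustration and is not part of this hunk; real callers vary):

#include <scsi/scsi_device.h>

/* Illustrative probe-time use: only ask for VPD pages when the
 * device is expected to handle them. */
static void example_probe_vpd(struct scsi_device *sdev)
{
	if (scsi_device_supports_vpd(sdev))
		scsi_attach_vpd(sdev);	/* assumed helper */
}
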
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..f5842bcd9c94 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,7 +9,7 @@
9#ifdef CONFIG_SND_HDA_I915 9#ifdef CONFIG_SND_HDA_I915
10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); 10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
11int snd_hdac_display_power(struct hdac_bus *bus, bool enable); 11int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
12int snd_hdac_get_display_clk(struct hdac_bus *bus); 12void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
13int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); 13int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
14int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, 14int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
15 bool *audio_enabled, char *buffer, int max_bytes); 15 bool *audio_enabled, char *buffer, int max_bytes);
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
25{ 25{
26 return 0; 26 return 0;
27} 27}
28static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) 28static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
29{ 29{
30 return 0;
31} 30}
32static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, 31static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
33 int rate) 32 int rate)
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 2767c55a641e..ca64f0f50b45 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
17 unsigned int verb); 17 unsigned int verb);
18int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 18int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
19 unsigned int *val); 19 unsigned int *val);
20int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
21 unsigned int reg, unsigned int *val);
20int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, 22int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
21 unsigned int val); 23 unsigned int val);
22int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, 24int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 677807f29a1c..e90e82ad6875 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,7 +23,7 @@ struct map_lookup;
23struct extent_buffer; 23struct extent_buffer;
24struct btrfs_work; 24struct btrfs_work;
25struct __btrfs_workqueue; 25struct __btrfs_workqueue;
26struct btrfs_qgroup_operation; 26struct btrfs_qgroup_extent_record;
27 27
28#define show_ref_type(type) \ 28#define show_ref_type(type) \
29 __print_symbolic(type, \ 29 __print_symbolic(type, \
@@ -1231,6 +1231,93 @@ DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
1231 1231
1232 TP_ARGS(ref_root, reserved) 1232 TP_ARGS(ref_root, reserved)
1233); 1233);
1234
1235DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
1236 TP_PROTO(struct btrfs_qgroup_extent_record *rec),
1237
1238 TP_ARGS(rec),
1239
1240 TP_STRUCT__entry(
1241 __field( u64, bytenr )
1242 __field( u64, num_bytes )
1243 ),
1244
1245 TP_fast_assign(
1246 __entry->bytenr = rec->bytenr;
1247 __entry->num_bytes = rec->num_bytes;
1248 ),
1249
1250 TP_printk("bytenr = %llu, num_bytes = %llu",
1251 (unsigned long long)__entry->bytenr,
1252 (unsigned long long)__entry->num_bytes)
1253);
1254
1255DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
1256
1257 TP_PROTO(struct btrfs_qgroup_extent_record *rec),
1258
1259 TP_ARGS(rec)
1260);
1261
1262DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
1263
1264 TP_PROTO(struct btrfs_qgroup_extent_record *rec),
1265
1266 TP_ARGS(rec)
1267);
1268
1269TRACE_EVENT(btrfs_qgroup_account_extent,
1270
1271 TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
1272
1273 TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
1274
1275 TP_STRUCT__entry(
1276 __field( u64, bytenr )
1277 __field( u64, num_bytes )
1278 __field( u64, nr_old_roots )
1279 __field( u64, nr_new_roots )
1280 ),
1281
1282 TP_fast_assign(
1283 __entry->bytenr = bytenr;
1284 __entry->num_bytes = num_bytes;
1285 __entry->nr_old_roots = nr_old_roots;
1286 __entry->nr_new_roots = nr_new_roots;
1287 ),
1288
1289 TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
1290 "nr_new_roots = %llu",
1291 __entry->bytenr,
1292 __entry->num_bytes,
1293 __entry->nr_old_roots,
1294 __entry->nr_new_roots)
1295);
1296
1297TRACE_EVENT(qgroup_update_counters,
1298
1299 TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
1300
1301 TP_ARGS(qgid, cur_old_count, cur_new_count),
1302
1303 TP_STRUCT__entry(
1304 __field( u64, qgid )
1305 __field( u64, cur_old_count )
1306 __field( u64, cur_new_count )
1307 ),
1308
1309 TP_fast_assign(
1310 __entry->qgid = qgid;
1311 __entry->cur_old_count = cur_old_count;
1312 __entry->cur_new_count = cur_new_count;
1313 ),
1314
1315 TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
1316 __entry->qgid,
1317 __entry->cur_old_count,
1318 __entry->cur_new_count)
1319);
1320
1234#endif /* _TRACE_BTRFS_H */ 1321#endif /* _TRACE_BTRFS_H */
1235 1322
1236/* This part must be outside protection */ 1323/* This part must be outside protection */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 2622b33fb2ec..6e0f5f01734c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -717,9 +717,13 @@ __SYSCALL(__NR_membarrier, sys_membarrier)
717__SYSCALL(__NR_mlock2, sys_mlock2) 717__SYSCALL(__NR_mlock2, sys_mlock2)
718#define __NR_copy_file_range 285 718#define __NR_copy_file_range 285
719__SYSCALL(__NR_copy_file_range, sys_copy_file_range) 719__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
720#define __NR_preadv2 286
721__SYSCALL(__NR_preadv2, sys_preadv2)
722#define __NR_pwritev2 287
723__SYSCALL(__NR_pwritev2, sys_pwritev2)
720 724
721#undef __NR_syscalls 725#undef __NR_syscalls
722#define __NR_syscalls 286 726#define __NR_syscalls 288
723 727
724/* 728/*
725 * All syscalls below here should go away really, 729 * All syscalls below here should go away really,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b71fd0b5cbad..813ffb2e22c9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@ header-y += cyclades.h
96header-y += cycx_cfm.h 96header-y += cycx_cfm.h
97header-y += dcbnl.h 97header-y += dcbnl.h
98header-y += dccp.h 98header-y += dccp.h
99header-y += devlink.h
99header-y += dlmconstants.h 100header-y += dlmconstants.h
100header-y += dlm_device.h 101header-y += dlm_device.h
101header-y += dlm.h 102header-y += dlm.h
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 26b0d1e3e3e7..4c58d9917aa4 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,8 @@
19 19
20#define MACSEC_MAX_KEY_LEN 128 20#define MACSEC_MAX_KEY_LEN 128
21 21
22#define DEFAULT_CIPHER_ID 0x0080020001000001ULL 22#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
23#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 23#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
24 24
25#define MACSEC_MIN_ICV_LEN 8 25#define MACSEC_MIN_ICV_LEN 8
26#define MACSEC_MAX_ICV_LEN 32 26#define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 06d6c6228a7a..d5ce71607972 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -899,7 +899,7 @@ struct usb_ssp_cap_descriptor {
899 __le32 bmAttributes; 899 __le32 bmAttributes;
900#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */ 900#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */
901#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */ 901#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */
902 __u16 wFunctionalitySupport; 902 __le16 wFunctionalitySupport;
903#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf) 903#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf)
904#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8) 904#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
905#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12) 905#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
183 183
184#define V4L2_DV_BT_CEA_3840X2160P24 { \ 184#define V4L2_DV_BT_CEA_3840X2160P24 { \
185 .type = V4L2_DV_BT_656_1120, \ 185 .type = V4L2_DV_BT_656_1120, \
186 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 186 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
187 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
187 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ 188 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
188 V4L2_DV_BT_STD_CEA861, \ 189 V4L2_DV_BT_STD_CEA861, \
189 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 190 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
191 192
192#define V4L2_DV_BT_CEA_3840X2160P25 { \ 193#define V4L2_DV_BT_CEA_3840X2160P25 { \
193 .type = V4L2_DV_BT_656_1120, \ 194 .type = V4L2_DV_BT_656_1120, \
194 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 195 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
196 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
195 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 197 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
196 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 198 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
197} 199}
198 200
199#define V4L2_DV_BT_CEA_3840X2160P30 { \ 201#define V4L2_DV_BT_CEA_3840X2160P30 { \
200 .type = V4L2_DV_BT_656_1120, \ 202 .type = V4L2_DV_BT_656_1120, \
201 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 203 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
204 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
202 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 205 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
203 V4L2_DV_BT_STD_CEA861, \ 206 V4L2_DV_BT_STD_CEA861, \
204 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 207 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
206 209
207#define V4L2_DV_BT_CEA_3840X2160P50 { \ 210#define V4L2_DV_BT_CEA_3840X2160P50 { \
208 .type = V4L2_DV_BT_656_1120, \ 211 .type = V4L2_DV_BT_656_1120, \
209 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 212 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
213 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
210 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 214 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
211 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 215 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
212} 216}
213 217
214#define V4L2_DV_BT_CEA_3840X2160P60 { \ 218#define V4L2_DV_BT_CEA_3840X2160P60 { \
215 .type = V4L2_DV_BT_656_1120, \ 219 .type = V4L2_DV_BT_656_1120, \
216 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 220 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
221 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
217 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 222 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
218 V4L2_DV_BT_STD_CEA861, \ 223 V4L2_DV_BT_STD_CEA861, \
219 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 224 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
221 226
222#define V4L2_DV_BT_CEA_4096X2160P24 { \ 227#define V4L2_DV_BT_CEA_4096X2160P24 { \
223 .type = V4L2_DV_BT_656_1120, \ 228 .type = V4L2_DV_BT_656_1120, \
224 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 229 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
230 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
225 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ 231 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
226 V4L2_DV_BT_STD_CEA861, \ 232 V4L2_DV_BT_STD_CEA861, \
227 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 233 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
229 235
230#define V4L2_DV_BT_CEA_4096X2160P25 { \ 236#define V4L2_DV_BT_CEA_4096X2160P25 { \
231 .type = V4L2_DV_BT_656_1120, \ 237 .type = V4L2_DV_BT_656_1120, \
232 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 238 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
239 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
233 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 240 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
234 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 241 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
235} 242}
236 243
237#define V4L2_DV_BT_CEA_4096X2160P30 { \ 244#define V4L2_DV_BT_CEA_4096X2160P30 { \
238 .type = V4L2_DV_BT_656_1120, \ 245 .type = V4L2_DV_BT_656_1120, \
239 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 246 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
247 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
240 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 248 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
241 V4L2_DV_BT_STD_CEA861, \ 249 V4L2_DV_BT_STD_CEA861, \
242 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 250 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
244 252
245#define V4L2_DV_BT_CEA_4096X2160P50 { \ 253#define V4L2_DV_BT_CEA_4096X2160P50 { \
246 .type = V4L2_DV_BT_656_1120, \ 254 .type = V4L2_DV_BT_656_1120, \
247 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 255 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
256 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
248 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 257 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
249 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 258 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
250} 259}
251 260
252#define V4L2_DV_BT_CEA_4096X2160P60 { \ 261#define V4L2_DV_BT_CEA_4096X2160P60 { \
253 .type = V4L2_DV_BT_656_1120, \ 262 .type = V4L2_DV_BT_656_1120, \
254 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 263 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
264 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
255 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 265 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
256 V4L2_DV_BT_STD_CEA861, \ 266 V4L2_DV_BT_STD_CEA861, \
257 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 267 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index c18264df9504..4cb65bbfa654 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -40,6 +40,8 @@
 #define VIRTIO_CONFIG_S_DRIVER_OK	4
 /* Driver has finished configuring features */
 #define VIRTIO_CONFIG_S_FEATURES_OK	8
+/* Device entered invalid state, driver must reset it */
+#define VIRTIO_CONFIG_S_NEEDS_RESET	0x40
 /* We've given up on this device. */
 #define VIRTIO_CONFIG_S_FAILED		0x80
 
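
VIRTIO_CONFIG_S_NEEDS_RESET (bit 6) lets a device tell its driver that it has entered an error state and must be reset before further use. A minimal standalone sketch of how a driver-side check might look; the read_status()/reset_device() scaffolding is hypothetical, only the status-bit values come from the header:

	#include <stdint.h>
	#include <stdio.h>

	#define VIRTIO_CONFIG_S_DRIVER_OK	4
	#define VIRTIO_CONFIG_S_NEEDS_RESET	0x40

	/* stand-ins for the real transport's status accessors */
	static uint8_t read_status(void) { return VIRTIO_CONFIG_S_NEEDS_RESET; }
	static void reset_device(void) { puts("resetting device"); }

	int main(void)
	{
		uint8_t status = read_status();

		if (status & VIRTIO_CONFIG_S_NEEDS_RESET)
			reset_device();		/* unusable until reset */
		else if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			puts("device operational");
		return 0;
	}
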
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index eeba75395f7d..ad66589f2ae6 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -194,8 +194,9 @@ int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
 void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-				   u32 pixel_format, int stride,
-				   int u_offset, int v_offset);
+				   unsigned int uv_stride,
+				   unsigned int u_offset,
+				   unsigned int v_offset);
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
 			      u32 pixel_format, int stride, int height);
 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
@@ -236,7 +237,7 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
 int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
 		unsigned long bandwidth_mbs, int burstsize);
 void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width);
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
 void ipu_dmfc_put(struct dmfc_channel *dmfc);
 
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 781c1399c6a3..ade739f67f1d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -307,8 +307,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
 	struct inode *inode;
 	struct ipc_namespace *ns = data;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = MQUEUE_MAGIC;
 	sb->s_op = &mqueue_super_ops;
 
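
This hunk, like the uprobes, fadvise and filemap hunks further down, is part of the tree-wide removal of the PAGE_CACHE_* aliases. The substitution is purely mechanical because, from memory, the old definitions in include/linux/pagemap.h were literal synonyms:

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)		get_page(page)
	#define page_cache_release(page)	put_page(page)
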
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e08f8e9b771..db2574e7b8b0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
 	}
 
 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+	    BPF_SIZE(insn->code) == BPF_DW ||
 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
 		verbose("BPF_LD_ABS uses reserved fields\n");
 		return -EINVAL;
@@ -2029,7 +2030,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
 		if (IS_ERR(map)) {
 			verbose("fd %d is not pointing to valid bpf_map\n",
 				insn->imm);
-			fdput(f);
 			return PTR_ERR(map);
 		}
 
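
Two separate fixes here: BPF_LD_ABS with a BPF_DW size is now rejected as a reserved encoding, and a double-fdput is removed. As I read it, __bpf_map_get() already drops the file reference on its error path, so the extra fdput(f) released the same reference twice. The general ownership bug, reduced to a standalone sketch:

	#include <stdlib.h>

	/* Callee that frees its argument on failure, like __bpf_map_get()
	 * dropping the fd reference when it returns an error pointer. */
	static int use_resource(char *buf)
	{
		if (!buf || !buf[0]) {	/* failure path */
			free(buf);	/* callee releases on error... */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		char *buf = calloc(1, 16);

		if (use_resource(buf) < 0) {
			/* free(buf) here would be the double-free the patch removes */
			return 1;
		}
		free(buf);	/* success path: caller still owns it */
		return 0;
	}
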
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 671dc05c0b0f..909a7d31ffd3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 			    size_t nbytes, loff_t off, bool threadgroup)
 {
 	struct task_struct *tsk;
+	struct cgroup_subsys *ss;
 	struct cgroup *cgrp;
 	pid_t pid;
-	int ret;
+	int ssid, ret;
 
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return -EINVAL;
@@ -2875,8 +2876,10 @@ out_unlock_rcu:
 	rcu_read_unlock();
 out_unlock_threadgroup:
 	percpu_up_write(&cgroup_threadgroup_rwsem);
+	for_each_subsys(ss, ssid)
+		if (ss->post_attach)
+			ss->post_attach();
 	cgroup_kn_unlock(of->kn);
-	cpuset_post_attach_flush();
 	return ret ?: nbytes;
 }
 
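
The open-coded cpuset_post_attach_flush() call becomes a generic cgroup_subsys->post_attach() hook, run for every subsystem after the threadgroup lock is dropped, so other controllers can register post-attach work without cgroup core knowing about them. A standalone sketch of the dispatch shape (the names are hypothetical, the pattern mirrors the for_each_subsys() loop above):

	#include <stdio.h>
	#include <stddef.h>

	struct subsys {
		const char *name;
		void (*post_attach)(void);	/* optional hook */
	};

	static void cpuset_post_attach(void) { puts("flush migration workqueue"); }

	static struct subsys subsystems[] = {
		{ "cpuset", cpuset_post_attach },
		{ "memory", NULL },		/* no hook registered */
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(subsystems) / sizeof(*subsystems); i++)
			if (subsystems[i].post_attach)
				subsystems[i].post_attach();
		return 0;
	}
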
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ea42e8da861..3e3f6e49eabb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -36,6 +36,7 @@
  * @target:	The target state
  * @thread:	Pointer to the hotplug thread
  * @should_run:	Thread should execute
+ * @rollback:	Perform a rollback
  * @cb_stat:	The state for a single callback (install/uninstall)
  * @cb:		Single callback function (install/uninstall)
  * @result:	Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
 #ifdef CONFIG_SMP
 	struct task_struct	*thread;
 	bool			should_run;
+	bool			rollback;
 	enum cpuhp_state	cb_state;
 	int			(*cb)(unsigned int cpu);
 	int			result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
 	return __cpu_notify(val, cpu, -1, NULL);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+	BUG_ON(cpu_notify(val, cpu));
+}
+
 /* Notifier wrappers for transitioning to state machine */
 static int notify_prepare(unsigned int cpu)
 {
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		} else {
 			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
 		}
+	} else if (st->rollback) {
+		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+		undo_cpu_down(cpu, st, cpuhp_ap_states);
+		/*
+		 * This is a momentary workaround to keep the notifier users
+		 * happy. Will go away once we got rid of the notifiers.
+		 */
+		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+		st->rollback = false;
 	} else {
 		/* Cannot happen .... */
 		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
 	read_unlock(&tasklist_lock);
 }
 
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-	BUG_ON(cpu_notify(val, cpu));
-}
-
 static int notify_down_prepare(unsigned int cpu)
 {
 	int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
 	 */
 	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
-		/* CPU didn't die: tell everyone.  Can't complain. */
-		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+		/* CPU refused to die */
 		irq_unlock_sparse();
+		/* Unpark the hotplug thread so we can rollback there */
+		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 		return err;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	 * to do the further cleanups.
 	 */
 	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+		st->target = prev_state;
+		st->rollback = true;
+		cpuhp_kick_ap_work(cpu);
+	}
 
 	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.name			= "notify:online",
 		.startup		= notify_online,
 		.teardown		= notify_down_prepare,
+		.skip_onerr		= true,
 	},
 #endif
 	/*
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 00ab5c2b7c5b..1902956baba1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -58,7 +58,6 @@
 #include <asm/uaccess.h>
 #include <linux/atomic.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 	}
 }
 
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
 {
 	flush_workqueue(cpuset_migrate_mm_wq);
 }
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
 	.can_attach	= cpuset_can_attach,
 	.cancel_attach	= cpuset_cancel_attach,
 	.attach		= cpuset_attach,
+	.post_attach	= cpuset_post_attach,
 	.bind		= cpuset_bind,
 	.legacy_cftypes	= files,
 	.early_init	= true,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 52bedc5a5aaa..4e2ebf6f2f1f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
 	if (ret || !write)
 		return ret;
 
-	if (sysctl_perf_cpu_time_max_percent == 100) {
+	if (sysctl_perf_cpu_time_max_percent == 100 ||
+	    sysctl_perf_cpu_time_max_percent == 0) {
 		printk(KERN_WARNING
 		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
 		WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
  * function.
  *
  * Lock order:
+ *	  cred_guard_mutex
  *	    task_struct::perf_event_mutex
  *	      perf_event_context::mutex
  *	        perf_event::child_mutex;
@@ -3420,7 +3422,6 @@ static struct task_struct *
 find_lively_task_by_vpid(pid_t vpid)
 {
 	struct task_struct *task;
-	int err;
 
 	rcu_read_lock();
 	if (!vpid)
@@ -3434,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/* Reuse ptrace permission checks for now. */
-	err = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
-		goto errout;
-
 	return task;
-errout:
-	put_task_struct(task);
-	return ERR_PTR(err);
-
 }
 
 /*
@@ -8413,6 +8405,24 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	get_online_cpus();
 
+	if (task) {
+		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+		if (err)
+			goto err_cpus;
+
+		/*
+		 * Reuse ptrace permission checks for now.
+		 *
+		 * We must hold cred_guard_mutex across this and any potential
+		 * perf_install_in_context() call for this new event to
+		 * serialize against exec() altering our credentials (and the
+		 * perf_event_exit_task() that could imply).
+		 */
+		err = -EACCES;
+		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+			goto err_cred;
+	}
+
 	if (flags & PERF_FLAG_PID_CGROUP)
 		cgroup_fd = pid;
 
@@ -8420,7 +8430,7 @@ SYSCALL_DEFINE5(perf_event_open,
 				 NULL, NULL, cgroup_fd);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
-		goto err_cpus;
+		goto err_cred;
 	}
 
 	if (is_sampling_event(event)) {
@@ -8479,11 +8489,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_context;
 	}
 
-	if (task) {
-		put_task_struct(task);
-		task = NULL;
-	}
-
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
@@ -8581,6 +8586,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	WARN_ON_ONCE(ctx->parent_ctx);
 
+	/*
+	 * This is the point of no return; we cannot fail hereafter. This is
+	 * where we start modifying current state.
+	 */
+
 	if (move_group) {
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
@@ -8652,6 +8662,11 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&gctx->mutex);
 	mutex_unlock(&ctx->mutex);
 
+	if (task) {
+		mutex_unlock(&task->signal->cred_guard_mutex);
+		put_task_struct(task);
+	}
+
 	put_online_cpus();
 
 	mutex_lock(&current->perf_event_mutex);
@@ -8684,6 +8699,9 @@ err_alloc:
 	 */
 	if (!event_file)
 		free_event(event);
+err_cred:
+	if (task)
+		mutex_unlock(&task->signal->cred_guard_mutex);
 err_cpus:
 	put_online_cpus();
 err_task:
@@ -8968,6 +8986,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 
 /*
  * When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
 */
 void perf_event_exit_task(struct task_struct *child)
 {
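
The point of this restructuring is that the ptrace_may_access() check moves out of find_lively_task_by_vpid() and under the target's cred_guard_mutex, which is then held until the event is installed. Otherwise a concurrent exec() of a setuid binary could raise the target's privileges after the permission check passed but before the event attached. The invariant, reduced to a standalone check-then-use sketch (all names here are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t cred_guard = PTHREAD_MUTEX_INITIALIZER;
	static int target_uid = 1000;	/* stand-in for the task's credentials */

	static void attach_profiler(int my_uid)
	{
		pthread_mutex_lock(&cred_guard);
		if (my_uid == target_uid)	/* permission check...          */
			puts("attached");	/* ...and attach, same section */
		pthread_mutex_unlock(&cred_guard);
	}

	static void exec_setuid(void)
	{
		pthread_mutex_lock(&cred_guard);
		target_uid = 0;		/* exec() swaps in elevated creds */
		pthread_mutex_unlock(&cred_guard);
	}

	int main(void)
	{
		attach_profiler(1000);	/* cannot interleave with exec_setuid() */
		exec_setuid();
		return 0;
	}
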
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 220fc17b9718..7edc95edfaee 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -321,7 +321,7 @@ retry:
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
 	ret = __replace_page(vma, vaddr, old_page, new_page);
-	page_cache_release(new_page);
+	put_page(new_page);
 put_old:
 	put_page(old_page);
 
@@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
 	 * see uprobe_register().
 	 */
 	if (mapping->a_ops->readpage)
-		page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 	else
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
 	copy_from_page(page, offset, insn, nbytes);
-	page_cache_release(page);
+	put_page(page);
 
 	return 0;
 }
diff --git a/kernel/futex.c b/kernel/futex.c
index a5d2e74c89e0..c20f06f38ef3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
 	if (unlikely(should_fail_futex(true)))
 		ret = -EFAULT;
 
-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
 		ret = -EFAULT;
-	else if (curval != uval)
-		ret = -EINVAL;
+	} else if (curval != uval) {
+		/*
+		 * If an unconditional UNLOCK_PI operation (user space did not
+		 * try the TID->0 transition) raced with a waiter setting the
+		 * FUTEX_WAITERS flag between get_user() and locking the hash
+		 * bucket lock, retry the operation.
+		 */
+		if ((FUTEX_TID_MASK & curval) == uval)
+			ret = -EAGAIN;
+		else
+			ret = -EINVAL;
+	}
 	if (ret) {
 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 		return ret;
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 	if (likely(&hb1->chain != &hb2->chain)) {
 		plist_del(&q->list, &hb1->chain);
 		hb_waiters_dec(hb1);
-		plist_add(&q->list, &hb2->chain);
 		hb_waiters_inc(hb2);
+		plist_add(&q->list, &hb2->chain);
 		q->lock_ptr = &hb2->lock;
 	}
 	get_futex_key_refs(key2);
@@ -2623,6 +2633,15 @@ retry:
 	if (ret == -EFAULT)
 		goto pi_faulted;
 	/*
+	 * An unconditional UNLOCK_PI op raced against a waiter
+	 * setting the FUTEX_WAITERS bit. Try again.
+	 */
+	if (ret == -EAGAIN) {
+		spin_unlock(&hb->lock);
+		put_futex_key(&key);
+		goto retry;
+	}
+	/*
 	 * wake_futex_pi has detected invalid state. Tell user
 	 * space.
 	 */
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c37f34b00a11..14777af8e097 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 		data = irq_get_irq_data(virq + i);
 		cpumask_copy(data->common->affinity, dest);
 		data->common->ipi_offset = offset;
+		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
 	}
 	return virq;
 
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3efbee0834a8..a02f2dddd1d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,5 +1,6 @@
 #define pr_fmt(fmt) "kcov: " fmt
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/file.h>
@@ -43,7 +44,7 @@ struct kcov {
  * Entry point from instrumented code.
  * This is called once per basic-block/edge.
  */
-void __sanitizer_cov_trace_pc(void)
+void notrace __sanitizer_cov_trace_pc(void)
 {
 	struct task_struct *t;
 	enum kcov_mode mode;
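
Both changes guard against self-instrumentation: notrace keeps ftrace from hooking __sanitizer_cov_trace_pc(), and DISABLE_BRANCH_PROFILING keeps the likely/unlikely profiler out of kcov.c, since either would re-enter the coverage hook from inside the coverage hook. A userspace analogy, assuming a compiler that supports -fsanitize-coverage=trace-pc; the hook's own translation unit must be built without that flag, mirroring how kcov.c itself is built without coverage instrumentation:

	/* hook.c: build WITHOUT -fsanitize-coverage=trace-pc */
	unsigned long n_blocks;

	void __sanitizer_cov_trace_pc(void)
	{
		/*
		 * If this function were itself instrumented, the compiler
		 * would insert a call to __sanitizer_cov_trace_pc() right
		 * here and every invocation would recurse forever; the same
		 * hazard is what notrace and DISABLE_BRANCH_PROFILING close
		 * off in the kernel hook.
		 */
		n_blocks++;
	}

	/* main.c: build WITH -fsanitize-coverage=trace-pc and link both
	 * files; every basic block executed bumps n_blocks implicitly. */
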
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8d34308ea449..1391d3ee3b86 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(page, lru);
 	VMCOREINFO_OFFSET(page, _mapcount);
 	VMCOREINFO_OFFSET(page, private);
+	VMCOREINFO_OFFSET(page, compound_dtor);
+	VMCOREINFO_OFFSET(page, compound_order);
+	VMCOREINFO_OFFSET(page, compound_head);
 	VMCOREINFO_OFFSET(pglist_data, node_zones);
 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void)
 #ifdef CONFIG_X86
 	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
 #endif
-#ifdef CONFIG_HUGETLBFS
-	VMCOREINFO_SYMBOL(free_huge_page);
+#ifdef CONFIG_HUGETLB_PAGE
+	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
 #endif
 
 	arch_crash_save_vmcoreinfo();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7cc43ef856c1..874d53eaf389 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2176,15 +2176,37 @@ cache_hit:
 	chain->irq_context = hlock->irq_context;
 	i = get_first_held_lock(curr, hlock);
 	chain->depth = curr->lockdep_depth + 1 - i;
+
+	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
+	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
+
 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = nr_chain_hlocks;
-		nr_chain_hlocks += chain->depth;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
 			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
+
+	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+		nr_chain_hlocks += chain->depth;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+	/*
+	 * Important for check_no_collision().
+	 */
+	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+		if (debug_locks_off_graph_unlock())
+			return 0;
+
+		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+		dump_stack();
+		return 0;
+	}
+#endif
+
 	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();
@@ -2932,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 	return 1;
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+	return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
 static int separate_irq_context(struct task_struct *curr,
 		struct held_lock *hlock)
 {
@@ -2940,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr,
 	/*
 	 * Keep track of points where we cross into an interrupt context:
 	 */
-	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
-			curr->softirq_context;
 	if (depth) {
 		struct held_lock *prev_hlock;
 
@@ -2973,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr,
 	return 1;
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+	return 0;
+}
+
 static inline int separate_irq_context(struct task_struct *curr,
 		struct held_lock *hlock)
 {
@@ -3241,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
 	hlock->nest_lock = nest_lock;
+	hlock->irq_context = task_irq_context(curr);
 	hlock->trylock = trylock;
 	hlock->read = read;
 	hlock->check = check;
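
task_irq_context() packs the interrupt state into two bits, and the result is now stamped on the held lock at acquire time rather than inside separate_irq_context(). The encoding it produces, spelled out as a standalone check:

	#include <stdio.h>

	/* returns 2 * !!hardirq + !!softirq, as in task_irq_context() */
	static unsigned int irq_context(int hardirq, int softirq)
	{
		return 2 * !!hardirq + !!softirq;
	}

	int main(void)
	{
		printf("process context:      %u\n", irq_context(0, 0)); /* 0 */
		printf("softirq:              %u\n", irq_context(0, 1)); /* 1 */
		printf("hardirq:              %u\n", irq_context(1, 0)); /* 2 */
		printf("hardirq over softirq: %u\n", irq_context(1, 1)); /* 3 */
		return 0;
	}
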
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dbb61a302548..a0f61effad25 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v)
 	int i;
 
 	if (v == SEQ_START_TOKEN) {
+		if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
+			seq_printf(m, "(buggered) ");
 		seq_printf(m, "all lock chains:\n");
 		return 0;
 	}
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index eb2a2c9bc3fc..d734b7502001 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
 	}
 
 	if (counter == qstat_pv_hash_hops) {
-		u64 frac;
+		u64 frac = 0;
 
-		frac = 100ULL * do_div(stat, kicks);
-		frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+		if (kicks) {
+			frac = 100ULL * do_div(stat, kicks);
+			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+		}
 
 		/*
 		 * Return a X.XX decimal number
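
With no kicks recorded yet, do_div(stat, kicks) divided by zero when the stat file was read early. The fixed path only computes the two-digit fraction when kicks is nonzero. The arithmetic, modelled in userspace with do_div()'s quotient-plus-remainder semantics:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t stat = 723, kicks = 200;	/* sample counter values */
		uint64_t frac = 0;

		if (kicks) {				/* the guard the patch adds */
			uint64_t rem = stat % kicks;	/* do_div(): remainder...    */
			stat /= kicks;			/* ...quotient left in place */
			frac = (100 * rem + kicks / 2) / kicks; /* DIV_ROUND_CLOSEST_ULL */
		}
		printf("%" PRIu64 ".%02" PRIu64 "\n", stat, frac);	/* 3.62 */
		return 0;
	}
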
diff --git a/kernel/resource.c b/kernel/resource.c
index 2e78ead30934..9b5f04404152 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v)
 {
 	struct resource *root = m->private;
 	struct resource *r = v, *p;
+	unsigned long long start, end;
 	int width = root->end < 0x10000 ? 4 : 8;
 	int depth;
 
 	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
 		if (p->parent == root)
 			break;
+
+	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+		start = r->start;
+		end = r->end;
+	} else {
+		start = end = 0;
+	}
+
 	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
 			depth * 2, "",
-			width, (unsigned long long) r->start,
-			width, (unsigned long long) r->end,
+			width, start,
+			width, end,
 			r->name ? r->name : "<BAD>");
 	return 0;
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that previous clear of a PENDING bit
+	 * will not be reordered with any speculative LOADS or STORES from
+	 * work->current_func, which is executed afterwards.  This possible
+	 * reordering can lead to a missed execution on an attempt to queue
+	 * the same @work.  E.g. consider this case:
+	 *
+	 *   CPU#0                         CPU#1
+	 *   ----------------------------  --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4 }                             set_..._and_clear_pending() {
+	 * 5                                 set_work_data() # clear bit
+	 * 6                                 smp_mb()
+	 * 7                               work->current_func() {
+	 * 8                                  LOAD event_indicated
+	 *                                 }
+	 *
+	 * Without an explicit full barrier, the speculative LOAD on line 8
+	 * can be executed before CPU#0 does the STORE on line 1.  If that
+	 * happens, CPU#0 observes the PENDING bit is still set and a new
+	 * execution of @work is not queued, in the hope that CPU#1 will
+	 * eventually finish the queued @work.  Meanwhile CPU#1 does not see
+	 * that event_indicated is set, because the speculative LOAD was
+	 * executed before the actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
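
The comment above documents why clearing PENDING needs a full barrier before the work function runs: the clear is a plain store, and without the fence a load inside the work callback may be satisfied before the producer's earlier store, so each side assumes the other will run the work. The protocol, condensed with C11 atomics as a sketch (not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool pending;
	static atomic_bool event_indicated;

	static void producer(void)		/* CPU#0 */
	{
		atomic_store(&event_indicated, true);	/* 1: publish the event  */
		if (!atomic_exchange(&pending, true))	/* 3: test_and_set PENDING */
			;				/* queue the work */
	}

	static void worker(void)		/* CPU#1 */
	{
		atomic_store_explicit(&pending, false, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst); /* the smp_mb() */
		if (atomic_load(&event_indicated))	/* 8: must observe step 1 */
			;				/* handle the event */
	}

	int main(void)
	{
		producer();
		worker();
		return 0;
	}
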
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 03dd576e6773..59fd7c0b119c 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
 			free_slot = i;
 			continue;
 		}
-		if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+		if (assoc_array_ptr_is_leaf(ptr) &&
+		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+					index_key)) {
 			pr_devel("replace in slot %d\n", i);
 			edit->leaf_p = &node->slots[i];
 			edit->dead_leaf = node->slots[i];
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index abcecdc2d0f2..c79d7ea8a38e 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -11,8 +11,7 @@
 /*
  * Detects 64 bits mode
  */
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
-	|| defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
 #define LZ4_ARCH64 1
 #else
 #define LZ4_ARCH64 0
@@ -25,9 +24,7 @@
 typedef struct _U16_S { u16 v; } U16_S;
 typedef struct _U32_S { u32 v; } U32_S;
 typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
-	|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
-	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 
 #define A16(x) (((U16_S *)(x))->v)
 #define A32(x) (((U32_S *)(x))->v)
@@ -35,6 +32,10 @@ typedef struct _U64_S { u64 v; } U64_S;
 
 #define PUT4(s, d) (A32(d) = A32(s))
 #define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
+	(d = s - A16(p))
+
 #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
 	do {	\
 		A16(p) = v;	\
@@ -51,10 +52,13 @@ typedef struct _U64_S { u64 v; } U64_S;
 #define PUT8(s, d) \
 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
 
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
-	do {	\
-		put_unaligned(v, (u16 *)(p)); \
-		p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
+	(d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
+	do {						\
+		put_unaligned_le16(v, (u16 *)(p));	\
+		p += 2;					\
 	} while (0)
 #endif
 
@@ -140,9 +144,6 @@ typedef struct _U64_S { u64 v; } U64_S;
 
 #endif
 
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
-	(d = s - get_unaligned_le16(p))
-
 #define LZ4_WILDCOPY(s, d, e)	\
 	do {			\
 		LZ4_COPYPACKET(s, d);	\
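
Both LZ4_READ_LITTLEENDIAN_16 variants decode the 16-bit little-endian match offset stored in the compressed stream and subtract it from the current output position; the only difference is whether a raw typed load (A16) is safe on the platform or get_unaligned_le16() must be used. The operation itself, as a portable standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	/* portable get_unaligned_le16(): byte-wise, endian-independent */
	static uint16_t get_unaligned_le16(const uint8_t *p)
	{
		return (uint16_t)(p[0] | (p[1] << 8));
	}

	int main(void)
	{
		uint8_t out[64];
		const uint8_t stream[2] = { 0x05, 0x00 };	/* offset 5, LE */
		uint8_t *op = out + 20;		/* current output position */
		const uint8_t *ref;

		/* LZ4_READ_LITTLEENDIAN_16(ref, op, stream) */
		ref = op - get_unaligned_le16(stream);
		printf("match copies from %td bytes back\n", op - ref); /* 5 */
		return 0;
	}
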
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d87e83a..9e0b0315a724 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -210,10 +210,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 		goto fast_exit;
 
 	hash = hash_stack(trace->entries, trace->nr_entries);
-	/* Bad luck, we won't store this stack. */
-	if (hash == 0)
-		goto exit;
-
 	bucket = &stack_table[hash & STACK_HASH_MASK];
 
 	/*
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 27a7a26b1ece..8f22fbedc3a6 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2444,6 +2444,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 4294967295U } },
 	},
 	{
+		"ALU_ADD_X: 2 + 4294967294 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
 		"ALU64_ADD_X: 1 + 2 = 3",
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 1),
@@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 4294967295U } },
 	},
+	{
+		"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_LD_IMM64(R2, 4294967296ULL),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_ALU | BPF_ADD | BPF_K */
 	{
 		"ALU_ADD_K: 1 + 2 = 3",
@@ -2502,6 +2535,21 @@ static struct bpf_test tests[] = {
 		{ { 0, 4294967295U } },
 	},
 	{
+		"ALU_ADD_K: 4294967294 + 2 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
 		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
 		.u.insns_int = {
 			BPF_LD_IMM64(R2, 0x0),
@@ -2518,6 +2566,70 @@ static struct bpf_test tests[] = {
 		{ { 0, 0x1 } },
 	},
 	{
+		"ALU_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80000000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80008000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
 		"ALU64_ADD_K: 1 + 2 = 3",
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 1),
@@ -2551,6 +2663,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 2147483647 } },
 	},
 	{
+		"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_LD_IMM64(R1, 4294967296ULL),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
 		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 2147483646),
@@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 0x1 } },
 	},
+	{
+		"ALU64_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
 	/* BPF_ALU | BPF_SUB | BPF_X */
 	{
 		"ALU_SUB_X: 3 - 1 = 2",
@@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JGE | BPF_K */
 	{
 		"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = {
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = {
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, 1),
+			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JGE | BPF_X */
 	{
 		"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
 			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
 			BPF_LD_IMM64(R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
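
The new ADD tests pin down the difference between BPF_ALU (32-bit, result zero-extended) and BPF_ALU64 arithmetic, an area where JIT back ends have historically diverged from the interpreter. The expected wraparound behaviour in plain C:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t r32 = 4294967294u;	/* ALU_ADD: 32-bit register view  */
		uint64_t r64 = 4294967294u;	/* ALU64_ADD: full 64-bit register */

		r32 += 2;	/* wraps around to 0 */
		r64 += 2;	/* carries into bit 32: 4294967296 */

		printf("32-bit: %" PRIu32 "\n", r32);
		printf("64-bit: %" PRIu64 "\n", r64);
		return 0;
	}

The JNE-to-JSET edits in the same file fix tests whose names promise JSET ("if (x & y)") but whose bodies used BPF_JNE, so the JSET opcode previously went untested.
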
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index bfbd7096b6ed..0c6317b7db38 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2];
 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
 	wait_queue_head_t *wqh = &congestion_wqh[sync];
-	enum wb_state bit;
+	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
 	if (test_and_clear_bit(bit, &congested->state))
@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested);
 
 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
-	enum wb_state bit;
+	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
 	if (!test_and_set_bit(bit, &congested->state))
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8a5bc66b0c0..b8024fa7101d 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -97,8 +97,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 		break;
 	case POSIX_FADV_WILLNEED:
 		/* First and last PARTIAL page! */
-		start_index = offset >> PAGE_CACHE_SHIFT;
-		end_index = endbyte >> PAGE_CACHE_SHIFT;
+		start_index = offset >> PAGE_SHIFT;
+		end_index = endbyte >> PAGE_SHIFT;
 
 		/* Careful about overflow on the "+1" */
 		nrpages = end_index - start_index + 1;
@@ -124,8 +124,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 		 * preserved on the expectation that it is better to preserve
 		 * needed memory than to discard unneeded memory.
 		 */
-		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
-		end_index = (endbyte >> PAGE_CACHE_SHIFT);
+		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
+		end_index = (endbyte >> PAGE_SHIFT);
 
 		if (end_index >= start_index) {
 			unsigned long count = invalidate_mapping_pages(mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index a8c69c8c0a90..f2479af09da9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -265,7 +265,7 @@ void delete_from_page_cache(struct page *page)
 
 	if (freepage)
 		freepage(page);
-	page_cache_release(page);
+	put_page(page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
@@ -352,8 +352,8 @@ EXPORT_SYMBOL(filemap_flush);
 static int __filemap_fdatawait_range(struct address_space *mapping,
 				     loff_t start_byte, loff_t end_byte)
 {
-	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
-	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
+	pgoff_t index = start_byte >> PAGE_SHIFT;
+	pgoff_t end = end_byte >> PAGE_SHIFT;
 	struct pagevec pvec;
 	int nr_pages;
 	int ret = 0;
@@ -550,7 +550,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		pgoff_t offset = old->index;
 		freepage = mapping->a_ops->freepage;
 
-		page_cache_get(new);
+		get_page(new);
 		new->mapping = mapping;
 		new->index = offset;
 
@@ -572,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
-		page_cache_release(old);
+		put_page(old);
 	}
 
 	return error;
@@ -651,7 +651,7 @@ static int __add_to_page_cache_locked(struct page *page,
 		return error;
 	}
 
-	page_cache_get(page);
+	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
 
@@ -675,7 +675,7 @@ err_insert:
 	spin_unlock_irq(&mapping->tree_lock);
 	if (!huge)
 		mem_cgroup_cancel_charge(page, memcg, false);
-	page_cache_release(page);
+	put_page(page);
 	return error;
 }
 
@@ -1083,7 +1083,7 @@ repeat:
 		 * include/linux/pagemap.h for details.
 		 */
 		if (unlikely(page != *pagep)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 	}
@@ -1121,7 +1121,7 @@ repeat:
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1168,7 +1168,7 @@ repeat:
 	if (fgp_flags & FGP_LOCK) {
 		if (fgp_flags & FGP_NOWAIT) {
 			if (!trylock_page(page)) {
-				page_cache_release(page);
+				put_page(page);
 				return NULL;
 			}
 		} else {
@@ -1178,7 +1178,7 @@ repeat:
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1209,7 +1209,7 @@ no_page:
 		err = add_to_page_cache_lru(page, mapping, offset,
 				gfp_mask & GFP_RECLAIM_MASK);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 			if (err == -EEXIST)
 				goto repeat;
@@ -1278,7 +1278,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 export:
@@ -1343,7 +1343,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1405,7 +1405,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1415,7 +1415,7 @@ repeat:
 		 * negatives, which is just confusing to the caller.
 		 */
 		if (page->mapping == NULL || page->index != iter.index) {
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 
@@ -1482,7 +1482,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1549,7 +1549,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 export:
@@ -1610,11 +1610,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 	unsigned int prev_offset;
 	int error = 0;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
-	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	index = *ppos >> PAGE_SHIFT;
+	prev_index = ra->prev_pos >> PAGE_SHIFT;
+	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
+	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
+	offset = *ppos & ~PAGE_MASK;
 
 	for (;;) {
 		struct page *page;
@@ -1648,7 +1648,7 @@ find_page:
 		if (PageUptodate(page))
 			goto page_ok;
 
-		if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+		if (inode->i_blkbits == PAGE_SHIFT ||
 				!mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
 		if (!trylock_page(page))
@@ -1672,18 +1672,18 @@ page_ok:
 		 */
 
 		isize = i_size_read(inode);
-		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		end_index = (isize - 1) >> PAGE_SHIFT;
 		if (unlikely(!isize || index > end_index)) {
-			page_cache_release(page);
+			put_page(page);
 			goto out;
 		}
 
 		/* nr is the maximum number of bytes to copy from this page */
-		nr = PAGE_CACHE_SIZE;
+		nr = PAGE_SIZE;
 		if (index == end_index) {
-			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			nr = ((isize - 1) & ~PAGE_MASK) + 1;
 			if (nr <= offset) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 		}
@@ -1711,11 +1711,11 @@ page_ok:
 
 		ret = copy_page_to_iter(page, offset, nr, iter);
 		offset += ret;
-		index += offset >> PAGE_CACHE_SHIFT;
-		offset &= ~PAGE_CACHE_MASK;
+		index += offset >> PAGE_SHIFT;
+		offset &= ~PAGE_MASK;
 		prev_offset = offset;
 
-		page_cache_release(page);
+		put_page(page);
 		written += ret;
 		if (!iov_iter_count(iter))
 			goto out;
@@ -1735,7 +1735,7 @@ page_not_up_to_date_locked:
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -1757,7 +1757,7 @@ readpage:
 
 		if (unlikely(error)) {
 			if (error == AOP_TRUNCATED_PAGE) {
-				page_cache_release(page);
+				put_page(page);
 				error = 0;
 				goto find_page;
 			}
@@ -1774,7 +1774,7 @@ readpage:
 				 * invalidate_mapping_pages got it
 				 */
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				goto find_page;
 			}
 			unlock_page(page);
@@ -1789,7 +1789,7 @@ readpage:
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
-		page_cache_release(page);
+		put_page(page);
 		goto out;
 
 no_cached_page:
@@ -1805,7 +1805,7 @@ no_cached_page:
 		error = add_to_page_cache_lru(page, mapping, index,
 				mapping_gfp_constraint(mapping, GFP_KERNEL));
 		if (error) {
-			page_cache_release(page);
+			put_page(page);
 			if (error == -EEXIST) {
 				error = 0;
 				goto find_page;
@@ -1817,10 +1817,10 @@ no_cached_page:
 
 out:
 	ra->prev_pos = prev_index;
-	ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	ra->prev_pos <<= PAGE_SHIFT;
 	ra->prev_pos |= prev_offset;
 
-	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
 	file_accessed(filp);
 	return written ? written : error;
 }
@@ -1912,7 +1912,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
 		else if (ret == -EEXIST)
 			ret = 0; /* losing race to add is OK */
 
-		page_cache_release(page);
+		put_page(page);
 
 	} while (ret == AOP_TRUNCATED_PAGE);
 
@@ -2022,8 +2022,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	loff_t size;
 	int ret = 0;
 
-	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-	if (offset >= size >> PAGE_CACHE_SHIFT)
+	size = round_up(i_size_read(inode), PAGE_SIZE);
+	if (offset >= size >> PAGE_SHIFT)
 		return VM_FAULT_SIGBUS;
 
 	/*
@@ -2049,7 +2049,7 @@ retry_find:
 	}
 
 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-		page_cache_release(page);
+		put_page(page);
 		return ret | VM_FAULT_RETRY;
 	}
 
@@ -2072,10 +2072,10 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 * We must recheck i_size under page lock.
 	 */
-	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
+	size = round_up(i_size_read(inode), PAGE_SIZE);
+	if (unlikely(offset >= size >> PAGE_SHIFT)) {
2077 unlock_page(page); 2077 unlock_page(page);
2078 page_cache_release(page); 2078 put_page(page);
2079 return VM_FAULT_SIGBUS; 2079 return VM_FAULT_SIGBUS;
2080 } 2080 }
2081 2081
@@ -2120,7 +2120,7 @@ page_not_uptodate:
2120 if (!PageUptodate(page)) 2120 if (!PageUptodate(page))
2121 error = -EIO; 2121 error = -EIO;
2122 } 2122 }
2123 page_cache_release(page); 2123 put_page(page);
2124 2124
2125 if (!error || error == AOP_TRUNCATED_PAGE) 2125 if (!error || error == AOP_TRUNCATED_PAGE)
2126 goto retry_find; 2126 goto retry_find;
@@ -2164,7 +2164,7 @@ repeat:
2164 2164
2165 /* Has the page moved? */ 2165 /* Has the page moved? */
2166 if (unlikely(page != *slot)) { 2166 if (unlikely(page != *slot)) {
2167 page_cache_release(page); 2167 put_page(page);
2168 goto repeat; 2168 goto repeat;
2169 } 2169 }
2170 2170
@@ -2178,8 +2178,8 @@ repeat:
2178 if (page->mapping != mapping || !PageUptodate(page)) 2178 if (page->mapping != mapping || !PageUptodate(page))
2179 goto unlock; 2179 goto unlock;
2180 2180
2181 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); 2181 size = round_up(i_size_read(mapping->host), PAGE_SIZE);
2182 if (page->index >= size >> PAGE_CACHE_SHIFT) 2182 if (page->index >= size >> PAGE_SHIFT)
2183 goto unlock; 2183 goto unlock;
2184 2184
2185 pte = vmf->pte + page->index - vmf->pgoff; 2185 pte = vmf->pte + page->index - vmf->pgoff;
@@ -2195,7 +2195,7 @@ repeat:
2195unlock: 2195unlock:
2196 unlock_page(page); 2196 unlock_page(page);
2197skip: 2197skip:
2198 page_cache_release(page); 2198 put_page(page);
2199next: 2199next:
2200 if (iter.index == vmf->max_pgoff) 2200 if (iter.index == vmf->max_pgoff)
2201 break; 2201 break;
@@ -2278,7 +2278,7 @@ static struct page *wait_on_page_read(struct page *page)
2278 if (!IS_ERR(page)) { 2278 if (!IS_ERR(page)) {
2279 wait_on_page_locked(page); 2279 wait_on_page_locked(page);
2280 if (!PageUptodate(page)) { 2280 if (!PageUptodate(page)) {
2281 page_cache_release(page); 2281 put_page(page);
2282 page = ERR_PTR(-EIO); 2282 page = ERR_PTR(-EIO);
2283 } 2283 }
2284 } 2284 }
@@ -2301,7 +2301,7 @@ repeat:
2301 return ERR_PTR(-ENOMEM); 2301 return ERR_PTR(-ENOMEM);
2302 err = add_to_page_cache_lru(page, mapping, index, gfp); 2302 err = add_to_page_cache_lru(page, mapping, index, gfp);
2303 if (unlikely(err)) { 2303 if (unlikely(err)) {
2304 page_cache_release(page); 2304 put_page(page);
2305 if (err == -EEXIST) 2305 if (err == -EEXIST)
2306 goto repeat; 2306 goto repeat;
2307 /* Presumably ENOMEM for radix tree node */ 2307 /* Presumably ENOMEM for radix tree node */
@@ -2311,7 +2311,7 @@ repeat:
2311filler: 2311filler:
2312 err = filler(data, page); 2312 err = filler(data, page);
2313 if (err < 0) { 2313 if (err < 0) {
2314 page_cache_release(page); 2314 put_page(page);
2315 return ERR_PTR(err); 2315 return ERR_PTR(err);
2316 } 2316 }
2317 2317
@@ -2364,7 +2364,7 @@ filler:
2364 /* Case c or d, restart the operation */ 2364 /* Case c or d, restart the operation */
2365 if (!page->mapping) { 2365 if (!page->mapping) {
2366 unlock_page(page); 2366 unlock_page(page);
2367 page_cache_release(page); 2367 put_page(page);
2368 goto repeat; 2368 goto repeat;
2369 } 2369 }
2370 2370
@@ -2511,7 +2511,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2511 struct iov_iter data; 2511 struct iov_iter data;
2512 2512
2513 write_len = iov_iter_count(from); 2513 write_len = iov_iter_count(from);
2514 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; 2514 end = (pos + write_len - 1) >> PAGE_SHIFT;
2515 2515
2516 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2516 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2517 if (written) 2517 if (written)
@@ -2525,7 +2525,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2525 */ 2525 */
2526 if (mapping->nrpages) { 2526 if (mapping->nrpages) {
2527 written = invalidate_inode_pages2_range(mapping, 2527 written = invalidate_inode_pages2_range(mapping,
2528 pos >> PAGE_CACHE_SHIFT, end); 2528 pos >> PAGE_SHIFT, end);
2529 /* 2529 /*
2530 * If a page can not be invalidated, return 0 to fall back 2530 * If a page can not be invalidated, return 0 to fall back
2531 * to buffered write. 2531 * to buffered write.
@@ -2550,7 +2550,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2550 */ 2550 */
2551 if (mapping->nrpages) { 2551 if (mapping->nrpages) {
2552 invalidate_inode_pages2_range(mapping, 2552 invalidate_inode_pages2_range(mapping,
2553 pos >> PAGE_CACHE_SHIFT, end); 2553 pos >> PAGE_SHIFT, end);
2554 } 2554 }
2555 2555
2556 if (written > 0) { 2556 if (written > 0) {
@@ -2611,8 +2611,8 @@ ssize_t generic_perform_write(struct file *file,
2611 size_t copied; /* Bytes copied from user */ 2611 size_t copied; /* Bytes copied from user */
2612 void *fsdata; 2612 void *fsdata;
2613 2613
2614 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2614 offset = (pos & (PAGE_SIZE - 1));
2615 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2615 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2616 iov_iter_count(i)); 2616 iov_iter_count(i));
2617 2617
2618again: 2618again:
@@ -2665,7 +2665,7 @@ again:
2665 * because not all segments in the iov can be copied at 2665 * because not all segments in the iov can be copied at
2666 * once without a pagefault. 2666 * once without a pagefault.
2667 */ 2667 */
2668 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2668 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2669 iov_iter_single_seg_count(i)); 2669 iov_iter_single_seg_count(i));
2670 goto again; 2670 goto again;
2671 } 2671 }
@@ -2752,8 +2752,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2752 iocb->ki_pos = endbyte + 1; 2752 iocb->ki_pos = endbyte + 1;
2753 written += status; 2753 written += status;
2754 invalidate_mapping_pages(mapping, 2754 invalidate_mapping_pages(mapping,
2755 pos >> PAGE_CACHE_SHIFT, 2755 pos >> PAGE_SHIFT,
2756 endbyte >> PAGE_CACHE_SHIFT); 2756 endbyte >> PAGE_SHIFT);
2757 } else { 2757 } else {
2758 /* 2758 /*
2759 * We don't know how much we wrote, so just return 2759 * We don't know how much we wrote, so just return
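
The mm/filemap.c hunks above are mechanical: PAGE_CACHE_SHIFT/SIZE/MASK become PAGE_SHIFT/SIZE/MASK and page_cache_release() becomes put_page(). That is safe because the retired names were pure 1:1 aliases. A minimal sketch of the compatibility macros being removed, as they stood in include/linux/pagemap.h before this series (quoted from memory, so treat the exact layout as an approximation):

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

Since every definition is an alias, an expression like *ppos >> PAGE_CACHE_SHIFT compiles to exactly the same code as *ppos >> PAGE_SHIFT; what goes away is only the misleading suggestion that the page cache might use a granule other than the base page size.
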
diff --git a/mm/gup.c b/mm/gup.c
index 7f1c4fb77cfa..c057784c8444 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,4 +1,3 @@
1#define __DISABLE_GUP_DEPRECATED
2#include <linux/kernel.h> 1#include <linux/kernel.h>
3#include <linux/errno.h> 2#include <linux/errno.h>
4#include <linux/err.h> 3#include <linux/err.h>
@@ -839,7 +838,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
839 * if (locked) 838 * if (locked)
840 * up_read(&mm->mmap_sem); 839 * up_read(&mm->mmap_sem);
841 */ 840 */
842long get_user_pages_locked6(unsigned long start, unsigned long nr_pages, 841long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
843 int write, int force, struct page **pages, 842 int write, int force, struct page **pages,
844 int *locked) 843 int *locked)
845{ 844{
@@ -847,7 +846,7 @@ long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
847 write, force, pages, NULL, locked, true, 846 write, force, pages, NULL, locked, true,
848 FOLL_TOUCH); 847 FOLL_TOUCH);
849} 848}
850EXPORT_SYMBOL(get_user_pages_locked6); 849EXPORT_SYMBOL(get_user_pages_locked);
851 850
852/* 851/*
853 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to 852 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
892 * or if "force" shall be set to 1 (get_user_pages_fast misses the 891 * or if "force" shall be set to 1 (get_user_pages_fast misses the
893 * "force" parameter). 892 * "force" parameter).
894 */ 893 */
895long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages, 894long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
896 int write, int force, struct page **pages) 895 int write, int force, struct page **pages)
897{ 896{
898 return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 897 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
899 write, force, pages, FOLL_TOUCH); 898 write, force, pages, FOLL_TOUCH);
900} 899}
901EXPORT_SYMBOL(get_user_pages_unlocked5); 900EXPORT_SYMBOL(get_user_pages_unlocked);
902 901
903/* 902/*
904 * get_user_pages_remote() - pin user pages in memory 903 * get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
972 * and mm being operated on are the current task's. We also 971 * and mm being operated on are the current task's. We also
973 * obviously don't pass FOLL_REMOTE in here. 972 * obviously don't pass FOLL_REMOTE in here.
974 */ 973 */
975long get_user_pages6(unsigned long start, unsigned long nr_pages, 974long get_user_pages(unsigned long start, unsigned long nr_pages,
976 int write, int force, struct page **pages, 975 int write, int force, struct page **pages,
977 struct vm_area_struct **vmas) 976 struct vm_area_struct **vmas)
978{ 977{
@@ -980,7 +979,7 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
980 write, force, pages, vmas, NULL, false, 979 write, force, pages, vmas, NULL, false,
981 FOLL_TOUCH); 980 FOLL_TOUCH);
982} 981}
983EXPORT_SYMBOL(get_user_pages6); 982EXPORT_SYMBOL(get_user_pages);
984 983
985/** 984/**
986 * populate_vma_page_range() - populate a range of pages in the vma. 985 * populate_vma_page_range() - populate a range of pages in the vma.
@@ -1107,7 +1106,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1107 * @addr: user address 1106 * @addr: user address
1108 * 1107 *
1109 * Returns struct page pointer of user page pinned for dump, 1108 * Returns struct page pointer of user page pinned for dump,
1110 * to be freed afterwards by page_cache_release() or put_page(). 1109 * to be freed afterwards by put_page().
1111 * 1110 *
1112 * Returns NULL on any kind of failure - a hole must then be inserted into 1111 * Returns NULL on any kind of failure - a hole must then be inserted into
1113 * the corefile, to preserve alignment with its headers; and also returns 1112 * the corefile, to preserve alignment with its headers; and also returns
@@ -1491,7 +1490,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1491int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1490int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1492 struct page **pages) 1491 struct page **pages)
1493{ 1492{
1494 struct mm_struct *mm = current->mm;
1495 int nr, ret; 1493 int nr, ret;
1496 1494
1497 start &= PAGE_MASK; 1495 start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1503 start += nr << PAGE_SHIFT; 1501 start += nr << PAGE_SHIFT;
1504 pages += nr; 1502 pages += nr;
1505 1503
1506 ret = get_user_pages_unlocked(current, mm, start, 1504 ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
1507 nr_pages - nr, write, 0, pages);
1508 1505
1509 /* Have to be a bit careful with return values */ 1506 /* Have to be a bit careful with return values */
1510 if (nr > 0) { 1507 if (nr > 0) {
@@ -1519,38 +1516,3 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1519} 1516}
1520 1517
1521#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */ 1518#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
1522
1523long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
1524 unsigned long start, unsigned long nr_pages,
1525 int write, int force, struct page **pages,
1526 struct vm_area_struct **vmas)
1527{
1528 WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
1529 WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
1530
1531 return get_user_pages6(start, nr_pages, write, force, pages, vmas);
1532}
1533EXPORT_SYMBOL(get_user_pages8);
1534
1535long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
1536 unsigned long start, unsigned long nr_pages,
1537 int write, int force, struct page **pages, int *locked)
1538{
1539 WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
1540 WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
1541
1542 return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
1543}
1544EXPORT_SYMBOL(get_user_pages_locked8);
1545
1546long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
1547 unsigned long start, unsigned long nr_pages,
1548 int write, int force, struct page **pages)
1549{
1550 WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
1551 WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
1552
1553 return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
1554}
1555EXPORT_SYMBOL(get_user_pages_unlocked7);
1556
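
The mm/gup.c hunks complete an API migration rather than start one: an earlier step in this series had parked the new-signature entry points under temporary names (get_user_pages6(), get_user_pages_locked6(), get_user_pages_unlocked5()) while keeping *8/*7-suffixed wrappers with the old (tsk, mm) signature, as the deleted WARN_ONCE wrappers above show. Here the wrappers and the __DISABLE_GUP_DEPRECATED switch are removed and the canonical names are restored. A sketch of the call-site change (hypothetical caller operating on its own address space):

/* before: the task and mm were passed explicitly */
ret = get_user_pages(current, current->mm, start, nr_pages,
		     write, force, pages, vmas);

/* after: the calling task's mm is implied */
ret = get_user_pages(start, nr_pages, write, force, pages, vmas);

Callers that genuinely pin pages in another process's address space keep an explicit variant, get_user_pages_remote(), which still takes tsk and mm (visible untouched in the hunk context above).
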
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b82f8e..df67b53ae3c5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
232 return READ_ONCE(huge_zero_page); 232 return READ_ONCE(huge_zero_page);
233} 233}
234 234
235static void put_huge_zero_page(void) 235void put_huge_zero_page(void)
236{ 236{
237 /* 237 /*
238 * Counter should never go to zero here. Only shrinker can put 238 * Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1684 if (vma_is_dax(vma)) { 1684 if (vma_is_dax(vma)) {
1685 spin_unlock(ptl); 1685 spin_unlock(ptl);
1686 if (is_huge_zero_pmd(orig_pmd)) 1686 if (is_huge_zero_pmd(orig_pmd))
1687 put_huge_zero_page(); 1687 tlb_remove_page(tlb, pmd_page(orig_pmd));
1688 } else if (is_huge_zero_pmd(orig_pmd)) { 1688 } else if (is_huge_zero_pmd(orig_pmd)) {
1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1690 atomic_long_dec(&tlb->mm->nr_ptes); 1690 atomic_long_dec(&tlb->mm->nr_ptes);
1691 spin_unlock(ptl); 1691 spin_unlock(ptl);
1692 put_huge_zero_page(); 1692 tlb_remove_page(tlb, pmd_page(orig_pmd));
1693 } else { 1693 } else {
1694 struct page *page = pmd_page(orig_pmd); 1694 struct page *page = pmd_page(orig_pmd);
1695 page_remove_rmap(page, true); 1695 page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1960 * page fault if needed. 1960 * page fault if needed.
1961 */ 1961 */
1962 return 0; 1962 return 0;
1963 if (vma->vm_ops) 1963 if (vma->vm_ops || (vm_flags & VM_NO_THP))
1964 /* khugepaged not yet working on file or special mappings */ 1964 /* khugepaged not yet working on file or special mappings */
1965 return 0; 1965 return 0;
1966 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
1967 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1966 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1968 hend = vma->vm_end & HPAGE_PMD_MASK; 1967 hend = vma->vm_end & HPAGE_PMD_MASK;
1969 if (hstart < hend) 1968 if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
2352 return false; 2351 return false;
2353 if (is_vma_temporary_stack(vma)) 2352 if (is_vma_temporary_stack(vma))
2354 return false; 2353 return false;
2355 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2354 return !(vma->vm_flags & VM_NO_THP);
2356 return true;
2357} 2355}
2358 2356
2359static void collapse_huge_page(struct mm_struct *mm, 2357static void collapse_huge_page(struct mm_struct *mm,
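
Two separate fixes land in mm/huge_memory.c. First, zap_huge_pmd() used to drop the huge zero page reference with put_huge_zero_page() as soon as the pmd was cleared, leaving a window in which another CPU could still reach the page through a not-yet-flushed TLB entry; the hunks route the drop through tlb_remove_page() instead, and put_huge_zero_page() loses its static so the generic release path can call it (that caller is presumably in an mm/swap.c hunk outside this excerpt). A minimal sketch of the ordering this relies on, assuming standard mmu_gather semantics rather than quoting this commit:

#include <asm/tlb.h>

static void zap_sketch(struct mmu_gather *tlb, struct page *page)
{
	/* queue the page on the gather; its reference is NOT dropped here */
	tlb_remove_page(tlb, page);
	/*
	 * tlb_flush_mmu()/tlb_finish_mmu() flush the stale TLB entries
	 * first and only then release the batched pages, so no CPU can
	 * still be reading through a huge pmd when the zero page is freed.
	 */
}

Second, the khugepaged hunks demote the VM_NO_THP assertions: VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma) becomes an ordinary check, so a VMA that unexpectedly carries VM_NO_THP is simply skipped instead of oopsing a debug kernel.
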
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06058eaa173b..19d0d08b396f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3346,7 +3346,7 @@ retry_avoidcopy:
3346 old_page != pagecache_page) 3346 old_page != pagecache_page)
3347 outside_reserve = 1; 3347 outside_reserve = 1;
3348 3348
3349 page_cache_get(old_page); 3349 get_page(old_page);
3350 3350
3351 /* 3351 /*
3352 * Drop page table lock as buddy allocator may be called. It will 3352 * Drop page table lock as buddy allocator may be called. It will
@@ -3364,7 +3364,7 @@ retry_avoidcopy:
3364 * may get SIGKILLed if it later faults. 3364 * may get SIGKILLed if it later faults.
3365 */ 3365 */
3366 if (outside_reserve) { 3366 if (outside_reserve) {
3367 page_cache_release(old_page); 3367 put_page(old_page);
3368 BUG_ON(huge_pte_none(pte)); 3368 BUG_ON(huge_pte_none(pte));
3369 unmap_ref_private(mm, vma, old_page, address); 3369 unmap_ref_private(mm, vma, old_page, address);
3370 BUG_ON(huge_pte_none(pte)); 3370 BUG_ON(huge_pte_none(pte));
@@ -3425,9 +3425,9 @@ retry_avoidcopy:
3425 spin_unlock(ptl); 3425 spin_unlock(ptl);
3426 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3426 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3427out_release_all: 3427out_release_all:
3428 page_cache_release(new_page); 3428 put_page(new_page);
3429out_release_old: 3429out_release_old:
3430 page_cache_release(old_page); 3430 put_page(old_page);
3431 3431
3432 spin_lock(ptl); /* Caller expects lock to be held */ 3432 spin_lock(ptl); /* Caller expects lock to be held */
3433 return ret; 3433 return ret;
diff --git a/mm/madvise.c b/mm/madvise.c
index a01147359f3b..07427d3fcead 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -170,7 +170,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
170 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, 170 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
171 vma, index); 171 vma, index);
172 if (page) 172 if (page)
173 page_cache_release(page); 173 put_page(page);
174 } 174 }
175 175
176 return 0; 176 return 0;
@@ -204,14 +204,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
204 page = find_get_entry(mapping, index); 204 page = find_get_entry(mapping, index);
205 if (!radix_tree_exceptional_entry(page)) { 205 if (!radix_tree_exceptional_entry(page)) {
206 if (page) 206 if (page)
207 page_cache_release(page); 207 put_page(page);
208 continue; 208 continue;
209 } 209 }
210 swap = radix_to_swp_entry(page); 210 swap = radix_to_swp_entry(page);
211 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, 211 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
212 NULL, 0); 212 NULL, 0);
213 if (page) 213 if (page)
214 page_cache_release(page); 214 put_page(page);
215 } 215 }
216 216
217 lru_add_drain(); /* Push any new pages onto the LRU now */ 217 lru_add_drain(); /* Push any new pages onto the LRU now */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05fa8acb..fe787f5c41bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
207/* "mc" and its members are protected by cgroup_mutex */ 207/* "mc" and its members are protected by cgroup_mutex */
208static struct move_charge_struct { 208static struct move_charge_struct {
209 spinlock_t lock; /* for from, to */ 209 spinlock_t lock; /* for from, to */
210 struct mm_struct *mm;
210 struct mem_cgroup *from; 211 struct mem_cgroup *from;
211 struct mem_cgroup *to; 212 struct mem_cgroup *to;
212 unsigned long flags; 213 unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
4667 4668
4668static void mem_cgroup_clear_mc(void) 4669static void mem_cgroup_clear_mc(void)
4669{ 4670{
4671 struct mm_struct *mm = mc.mm;
4672
4670 /* 4673 /*
4671 * we must clear moving_task before waking up waiters at the end of 4674 * we must clear moving_task before waking up waiters at the end of
4672 * task migration. 4675 * task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
4676 spin_lock(&mc.lock); 4679 spin_lock(&mc.lock);
4677 mc.from = NULL; 4680 mc.from = NULL;
4678 mc.to = NULL; 4681 mc.to = NULL;
4682 mc.mm = NULL;
4679 spin_unlock(&mc.lock); 4683 spin_unlock(&mc.lock);
4684
4685 mmput(mm);
4680} 4686}
4681 4687
4682static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4733 VM_BUG_ON(mc.moved_swap); 4739 VM_BUG_ON(mc.moved_swap);
4734 4740
4735 spin_lock(&mc.lock); 4741 spin_lock(&mc.lock);
4742 mc.mm = mm;
4736 mc.from = from; 4743 mc.from = from;
4737 mc.to = memcg; 4744 mc.to = memcg;
4738 mc.flags = move_flags; 4745 mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4742 ret = mem_cgroup_precharge_mc(mm); 4749 ret = mem_cgroup_precharge_mc(mm);
4743 if (ret) 4750 if (ret)
4744 mem_cgroup_clear_mc(); 4751 mem_cgroup_clear_mc();
4752 } else {
4753 mmput(mm);
4745 } 4754 }
4746 mmput(mm);
4747 return ret; 4755 return ret;
4748} 4756}
4749 4757
@@ -4852,11 +4860,11 @@ put: /* get_mctgt_type() gets the page */
4852 return ret; 4860 return ret;
4853} 4861}
4854 4862
4855static void mem_cgroup_move_charge(struct mm_struct *mm) 4863static void mem_cgroup_move_charge(void)
4856{ 4864{
4857 struct mm_walk mem_cgroup_move_charge_walk = { 4865 struct mm_walk mem_cgroup_move_charge_walk = {
4858 .pmd_entry = mem_cgroup_move_charge_pte_range, 4866 .pmd_entry = mem_cgroup_move_charge_pte_range,
4859 .mm = mm, 4867 .mm = mc.mm,
4860 }; 4868 };
4861 4869
4862 lru_add_drain_all(); 4870 lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
4868 atomic_inc(&mc.from->moving_account); 4876 atomic_inc(&mc.from->moving_account);
4869 synchronize_rcu(); 4877 synchronize_rcu();
4870retry: 4878retry:
4871 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 4879 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4872 /* 4880 /*
4873 * Someone who are holding the mmap_sem might be waiting in 4881 * Someone who are holding the mmap_sem might be waiting in
4874 * waitq. So we cancel all extra charges, wake up all waiters, 4882 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
4885 * additional charge, the page walk just aborts. 4893 * additional charge, the page walk just aborts.
4886 */ 4894 */
4887 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 4895 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4888 up_read(&mm->mmap_sem); 4896 up_read(&mc.mm->mmap_sem);
4889 atomic_dec(&mc.from->moving_account); 4897 atomic_dec(&mc.from->moving_account);
4890} 4898}
4891 4899
4892static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4900static void mem_cgroup_move_task(void)
4893{ 4901{
4894 struct cgroup_subsys_state *css; 4902 if (mc.to) {
4895 struct task_struct *p = cgroup_taskset_first(tset, &css); 4903 mem_cgroup_move_charge();
4896 struct mm_struct *mm = get_task_mm(p);
4897
4898 if (mm) {
4899 if (mc.to)
4900 mem_cgroup_move_charge(mm);
4901 mmput(mm);
4902 }
4903 if (mc.to)
4904 mem_cgroup_clear_mc(); 4904 mem_cgroup_clear_mc();
4905 }
4905} 4906}
4906#else /* !CONFIG_MMU */ 4907#else /* !CONFIG_MMU */
4907static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4911static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4912{ 4913{
4913} 4914}
4914static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4915static void mem_cgroup_move_task(void)
4915{ 4916{
4916} 4917}
4917#endif 4918#endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
5195 .css_reset = mem_cgroup_css_reset, 5196 .css_reset = mem_cgroup_css_reset,
5196 .can_attach = mem_cgroup_can_attach, 5197 .can_attach = mem_cgroup_can_attach,
5197 .cancel_attach = mem_cgroup_cancel_attach, 5198 .cancel_attach = mem_cgroup_cancel_attach,
5198 .attach = mem_cgroup_move_task, 5199 .post_attach = mem_cgroup_move_task,
5199 .bind = mem_cgroup_bind, 5200 .bind = mem_cgroup_bind,
5200 .dfl_cftypes = memory_files, 5201 .dfl_cftypes = memory_files,
5201 .legacy_cftypes = mem_cgroup_legacy_files, 5202 .legacy_cftypes = mem_cgroup_legacy_files,
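
The mm/memcontrol.c change relocates charge moving from the ->attach callback to a new ->post_attach, which runs after the migration has completed and outside the locks held on the attach path; mem_cgroup_move_charge() calls lru_add_drain_all(), and doing that from ->attach risked a deadlock (the exact lock chain is in the commit message; this summary is an inference from the hunks). Because post_attach receives no taskset, the target mm can no longer be looked up there, so it is pinned in mc.mm at can_attach time and released by mem_cgroup_clear_mc(). A condensed, hypothetical flow reconstructed from the hunks -- should_move_charge() and precharge() stand in for the real checks and are not kernel functions:

static int can_attach_sketch(struct task_struct *p)
{
	struct mm_struct *mm = get_task_mm(p);

	if (!mm)
		return 0;
	if (should_move_charge()) {
		mc.mm = mm;	/* pin kept alive across the attach */
		/* on failure the real code calls mem_cgroup_clear_mc(),
		 * which ends in mmput(mc.mm), so the pin cannot leak */
		return precharge();
	}
	mmput(mm);		/* nothing to move: drop the pin now */
	return 0;
}

static void post_attach_sketch(void)
{
	if (mc.to) {
		mem_cgroup_move_charge();	/* walks mc.mm */
		mem_cgroup_clear_mc();		/* -> mmput(mc.mm) */
	}
}
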
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a544c6c0717..ca5acee53b7a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -538,7 +538,7 @@ static int delete_from_lru_cache(struct page *p)
538 /* 538 /*
539 * drop the page count elevated by isolate_lru_page() 539 * drop the page count elevated by isolate_lru_page()
540 */ 540 */
541 page_cache_release(p); 541 put_page(p);
542 return 0; 542 return 0;
543 } 543 }
544 return -EIO; 544 return -EIO;
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
888 } 888 }
889 } 889 }
890 890
891 return get_page_unless_zero(head); 891 if (get_page_unless_zero(head)) {
892 if (head == compound_head(page))
893 return 1;
894
895 pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
896 put_page(head);
897 }
898
899 return 0;
892} 900}
893EXPORT_SYMBOL_GPL(get_hwpoison_page); 901EXPORT_SYMBOL_GPL(get_hwpoison_page);
894 902
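
get_hwpoison_page() previously returned get_page_unless_zero(head) directly; for a THP tail page that could pin the head and still report success even if the compound page had been split in the meantime, leaving the caller holding a reference to the wrong page. The new code re-checks compound_head(page) after pinning and backs out (with the pr_info above) when the tail no longer belongs to that head. The caller-side contract, as a sketch -- handle_the_error() is a hypothetical handler, and put_hwpoison_page() is assumed to be this era's put_page() alias:

if (get_hwpoison_page(p)) {
	/* we now hold a reference on p's compound head */
	handle_the_error(p);
	put_hwpoison_page(p);
}
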
diff --git a/mm/memory.c b/mm/memory.c
index 098f00d05461..305537fc8640 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
789 return pfn_to_page(pfn); 789 return pfn_to_page(pfn);
790} 790}
791 791
792#ifdef CONFIG_TRANSPARENT_HUGEPAGE
793struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
794 pmd_t pmd)
795{
796 unsigned long pfn = pmd_pfn(pmd);
797
798 /*
799 * There is no pmd_special() but there may be special pmds, e.g.
800 * in a direct-access (dax) mapping, so let's just replicate the
801 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
802 */
803 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
804 if (vma->vm_flags & VM_MIXEDMAP) {
805 if (!pfn_valid(pfn))
806 return NULL;
807 goto out;
808 } else {
809 unsigned long off;
810 off = (addr - vma->vm_start) >> PAGE_SHIFT;
811 if (pfn == vma->vm_pgoff + off)
812 return NULL;
813 if (!is_cow_mapping(vma->vm_flags))
814 return NULL;
815 }
816 }
817
818 if (is_zero_pfn(pfn))
819 return NULL;
820 if (unlikely(pfn > highest_memmap_pfn))
821 return NULL;
822
823 /*
824 * NOTE! We still have PageReserved() pages in the page tables.
825 * eg. VDSO mappings can cause them to exist.
826 */
827out:
828 return pfn_to_page(pfn);
829}
830#endif
831
792/* 832/*
793 * copy one vm_area from one task to the other. Assumes the page tables 833 * copy one vm_area from one task to the other. Assumes the page tables
794 * already present in the new task to be cleared in the whole range 834 * already present in the new task to be cleared in the whole range
@@ -2054,7 +2094,7 @@ static inline int wp_page_reuse(struct mm_struct *mm,
2054 VM_BUG_ON_PAGE(PageAnon(page), page); 2094 VM_BUG_ON_PAGE(PageAnon(page), page);
2055 mapping = page->mapping; 2095 mapping = page->mapping;
2056 unlock_page(page); 2096 unlock_page(page);
2057 page_cache_release(page); 2097 put_page(page);
2058 2098
2059 if ((dirtied || page_mkwrite) && mapping) { 2099 if ((dirtied || page_mkwrite) && mapping) {
2060 /* 2100 /*
@@ -2188,7 +2228,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
2188 } 2228 }
2189 2229
2190 if (new_page) 2230 if (new_page)
2191 page_cache_release(new_page); 2231 put_page(new_page);
2192 2232
2193 pte_unmap_unlock(page_table, ptl); 2233 pte_unmap_unlock(page_table, ptl);
2194 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2234 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -2203,14 +2243,14 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
2203 munlock_vma_page(old_page); 2243 munlock_vma_page(old_page);
2204 unlock_page(old_page); 2244 unlock_page(old_page);
2205 } 2245 }
2206 page_cache_release(old_page); 2246 put_page(old_page);
2207 } 2247 }
2208 return page_copied ? VM_FAULT_WRITE : 0; 2248 return page_copied ? VM_FAULT_WRITE : 0;
2209oom_free_new: 2249oom_free_new:
2210 page_cache_release(new_page); 2250 put_page(new_page);
2211oom: 2251oom:
2212 if (old_page) 2252 if (old_page)
2213 page_cache_release(old_page); 2253 put_page(old_page);
2214 return VM_FAULT_OOM; 2254 return VM_FAULT_OOM;
2215} 2255}
2216 2256
@@ -2258,7 +2298,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2258{ 2298{
2259 int page_mkwrite = 0; 2299 int page_mkwrite = 0;
2260 2300
2261 page_cache_get(old_page); 2301 get_page(old_page);
2262 2302
2263 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 2303 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2264 int tmp; 2304 int tmp;
@@ -2267,7 +2307,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2267 tmp = do_page_mkwrite(vma, old_page, address); 2307 tmp = do_page_mkwrite(vma, old_page, address);
2268 if (unlikely(!tmp || (tmp & 2308 if (unlikely(!tmp || (tmp &
2269 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 2309 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
2270 page_cache_release(old_page); 2310 put_page(old_page);
2271 return tmp; 2311 return tmp;
2272 } 2312 }
2273 /* 2313 /*
@@ -2281,7 +2321,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2281 if (!pte_same(*page_table, orig_pte)) { 2321 if (!pte_same(*page_table, orig_pte)) {
2282 unlock_page(old_page); 2322 unlock_page(old_page);
2283 pte_unmap_unlock(page_table, ptl); 2323 pte_unmap_unlock(page_table, ptl);
2284 page_cache_release(old_page); 2324 put_page(old_page);
2285 return 0; 2325 return 0;
2286 } 2326 }
2287 page_mkwrite = 1; 2327 page_mkwrite = 1;
@@ -2341,7 +2381,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2341 */ 2381 */
2342 if (PageAnon(old_page) && !PageKsm(old_page)) { 2382 if (PageAnon(old_page) && !PageKsm(old_page)) {
2343 if (!trylock_page(old_page)) { 2383 if (!trylock_page(old_page)) {
2344 page_cache_get(old_page); 2384 get_page(old_page);
2345 pte_unmap_unlock(page_table, ptl); 2385 pte_unmap_unlock(page_table, ptl);
2346 lock_page(old_page); 2386 lock_page(old_page);
2347 page_table = pte_offset_map_lock(mm, pmd, address, 2387 page_table = pte_offset_map_lock(mm, pmd, address,
@@ -2349,10 +2389,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2349 if (!pte_same(*page_table, orig_pte)) { 2389 if (!pte_same(*page_table, orig_pte)) {
2350 unlock_page(old_page); 2390 unlock_page(old_page);
2351 pte_unmap_unlock(page_table, ptl); 2391 pte_unmap_unlock(page_table, ptl);
2352 page_cache_release(old_page); 2392 put_page(old_page);
2353 return 0; 2393 return 0;
2354 } 2394 }
2355 page_cache_release(old_page); 2395 put_page(old_page);
2356 } 2396 }
2357 if (reuse_swap_page(old_page)) { 2397 if (reuse_swap_page(old_page)) {
2358 /* 2398 /*
@@ -2375,7 +2415,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2375 /* 2415 /*
2376 * Ok, we need to copy. Oh, well.. 2416 * Ok, we need to copy. Oh, well..
2377 */ 2417 */
2378 page_cache_get(old_page); 2418 get_page(old_page);
2379 2419
2380 pte_unmap_unlock(page_table, ptl); 2420 pte_unmap_unlock(page_table, ptl);
2381 return wp_page_copy(mm, vma, address, page_table, pmd, 2421 return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2400,7 +2440,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
2400 2440
2401 vba = vma->vm_pgoff; 2441 vba = vma->vm_pgoff;
2402 vea = vba + vma_pages(vma) - 1; 2442 vea = vba + vma_pages(vma) - 1;
2403 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
2404 zba = details->first_index; 2443 zba = details->first_index;
2405 if (zba < vba) 2444 if (zba < vba)
2406 zba = vba; 2445 zba = vba;
@@ -2619,7 +2658,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2619 * parallel locked swapcache. 2658 * parallel locked swapcache.
2620 */ 2659 */
2621 unlock_page(swapcache); 2660 unlock_page(swapcache);
2622 page_cache_release(swapcache); 2661 put_page(swapcache);
2623 } 2662 }
2624 2663
2625 if (flags & FAULT_FLAG_WRITE) { 2664 if (flags & FAULT_FLAG_WRITE) {
@@ -2641,10 +2680,10 @@ out_nomap:
2641out_page: 2680out_page:
2642 unlock_page(page); 2681 unlock_page(page);
2643out_release: 2682out_release:
2644 page_cache_release(page); 2683 put_page(page);
2645 if (page != swapcache) { 2684 if (page != swapcache) {
2646 unlock_page(swapcache); 2685 unlock_page(swapcache);
2647 page_cache_release(swapcache); 2686 put_page(swapcache);
2648 } 2687 }
2649 return ret; 2688 return ret;
2650} 2689}
@@ -2752,7 +2791,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2752 if (userfaultfd_missing(vma)) { 2791 if (userfaultfd_missing(vma)) {
2753 pte_unmap_unlock(page_table, ptl); 2792 pte_unmap_unlock(page_table, ptl);
2754 mem_cgroup_cancel_charge(page, memcg, false); 2793 mem_cgroup_cancel_charge(page, memcg, false);
2755 page_cache_release(page); 2794 put_page(page);
2756 return handle_userfault(vma, address, flags, 2795 return handle_userfault(vma, address, flags,
2757 VM_UFFD_MISSING); 2796 VM_UFFD_MISSING);
2758 } 2797 }
@@ -2771,10 +2810,10 @@ unlock:
2771 return 0; 2810 return 0;
2772release: 2811release:
2773 mem_cgroup_cancel_charge(page, memcg, false); 2812 mem_cgroup_cancel_charge(page, memcg, false);
2774 page_cache_release(page); 2813 put_page(page);
2775 goto unlock; 2814 goto unlock;
2776oom_free_page: 2815oom_free_page:
2777 page_cache_release(page); 2816 put_page(page);
2778oom: 2817oom:
2779 return VM_FAULT_OOM; 2818 return VM_FAULT_OOM;
2780} 2819}
@@ -2807,7 +2846,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
2807 if (unlikely(PageHWPoison(vmf.page))) { 2846 if (unlikely(PageHWPoison(vmf.page))) {
2808 if (ret & VM_FAULT_LOCKED) 2847 if (ret & VM_FAULT_LOCKED)
2809 unlock_page(vmf.page); 2848 unlock_page(vmf.page);
2810 page_cache_release(vmf.page); 2849 put_page(vmf.page);
2811 return VM_FAULT_HWPOISON; 2850 return VM_FAULT_HWPOISON;
2812 } 2851 }
2813 2852
@@ -2996,7 +3035,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2996 if (unlikely(!pte_same(*pte, orig_pte))) { 3035 if (unlikely(!pte_same(*pte, orig_pte))) {
2997 pte_unmap_unlock(pte, ptl); 3036 pte_unmap_unlock(pte, ptl);
2998 unlock_page(fault_page); 3037 unlock_page(fault_page);
2999 page_cache_release(fault_page); 3038 put_page(fault_page);
3000 return ret; 3039 return ret;
3001 } 3040 }
3002 do_set_pte(vma, address, fault_page, pte, false, false); 3041 do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3024,7 +3063,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3024 return VM_FAULT_OOM; 3063 return VM_FAULT_OOM;
3025 3064
3026 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) { 3065 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
3027 page_cache_release(new_page); 3066 put_page(new_page);
3028 return VM_FAULT_OOM; 3067 return VM_FAULT_OOM;
3029 } 3068 }
3030 3069
@@ -3041,7 +3080,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3041 pte_unmap_unlock(pte, ptl); 3080 pte_unmap_unlock(pte, ptl);
3042 if (fault_page) { 3081 if (fault_page) {
3043 unlock_page(fault_page); 3082 unlock_page(fault_page);
3044 page_cache_release(fault_page); 3083 put_page(fault_page);
3045 } else { 3084 } else {
3046 /* 3085 /*
3047 * The fault handler has no page to lock, so it holds 3086 * The fault handler has no page to lock, so it holds
@@ -3057,7 +3096,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3057 pte_unmap_unlock(pte, ptl); 3096 pte_unmap_unlock(pte, ptl);
3058 if (fault_page) { 3097 if (fault_page) {
3059 unlock_page(fault_page); 3098 unlock_page(fault_page);
3060 page_cache_release(fault_page); 3099 put_page(fault_page);
3061 } else { 3100 } else {
3062 /* 3101 /*
3063 * The fault handler has no page to lock, so it holds 3102 * The fault handler has no page to lock, so it holds
@@ -3068,7 +3107,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3068 return ret; 3107 return ret;
3069uncharge_out: 3108uncharge_out:
3070 mem_cgroup_cancel_charge(new_page, memcg, false); 3109 mem_cgroup_cancel_charge(new_page, memcg, false);
3071 page_cache_release(new_page); 3110 put_page(new_page);
3072 return ret; 3111 return ret;
3073} 3112}
3074 3113
@@ -3096,7 +3135,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3096 tmp = do_page_mkwrite(vma, fault_page, address); 3135 tmp = do_page_mkwrite(vma, fault_page, address);
3097 if (unlikely(!tmp || 3136 if (unlikely(!tmp ||
3098 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3137 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3099 page_cache_release(fault_page); 3138 put_page(fault_page);
3100 return tmp; 3139 return tmp;
3101 } 3140 }
3102 } 3141 }
@@ -3105,7 +3144,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3105 if (unlikely(!pte_same(*pte, orig_pte))) { 3144 if (unlikely(!pte_same(*pte, orig_pte))) {
3106 pte_unmap_unlock(pte, ptl); 3145 pte_unmap_unlock(pte, ptl);
3107 unlock_page(fault_page); 3146 unlock_page(fault_page);
3108 page_cache_release(fault_page); 3147 put_page(fault_page);
3109 return ret; 3148 return ret;
3110 } 3149 }
3111 do_set_pte(vma, address, fault_page, pte, true, false); 3150 do_set_pte(vma, address, fault_page, pte, true, false);
@@ -3736,7 +3775,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3736 buf, maddr + offset, bytes); 3775 buf, maddr + offset, bytes);
3737 } 3776 }
3738 kunmap(page); 3777 kunmap(page);
3739 page_cache_release(page); 3778 put_page(page);
3740 } 3779 }
3741 len -= bytes; 3780 len -= bytes;
3742 buf += bytes; 3781 buf += bytes;
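
Besides the rename churn, mm/memory.c grows vm_normal_page_pmd(), a pmd-level twin of vm_normal_page(): since there is no pmd_special() bit, it replicates the special-mapping rules (VM_PFNMAP/VM_MIXEDMAP, CoW over a remapped pfn, the zero pfn) and returns NULL for anything that is not a normal refcounted page. A sketch of how a pmd walker might use it -- the wrapper below is hypothetical, not from this diff:

static bool pmd_maps_normal_page(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd)
{
	/* NULL means "special": raw pfn mapping, huge zero page, etc. */
	return vm_normal_page_pmd(vma, addr, pmd) != NULL;
}
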
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7b27e0..f9dfb18a4eba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
975 dec_zone_page_state(page, NR_ISOLATED_ANON + 975 dec_zone_page_state(page, NR_ISOLATED_ANON +
976 page_is_file_cache(page)); 976 page_is_file_cache(page));
977 /* Soft-offlined page shouldn't go through lru cache list */ 977 /* Soft-offlined page shouldn't go through lru cache list */
978 if (reason == MR_MEMORY_FAILURE) { 978 if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
979 /*
980 * With this release, we free successfully migrated
981 * page and set PG_HWPoison on just freed page
982 * intentionally. Although it's rather weird, it's how
983 * HWPoison flag works at the moment.
984 */
979 put_page(page); 985 put_page(page);
980 if (!test_set_page_hwpoison(page)) 986 if (!test_set_page_hwpoison(page))
981 num_poisoned_pages_inc(); 987 num_poisoned_pages_inc();
diff --git a/mm/mincore.c b/mm/mincore.c
index 563f32045490..c0b5ba965200 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -75,7 +75,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
75#endif 75#endif
76 if (page) { 76 if (page) {
77 present = PageUptodate(page); 77 present = PageUptodate(page);
78 page_cache_release(page); 78 put_page(page);
79 } 79 }
80 80
81 return present; 81 return present;
@@ -211,7 +211,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
211 * return values: 211 * return values:
212 * zero - success 212 * zero - success
213 * -EFAULT - vec points to an illegal address 213 * -EFAULT - vec points to an illegal address
214 * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE 214 * -EINVAL - addr is not a multiple of PAGE_SIZE
215 * -ENOMEM - Addresses in the range [addr, addr + len] are 215 * -ENOMEM - Addresses in the range [addr, addr + len] are
216 * invalid for the address space of this process, or 216 * invalid for the address space of this process, or
217 * specify one or more pages which are not currently 217 * specify one or more pages which are not currently
@@ -226,14 +226,14 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
226 unsigned char *tmp; 226 unsigned char *tmp;
227 227
228 /* Check the start address: needs to be page-aligned.. */ 228 /* Check the start address: needs to be page-aligned.. */
229 if (start & ~PAGE_CACHE_MASK) 229 if (start & ~PAGE_MASK)
230 return -EINVAL; 230 return -EINVAL;
231 231
232 /* ..and we need to be passed a valid user-space range */ 232 /* ..and we need to be passed a valid user-space range */
233 if (!access_ok(VERIFY_READ, (void __user *) start, len)) 233 if (!access_ok(VERIFY_READ, (void __user *) start, len))
234 return -ENOMEM; 234 return -ENOMEM;
235 235
236 /* This also avoids any overflows on PAGE_CACHE_ALIGN */ 236 /* This also avoids any overflows on PAGE_ALIGN */
237 pages = len >> PAGE_SHIFT; 237 pages = len >> PAGE_SHIFT;
238 pages += (offset_in_page(len)) != 0; 238 pages += (offset_in_page(len)) != 0;
239 239
diff --git a/mm/nommu.c b/mm/nommu.c
index de8b6b6580c1..c8bd59a03c71 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,8 +15,6 @@
15 15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 17
18#define __DISABLE_GUP_DEPRECATED
19
20#include <linux/export.h> 18#include <linux/export.h>
21#include <linux/mm.h> 19#include <linux/mm.h>
22#include <linux/vmacache.h> 20#include <linux/vmacache.h>
@@ -141,7 +139,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
141 if (pages) { 139 if (pages) {
142 pages[i] = virt_to_page(start); 140 pages[i] = virt_to_page(start);
143 if (pages[i]) 141 if (pages[i])
144 page_cache_get(pages[i]); 142 get_page(pages[i]);
145 } 143 }
146 if (vmas) 144 if (vmas)
147 vmas[i] = vma; 145 vmas[i] = vma;
@@ -161,7 +159,7 @@ finish_or_fault:
161 * slab page or a secondary page from a compound page 159 * slab page or a secondary page from a compound page
162 * - don't permit access to VMAs that don't support it, such as I/O mappings 160 * - don't permit access to VMAs that don't support it, such as I/O mappings
163 */ 161 */
164long get_user_pages6(unsigned long start, unsigned long nr_pages, 162long get_user_pages(unsigned long start, unsigned long nr_pages,
165 int write, int force, struct page **pages, 163 int write, int force, struct page **pages,
166 struct vm_area_struct **vmas) 164 struct vm_area_struct **vmas)
167{ 165{
@@ -175,15 +173,15 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
175 return __get_user_pages(current, current->mm, start, nr_pages, flags, 173 return __get_user_pages(current, current->mm, start, nr_pages, flags,
176 pages, vmas, NULL); 174 pages, vmas, NULL);
177} 175}
178EXPORT_SYMBOL(get_user_pages6); 176EXPORT_SYMBOL(get_user_pages);
179 177
180long get_user_pages_locked6(unsigned long start, unsigned long nr_pages, 178long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
181 int write, int force, struct page **pages, 179 int write, int force, struct page **pages,
182 int *locked) 180 int *locked)
183{ 181{
184 return get_user_pages6(start, nr_pages, write, force, pages, NULL); 182 return get_user_pages(start, nr_pages, write, force, pages, NULL);
185} 183}
186EXPORT_SYMBOL(get_user_pages_locked6); 184EXPORT_SYMBOL(get_user_pages_locked);
187 185
188long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 186long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
189 unsigned long start, unsigned long nr_pages, 187 unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
199} 197}
200EXPORT_SYMBOL(__get_user_pages_unlocked); 198EXPORT_SYMBOL(__get_user_pages_unlocked);
201 199
202long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages, 200long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
203 int write, int force, struct page **pages) 201 int write, int force, struct page **pages)
204{ 202{
205 return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 203 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
206 write, force, pages, 0); 204 write, force, pages, 0);
207} 205}
208EXPORT_SYMBOL(get_user_pages_unlocked5); 206EXPORT_SYMBOL(get_user_pages_unlocked);
209 207
210/** 208/**
211 * follow_pfn - look up PFN at a user virtual address 209 * follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@ static int __meminit init_admin_reserve(void)
1989 return 0; 1987 return 0;
1990} 1988}
1991subsys_initcall(init_admin_reserve); 1989subsys_initcall(init_admin_reserve);
1992
1993long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
1994 unsigned long start, unsigned long nr_pages,
1995 int write, int force, struct page **pages,
1996 struct vm_area_struct **vmas)
1997{
1998 return get_user_pages6(start, nr_pages, write, force, pages, vmas);
1999}
2000EXPORT_SYMBOL(get_user_pages8);
2001
2002long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
2003 unsigned long start, unsigned long nr_pages,
2004 int write, int force, struct page **pages,
2005 int *locked)
2006{
2007 return get_user_pages_locked6(start, nr_pages, write,
2008 force, pages, locked);
2009}
2010EXPORT_SYMBOL(get_user_pages_locked8);
2011
2012long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
2013 unsigned long start, unsigned long nr_pages,
2014 int write, int force, struct page **pages)
2015{
2016 return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
2017}
2018EXPORT_SYMBOL(get_user_pages_unlocked7);
2019
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 11ff8f758631..999792d35ccc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2176,8 +2176,8 @@ int write_cache_pages(struct address_space *mapping,
2176 cycled = 0; 2176 cycled = 0;
2177 end = -1; 2177 end = -1;
2178 } else { 2178 } else {
2179 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2179 index = wbc->range_start >> PAGE_SHIFT;
2180 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2180 end = wbc->range_end >> PAGE_SHIFT;
2181 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2181 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2182 range_whole = 1; 2182 range_whole = 1;
2183 cycled = 1; /* ignore range_cyclic tests */ 2183 cycled = 1; /* ignore range_cyclic tests */
@@ -2382,14 +2382,14 @@ int write_one_page(struct page *page, int wait)
2382 wait_on_page_writeback(page); 2382 wait_on_page_writeback(page);
2383 2383
2384 if (clear_page_dirty_for_io(page)) { 2384 if (clear_page_dirty_for_io(page)) {
2385 page_cache_get(page); 2385 get_page(page);
2386 ret = mapping->a_ops->writepage(page, &wbc); 2386 ret = mapping->a_ops->writepage(page, &wbc);
2387 if (ret == 0 && wait) { 2387 if (ret == 0 && wait) {
2388 wait_on_page_writeback(page); 2388 wait_on_page_writeback(page);
2389 if (PageError(page)) 2389 if (PageError(page))
2390 ret = -EIO; 2390 ret = -EIO;
2391 } 2391 }
2392 page_cache_release(page); 2392 put_page(page);
2393 } else { 2393 } else {
2394 unlock_page(page); 2394 unlock_page(page);
2395 } 2395 }
@@ -2431,7 +2431,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
2431 __inc_zone_page_state(page, NR_DIRTIED); 2431 __inc_zone_page_state(page, NR_DIRTIED);
2432 __inc_wb_stat(wb, WB_RECLAIMABLE); 2432 __inc_wb_stat(wb, WB_RECLAIMABLE);
2433 __inc_wb_stat(wb, WB_DIRTIED); 2433 __inc_wb_stat(wb, WB_DIRTIED);
2434 task_io_account_write(PAGE_CACHE_SIZE); 2434 task_io_account_write(PAGE_SIZE);
2435 current->nr_dirtied++; 2435 current->nr_dirtied++;
2436 this_cpu_inc(bdp_ratelimits); 2436 this_cpu_inc(bdp_ratelimits);
2437 } 2437 }
@@ -2450,7 +2450,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
2450 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); 2450 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2451 dec_zone_page_state(page, NR_FILE_DIRTY); 2451 dec_zone_page_state(page, NR_FILE_DIRTY);
2452 dec_wb_stat(wb, WB_RECLAIMABLE); 2452 dec_wb_stat(wb, WB_RECLAIMABLE);
2453 task_io_account_cancelled_write(PAGE_CACHE_SIZE); 2453 task_io_account_cancelled_write(PAGE_SIZE);
2454 } 2454 }
2455} 2455}
2456 2456
diff --git a/mm/page_io.c b/mm/page_io.c
index 18aac7819cc9..985f23cfa79b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -252,7 +252,7 @@ out:
252 252
253static sector_t swap_page_sector(struct page *page) 253static sector_t swap_page_sector(struct page *page)
254{ 254{
255 return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9); 255 return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
256} 256}
257 257
258int __swap_writepage(struct page *page, struct writeback_control *wbc, 258int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
353 353
354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); 354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
355 if (!ret) { 355 if (!ret) {
356 swap_slot_free_notify(page); 356 if (trylock_page(page)) {
357 swap_slot_free_notify(page);
358 unlock_page(page);
359 }
360
357 count_vm_event(PSWPIN); 361 count_vm_event(PSWPIN);
358 return 0; 362 return 0;
359 } 363 }
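
The mm/page_io.c fix covers the synchronous-completion path of swap_readpage(): when bdev_read_page() succeeds immediately, swap_slot_free_notify() was being invoked on an unlocked page, while the callback can free the swap slot and its other call sites run under the page lock (the locking requirement is an inference from the surrounding code, not something this excerpt states). The invariant the fix restores, as a sketch; skipping the notification when trylock_page() fails is harmless, since it only delays slot reuse for zram-style backends:

/* notify only while holding the page lock */
if (trylock_page(page)) {
	swap_slot_free_notify(page);	/* may free the swap slot */
	unlock_page(page);
}
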
diff --git a/mm/readahead.c b/mm/readahead.c
index 20e58e820e44..40be3ae0afe3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -47,11 +47,11 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
47 if (!trylock_page(page)) 47 if (!trylock_page(page))
48 BUG(); 48 BUG();
49 page->mapping = mapping; 49 page->mapping = mapping;
50 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 50 do_invalidatepage(page, 0, PAGE_SIZE);
51 page->mapping = NULL; 51 page->mapping = NULL;
52 unlock_page(page); 52 unlock_page(page);
53 } 53 }
54 page_cache_release(page); 54 put_page(page);
55} 55}
56 56
57/* 57/*
@@ -93,14 +93,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
93 read_cache_pages_invalidate_page(mapping, page); 93 read_cache_pages_invalidate_page(mapping, page);
94 continue; 94 continue;
95 } 95 }
96 page_cache_release(page); 96 put_page(page);
97 97
98 ret = filler(data, page); 98 ret = filler(data, page);
99 if (unlikely(ret)) { 99 if (unlikely(ret)) {
100 read_cache_pages_invalidate_pages(mapping, pages); 100 read_cache_pages_invalidate_pages(mapping, pages);
101 break; 101 break;
102 } 102 }
103 task_io_account_read(PAGE_CACHE_SIZE); 103 task_io_account_read(PAGE_SIZE);
104 } 104 }
105 return ret; 105 return ret;
106} 106}
@@ -130,7 +130,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
130 mapping_gfp_constraint(mapping, GFP_KERNEL))) { 130 mapping_gfp_constraint(mapping, GFP_KERNEL))) {
131 mapping->a_ops->readpage(filp, page); 131 mapping->a_ops->readpage(filp, page);
132 } 132 }
133 page_cache_release(page); 133 put_page(page);
134 } 134 }
135 ret = 0; 135 ret = 0;
136 136
@@ -163,7 +163,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
163 if (isize == 0) 163 if (isize == 0)
164 goto out; 164 goto out;
165 165
166 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); 166 end_index = ((isize - 1) >> PAGE_SHIFT);
167 167
168 /* 168 /*
169 * Preallocate as many pages as we will need. 169 * Preallocate as many pages as we will need.
@@ -216,7 +216,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
216 while (nr_to_read) { 216 while (nr_to_read) {
217 int err; 217 int err;
218 218
219 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE; 219 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
220 220
221 if (this_chunk > nr_to_read) 221 if (this_chunk > nr_to_read)
222 this_chunk = nr_to_read; 222 this_chunk = nr_to_read;
@@ -425,7 +425,7 @@ ondemand_readahead(struct address_space *mapping,
425 * trivial case: (offset - prev_offset) == 1 425 * trivial case: (offset - prev_offset) == 1
426 * unaligned reads: (offset - prev_offset) == 0 426 * unaligned reads: (offset - prev_offset) == 0
427 */ 427 */
428 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT; 428 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
429 if (offset - prev_offset <= 1UL) 429 if (offset - prev_offset <= 1UL)
430 goto initial_readahead; 430 goto initial_readahead;
431 431
@@ -558,8 +558,8 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
558 if (f.file) { 558 if (f.file) {
559 if (f.file->f_mode & FMODE_READ) { 559 if (f.file->f_mode & FMODE_READ) {
560 struct address_space *mapping = f.file->f_mapping; 560 struct address_space *mapping = f.file->f_mapping;
561 pgoff_t start = offset >> PAGE_CACHE_SHIFT; 561 pgoff_t start = offset >> PAGE_SHIFT;
562 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT; 562 pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
563 unsigned long len = end - start + 1; 563 unsigned long len = end - start + 1;
564 ret = do_readahead(mapping, f.file, start, len); 564 ret = do_readahead(mapping, f.file, start, len);
565 } 565 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 395e314b7996..307b555024ef 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1541,7 +1541,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1541 1541
1542discard: 1542discard:
1543 page_remove_rmap(page, PageHuge(page)); 1543 page_remove_rmap(page, PageHuge(page));
1544 page_cache_release(page); 1544 put_page(page);
1545 1545
1546out_unmap: 1546out_unmap:
1547 pte_unmap_unlock(pte, ptl); 1547 pte_unmap_unlock(pte, ptl);
diff --git a/mm/shmem.c b/mm/shmem.c
index 9428c51ab2d6..719bd6b88d98 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,8 +75,8 @@ static struct vfsmount *shm_mnt;
75 75
76#include "internal.h" 76#include "internal.h"
77 77
78#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) 78#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
79#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) 79#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
80 80
81/* Pretend that each entry is of this size in directory's i_size */ 81/* Pretend that each entry is of this size in directory's i_size */
82#define BOGO_DIRENT_SIZE 20 82#define BOGO_DIRENT_SIZE 20
@@ -176,13 +176,13 @@ static inline int shmem_reacct_size(unsigned long flags,
176static inline int shmem_acct_block(unsigned long flags) 176static inline int shmem_acct_block(unsigned long flags)
177{ 177{
178 return (flags & VM_NORESERVE) ? 178 return (flags & VM_NORESERVE) ?
179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0; 179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
180} 180}
181 181
182static inline void shmem_unacct_blocks(unsigned long flags, long pages) 182static inline void shmem_unacct_blocks(unsigned long flags, long pages)
183{ 183{
184 if (flags & VM_NORESERVE) 184 if (flags & VM_NORESERVE)
185 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); 185 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
186} 186}
187 187
188static const struct super_operations shmem_ops; 188static const struct super_operations shmem_ops;
@@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struct page *page,
300 VM_BUG_ON_PAGE(!PageLocked(page), page); 300 VM_BUG_ON_PAGE(!PageLocked(page), page);
301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
302 302
303 page_cache_get(page); 303 get_page(page);
304 page->mapping = mapping; 304 page->mapping = mapping;
305 page->index = index; 305 page->index = index;
306 306
@@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struct page *page,
318 } else { 318 } else {
319 page->mapping = NULL; 319 page->mapping = NULL;
320 spin_unlock_irq(&mapping->tree_lock); 320 spin_unlock_irq(&mapping->tree_lock);
321 page_cache_release(page); 321 put_page(page);
322 } 322 }
323 return error; 323 return error;
324} 324}
@@ -338,7 +338,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
338 __dec_zone_page_state(page, NR_FILE_PAGES); 338 __dec_zone_page_state(page, NR_FILE_PAGES);
339 __dec_zone_page_state(page, NR_SHMEM); 339 __dec_zone_page_state(page, NR_SHMEM);
340 spin_unlock_irq(&mapping->tree_lock); 340 spin_unlock_irq(&mapping->tree_lock);
341 page_cache_release(page); 341 put_page(page);
342 BUG_ON(error); 342 BUG_ON(error);
343} 343}
344 344
@@ -474,10 +474,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
474{ 474{
475 struct address_space *mapping = inode->i_mapping; 475 struct address_space *mapping = inode->i_mapping;
476 struct shmem_inode_info *info = SHMEM_I(inode); 476 struct shmem_inode_info *info = SHMEM_I(inode);
477 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 477 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
478 pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT; 478 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
479 unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1); 479 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
480 unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 480 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
481 struct pagevec pvec; 481 struct pagevec pvec;
482 pgoff_t indices[PAGEVEC_SIZE]; 482 pgoff_t indices[PAGEVEC_SIZE];
483 long nr_swaps_freed = 0; 483 long nr_swaps_freed = 0;
@@ -530,7 +530,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
530 struct page *page = NULL; 530 struct page *page = NULL;
531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); 531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
532 if (page) { 532 if (page) {
533 unsigned int top = PAGE_CACHE_SIZE; 533 unsigned int top = PAGE_SIZE;
534 if (start > end) { 534 if (start > end) {
535 top = partial_end; 535 top = partial_end;
536 partial_end = 0; 536 partial_end = 0;
@@ -538,7 +538,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
538 zero_user_segment(page, partial_start, top); 538 zero_user_segment(page, partial_start, top);
539 set_page_dirty(page); 539 set_page_dirty(page);
540 unlock_page(page); 540 unlock_page(page);
541 page_cache_release(page); 541 put_page(page);
542 } 542 }
543 } 543 }
544 if (partial_end) { 544 if (partial_end) {
@@ -548,7 +548,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
548 zero_user_segment(page, 0, partial_end); 548 zero_user_segment(page, 0, partial_end);
549 set_page_dirty(page); 549 set_page_dirty(page);
550 unlock_page(page); 550 unlock_page(page);
551 page_cache_release(page); 551 put_page(page);
552 } 552 }
553 } 553 }
554 if (start >= end) 554 if (start >= end)
@@ -833,7 +833,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
833 mem_cgroup_commit_charge(page, memcg, true, false); 833 mem_cgroup_commit_charge(page, memcg, true, false);
834out: 834out:
835 unlock_page(page); 835 unlock_page(page);
836 page_cache_release(page); 836 put_page(page);
837 return error; 837 return error;
838} 838}
839 839
@@ -1080,7 +1080,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1080 if (!newpage) 1080 if (!newpage)
1081 return -ENOMEM; 1081 return -ENOMEM;
1082 1082
1083 page_cache_get(newpage); 1083 get_page(newpage);
1084 copy_highpage(newpage, oldpage); 1084 copy_highpage(newpage, oldpage);
1085 flush_dcache_page(newpage); 1085 flush_dcache_page(newpage);
1086 1086
@@ -1120,8 +1120,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1120 set_page_private(oldpage, 0); 1120 set_page_private(oldpage, 0);
1121 1121
1122 unlock_page(oldpage); 1122 unlock_page(oldpage);
1123 page_cache_release(oldpage); 1123 put_page(oldpage);
1124 page_cache_release(oldpage); 1124 put_page(oldpage);
1125 return error; 1125 return error;
1126} 1126}
1127 1127
@@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1145 int once = 0; 1145 int once = 0;
1146 int alloced = 0; 1146 int alloced = 0;
1147 1147
1148 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) 1148 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1149 return -EFBIG; 1149 return -EFBIG;
1150repeat: 1150repeat:
1151 swap.val = 0; 1151 swap.val = 0;
@@ -1156,7 +1156,7 @@ repeat:
1156 } 1156 }
1157 1157
1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1159 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1159 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1160 error = -EINVAL; 1160 error = -EINVAL;
1161 goto unlock; 1161 goto unlock;
1162 } 1162 }
@@ -1169,7 +1169,7 @@ repeat:
1169 if (sgp != SGP_READ) 1169 if (sgp != SGP_READ)
1170 goto clear; 1170 goto clear;
1171 unlock_page(page); 1171 unlock_page(page);
1172 page_cache_release(page); 1172 put_page(page);
1173 page = NULL; 1173 page = NULL;
1174 } 1174 }
1175 if (page || (sgp == SGP_READ && !swap.val)) { 1175 if (page || (sgp == SGP_READ && !swap.val)) {
@@ -1327,7 +1327,7 @@ clear:
1327 1327
1328 /* Perhaps the file has been truncated since we checked */ 1328 /* Perhaps the file has been truncated since we checked */
1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1330 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1330 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1331 if (alloced) { 1331 if (alloced) {
1332 ClearPageDirty(page); 1332 ClearPageDirty(page);
1333 delete_from_page_cache(page); 1333 delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@ failed:
1355unlock: 1355unlock:
1356 if (page) { 1356 if (page) {
1357 unlock_page(page); 1357 unlock_page(page);
1358 page_cache_release(page); 1358 put_page(page);
1359 } 1359 }
1360 if (error == -ENOSPC && !once++) { 1360 if (error == -ENOSPC && !once++) {
1361 info = SHMEM_I(inode); 1361 info = SHMEM_I(inode);
@@ -1577,7 +1577,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
1577{ 1577{
1578 struct inode *inode = mapping->host; 1578 struct inode *inode = mapping->host;
1579 struct shmem_inode_info *info = SHMEM_I(inode); 1579 struct shmem_inode_info *info = SHMEM_I(inode);
1580 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1580 pgoff_t index = pos >> PAGE_SHIFT;
1581 1581
1582 /* i_mutex is held by caller */ 1582 /* i_mutex is held by caller */
1583 if (unlikely(info->seals)) { 1583 if (unlikely(info->seals)) {
@@ -1601,16 +1601,16 @@ shmem_write_end(struct file *file, struct address_space *mapping,
1601 i_size_write(inode, pos + copied); 1601 i_size_write(inode, pos + copied);
1602 1602
1603 if (!PageUptodate(page)) { 1603 if (!PageUptodate(page)) {
1604 if (copied < PAGE_CACHE_SIZE) { 1604 if (copied < PAGE_SIZE) {
1605 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1605 unsigned from = pos & (PAGE_SIZE - 1);
1606 zero_user_segments(page, 0, from, 1606 zero_user_segments(page, 0, from,
1607 from + copied, PAGE_CACHE_SIZE); 1607 from + copied, PAGE_SIZE);
1608 } 1608 }
1609 SetPageUptodate(page); 1609 SetPageUptodate(page);
1610 } 1610 }
1611 set_page_dirty(page); 1611 set_page_dirty(page);
1612 unlock_page(page); 1612 unlock_page(page);
1613 page_cache_release(page); 1613 put_page(page);
1614 1614
1615 return copied; 1615 return copied;
1616} 1616}
@@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1635 if (!iter_is_iovec(to)) 1635 if (!iter_is_iovec(to))
1636 sgp = SGP_DIRTY; 1636 sgp = SGP_DIRTY;
1637 1637
1638 index = *ppos >> PAGE_CACHE_SHIFT; 1638 index = *ppos >> PAGE_SHIFT;
1639 offset = *ppos & ~PAGE_CACHE_MASK; 1639 offset = *ppos & ~PAGE_MASK;
1640 1640
1641 for (;;) { 1641 for (;;) {
1642 struct page *page = NULL; 1642 struct page *page = NULL;
@@ -1644,11 +1644,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1644 unsigned long nr, ret; 1644 unsigned long nr, ret;
1645 loff_t i_size = i_size_read(inode); 1645 loff_t i_size = i_size_read(inode);
1646 1646
1647 end_index = i_size >> PAGE_CACHE_SHIFT; 1647 end_index = i_size >> PAGE_SHIFT;
1648 if (index > end_index) 1648 if (index > end_index)
1649 break; 1649 break;
1650 if (index == end_index) { 1650 if (index == end_index) {
1651 nr = i_size & ~PAGE_CACHE_MASK; 1651 nr = i_size & ~PAGE_MASK;
1652 if (nr <= offset) 1652 if (nr <= offset)
1653 break; 1653 break;
1654 } 1654 }
@@ -1666,14 +1666,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1666 * We must evaluate after, since reads (unlike writes) 1666 * We must evaluate after, since reads (unlike writes)
1667 * are called without i_mutex protection against truncate 1667 * are called without i_mutex protection against truncate
1668 */ 1668 */
1669 nr = PAGE_CACHE_SIZE; 1669 nr = PAGE_SIZE;
1670 i_size = i_size_read(inode); 1670 i_size = i_size_read(inode);
1671 end_index = i_size >> PAGE_CACHE_SHIFT; 1671 end_index = i_size >> PAGE_SHIFT;
1672 if (index == end_index) { 1672 if (index == end_index) {
1673 nr = i_size & ~PAGE_CACHE_MASK; 1673 nr = i_size & ~PAGE_MASK;
1674 if (nr <= offset) { 1674 if (nr <= offset) {
1675 if (page) 1675 if (page)
1676 page_cache_release(page); 1676 put_page(page);
1677 break; 1677 break;
1678 } 1678 }
1679 } 1679 }
@@ -1694,7 +1694,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1694 mark_page_accessed(page); 1694 mark_page_accessed(page);
1695 } else { 1695 } else {
1696 page = ZERO_PAGE(0); 1696 page = ZERO_PAGE(0);
1697 page_cache_get(page); 1697 get_page(page);
1698 } 1698 }
1699 1699
1700 /* 1700 /*
@@ -1704,10 +1704,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1704 ret = copy_page_to_iter(page, offset, nr, to); 1704 ret = copy_page_to_iter(page, offset, nr, to);
1705 retval += ret; 1705 retval += ret;
1706 offset += ret; 1706 offset += ret;
1707 index += offset >> PAGE_CACHE_SHIFT; 1707 index += offset >> PAGE_SHIFT;
1708 offset &= ~PAGE_CACHE_MASK; 1708 offset &= ~PAGE_MASK;
1709 1709
1710 page_cache_release(page); 1710 put_page(page);
1711 if (!iov_iter_count(to)) 1711 if (!iov_iter_count(to))
1712 break; 1712 break;
1713 if (ret < nr) { 1713 if (ret < nr) {
@@ -1717,7 +1717,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1717 cond_resched(); 1717 cond_resched();
1718 } 1718 }
1719 1719
1720 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 1720 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
1721 file_accessed(file); 1721 file_accessed(file);
1722 return retval ? retval : error; 1722 return retval ? retval : error;
1723} 1723}
@@ -1755,9 +1755,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1755 if (splice_grow_spd(pipe, &spd)) 1755 if (splice_grow_spd(pipe, &spd))
1756 return -ENOMEM; 1756 return -ENOMEM;
1757 1757
1758 index = *ppos >> PAGE_CACHE_SHIFT; 1758 index = *ppos >> PAGE_SHIFT;
1759 loff = *ppos & ~PAGE_CACHE_MASK; 1759 loff = *ppos & ~PAGE_MASK;
1760 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1760 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
1761 nr_pages = min(req_pages, spd.nr_pages_max); 1761 nr_pages = min(req_pages, spd.nr_pages_max);
1762 1762
1763 spd.nr_pages = find_get_pages_contig(mapping, index, 1763 spd.nr_pages = find_get_pages_contig(mapping, index,
@@ -1774,7 +1774,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1774 index++; 1774 index++;
1775 } 1775 }
1776 1776
1777 index = *ppos >> PAGE_CACHE_SHIFT; 1777 index = *ppos >> PAGE_SHIFT;
1778 nr_pages = spd.nr_pages; 1778 nr_pages = spd.nr_pages;
1779 spd.nr_pages = 0; 1779 spd.nr_pages = 0;
1780 1780
@@ -1784,7 +1784,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1784 if (!len) 1784 if (!len)
1785 break; 1785 break;
1786 1786
1787 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 1787 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
1788 page = spd.pages[page_nr]; 1788 page = spd.pages[page_nr];
1789 1789
1790 if (!PageUptodate(page) || page->mapping != mapping) { 1790 if (!PageUptodate(page) || page->mapping != mapping) {
@@ -1793,19 +1793,19 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1793 if (error) 1793 if (error)
1794 break; 1794 break;
1795 unlock_page(page); 1795 unlock_page(page);
1796 page_cache_release(spd.pages[page_nr]); 1796 put_page(spd.pages[page_nr]);
1797 spd.pages[page_nr] = page; 1797 spd.pages[page_nr] = page;
1798 } 1798 }
1799 1799
1800 isize = i_size_read(inode); 1800 isize = i_size_read(inode);
1801 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1801 end_index = (isize - 1) >> PAGE_SHIFT;
1802 if (unlikely(!isize || index > end_index)) 1802 if (unlikely(!isize || index > end_index))
1803 break; 1803 break;
1804 1804
1805 if (end_index == index) { 1805 if (end_index == index) {
1806 unsigned int plen; 1806 unsigned int plen;
1807 1807
1808 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1808 plen = ((isize - 1) & ~PAGE_MASK) + 1;
1809 if (plen <= loff) 1809 if (plen <= loff)
1810 break; 1810 break;
1811 1811
@@ -1822,7 +1822,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1822 } 1822 }
1823 1823
1824 while (page_nr < nr_pages) 1824 while (page_nr < nr_pages)
1825 page_cache_release(spd.pages[page_nr++]); 1825 put_page(spd.pages[page_nr++]);
1826 1826
1827 if (spd.nr_pages) 1827 if (spd.nr_pages)
1828 error = splice_to_pipe(pipe, &spd); 1828 error = splice_to_pipe(pipe, &spd);
@@ -1904,10 +1904,10 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1904 else if (offset >= inode->i_size) 1904 else if (offset >= inode->i_size)
1905 offset = -ENXIO; 1905 offset = -ENXIO;
1906 else { 1906 else {
1907 start = offset >> PAGE_CACHE_SHIFT; 1907 start = offset >> PAGE_SHIFT;
1908 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1908 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence); 1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1910 new_offset <<= PAGE_CACHE_SHIFT; 1910 new_offset <<= PAGE_SHIFT;
1911 if (new_offset > offset) { 1911 if (new_offset > offset) {
1912 if (new_offset < inode->i_size) 1912 if (new_offset < inode->i_size)
1913 offset = new_offset; 1913 offset = new_offset;
@@ -2203,8 +2203,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2203 goto out; 2203 goto out;
2204 } 2204 }
2205 2205
2206 start = offset >> PAGE_CACHE_SHIFT; 2206 start = offset >> PAGE_SHIFT;
2207 end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 2207 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2208 /* Try to avoid a swapstorm if len is impossible to satisfy */ 2208 /* Try to avoid a swapstorm if len is impossible to satisfy */
2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2210 error = -ENOSPC; 2210 error = -ENOSPC;
@@ -2237,8 +2237,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2237 if (error) { 2237 if (error) {
2238 /* Remove the !PageUptodate pages we added */ 2238 /* Remove the !PageUptodate pages we added */
2239 shmem_undo_range(inode, 2239 shmem_undo_range(inode,
2240 (loff_t)start << PAGE_CACHE_SHIFT, 2240 (loff_t)start << PAGE_SHIFT,
2241 (loff_t)index << PAGE_CACHE_SHIFT, true); 2241 (loff_t)index << PAGE_SHIFT, true);
2242 goto undone; 2242 goto undone;
2243 } 2243 }
2244 2244
@@ -2259,7 +2259,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2259 */ 2259 */
2260 set_page_dirty(page); 2260 set_page_dirty(page);
2261 unlock_page(page); 2261 unlock_page(page);
2262 page_cache_release(page); 2262 put_page(page);
2263 cond_resched(); 2263 cond_resched();
2264 } 2264 }
2265 2265
@@ -2280,7 +2280,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2281 2281
2282 buf->f_type = TMPFS_MAGIC; 2282 buf->f_type = TMPFS_MAGIC;
2283 buf->f_bsize = PAGE_CACHE_SIZE; 2283 buf->f_bsize = PAGE_SIZE;
2284 buf->f_namelen = NAME_MAX; 2284 buf->f_namelen = NAME_MAX;
2285 if (sbinfo->max_blocks) { 2285 if (sbinfo->max_blocks) {
2286 buf->f_blocks = sbinfo->max_blocks; 2286 buf->f_blocks = sbinfo->max_blocks;
@@ -2523,7 +2523,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2523 struct shmem_inode_info *info; 2523 struct shmem_inode_info *info;
2524 2524
2525 len = strlen(symname) + 1; 2525 len = strlen(symname) + 1;
2526 if (len > PAGE_CACHE_SIZE) 2526 if (len > PAGE_SIZE)
2527 return -ENAMETOOLONG; 2527 return -ENAMETOOLONG;
2528 2528
2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
@@ -2562,7 +2562,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2562 SetPageUptodate(page); 2562 SetPageUptodate(page);
2563 set_page_dirty(page); 2563 set_page_dirty(page);
2564 unlock_page(page); 2564 unlock_page(page);
2565 page_cache_release(page); 2565 put_page(page);
2566 } 2566 }
2567 dir->i_size += BOGO_DIRENT_SIZE; 2567 dir->i_size += BOGO_DIRENT_SIZE;
2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -2835,7 +2835,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2835 if (*rest) 2835 if (*rest)
2836 goto bad_val; 2836 goto bad_val;
2837 sbinfo->max_blocks = 2837 sbinfo->max_blocks =
2838 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2838 DIV_ROUND_UP(size, PAGE_SIZE);
2839 } else if (!strcmp(this_char,"nr_blocks")) { 2839 } else if (!strcmp(this_char,"nr_blocks")) {
2840 sbinfo->max_blocks = memparse(value, &rest); 2840 sbinfo->max_blocks = memparse(value, &rest);
2841 if (*rest) 2841 if (*rest)
@@ -2940,7 +2940,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2940 2940
2941 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2941 if (sbinfo->max_blocks != shmem_default_max_blocks())
2942 seq_printf(seq, ",size=%luk", 2942 seq_printf(seq, ",size=%luk",
2943 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2943 sbinfo->max_blocks << (PAGE_SHIFT - 10));
2944 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2944 if (sbinfo->max_inodes != shmem_default_max_inodes())
2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
@@ -3082,8 +3082,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
3082 sbinfo->free_inodes = sbinfo->max_inodes; 3082 sbinfo->free_inodes = sbinfo->max_inodes;
3083 3083
3084 sb->s_maxbytes = MAX_LFS_FILESIZE; 3084 sb->s_maxbytes = MAX_LFS_FILESIZE;
3085 sb->s_blocksize = PAGE_CACHE_SIZE; 3085 sb->s_blocksize = PAGE_SIZE;
3086 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 3086 sb->s_blocksize_bits = PAGE_SHIFT;
3087 sb->s_magic = TMPFS_MAGIC; 3087 sb->s_magic = TMPFS_MAGIC;
3088 sb->s_op = &shmem_ops; 3088 sb->s_op = &shmem_ops;
3089 sb->s_time_gran = 1; 3089 sb->s_time_gran = 1;
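
shmem_undo_range() splits a byte range into fully covered pages plus partial head and tail offsets; the patch only respells the constants. A sketch of that split, again assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                     /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long long lstart = 1000;     /* first byte of the hole */
        unsigned long long lend   = 10000;    /* last byte, inclusive */

        /* mirrors the arithmetic at the top of shmem_undo_range() */
        unsigned long long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long long end   = (lend + 1) >> PAGE_SHIFT;  /* exclusive */
        unsigned partial_start = lstart & (PAGE_SIZE - 1);
        unsigned partial_end   = (lend + 1) & (PAGE_SIZE - 1);

        printf("whole pages [%llu, %llu), head offset %u, tail offset %u\n",
               start, end, partial_start, partial_end);
        return 0;
    }
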
diff --git a/mm/swap.c b/mm/swap.c
index 09fe5e97714a..03aacbcb013f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -114,7 +114,7 @@ void put_pages_list(struct list_head *pages)
114 114
115 victim = list_entry(pages->prev, struct page, lru); 115 victim = list_entry(pages->prev, struct page, lru);
116 list_del(&victim->lru); 116 list_del(&victim->lru);
117 page_cache_release(victim); 117 put_page(victim);
118 } 118 }
119} 119}
120EXPORT_SYMBOL(put_pages_list); 120EXPORT_SYMBOL(put_pages_list);
@@ -142,7 +142,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
142 return seg; 142 return seg;
143 143
144 pages[seg] = kmap_to_page(kiov[seg].iov_base); 144 pages[seg] = kmap_to_page(kiov[seg].iov_base);
145 page_cache_get(pages[seg]); 145 get_page(pages[seg]);
146 } 146 }
147 147
148 return seg; 148 return seg;
@@ -236,7 +236,7 @@ void rotate_reclaimable_page(struct page *page)
236 struct pagevec *pvec; 236 struct pagevec *pvec;
237 unsigned long flags; 237 unsigned long flags;
238 238
239 page_cache_get(page); 239 get_page(page);
240 local_irq_save(flags); 240 local_irq_save(flags);
241 pvec = this_cpu_ptr(&lru_rotate_pvecs); 241 pvec = this_cpu_ptr(&lru_rotate_pvecs);
242 if (!pagevec_add(pvec, page)) 242 if (!pagevec_add(pvec, page))
@@ -294,7 +294,7 @@ void activate_page(struct page *page)
294 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 294 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
295 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); 295 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
296 296
297 page_cache_get(page); 297 get_page(page);
298 if (!pagevec_add(pvec, page)) 298 if (!pagevec_add(pvec, page))
299 pagevec_lru_move_fn(pvec, __activate_page, NULL); 299 pagevec_lru_move_fn(pvec, __activate_page, NULL);
300 put_cpu_var(activate_page_pvecs); 300 put_cpu_var(activate_page_pvecs);
@@ -389,7 +389,7 @@ static void __lru_cache_add(struct page *page)
389{ 389{
390 struct pagevec *pvec = &get_cpu_var(lru_add_pvec); 390 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
391 391
392 page_cache_get(page); 392 get_page(page);
393 if (!pagevec_space(pvec)) 393 if (!pagevec_space(pvec))
394 __pagevec_lru_add(pvec); 394 __pagevec_lru_add(pvec);
395 pagevec_add(pvec, page); 395 pagevec_add(pvec, page);
@@ -646,7 +646,7 @@ void deactivate_page(struct page *page)
646 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { 646 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); 647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
648 648
649 page_cache_get(page); 649 get_page(page);
650 if (!pagevec_add(pvec, page)) 650 if (!pagevec_add(pvec, page))
651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); 651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
652 put_cpu_var(lru_deactivate_pvecs); 652 put_cpu_var(lru_deactivate_pvecs);
@@ -698,7 +698,7 @@ void lru_add_drain_all(void)
698} 698}
699 699
700/** 700/**
701 * release_pages - batched page_cache_release() 701 * release_pages - batched put_page()
702 * @pages: array of pages to release 702 * @pages: array of pages to release
703 * @nr: number of pages 703 * @nr: number of pages
704 * @cold: whether the pages are cache cold 704 * @cold: whether the pages are cache cold
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
728 zone = NULL; 728 zone = NULL;
729 } 729 }
730 730
731 if (is_huge_zero_page(page)) {
732 put_huge_zero_page();
733 continue;
734 }
735
731 page = compound_head(page); 736 page = compound_head(page);
732 if (!put_page_testzero(page)) 737 if (!put_page_testzero(page))
733 continue; 738 continue;
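
Beyond the rename from page_cache_get/page_cache_release to get_page/put_page, release_pages() now drops huge-zero-page references before the compound_head()/put_page_testzero() path. The underlying contract is ordinary reference counting: whichever put brings the count to zero frees the page. A minimal userspace analogue using C11 atomics (obj, obj_get and obj_put are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
    };

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->refcount, 1);
    }

    /* analogue of put_page(): the final put frees the object */
    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
            printf("last reference gone, freeing\n");
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        if (!o)
            return 1;
        atomic_init(&o->refcount, 1);     /* creator's reference */
        obj_get(o);                       /* e.g. a pagevec takes one */
        obj_put(o);                       /* pagevec drains */
        obj_put(o);                       /* creator's put frees */
        return 0;
    }
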
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 69cb2464e7dc..366ce3518703 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,7 +85,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
85 VM_BUG_ON_PAGE(PageSwapCache(page), page); 85 VM_BUG_ON_PAGE(PageSwapCache(page), page);
86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
87 87
88 page_cache_get(page); 88 get_page(page);
89 SetPageSwapCache(page); 89 SetPageSwapCache(page);
90 set_page_private(page, entry.val); 90 set_page_private(page, entry.val);
91 91
@@ -109,7 +109,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
109 VM_BUG_ON(error == -EEXIST); 109 VM_BUG_ON(error == -EEXIST);
110 set_page_private(page, 0UL); 110 set_page_private(page, 0UL);
111 ClearPageSwapCache(page); 111 ClearPageSwapCache(page);
112 page_cache_release(page); 112 put_page(page);
113 } 113 }
114 114
115 return error; 115 return error;
@@ -226,7 +226,7 @@ void delete_from_swap_cache(struct page *page)
226 spin_unlock_irq(&address_space->tree_lock); 226 spin_unlock_irq(&address_space->tree_lock);
227 227
228 swapcache_free(entry); 228 swapcache_free(entry);
229 page_cache_release(page); 229 put_page(page);
230} 230}
231 231
232/* 232/*
@@ -252,7 +252,7 @@ static inline void free_swap_cache(struct page *page)
252void free_page_and_swap_cache(struct page *page) 252void free_page_and_swap_cache(struct page *page)
253{ 253{
254 free_swap_cache(page); 254 free_swap_cache(page);
255 page_cache_release(page); 255 put_page(page);
256} 256}
257 257
258/* 258/*
@@ -380,7 +380,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
380 } while (err != -ENOMEM); 380 } while (err != -ENOMEM);
381 381
382 if (new_page) 382 if (new_page)
383 page_cache_release(new_page); 383 put_page(new_page);
384 return found_page; 384 return found_page;
385} 385}
386 386
@@ -495,7 +495,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
495 continue; 495 continue;
496 if (offset != entry_offset) 496 if (offset != entry_offset)
497 SetPageReadahead(page); 497 SetPageReadahead(page);
498 page_cache_release(page); 498 put_page(page);
499 } 499 }
500 blk_finish_plug(&plug); 500 blk_finish_plug(&plug);
501 501
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 560ad380634c..83874eced5bf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -119,7 +119,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
119 ret = try_to_free_swap(page); 119 ret = try_to_free_swap(page);
120 unlock_page(page); 120 unlock_page(page);
121 } 121 }
122 page_cache_release(page); 122 put_page(page);
123 return ret; 123 return ret;
124} 124}
125 125
@@ -1000,7 +1000,7 @@ int free_swap_and_cache(swp_entry_t entry)
1000 page = find_get_page(swap_address_space(entry), 1000 page = find_get_page(swap_address_space(entry),
1001 entry.val); 1001 entry.val);
1002 if (page && !trylock_page(page)) { 1002 if (page && !trylock_page(page)) {
1003 page_cache_release(page); 1003 put_page(page);
1004 page = NULL; 1004 page = NULL;
1005 } 1005 }
1006 } 1006 }
@@ -1017,7 +1017,7 @@ int free_swap_and_cache(swp_entry_t entry)
1017 SetPageDirty(page); 1017 SetPageDirty(page);
1018 } 1018 }
1019 unlock_page(page); 1019 unlock_page(page);
1020 page_cache_release(page); 1020 put_page(page);
1021 } 1021 }
1022 return p != NULL; 1022 return p != NULL;
1023} 1023}
@@ -1518,7 +1518,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
1518 } 1518 }
1519 if (retval) { 1519 if (retval) {
1520 unlock_page(page); 1520 unlock_page(page);
1521 page_cache_release(page); 1521 put_page(page);
1522 break; 1522 break;
1523 } 1523 }
1524 1524
@@ -1570,7 +1570,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
1570 */ 1570 */
1571 SetPageDirty(page); 1571 SetPageDirty(page);
1572 unlock_page(page); 1572 unlock_page(page);
1573 page_cache_release(page); 1573 put_page(page);
1574 1574
1575 /* 1575 /*
1576 * Make sure that we aren't completely killing 1576 * Make sure that we aren't completely killing
@@ -2574,7 +2574,7 @@ bad_swap:
2574out: 2574out:
2575 if (page && !IS_ERR(page)) { 2575 if (page && !IS_ERR(page)) {
2576 kunmap(page); 2576 kunmap(page);
2577 page_cache_release(page); 2577 put_page(page);
2578 } 2578 }
2579 if (name) 2579 if (name)
2580 putname(name); 2580 putname(name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7598b552ae03..b00272810871 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
118 return -EIO; 118 return -EIO;
119 119
120 if (page_has_private(page)) 120 if (page_has_private(page))
121 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 121 do_invalidatepage(page, 0, PAGE_SIZE);
122 122
123 /* 123 /*
124 * Some filesystems seem to re-dirty the page even after 124 * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
159{ 159{
160 if (page_mapped(page)) { 160 if (page_mapped(page)) {
161 unmap_mapping_range(mapping, 161 unmap_mapping_range(mapping,
162 (loff_t)page->index << PAGE_CACHE_SHIFT, 162 (loff_t)page->index << PAGE_SHIFT,
163 PAGE_CACHE_SIZE, 0); 163 PAGE_SIZE, 0);
164 } 164 }
165 return truncate_complete_page(mapping, page); 165 return truncate_complete_page(mapping, page);
166} 166}
@@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
241 return; 241 return;
242 242
243 /* Offsets within partial pages */ 243 /* Offsets within partial pages */
244 partial_start = lstart & (PAGE_CACHE_SIZE - 1); 244 partial_start = lstart & (PAGE_SIZE - 1);
245 partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 245 partial_end = (lend + 1) & (PAGE_SIZE - 1);
246 246
247 /* 247 /*
248 * 'start' and 'end' always covers the range of pages to be fully 248 * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
250 * start of the range and 'partial_end' at the end of the range. 250 * start of the range and 'partial_end' at the end of the range.
251 * Note that 'end' is exclusive while 'lend' is inclusive. 251 * Note that 'end' is exclusive while 'lend' is inclusive.
252 */ 252 */
253 start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 253 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
254 if (lend == -1) 254 if (lend == -1)
255 /* 255 /*
256 * lend == -1 indicates end-of-file so we have to set 'end' 256 * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
259 */ 259 */
260 end = -1; 260 end = -1;
261 else 261 else
262 end = (lend + 1) >> PAGE_CACHE_SHIFT; 262 end = (lend + 1) >> PAGE_SHIFT;
263 263
264 pagevec_init(&pvec, 0); 264 pagevec_init(&pvec, 0);
265 index = start; 265 index = start;
@@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
298 if (partial_start) { 298 if (partial_start) {
299 struct page *page = find_lock_page(mapping, start - 1); 299 struct page *page = find_lock_page(mapping, start - 1);
300 if (page) { 300 if (page) {
301 unsigned int top = PAGE_CACHE_SIZE; 301 unsigned int top = PAGE_SIZE;
302 if (start > end) { 302 if (start > end) {
303 /* Truncation within a single page */ 303 /* Truncation within a single page */
304 top = partial_end; 304 top = partial_end;
@@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
311 do_invalidatepage(page, partial_start, 311 do_invalidatepage(page, partial_start,
312 top - partial_start); 312 top - partial_start);
313 unlock_page(page); 313 unlock_page(page);
314 page_cache_release(page); 314 put_page(page);
315 } 315 }
316 } 316 }
317 if (partial_end) { 317 if (partial_end) {
@@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
324 do_invalidatepage(page, 0, 324 do_invalidatepage(page, 0,
325 partial_end); 325 partial_end);
326 unlock_page(page); 326 unlock_page(page);
327 page_cache_release(page); 327 put_page(page);
328 } 328 }
329 } 329 }
330 /* 330 /*
@@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
538 if (mapping->a_ops->freepage) 538 if (mapping->a_ops->freepage)
539 mapping->a_ops->freepage(page); 539 mapping->a_ops->freepage(page);
540 540
541 page_cache_release(page); /* pagecache ref */ 541 put_page(page); /* pagecache ref */
542 return 1; 542 return 1;
543failed: 543failed:
544 spin_unlock_irqrestore(&mapping->tree_lock, flags); 544 spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
608 * Zap the rest of the file in one hit. 608 * Zap the rest of the file in one hit.
609 */ 609 */
610 unmap_mapping_range(mapping, 610 unmap_mapping_range(mapping,
611 (loff_t)index << PAGE_CACHE_SHIFT, 611 (loff_t)index << PAGE_SHIFT,
612 (loff_t)(1 + end - index) 612 (loff_t)(1 + end - index)
613 << PAGE_CACHE_SHIFT, 613 << PAGE_SHIFT,
614 0); 614 0);
615 did_range_unmap = 1; 615 did_range_unmap = 1;
616 } else { 616 } else {
617 /* 617 /*
618 * Just zap this page 618 * Just zap this page
619 */ 619 */
620 unmap_mapping_range(mapping, 620 unmap_mapping_range(mapping,
621 (loff_t)index << PAGE_CACHE_SHIFT, 621 (loff_t)index << PAGE_SHIFT,
622 PAGE_CACHE_SIZE, 0); 622 PAGE_SIZE, 0);
623 } 623 }
624 } 624 }
625 BUG_ON(page_mapped(page)); 625 BUG_ON(page_mapped(page));
@@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
744 744
745 WARN_ON(to > inode->i_size); 745 WARN_ON(to > inode->i_size);
746 746
747 if (from >= to || bsize == PAGE_CACHE_SIZE) 747 if (from >= to || bsize == PAGE_SIZE)
748 return; 748 return;
749 /* Page straddling @from will not have any hole block created? */ 749 /* Page straddling @from will not have any hole block created? */
750 rounded_from = round_up(from, bsize); 750 rounded_from = round_up(from, bsize);
751 if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) 751 if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
752 return; 752 return;
753 753
754 index = from >> PAGE_CACHE_SHIFT; 754 index = from >> PAGE_SHIFT;
755 page = find_lock_page(inode->i_mapping, index); 755 page = find_lock_page(inode->i_mapping, index);
756 /* Page not cached? Nothing to do */ 756 /* Page not cached? Nothing to do */
757 if (!page) 757 if (!page)
@@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
763 if (page_mkclean(page)) 763 if (page_mkclean(page))
764 set_page_dirty(page); 764 set_page_dirty(page);
765 unlock_page(page); 765 unlock_page(page);
766 page_cache_release(page); 766 put_page(page);
767} 767}
768EXPORT_SYMBOL(pagecache_isize_extended); 768EXPORT_SYMBOL(pagecache_isize_extended);
769 769
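
pagecache_isize_extended() only has work to do when a block boundary falls inside the page straddling the old i_size. A sketch of its early-exit tests, assuming 4 KiB pages and a 1 KiB filesystem block size (both assumptions of the example):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                     /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* round_up() for power-of-two alignments, as in the kernel headers */
    static uint64_t round_up_pow2(uint64_t x, uint64_t a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        uint64_t from  = 5000;                /* old i_size */
        uint64_t to    = 9000;                /* new i_size */
        uint64_t bsize = 1024;                /* fs block size, < PAGE_SIZE */

        uint64_t rounded_from = round_up_pow2(from, bsize);

        /* the same early-exit tests as pagecache_isize_extended() */
        if (from >= to || bsize == PAGE_SIZE)
            puts("nothing to do: blocks cover whole pages");
        else if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
            puts("nothing to do: no partial block inside the page");
        else
            printf("recheck page %llu for writable ptes\n",
                   (unsigned long long)(from >> PAGE_SHIFT));
        return 0;
    }
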
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9f3a0290b273..af817e5060fb 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -93,7 +93,7 @@ out_release_uncharge_unlock:
93 pte_unmap_unlock(dst_pte, ptl); 93 pte_unmap_unlock(dst_pte, ptl);
94 mem_cgroup_cancel_charge(page, memcg, false); 94 mem_cgroup_cancel_charge(page, memcg, false);
95out_release: 95out_release:
96 page_cache_release(page); 96 put_page(page);
97 goto out; 97 goto out;
98} 98}
99 99
@@ -287,7 +287,7 @@ out_unlock:
287 up_read(&dst_mm->mmap_sem); 287 up_read(&dst_mm->mmap_sem);
288out: 288out:
289 if (page) 289 if (page)
290 page_cache_release(page); 290 put_page(page);
291 BUG_ON(copied < 0); 291 BUG_ON(copied < 0);
292 BUG_ON(err > 0); 292 BUG_ON(err > 0);
293 BUG_ON(!copied && !err); 293 BUG_ON(!copied && !err);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223eaa45..142cb61f4822 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2553 sc->gfp_mask |= __GFP_HIGHMEM; 2553 sc->gfp_mask |= __GFP_HIGHMEM;
2554 2554
2555 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2555 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2556 requested_highidx, sc->nodemask) { 2556 gfp_zone(sc->gfp_mask), sc->nodemask) {
2557 enum zone_type classzone_idx; 2557 enum zone_type classzone_idx;
2558 2558
2559 if (!populated_zone(zone)) 2559 if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3318 /* Try to sleep for a short interval */ 3318 /* Try to sleep for a short interval */
3319 if (prepare_kswapd_sleep(pgdat, order, remaining, 3319 if (prepare_kswapd_sleep(pgdat, order, remaining,
3320 balanced_classzone_idx)) { 3320 balanced_classzone_idx)) {
3321 /*
3322 * Compaction records what page blocks it recently failed to
3323 * isolate pages from and skips them in the future scanning.
3324 * When kswapd is going to sleep, it is reasonable to assume
3325 * that pages and compaction may succeed so reset the cache.
3326 */
3327 reset_isolation_suitable(pgdat);
3328
3329 /*
3330 * We have freed the memory, now we should compact it to make
3331 * allocation of the requested order possible.
3332 */
3333 wakeup_kcompactd(pgdat, order, classzone_idx);
3334
3321 remaining = schedule_timeout(HZ/10); 3335 remaining = schedule_timeout(HZ/10);
3322 finish_wait(&pgdat->kswapd_wait, &wait); 3336 finish_wait(&pgdat->kswapd_wait, &wait);
3323 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3337 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3341 */ 3355 */
3342 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 3356 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3343 3357
3344 /*
3345 * Compaction records what page blocks it recently failed to
3346 * isolate pages from and skips them in the future scanning.
3347 * When kswapd is going to sleep, it is reasonable to assume
3348 * that pages and compaction may succeed so reset the cache.
3349 */
3350 reset_isolation_suitable(pgdat);
3351
3352 /*
3353 * We have freed the memory, now we should compact it to make
3354 * allocation of the requested order possible.
3355 */
3356 wakeup_kcompactd(pgdat, order, classzone_idx);
3357
3358 if (!kthread_should_stop()) 3358 if (!kthread_should_stop())
3359 schedule(); 3359 schedule();
3360 3360
diff --git a/mm/zswap.c b/mm/zswap.c
index bf14508afd64..91dad80d068b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -869,7 +869,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
869 869
870 case ZSWAP_SWAPCACHE_EXIST: 870 case ZSWAP_SWAPCACHE_EXIST:
871 /* page is already in the swap cache, ignore for now */ 871 /* page is already in the swap cache, ignore for now */
872 page_cache_release(page); 872 put_page(page);
873 ret = -EEXIST; 873 ret = -EEXIST;
874 goto fail; 874 goto fail;
875 875
@@ -897,7 +897,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
897 897
898 /* start writeback */ 898 /* start writeback */
899 __swap_writepage(page, &wbc, end_swap_bio_write); 899 __swap_writepage(page, &wbc, end_swap_bio_write);
900 page_cache_release(page); 900 put_page(page);
901 zswap_written_back_pages++; 901 zswap_written_back_pages++;
902 902
903 spin_lock(&tree->lock); 903 spin_lock(&tree->lock);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 253bc77eda3b..7dbc80d01eb0 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
61 e->flags |= MDB_FLAGS_OFFLOAD; 61 e->flags |= MDB_FLAGS_OFFLOAD;
62} 62}
63 63
64static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
65{
66 memset(ip, 0, sizeof(struct br_ip));
67 ip->vid = entry->vid;
68 ip->proto = entry->addr.proto;
69 if (ip->proto == htons(ETH_P_IP))
70 ip->u.ip4 = entry->addr.u.ip4;
71#if IS_ENABLED(CONFIG_IPV6)
72 else
73 ip->u.ip6 = entry->addr.u.ip6;
74#endif
75}
76
64static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, 77static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
65 struct net_device *dev) 78 struct net_device *dev)
66{ 79{
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
243 + nla_total_size(sizeof(struct br_mdb_entry)); 256 + nla_total_size(sizeof(struct br_mdb_entry));
244} 257}
245 258
246static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, 259struct br_mdb_complete_info {
247 int type, struct net_bridge_port_group *pg) 260 struct net_bridge_port *port;
261 struct br_ip ip;
262};
263
264static void br_mdb_complete(struct net_device *dev, int err, void *priv)
248{ 265{
266 struct br_mdb_complete_info *data = priv;
267 struct net_bridge_port_group __rcu **pp;
268 struct net_bridge_port_group *p;
269 struct net_bridge_mdb_htable *mdb;
270 struct net_bridge_mdb_entry *mp;
271 struct net_bridge_port *port = data->port;
272 struct net_bridge *br = port->br;
273
274 if (err)
275 goto err;
276
277 spin_lock_bh(&br->multicast_lock);
278 mdb = mlock_dereference(br->mdb, br);
279 mp = br_mdb_ip_get(mdb, &data->ip);
280 if (!mp)
281 goto out;
282 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
283 pp = &p->next) {
284 if (p->port != port)
285 continue;
286 p->flags |= MDB_PG_FLAGS_OFFLOAD;
287 }
288out:
289 spin_unlock_bh(&br->multicast_lock);
290err:
291 kfree(priv);
292}
293
294static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
295 struct br_mdb_entry *entry, int type)
296{
297 struct br_mdb_complete_info *complete_info;
249 struct switchdev_obj_port_mdb mdb = { 298 struct switchdev_obj_port_mdb mdb = {
250 .obj = { 299 .obj = {
251 .id = SWITCHDEV_OBJ_ID_PORT_MDB, 300 .id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
268 317
269 mdb.obj.orig_dev = port_dev; 318 mdb.obj.orig_dev = port_dev;
270 if (port_dev && type == RTM_NEWMDB) { 319 if (port_dev && type == RTM_NEWMDB) {
271 err = switchdev_port_obj_add(port_dev, &mdb.obj); 320 complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
272 if (!err && pg) 321 if (complete_info) {
273 pg->flags |= MDB_PG_FLAGS_OFFLOAD; 322 complete_info->port = p;
323 __mdb_entry_to_br_ip(entry, &complete_info->ip);
324 mdb.obj.complete_priv = complete_info;
325 mdb.obj.complete = br_mdb_complete;
326 switchdev_port_obj_add(port_dev, &mdb.obj);
327 }
274 } else if (port_dev && type == RTM_DELMDB) { 328 } else if (port_dev && type == RTM_DELMDB) {
275 switchdev_port_obj_del(port_dev, &mdb.obj); 329 switchdev_port_obj_del(port_dev, &mdb.obj);
276 } 330 }
@@ -291,21 +345,21 @@ errout:
291 rtnl_set_sk_err(net, RTNLGRP_MDB, err); 345 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
292} 346}
293 347
294void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 348void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
295 int type) 349 struct br_ip *group, int type, u8 flags)
296{ 350{
297 struct br_mdb_entry entry; 351 struct br_mdb_entry entry;
298 352
299 memset(&entry, 0, sizeof(entry)); 353 memset(&entry, 0, sizeof(entry));
300 entry.ifindex = pg->port->dev->ifindex; 354 entry.ifindex = port->dev->ifindex;
301 entry.addr.proto = pg->addr.proto; 355 entry.addr.proto = group->proto;
302 entry.addr.u.ip4 = pg->addr.u.ip4; 356 entry.addr.u.ip4 = group->u.ip4;
303#if IS_ENABLED(CONFIG_IPV6) 357#if IS_ENABLED(CONFIG_IPV6)
304 entry.addr.u.ip6 = pg->addr.u.ip6; 358 entry.addr.u.ip6 = group->u.ip6;
305#endif 359#endif
306 entry.vid = pg->addr.vid; 360 entry.vid = group->vid;
307 __mdb_entry_fill_flags(&entry, pg->flags); 361 __mdb_entry_fill_flags(&entry, flags);
308 __br_mdb_notify(dev, &entry, type, pg); 362 __br_mdb_notify(dev, port, &entry, type);
309} 363}
310 364
311static int nlmsg_populate_rtr_fill(struct sk_buff *skb, 365static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
450} 504}
451 505
452static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, 506static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
453 struct br_ip *group, unsigned char state, 507 struct br_ip *group, unsigned char state)
454 struct net_bridge_port_group **pg)
455{ 508{
456 struct net_bridge_mdb_entry *mp; 509 struct net_bridge_mdb_entry *mp;
457 struct net_bridge_port_group *p; 510 struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
482 if (unlikely(!p)) 535 if (unlikely(!p))
483 return -ENOMEM; 536 return -ENOMEM;
484 rcu_assign_pointer(*pp, p); 537 rcu_assign_pointer(*pp, p);
485 *pg = p;
486 if (state == MDB_TEMPORARY) 538 if (state == MDB_TEMPORARY)
487 mod_timer(&p->timer, now + br->multicast_membership_interval); 539 mod_timer(&p->timer, now + br->multicast_membership_interval);
488 540
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
490} 542}
491 543
492static int __br_mdb_add(struct net *net, struct net_bridge *br, 544static int __br_mdb_add(struct net *net, struct net_bridge *br,
493 struct br_mdb_entry *entry, 545 struct br_mdb_entry *entry)
494 struct net_bridge_port_group **pg)
495{ 546{
496 struct br_ip ip; 547 struct br_ip ip;
497 struct net_device *dev; 548 struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
509 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 560 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
510 return -EINVAL; 561 return -EINVAL;
511 562
512 memset(&ip, 0, sizeof(ip)); 563 __mdb_entry_to_br_ip(entry, &ip);
513 ip.vid = entry->vid;
514 ip.proto = entry->addr.proto;
515 if (ip.proto == htons(ETH_P_IP))
516 ip.u.ip4 = entry->addr.u.ip4;
517#if IS_ENABLED(CONFIG_IPV6)
518 else
519 ip.u.ip6 = entry->addr.u.ip6;
520#endif
521 564
522 spin_lock_bh(&br->multicast_lock); 565 spin_lock_bh(&br->multicast_lock);
523 ret = br_mdb_add_group(br, p, &ip, entry->state, pg); 566 ret = br_mdb_add_group(br, p, &ip, entry->state);
524 spin_unlock_bh(&br->multicast_lock); 567 spin_unlock_bh(&br->multicast_lock);
525 return ret; 568 return ret;
526} 569}
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
528static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) 571static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
529{ 572{
530 struct net *net = sock_net(skb->sk); 573 struct net *net = sock_net(skb->sk);
531 struct net_bridge_port_group *pg;
532 struct net_bridge_vlan_group *vg; 574 struct net_bridge_vlan_group *vg;
533 struct net_device *dev, *pdev; 575 struct net_device *dev, *pdev;
534 struct br_mdb_entry *entry; 576 struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
558 if (br_vlan_enabled(br) && vg && entry->vid == 0) { 600 if (br_vlan_enabled(br) && vg && entry->vid == 0) {
559 list_for_each_entry(v, &vg->vlan_list, vlist) { 601 list_for_each_entry(v, &vg->vlan_list, vlist) {
560 entry->vid = v->vid; 602 entry->vid = v->vid;
561 err = __br_mdb_add(net, br, entry, &pg); 603 err = __br_mdb_add(net, br, entry);
562 if (err) 604 if (err)
563 break; 605 break;
564 __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 606 __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
565 } 607 }
566 } else { 608 } else {
567 err = __br_mdb_add(net, br, entry, &pg); 609 err = __br_mdb_add(net, br, entry);
568 if (!err) 610 if (!err)
569 __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 611 __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
570 } 612 }
571 613
572 return err; 614 return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
584 if (!netif_running(br->dev) || br->multicast_disabled) 626 if (!netif_running(br->dev) || br->multicast_disabled)
585 return -EINVAL; 627 return -EINVAL;
586 628
587 memset(&ip, 0, sizeof(ip)); 629 __mdb_entry_to_br_ip(entry, &ip);
588 ip.vid = entry->vid;
589 ip.proto = entry->addr.proto;
590 if (ip.proto == htons(ETH_P_IP))
591 ip.u.ip4 = entry->addr.u.ip4;
592#if IS_ENABLED(CONFIG_IPV6)
593 else
594 ip.u.ip6 = entry->addr.u.ip6;
595#endif
596 630
597 spin_lock_bh(&br->multicast_lock); 631 spin_lock_bh(&br->multicast_lock);
598 mdb = mlock_dereference(br->mdb, br); 632 mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
662 entry->vid = v->vid; 696 entry->vid = v->vid;
663 err = __br_mdb_del(br, entry); 697 err = __br_mdb_del(br, entry);
664 if (!err) 698 if (!err)
665 __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 699 __br_mdb_notify(dev, p, entry, RTM_DELMDB);
666 } 700 }
667 } else { 701 } else {
668 err = __br_mdb_del(br, entry); 702 err = __br_mdb_del(br, entry);
669 if (!err) 703 if (!err)
670 __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 704 __br_mdb_notify(dev, p, entry, RTM_DELMDB);
671 } 705 }
672 706
673 return err; 707 return err;
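
The br_mdb rework replaces the synchronous switchdev_port_obj_add() return check with a completion callback: the caller allocates a br_mdb_complete_info, hangs it off the object as complete_priv, and br_mdb_complete() marks the port group offloaded and frees the context once the possibly deferred operation finishes. A generic sketch of that callback-owns-context shape (request, my_ctx and the synchronous submit() are illustrative stand-ins, not the switchdev API):

    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        void (*complete)(int err, void *priv);
        void *complete_priv;
    };

    struct my_ctx {
        int port;
    };

    static void my_complete(int err, void *priv)
    {
        struct my_ctx *ctx = priv;

        if (!err)
            printf("offloaded on port %d\n", ctx->port);
        free(ctx);                /* the callback owns the context */
    }

    static void submit(struct request *req)
    {
        /* a real backend would defer this; the sketch completes inline */
        req->complete(0, req->complete_priv);
    }

    int main(void)
    {
        struct my_ctx *ctx = malloc(sizeof(*ctx));
        struct request req;

        if (!ctx)
            return 1;             /* as in the patch: skip the add on OOM */
        ctx->port = 3;
        req.complete = my_complete;
        req.complete_priv = ctx;
        submit(&req);
        return 0;
    }
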
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a4c15df2b792..191ea66e4d92 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
283 rcu_assign_pointer(*pp, p->next); 283 rcu_assign_pointer(*pp, p->next);
284 hlist_del_init(&p->mglist); 284 hlist_del_init(&p->mglist);
285 del_timer(&p->timer); 285 del_timer(&p->timer);
286 br_mdb_notify(br->dev, p, RTM_DELMDB); 286 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
287 p->flags);
287 call_rcu_bh(&p->rcu, br_multicast_free_pg); 288 call_rcu_bh(&p->rcu, br_multicast_free_pg);
288 289
289 if (!mp->ports && !mp->mglist && 290 if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
705 if (unlikely(!p)) 706 if (unlikely(!p))
706 goto err; 707 goto err;
707 rcu_assign_pointer(*pp, p); 708 rcu_assign_pointer(*pp, p);
708 br_mdb_notify(br->dev, p, RTM_NEWMDB); 709 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
709 710
710found: 711found:
711 mod_timer(&p->timer, now + br->multicast_membership_interval); 712 mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br,
1461 hlist_del_init(&p->mglist); 1462 hlist_del_init(&p->mglist);
1462 del_timer(&p->timer); 1463 del_timer(&p->timer);
1463 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1464 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1464 br_mdb_notify(br->dev, p, RTM_DELMDB); 1465 br_mdb_notify(br->dev, port, group, RTM_DELMDB,
1466 p->flags);
1465 1467
1466 if (!mp->ports && !mp->mglist && 1468 if (!mp->ports && !mp->mglist &&
1467 netif_running(br->dev)) 1469 netif_running(br->dev))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1b5d145dfcbf..d9da857182ef 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
560 unsigned char flags); 560 unsigned char flags);
561void br_mdb_init(void); 561void br_mdb_init(void);
562void br_mdb_uninit(void); 562void br_mdb_uninit(void);
563void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 563void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
564 int type); 564 struct br_ip *group, int type, u8 flags);
565void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, 565void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
566 int type); 566 int type);
567 567
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8570bc7744c2..5a61f35412a0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
370 left - sizeof(struct ebt_entry_match) < m->match_size) 370 left - sizeof(struct ebt_entry_match) < m->match_size)
371 return -EINVAL; 371 return -EINVAL;
372 372
373 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 373 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
375 request_module("ebt_%s", m->u.name);
376 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
377 }
374 if (IS_ERR(match)) 378 if (IS_ERR(match))
375 return PTR_ERR(match); 379 return PTR_ERR(match);
376 m->u.match = match; 380 m->u.match = match;
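
Per the ebtables hunk, xt_find_match() can succeed with an extension from another family registered under the same name, or fail outright; in either case the code now requests the ebt_-prefixed module itself and retries once. A toy sketch of that look-up/load/retry shape (find_match and request_module_stub are stand-ins, not the xtables API):

    #include <stdio.h>
    #include <string.h>

    /* stand-in registry: pretend only "log" is loaded for this family */
    static int find_match(const char *name)
    {
        return strcmp(name, "log") == 0 ? 0 : -1;
    }

    /* stub: a real loader would modprobe and register the extension */
    static void request_module_stub(const char *name)
    {
        printf("modprobe ebt_%s\n", name);
    }

    int main(void)
    {
        const char *name = "nflog";

        /* look up, load on miss, then retry exactly once */
        if (find_match(name) < 0) {
            request_module_stub(name);
            if (find_match(name) < 0)
                fprintf(stderr, "no such match: %s\n", name);
        }
        return 0;
    }
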
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bcaa2a4..2bc5965fdd1e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
293} 293}
294EXPORT_SYMBOL(ceph_auth_create_authorizer); 294EXPORT_SYMBOL(ceph_auth_create_authorizer);
295 295
296void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 296void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
297 struct ceph_authorizer *a)
298{ 297{
299 mutex_lock(&ac->mutex); 298 a->destroy(a);
300 if (ac->ops && ac->ops->destroy_authorizer)
301 ac->ops->destroy_authorizer(ac, a);
302 mutex_unlock(&ac->mutex);
303} 299}
304EXPORT_SYMBOL(ceph_auth_destroy_authorizer); 300EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
305 301
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8d81bc..5f836f02ae36 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
 	struct ceph_auth_none_info *xi = ac->private;
 
 	xi->starting = true;
-	xi->built_authorizer = false;
 }
 
 static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
 	return xi->starting;
 }
 
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+					   struct ceph_none_authorizer *au)
+{
+	void *p = au->buf;
+	void *const end = p + sizeof(au->buf);
+	int ret;
+
+	ceph_encode_8_safe(&p, end, 1, e_range);
+	ret = ceph_entity_name_encode(ac->name, &p, end);
+	if (ret < 0)
+		return ret;
+
+	ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+	au->buf_len = p - (void *)au->buf;
+	dout("%s built authorizer len %d\n", __func__, au->buf_len);
+	return 0;
+
+e_range:
+	return -ERANGE;
+}
+
 static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
 {
 	return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
 	return result;
 }
 
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+	kfree(a);
+}
+
 /*
- * build an 'authorizer' with our entity_name and global_id.  we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id.  it is
+ * identical for all services we connect to.
  */
 static int ceph_auth_none_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
 	struct ceph_auth_handshake *auth)
 {
-	struct ceph_auth_none_info *ai = ac->private;
-	struct ceph_none_authorizer *au = &ai->au;
-	void *p, *end;
+	struct ceph_none_authorizer *au;
 	int ret;
 
-	if (!ai->built_authorizer) {
-		p = au->buf;
-		end = p + sizeof(au->buf);
-		ceph_encode_8(&p, 1);
-		ret = ceph_entity_name_encode(ac->name, &p, end - 8);
-		if (ret < 0)
-			goto bad;
-		ceph_decode_need(&p, end, sizeof(u64), bad2);
-		ceph_encode_64(&p, ac->global_id);
-		au->buf_len = p - (void *)au->buf;
-		ai->built_authorizer = true;
-		dout("built authorizer len %d\n", au->buf_len);
+	au = kmalloc(sizeof(*au), GFP_NOFS);
+	if (!au)
+		return -ENOMEM;
+
+	au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+	ret = ceph_auth_none_build_authorizer(ac, au);
+	if (ret) {
+		kfree(au);
+		return ret;
 	}
 
 	auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
 	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
 
 	return 0;
-
-bad2:
-	ret = -ERANGE;
-bad:
-	return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
-					      struct ceph_authorizer *a)
-{
-	/* nothing to do */
 }
 
 static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
 	.build_request = build_request,
 	.handle_reply = handle_reply,
 	.create_authorizer = ceph_auth_none_create_authorizer,
-	.destroy_authorizer = ceph_auth_none_destroy_authorizer,
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
 		return -ENOMEM;
 
 	xi->starting = true;
-	xi->built_authorizer = false;
 
 	ac->protocol = CEPH_AUTH_NONE;
 	ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce4b53f..62021535ae4a 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
  */
 
 struct ceph_none_authorizer {
+	struct ceph_authorizer base;
 	char buf[128];
 	int buf_len;
 	char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
 
 struct ceph_auth_none_info {
 	bool starting;
-	bool built_authorizer;
-	struct ceph_none_authorizer au;   /* we only need one; it's static */
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a315e662..a0905f04bd13 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 	return -EAGAIN;
 }
 
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+	struct ceph_x_authorizer *au = (void *)a;
+
+	ceph_x_authorizer_cleanup(au);
+	kfree(au);
+}
+
 static int ceph_x_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
 	struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
 	if (!au)
 		return -ENOMEM;
 
+	au->base.destroy = ceph_x_destroy_authorizer;
+
 	ret = ceph_x_build_authorizer(ac, th, au);
 	if (ret) {
 		kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
 	return ret;
 }
 
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a)
-{
-	struct ceph_x_authorizer *au = (void *)a;
-
-	ceph_x_authorizer_cleanup(au);
-	kfree(au);
-}
-
-
 static void ceph_x_reset(struct ceph_auth_client *ac)
 {
 	struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
 	.create_authorizer = ceph_x_create_authorizer,
 	.update_authorizer = ceph_x_update_authorizer,
 	.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
-	.destroy_authorizer = ceph_x_destroy_authorizer,
 	.invalidate_authorizer = ceph_x_invalidate_authorizer,
 	.reset = ceph_x_reset,
 	.destroy = ceph_x_destroy,
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3cf7397..21a5af904bae 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
 
 
 struct ceph_x_authorizer {
+	struct ceph_authorizer base;
 	struct ceph_crypto_key session_key;
 	struct ceph_buffer *buf;
 	unsigned int service;
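
The ceph hunks above all implement one refactor: the client-wide destroy_authorizer op, which needed the ceph_auth_client and its mutex, becomes a destroy callback embedded in struct ceph_authorizer itself, so an authorizer can be torn down without reaching back into the auth client. A minimal userspace sketch of that pattern follows; every name in it is illustrative, not the kernel API.

	#include <stdlib.h>

	/* Embedded-destructor pattern, as in the ceph_authorizer change above. */
	struct authorizer {
		void (*destroy)(struct authorizer *a);	/* set by the allocator */
	};

	struct none_authorizer {
		struct authorizer base;	/* first member, so the cast below is valid */
		char buf[128];
	};

	static void none_destroy(struct authorizer *a)
	{
		free((struct none_authorizer *)a);
	}

	static struct authorizer *none_create(void)
	{
		struct none_authorizer *au = calloc(1, sizeof(*au));

		if (!au)
			return NULL;
		au->base.destroy = none_destroy;
		return &au->base;
	}

	int main(void)
	{
		struct authorizer *a = none_create();

		if (a)
			a->destroy(a);	/* no auth client, ops table or lock needed */
		return 0;
	}
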
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1831f6353622..a5502898ea33 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -269,7 +269,7 @@ static void _ceph_msgr_exit(void)
 	}
 
 	BUG_ON(zero_page == NULL);
-	page_cache_release(zero_page);
+	put_page(zero_page);
 	zero_page = NULL;
 
 	ceph_msgr_slab_exit();
@@ -282,7 +282,7 @@ int ceph_msgr_init(void)
 
 	BUG_ON(zero_page != NULL);
 	zero_page = ZERO_PAGE(0);
-	page_cache_get(zero_page);
+	get_page(zero_page);
 
 	/*
 	 * The number of active work items is limited by the number of
@@ -1602,7 +1602,7 @@ static int write_partial_skip(struct ceph_connection *con)
 
 	dout("%s %p %d left\n", __func__, con, con->out_skip);
 	while (con->out_skip > 0) {
-		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+		size_t size = min(con->out_skip, (int) PAGE_SIZE);
 
 		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
 		if (ret <= 0)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d0103..40a53a70efdf 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
 	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
 	     atomic_read(&osd->o_ref) - 1);
 	if (atomic_dec_and_test(&osd->o_ref)) {
-		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
 		if (osd->o_auth.authorizer)
-			ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+			ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
 		kfree(osd);
 	}
 }
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
 	struct ceph_auth_handshake *auth = &o->o_auth;
 
 	if (force_new && auth->authorizer) {
-		ceph_auth_destroy_authorizer(ac, auth->authorizer);
+		ceph_auth_destroy_authorizer(auth->authorizer);
 		auth->authorizer = NULL;
 	}
 	if (!auth->authorizer) {
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index c7c220a736e5..6864007e64fc 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -56,7 +56,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
 		size_t bit = pl->room;
 		int ret;
 
-		memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
+		memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
 		       buf, bit);
 		pl->length += bit;
 		pl->room -= bit;
@@ -67,7 +67,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
 		return ret;
 	}
 
-	memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
+	memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
 	pl->length += len;
 	pl->room -= len;
 	return 0;
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 10297f7a89ba..00d2601407c5 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -95,19 +95,19 @@ int ceph_copy_user_to_page_vector(struct page **pages,
 				  loff_t off, size_t len)
 {
 	int i = 0;
-	int po = off & ~PAGE_CACHE_MASK;
+	int po = off & ~PAGE_MASK;
 	int left = len;
 	int l, bad;
 
 	while (left > 0) {
-		l = min_t(int, PAGE_CACHE_SIZE-po, left);
+		l = min_t(int, PAGE_SIZE-po, left);
 		bad = copy_from_user(page_address(pages[i]) + po, data, l);
 		if (bad == l)
 			return -EFAULT;
 		data += l - bad;
 		left -= l - bad;
 		po += l - bad;
-		if (po == PAGE_CACHE_SIZE) {
+		if (po == PAGE_SIZE) {
 			po = 0;
 			i++;
 		}
@@ -121,17 +121,17 @@ void ceph_copy_to_page_vector(struct page **pages,
 			      loff_t off, size_t len)
 {
 	int i = 0;
-	size_t po = off & ~PAGE_CACHE_MASK;
+	size_t po = off & ~PAGE_MASK;
 	size_t left = len;
 
 	while (left > 0) {
-		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+		size_t l = min_t(size_t, PAGE_SIZE-po, left);
 
 		memcpy(page_address(pages[i]) + po, data, l);
 		data += l;
 		left -= l;
 		po += l;
-		if (po == PAGE_CACHE_SIZE) {
+		if (po == PAGE_SIZE) {
 			po = 0;
 			i++;
 		}
@@ -144,17 +144,17 @@ void ceph_copy_from_page_vector(struct page **pages,
 				loff_t off, size_t len)
 {
 	int i = 0;
-	size_t po = off & ~PAGE_CACHE_MASK;
+	size_t po = off & ~PAGE_MASK;
 	size_t left = len;
 
 	while (left > 0) {
-		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+		size_t l = min_t(size_t, PAGE_SIZE-po, left);
 
 		memcpy(data, page_address(pages[i]) + po, l);
 		data += l;
 		left -= l;
 		po += l;
-		if (po == PAGE_CACHE_SIZE) {
+		if (po == PAGE_SIZE) {
 			po = 0;
 			i++;
 		}
@@ -168,25 +168,25 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
  */
 void ceph_zero_page_vector_range(int off, int len, struct page **pages)
 {
-	int i = off >> PAGE_CACHE_SHIFT;
+	int i = off >> PAGE_SHIFT;
 
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 
 	dout("zero_page_vector_page %u~%u\n", off, len);
 
 	/* leading partial page? */
 	if (off) {
-		int end = min((int)PAGE_CACHE_SIZE, off + len);
+		int end = min((int)PAGE_SIZE, off + len);
 		dout("zeroing %d %p head from %d\n", i, pages[i],
 		     (int)off);
 		zero_user_segment(pages[i], off, end);
 		len -= (end - off);
 		i++;
 	}
-	while (len >= PAGE_CACHE_SIZE) {
+	while (len >= PAGE_SIZE) {
 		dout("zeroing %d %p len=%d\n", i, pages[i], len);
-		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
-		len -= PAGE_CACHE_SIZE;
+		zero_user_segment(pages[i], 0, PAGE_SIZE);
+		len -= PAGE_SIZE;
 		i++;
 	}
 	/* trailing partial page? */
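
The ceph changes above are part of the tree-wide removal of the PAGE_CACHE_* macros, which in this kernel were straight aliases of PAGE_SIZE/PAGE_MASK/PAGE_SHIFT, so the substitution is mechanical. The arithmetic the loops rely on, splitting a byte offset into a page index and an in-page offset, can be checked standalone (assuming 4 KiB pages):

	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long off = 9000;

		/* which page of a page vector, and where inside it */
		unsigned long idx = off >> PAGE_SHIFT;	/* 2 */
		unsigned long po  = off & ~PAGE_MASK;	/* 9000 - 8192 = 808 */

		printf("page %lu, offset %lu, room %lu\n",
		       idx, po, PAGE_SIZE - po);
		return 0;
	}
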
diff --git a/net/core/dev.c b/net/core/dev.c
index b9bcbe77d913..77a71cd68535 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4439,6 +4439,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->is_fou = 0;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a57bd17805b4..94acfc89ad97 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -603,6 +603,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	const struct net_device_ops *ops;
 	int err;
 
+	np->dev = ndev;
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
 	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
@@ -669,7 +670,6 @@ int netpoll_setup(struct netpoll *np)
 		goto unlock;
 	}
 	dev_hold(ndev);
-	np->dev = ndev;
 
 	if (netdev_master_upper_dev_get(ndev)) {
 		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
@@ -770,7 +770,6 @@ int netpoll_setup(struct netpoll *np)
 	return 0;
 
 put:
-	np->dev = NULL;
 	dev_put(ndev);
 unlock:
 	rtnl_unlock();
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1c8c87..e561f9f07d6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 		__skb_push(skb, offset);
 		err = __vlan_insert_tag(skb, skb->vlan_proto,
 					skb_vlan_tag_get(skb));
-		if (err)
+		if (err) {
+			__skb_pull(skb, offset);
 			return err;
+		}
+
 		skb->protocol = skb->vlan_proto;
 		skb->mac_len += VLAN_HLEN;
-		__skb_pull(skb, offset);
 
 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+		__skb_pull(skb, offset);
 	}
 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
 	return 0;
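
The skb_vlan_push() fix above makes the __skb_push(skb, offset)/__skb_pull(skb, offset) pair balanced on every exit, including the error return, and keeps the checksum fixup inside the pushed view of the packet. A toy, non-kernel sketch of that discipline, with a plain buffer standing in for the skb:

	#include <stdio.h>

	/* Toy stand-in for skb data pointer adjustment; illustrative only. */
	struct buf { char *data; };

	static void push(struct buf *b, int n) { b->data -= n; }
	static void pull(struct buf *b, int n) { b->data += n; }

	static int insert_tag(struct buf *b) { (void)b; return -1; /* simulate failure */ }

	static int vlan_push_like(struct buf *b, int offset)
	{
		int err;

		push(b, offset);		/* expose the headers */
		err = insert_tag(b);
		if (err) {
			pull(b, offset);	/* restore the pointer on failure too */
			return err;
		}
		/* ... updates that need the pushed view go here ... */
		pull(b, offset);		/* and restore it on success */
		return 0;
	}

	int main(void)
	{
		char storage[64];
		struct buf b = { storage + 32 };
		char *before = b.data;

		vlan_push_like(&b, 14);
		printf("pointer restored: %s\n", b.data == before ? "yes" : "no");
		return 0;
	}
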
diff --git a/net/core/sock.c b/net/core/sock.c
index b67b9aedb230..7e73c26b6bb4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -221,7 +221,8 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
+  "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -237,7 +238,8 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
+  "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -253,7 +255,8 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
+  "clock-AF_MAX"
 };
 
 /*
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
 		if (!fld.daddr) {
 			fld.daddr = fld.saddr;
 
-			err = -EADDRNOTAVAIL;
 			if (dev_out)
 				dev_put(dev_out);
+			err = -EINVAL;
 			dev_out = init_net.loopback_dev;
+			if (!dev_out->dn_ptr)
+				goto out;
+			err = -EADDRNOTAVAIL;
 			dev_hold(dev_out);
 			if (!fld.daddr) {
 				fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
 	if (dev_out == NULL)
 		goto out;
 	dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+	if (!dn_db)
+		goto e_inval;
 	/* Possible improvement - check all devices for local addr */
 	if (dn_dev_islocal(dev_out, fld.daddr)) {
 		dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
 		dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
+		if (!dev_out->dn_ptr)
+			goto e_inval;
 		fld.flowidn_oif = dev_out->ifindex;
 		if (res.fi)
 			dn_fib_info_put(res.fi);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8a9246deccfe..63566ec54794 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
 		if (!prim) {
-			pr_warn("%s: bug: prim == NULL\n", __func__);
+			/* if the device has been deleted, we don't perform
+			 * address promotion
+			 */
+			if (!in_dev->dead)
+				pr_warn("%s: bug: prim == NULL\n", __func__);
 			return;
 		}
 		if (iprim && iprim != prim) {
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 5a94aea280d3..a39068b4a4d9 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -203,6 +203,9 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
 	 */
 	NAPI_GRO_CB(skb)->encap_mark = 0;
 
+	/* Flag this frame as already having an outer encap header */
+	NAPI_GRO_CB(skb)->is_fou = 1;
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
@@ -368,6 +371,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 	 */
 	NAPI_GRO_CB(skb)->encap_mark = 0;
 
+	/* Flag this frame as already having an outer encap header */
+	NAPI_GRO_CB(skb)->is_fou = 1;
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index c47539d04b88..6a5bd4317866 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -150,6 +150,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
 		goto out;
 
+	/* We can only support GRE_CSUM if we can track the location of
+	 * the GRE header.  In the case of FOU/GUE we cannot because the
+	 * outer UDP header displaces the GRE header leaving us in a state
+	 * of limbo.
+	 */
+	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
+		goto out;
+
 	type = greh->protocol;
 
 	rcu_read_lock();
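
Taken together, the dev.c, fou.c and gre_offload.c hunks wire up one per-packet flag: dev_gro_receive() clears is_fou for each packet, the FOU/GUE receive paths set it, and the GRE GRO path refuses GRE_CSUM aggregation when it is set, because the outer UDP header leaves the GRE header offset unknown. The clear-set-honor shape, with assumed field names, looks like this in isolation:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative per-packet GRO control block; field names are assumed. */
	struct gro_cb {
		unsigned int encap_mark:1;
		unsigned int is_fou:1;
	};

	static void gro_receive_start(struct gro_cb *cb)
	{
		cb->encap_mark = 0;
		cb->is_fou = 0;		/* must be cleared for every new packet */
	}

	static void fou_gro_receive(struct gro_cb *cb)
	{
		cb->is_fou = 1;		/* outer UDP encap header present */
	}

	static bool gre_gro_can_aggregate(const struct gro_cb *cb, bool gre_csum)
	{
		/* GRE_CSUM needs the GRE header offset, unknown under FOU/GUE */
		return !(gre_csum && cb->is_fou);
	}

	int main(void)
	{
		struct gro_cb cb;

		gro_receive_start(&cb);
		fou_gro_receive(&cb);
		printf("aggregate GRE+CSUM under FOU? %s\n",
		       gre_gro_can_aggregate(&cb, true) ? "yes" : "no");
		return 0;
	}
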
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 31936d387cfd..af5d1f38217f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -862,9 +862,16 @@ static void __gre_tunnel_init(struct net_device *dev)
 	dev->hw_features	|= GRE_FEATURES;
 
 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
-		/* TCP offload with GRE SEQ is not supported. */
-		dev->features    |= NETIF_F_GSO_SOFTWARE;
-		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		/* TCP offload with GRE SEQ is not supported, nor
+		 * can we support 2 levels of outer headers requiring
+		 * an update.
+		 */
+		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
+		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
+			dev->features    |= NETIF_F_GSO_SOFTWARE;
+			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		}
+
 		/* Can use a lockless transmit, unless we generate
 		 * output sequences
 		 */
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80dc32a2..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
 		return ret;
 	}
 
+	ret = arptable_filter_table_init(&init_net);
+	if (ret) {
+		unregister_pernet_subsys(&arptable_filter_net_ops);
+		kfree(arpfilter_ops);
+	}
+
 	return ret;
 }
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c62299d717..60398a9370e7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-				   unsigned int flags, u16 type,
-				   bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+			    unsigned int flags, u16 type,
+			    bool nopolicy, bool noxfrm, bool will_cache)
 {
 	struct rtable *rt;
 
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 
 	return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 		 */
 		if (fi && res->prefixlen < 4)
 			fi = NULL;
+	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+		   (orig_oif != dev_out->ifindex)) {
+		/* For local routes that require a particular output interface
+		 * we do not want to cache the result.  Caching the result
+		 * causes incorrect behaviour when there are multiple source
+		 * addresses on the interface, the end result being that if the
+		 * intended recipient is waiting on that interface for the
+		 * packet he won't receive it because it will be delivered on
+		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+		 * be set to the loopback interface as well.
+		 */
+		fi = NULL;
 	}
 
 	fnhe = NULL;
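
The long comment in the route.c hunk explains the condition being added: a local route looked up with an explicit output interface must not seed the route cache, or later senders get a loopback-flavoured cached route and IP_PKTINFO reports the wrong ipi_ifindex. The predicate reduces to a few lines; RTN_LOCAL's value is taken from the uapi headers and the helper name here is made up:

	#include <stdbool.h>
	#include <stdio.h>

	#define RTN_LOCAL 2	/* value from linux/rtnetlink.h */

	/* May this output route be cached? (condition from __mkroute_output) */
	static bool may_cache_local_route(unsigned int type, int orig_oif,
					  int dev_ifindex)
	{
		/* local routes requested via a specific oif must not be cached */
		return !(type == RTN_LOCAL && orig_oif != 0 &&
			 orig_oif != dev_ifindex);
	}

	int main(void)
	{
		printf("%d\n", may_cache_local_route(RTN_LOCAL, 3, 1)); /* 0 */
		printf("%d\n", may_cache_local_route(RTN_LOCAL, 0, 1)); /* 1 */
		return 0;
	}
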
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..c124c3c12f7c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
 
+	tcp_skb_collapse_tstamp(prev, skb);
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 
 	shinfo = skb_shinfo(skb);
 	if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-	    between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+	    !before(shinfo->tskey, prior_snd_una) &&
+	    before(shinfo->tskey, tcp_sk(sk)->snd_una))
 		__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
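
The tcp_ack_tstamp() change above swaps between(tskey, prior_snd_una, snd_una - 1) for an explicit half-open check. One reading of the motivation: when an ACK advances nothing, snd_una == prior_snd_una and snd_una - 1 wraps the closed interval around the whole sequence space, firing spurious SCM_TSTAMP_ACK events. The wrap-safe helpers behave like this in userspace:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	/* Wrap-safe u32 sequence compare, as in the kernel's before()/after() */
	static bool before(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) < 0;
	}

	/* tskey acked iff prior_snd_una <= tskey < snd_una (mod 2^32) */
	static bool tskey_acked(uint32_t tskey, uint32_t prior_snd_una,
				uint32_t snd_una)
	{
		return !before(tskey, prior_snd_una) && before(tskey, snd_una);
	}

	int main(void)
	{
		/* empty window: nothing newly acked */
		printf("%d\n", tskey_acked(100, 200, 200));	/* 0 */
		/* window straddling the wraparound still works */
		printf("%d\n", tskey_acked(0xfffffff0u, 0xffffffe0u, 0x10u)); /* 1 */
		return 0;
	}
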
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc015cd19..441ae9da3a23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
 	return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+			     const struct sk_buff *next_skb)
+{
+	const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
+	u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+
+	if (unlikely(tsflags)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+		shinfo->tx_flags |= tsflags;
+		shinfo->tskey = next_shinfo->tskey;
+	}
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+	tcp_skb_collapse_tstamp(skb, next_skb);
+
 	sk_wmem_free_skb(sk, next_skb);
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..a2e7f55a1f61 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ found:
 
 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
 		spin_lock(&hslot2->lock);
-		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-					 &hslot2->head);
+		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+		    sk->sk_family == AF_INET6)
+			hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &hslot2->head);
+		else
+			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &hslot2->head);
 		hslot2->count++;
 		spin_unlock(&hslot2->lock);
 	}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1afcf81..8ec4b3089e20 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				 struct inet6_ifaddr *ifp)
-{
-	if (ifp->rt) {
-		u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
-		if (tb_id != ifp->rt->rt6i_table->tb6_id) {
-			ip6_del_rt(ifp->rt);
-			ifp->rt = NULL;
-		}
-	}
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				 struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
 static int fixup_permanent_addr(struct inet6_dev *idev,
 				struct inet6_ifaddr *ifp)
 {
-	l3mdev_check_host_rt(idev, ifp);
-
 	if (!ifp->rt) {
 		struct rt6_info *rt;
 
@@ -3255,6 +3229,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			   void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	int run_pending = 0;
 	int err;
@@ -3303,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			break;
 
 		if (event == NETDEV_UP) {
+			/* restore routes for permanent addresses */
+			addrconf_permanent_addr(dev);
+
 			if (!addrconf_qdisc_ok(dev)) {
 				/* device is not ready yet. */
 				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3336,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			run_pending = 1;
 		}
 
-		/* restore routes for permanent addresses */
-		addrconf_permanent_addr(dev);
-
 		switch (dev->type) {
 #if IS_ENABLED(CONFIG_IPV6_SIT)
 		case ARPHRD_SIT:
@@ -3413,6 +3388,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		if (idev)
 			addrconf_type_change(dev, event);
 		break;
+
+	case NETDEV_CHANGEUPPER:
+		info = ptr;
+
+		/* flush all routes if dev is linked to or unlinked from
+		 * an L3 master device (e.g., VRF)
+		 */
+		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+			addrconf_ifdown(dev, 0);
 	}
 
 	return NOTIFY_OK;
@@ -3438,6 +3422,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
 		ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+	return ipv6_addr_type(addr) &
+	       (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
 	struct net *net = dev_net(dev);
@@ -3495,7 +3485,8 @@ restart:
 			 * address is retained on a down event
 			 */
 			if (!keep_addr ||
-			    !(ifa->flags & IFA_F_PERMANENT)) {
+			    !(ifa->flags & IFA_F_PERMANENT) ||
+			    addr_is_local(&ifa->addr)) {
 				hlist_del_init_rcu(&ifa->addr_lst);
 				goto restart;
 			}
@@ -3539,17 +3530,23 @@ restart:
 
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+		struct rt6_info *rt = NULL;
+
 		addrconf_del_dad_work(ifa);
 
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+		    !addr_is_local(&ifa->addr)) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
 			if (!(ifa->flags & IFA_F_NODAD))
 				ifa->flags |= IFA_F_TENTATIVE;
+
+			rt = ifa->rt;
+			ifa->rt = NULL;
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3560,6 +3557,9 @@ restart:
 
 		spin_unlock_bh(&ifa->lock);
 
+		if (rt)
+			ip6_del_rt(rt);
+
 		if (state != INET6_IFADDR_STATE_DEAD) {
 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -5325,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 			if (rt)
 				ip6_del_rt(rt);
 		}
-		dst_hold(&ifp->rt->dst);
-
-		ip6_del_rt(ifp->rt);
-
+		if (ifp->rt) {
+			dst_hold(&ifp->rt->dst);
+			ip6_del_rt(ifp->rt);
+		}
 		rt_genid_bump_ipv6(net);
 		break;
 	}
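
addr_is_local(), introduced above, keeps the keep-address-on-ifdown logic from retaining link-local and loopback addresses, which are only meaningful while the link they were formed on exists. A rough userspace equivalent of the test follows; the kernel's actual classification goes through ipv6_addr_type(), so treat this as an approximation:

	#include <stdbool.h>
	#include <stdio.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	/* Approximate link-local/loopback test against RFC 4291 prefixes. */
	static bool addr_is_local(const struct in6_addr *a)
	{
		/* fe80::/10 link-local */
		if (a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0x80)
			return true;
		/* ::1 loopback */
		return IN6_IS_ADDR_LOOPBACK(a);
	}

	int main(void)
	{
		struct in6_addr a;

		inet_pton(AF_INET6, "fe80::1", &a);
		printf("fe80::1 local: %d\n", addr_is_local(&a));
		inet_pton(AF_INET6, "2001:db8::1", &a);
		printf("2001:db8::1 local: %d\n", addr_is_local(&a));
		return 0;
	}
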
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..9dd3882fe6bf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet->inet_dport;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->flowlabel = np->flow_label;
+
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+		fl6->flowi6_oif = np->mcast_oif;
+
+	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+	struct ip6_flowlabel *flowlabel = NULL;
+	struct in6_addr *final_p, final;
+	struct ipv6_txoptions *opt;
+	struct dst_entry *dst;
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	int err = 0;
+
+	if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+		flowlabel = fl6_sock_lookup(sk, np->flow_label);
+		if (!flowlabel)
+			return -EINVAL;
+	}
+	ip6_datagram_flow_key_init(&fl6, sk);
+
+	rcu_read_lock();
+	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+	final_p = fl6_update_dst(&fl6, opt, &final);
+	rcu_read_unlock();
+
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto out;
+	}
+
+	if (fix_sk_saddr) {
+		if (ipv6_addr_any(&np->saddr))
+			np->saddr = fl6.saddr;
+
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			sk->sk_v6_rcv_saddr = fl6.saddr;
+			inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+			if (sk->sk_prot->rehash)
+				sk->sk_prot->rehash(sk);
+		}
+	}
+
+	ip6_dst_store(sk, dst,
+		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+		      &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+		      &np->saddr :
+#endif
+		      NULL);
+
+out:
+	fl6_sock_release(flowlabel);
+	return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		return;
+
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock	*inet = inet_sk(sk);
 	struct ipv6_pinfo	*np = inet6_sk(sk);
-	struct in6_addr		*daddr, *final_p, final;
-	struct dst_entry	*dst;
-	struct flowi6		fl6;
-	struct ip6_flowlabel	*flowlabel = NULL;
-	struct ipv6_txoptions	*opt;
+	struct in6_addr		*daddr;
 	int			addr_type;
 	int			err;
+	__be32			fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
-	memset(&fl6, 0, sizeof(fl6));
-	if (np->sndflow) {
-		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-			if (!flowlabel)
-				return -EINVAL;
-		}
-	}
+	if (np->sndflow)
+		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ ipv4_connected:
 	}
 
 	sk->sk_v6_daddr = *daddr;
-	np->flow_label = fl6.flowlabel;
+	np->flow_label = fl6_flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ ipv4_connected:
 	 *	destination cache for it.
 	 */
 
-	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = sk->sk_v6_daddr;
-	fl6.saddr = np->saddr;
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_dport = inet->inet_dport;
-	fl6.fl6_sport = inet->inet_sport;
-
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-		fl6.flowi6_oif = np->mcast_oif;
-
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-	rcu_read_lock();
-	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-	final_p = fl6_update_dst(&fl6, opt, &final);
-	rcu_read_unlock();
-
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-	err = 0;
-	if (IS_ERR(dst)) {
-		err = PTR_ERR(dst);
+	err = ip6_datagram_dst_update(sk, true);
+	if (err)
 		goto out;
-	}
-
-	/* source address lookup done in ip6_dst_lookup */
-
-	if (ipv6_addr_any(&np->saddr))
-		np->saddr = fl6.saddr;
-
-	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-		sk->sk_v6_rcv_saddr = fl6.saddr;
-		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-		if (sk->sk_prot->rehash)
-			sk->sk_prot->rehash(sk);
-	}
-
-	ip6_dst_store(sk, dst,
-		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-		      &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-		      &np->saddr :
-#endif
-		      NULL);
 
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
-	fl6_sock_release(flowlabel);
 	return err;
 }
 
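
ip6_datagram_release_cb() above only redoes the route lookup when the cached dst has been obsoleted and its ->check() no longer validates the socket's cookie, i.e. the usual generation-cookie cache test. Stripped of the networking types, the idea reduces to the following sketch (all names invented for illustration):

	#include <stdio.h>
	#include <stdbool.h>

	/* Generation-cookie validity test, as used for cached dst entries. */
	struct route_cache {
		unsigned int genid;	/* generation when the entry was created */
	};

	static unsigned int table_genid;	/* bumped on every routing change */

	static bool cache_valid(const struct route_cache *rc)
	{
		return rc->genid == table_genid;
	}

	static void refresh(struct route_cache *rc)
	{
		/* ... redo the lookup here ... */
		rc->genid = table_genid;
	}

	int main(void)
	{
		struct route_cache rc = { .genid = table_genid };

		table_genid++;		/* a route changed somewhere */
		if (!cache_valid(&rc))
			refresh(&rc);	/* same idea as ip6_datagram_dst_update */
		printf("valid now: %d\n", cache_valid(&rc));
		return 0;
	}
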
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9428345d3a07..bc972e7152c7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1090,8 +1090,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 			int getfrag(void *from, char *to, int offset, int len,
 			int odd, struct sk_buff *skb),
 			void *from, int length, int hh_len, int fragheaderlen,
-			int transhdrlen, int mtu, unsigned int flags,
-			const struct flowi6 *fl6)
+			int exthdrlen, int transhdrlen, int mtu,
+			unsigned int flags, const struct flowi6 *fl6)
 
 {
 	struct sk_buff *skb;
@@ -1116,7 +1116,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		skb_put(skb, fragheaderlen + transhdrlen);
 
 		/* initialize network header pointer */
-		skb_reset_network_header(skb);
+		skb_set_network_header(skb, exthdrlen);
 
 		/* initialize protocol header pointer */
 		skb->transport_header = skb->network_header + fragheaderlen;
@@ -1358,7 +1358,7 @@ emsgsize:
 	    (rt->dst.dev->features & NETIF_F_UFO) &&
 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
-					  hh_len, fragheaderlen,
+					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
 		if (err)
 			goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eb2ac4bb09ce..1f20345cbc97 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -252,12 +252,12 @@ static int ip6_tnl_create2(struct net_device *dev)
 
 	t = netdev_priv(dev);
 
+	dev->rtnl_link_ops = &ip6_link_ops;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out;
 
 	strcpy(t->parms.name, dev->name);
-	dev->rtnl_link_ops = &ip6_link_ops;
 
 	dev_hold(dev);
 	ip6_tnl_link(ip6n, t);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed446639219c..d916d6ab9ad2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 	return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-				      struct net_device *dev,
-				      int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+			       struct net_device *dev,
+			       int flags)
 {
 	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
 	return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+	struct dst_entry *dst;
+
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
 			sk->sk_bound_dev_if, sk->sk_mark);
+
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+		return;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		ip6_datagram_dst_update(sk, false);
+	bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8125931106be..6bc5c664fa46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
 	.backlog_rcv	   = __udpv6_queue_rcv_skb,
+	.release_cb	   = ip6_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.rehash		   = udp_v6_rehash,
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ec22078b0914..42de4ccd159f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
 
-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
 	if (!pskb_may_pull(skb, 4))
 		goto discard;
 
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
 	session_id = ntohl(*((__be32 *) ptr));
 	ptr += 4;
 
@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
 		pr_debug("%s: ip recv\n", tunnel->name);
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 6b54ff3ff4cb..cd479903d943 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -136,12 +136,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
 
-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
 	if (!pskb_may_pull(skb, 4))
 		goto discard;
 
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
 	session_id = ntohl(*((__be32 *) ptr));
 	ptr += 4;
 
@@ -169,6 +168,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
 		pr_debug("%s: ip recv\n", tunnel->name);
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
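
Both l2tp hunks fix the same bug: pskb_may_pull() may reallocate the skb head, so optr/ptr taken from skb->data before the call can dangle afterwards, and the patch re-derives them after every pull check. A userspace caricature, with realloc() standing in for the head reallocation:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Caricature: like pskb_may_pull(), ensure() may move the buffer. */
	struct pkt { char *data; size_t len; };

	static int ensure(struct pkt *p, size_t need)
	{
		char *n;

		if (p->len >= need)
			return 0;
		n = realloc(p->data, need);	/* may move the data */
		if (!n)
			return -1;
		memset(n + p->len, 0, need - p->len);
		p->data = n;
		p->len = need;
		return 0;
	}

	int main(void)
	{
		struct pkt p = { malloc(4), 4 };
		char *ptr = p.data;	/* BAD: taken before ensure() */

		if (ensure(&p, 4096) == 0)
			ptr = p.data;	/* GOOD: re-read after ensure() */
		printf("%p\n", (void *)ptr);
		free(p.data);
		return 0;
	}
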
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 283981108ca8..74142d07ad31 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -343,8 +343,10 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
 				     struct ieee80211_chanctx *ctx,
 				     const struct cfg80211_chan_def *chandef)
 {
-	if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
+	if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+		ieee80211_recalc_chanctx_min_def(local, ctx);
 		return;
+	}
 
 	WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
 
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 804575ff7af5..422003540169 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1719,6 +1719,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
+void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
 				 struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5b6aec1a0630..002244bca948 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 	const u8 *target_addr, *orig_addr;
 	const u8 *da;
 	u8 target_flags, ttl, flags;
-	u32 orig_sn, target_sn, lifetime, target_metric;
+	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 	bool reply = false;
 	bool forward = true;
 	bool root_is_gate;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d20bab5c146c..861b93ffbe92 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -67,6 +67,7 @@
 
 static const struct rhashtable_params sta_rht_params = {
 	.nelem_hint = 3, /* start small */
+	.insecure_elasticity = true, /* Disable chain-length checks. */
 	.automatic_shrinking = true,
 	.head_offset = offsetof(struct sta_info, hash_node),
 	.key_offset = offsetof(struct sta_info, addr),
@@ -258,11 +259,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 }
 
 /* Caller must hold local->sta_mtx */
-static void sta_info_hash_add(struct ieee80211_local *local,
-			      struct sta_info *sta)
+static int sta_info_hash_add(struct ieee80211_local *local,
+			     struct sta_info *sta)
 {
-	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
-			       sta_rht_params);
+	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+				      sta_rht_params);
 }
 
 static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -524,7 +525,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
 	/* make the station visible */
-	sta_info_hash_add(local, sta);
+	err = sta_info_hash_add(local, sta);
+	if (err)
+		goto out_drop_sta;
 
 	list_add_tail_rcu(&sta->list, &local->sta_list);
 
@@ -557,6 +560,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
  out_remove:
 	sta_info_hash_del(local, sta);
 	list_del_rcu(&sta->list);
+ out_drop_sta:
 	local->num_sta--;
 	synchronize_net();
 	__cleanup_single_sta(sta);
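
sta_info_hash_add() now propagates the return value of rhashtable_insert_fast(), which can fail, and the caller gains an out_drop_sta unwind label placed before the list manipulation it must not undo. The kernel's reverse-order goto unwind idiom, reduced to a runnable stub with invented step names:

	#include <stdio.h>

	/* Error-unwind labels in reverse order of setup (kernel goto idiom). */
	static int hash_add(void)   { return -1; /* simulate insert failure */ }
	static void hash_del(void)  { puts("hash_del"); }
	static void list_add(void)  { puts("list_add"); }
	static void list_del(void)  { puts("list_del"); }
	static int later_step(void) { return 0; }

	static int insert_finish(void)
	{
		int err = hash_add();

		if (err)
			goto out_drop;		/* nothing to undo yet */
		list_add();
		err = later_step();
		if (err)
			goto out_remove;
		return 0;

	out_remove:
		hash_del();
		list_del();
	out_drop:
		/* drop refcounts, free the entry */
		return err;
	}

	int main(void)
	{
		printf("err=%d\n", insert_finish());
		return 0;
	}
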
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 053f5c4fa495..62193f4bc37b 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -377,7 +377,6 @@ DECLARE_EWMA(signal, 1024, 8)
  * @uploaded: set to true when sta is uploaded to the driver
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
- * @beacon_loss_count: number of times beacon loss has triggered
  * @rcu_head: RCU head used for freeing this station struct
  * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
  *	taken from HT/VHT capabilities or VHT operating mode notification
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index c9eeb3f12808..a29ea813b7d5 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,7 +4,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015 - 2016 Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -15,6 +15,7 @@
 #include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
+#include "rate.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT	(15 * HZ)
@@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
 	/* IEEE802.11ac-2013 Table E-4 */
 	u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
 	struct cfg80211_chan_def uc = sta->tdls_chandef;
-	enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+	enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
 	int i;
 
 	/* only support upgrading non-narrow channels up to 80Mhz */
@@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
 	if (max_width > NL80211_CHAN_WIDTH_80)
 		max_width = NL80211_CHAN_WIDTH_80;
 
-	if (uc.width == max_width)
+	if (uc.width >= max_width)
 		return;
 	/*
 	 * Channel usage constrains in the IEEE802.11ac-2013 specification only
@@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
 	for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
 		if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
 			uc.center_freq1 = centers_80mhz[i];
+			uc.center_freq2 = 0;
 			uc.width = NL80211_CHAN_WIDTH_80;
 			break;
 		}
@@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
 		return;
 
 	/* proceed to downgrade the chandef until usable or the same */
-	while (uc.width > max_width &&
+	while (uc.width > max_width ||
 	       !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
 					      sdata->wdev.iftype))
 		ieee80211_chandef_downgrade(&uc);
@@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
 	return ret;
 }
 
-static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
+					 struct sta_info *sta)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *conf;
 	struct ieee80211_chanctx *ctx;
+	enum nl80211_chan_width width;
+	struct ieee80211_supported_band *sband;
 
 	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
 					 lockdep_is_held(&local->chanctx_mtx));
 	if (conf) {
+		width = conf->def.width;
+		sband = local->hw.wiphy->bands[conf->def.chan->band];
 		ctx = container_of(conf, struct ieee80211_chanctx, conf);
 		ieee80211_recalc_chanctx_chantype(local, ctx);
+
+		/* if width changed and a peer is given, update its BW */
+		if (width != conf->def.width && sta &&
+		    test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
+			enum ieee80211_sta_rx_bandwidth bw;
+
+			bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
+			bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+			if (bw != sta->sta.bandwidth) {
+				sta->sta.bandwidth = bw;
+				rate_control_rate_update(local, sband, sta,
+							 IEEE80211_RC_BW_CHANGED);
+				/*
+				 * if a TDLS peer BW was updated, we need to
+				 * recalc the chandef width again, to get the
+				 * correct chanctx min_def
+				 */
+				ieee80211_recalc_chanctx_chantype(local, ctx);
+			}
+		}
+
 	}
 	mutex_unlock(&local->chanctx_mtx);
 }
@@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 		break;
 	}
 
-	iee80211_tdls_recalc_chanctx(sdata);
-
 	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, peer);
 	if (!sta) {
@@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 			break;
 		}
 
+		iee80211_tdls_recalc_chanctx(sdata, sta);
 		iee80211_tdls_recalc_ht_protection(sdata, sta);
 
 		set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
@@ -1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 		iee80211_tdls_recalc_ht_protection(sdata, NULL);
 		mutex_unlock(&local->sta_mtx);
 
-		iee80211_tdls_recalc_chanctx(sdata);
+		iee80211_tdls_recalc_chanctx(sdata, NULL);
 		break;
 	default:
 		ret = -ENOTSUPP;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 62ad5321257d..21f6602395f7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1116,11 +1116,15 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
 		reset_agg_timer = true;
 	} else {
 		queued = true;
+		if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+			clear_sta_flag(tx->sta, WLAN_STA_SP);
+			ps_dbg(tx->sta->sdata,
+			       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
+			       tx->sta->sta.addr, tx->sta->sta.aid);
+		}
 		info->control.vif = &tx->sdata->vif;
 		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
-				IEEE80211_TX_CTL_NO_PS_BUFFER |
-				IEEE80211_TX_STATUS_EOSP;
+		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
 		__skb_queue_tail(&tid_tx->pending, skb);
 		if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
 			purge_skb = __skb_dequeue(&tid_tx->pending);
@@ -1247,7 +1251,8 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
 	struct txq_info *txqi;
 	u8 ac;
 
-	if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
+	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
 		goto tx_normal;
 
 	if (!ieee80211_is_data(hdr->frame_control))
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 89e04d55aa18..e590e2ef9eaf 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -319,7 +319,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
 	return IEEE80211_STA_RX_BW_80;
 }
 
-static enum ieee80211_sta_rx_bandwidth
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
+{
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+	u32 cap_width;
+
+	if (!vht_cap->vht_supported) {
+		if (!sta->sta.ht_cap.ht_supported)
+			return NL80211_CHAN_WIDTH_20_NOHT;
+
+		return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+				NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
+	}
+
+	cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+	if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
+		return NL80211_CHAN_WIDTH_160;
+	else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+		return NL80211_CHAN_WIDTH_80P80;
+
+	return NL80211_CHAN_WIDTH_80;
+}
+
+enum ieee80211_sta_rx_bandwidth
 ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
 {
 	switch (width) {
@@ -347,10 +370,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
 
 	bw = ieee80211_sta_cap_rx_bw(sta);
 	bw = min(bw, sta->cur_max_bandwidth);
-
-	/* do not cap the BW of TDLS WIDER_BW peers by the bss */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
-		bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+	bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
 
 	return bw;
 }
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index b18c5ed42d95..0b80a7140cc4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
 	if (!dev)
 		return ERR_PTR(-ENODEV);
 
+	if (IS_ERR(dev))
+		return dev;
+
 	/* The caller is holding rtnl anyways, so release the dev reference */
 	dev_put(dev);
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9356ef..7cc1d9c22a9f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize=*ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize = *ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
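
A standalone sketch of the option-walk pattern the two hunks above harden (illustrative userspace code, not the kernel parser): without the new "length < 2" guard, a lone option-kind byte at the very end of the header lets the parser read the opsize byte past the buffer.

#include <stdio.h>

/* Walk TCP options in buf[0..length): kind 0 ends the list, kind 1 is a
 * one-byte NOP, everything else is kind, opsize, data[opsize - 2]. */
static void walk_tcp_options(const unsigned char *buf, int length)
{
	const unsigned char *ptr = buf;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case 0:			/* TCPOPT_EOL */
			return;
		case 1:			/* TCPOPT_NOP */
			length--;
			continue;
		default:
			if (length < 2)		/* no room for an opsize byte */
				return;
			opsize = *ptr++;
			if (opsize < 2)		/* "silly options" */
				return;
			if (opsize > length)	/* option overruns the header */
				return;
			printf("option %d, %d bytes\n", opcode, opsize);
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

int main(void)
{
	/* an MSS option followed by a truncated trailing option kind */
	unsigned char opts[] = { 2, 4, 0x05, 0xb4, 8 };

	walk_tcp_options(opts, sizeof(opts));
	return 0;
}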
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08c02ab..330ebd600f25 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->portid) {
+	if (nlk->portid && nlk->bound) {
 		struct netlink_notify n = {
 			.net = sock_net(sk),
 			.protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e9dd47b2a85b..879185fe183f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 				      true);
 			memcpy(&flow_key->ipv6.addr.src, masked,
 			       sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 					     NULL, &flags)
 			       != NEXTHDR_ROUTING);
 
-		set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+		set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 			      recalc_csum);
 		memcpy(&flow_key->ipv6.addr.dst, masked,
 		       sizeof(flow_key->ipv6.addr.dst));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 1b9d286756be..b5fea1101faa 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1ecfa710ca98..18d0becbc46d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
 	i->ifindex = mreq->mr_ifindex;
 	i->alen = mreq->mr_alen;
 	memcpy(i->addr, mreq->mr_address, i->alen);
+	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
 	i->count = 1;
 	i->next = po->mclist;
 	po->mclist = i;
@@ -4151,7 +4152,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
-		WARN(1, "Tx-ring is not supported.\n");
+		net_warn_ratelimited("Tx-ring is not supported.\n");
 		goto out;
 	}
 
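
The memset added to packet_mc_add() closes an information leak: mr_address is copied into a fixed-size array, and any stale bytes beyond mr_alen could later be reported back to userspace. The pattern in isolation, with hypothetical names and sizes:

#include <string.h>

#define ADDR_MAX 32	/* hypothetical, stands in for sizeof(i->addr) */

struct mc_entry {
	unsigned char addr[ADDR_MAX];
	unsigned short alen;
};

/* Copy a variable-length address into a fixed-size slot and clear the
 * remainder so no prior heap contents survive in the unused tail. */
static void mc_set_addr(struct mc_entry *e,
			const unsigned char *addr, unsigned short alen)
{
	if (alen > ADDR_MAX)
		alen = ADDR_MAX;
	e->alen = alen;
	memcpy(e->addr, addr, alen);
	memset(e->addr + alen, 0, sizeof(e->addr) - alen);
}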
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8246fd..6641bcf7c185 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8764970f0c24..310cabce2311 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
 		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
 		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-		dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
 		/* Advertise flow control */
 		if (ic->i_flowctl) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 977fb86065b7..abc8cc805e8d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -796,7 +796,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 
 		addr = kmap_atomic(sg_page(&frag->f_sg));
 
-		src = addr + frag_off;
+		src = addr + frag->f_sg.offset + frag_off;
 		dst = (void *)map->m_page_addrs[map_page] + map_off;
 		for (k = 0; k < to_copy; k += 8) {
 			/* Record ports that became uncongested, ie
diff --git a/net/rds/page.c b/net/rds/page.c
index 616f21f4e7d7..e2b5a5832d3d 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -135,8 +135,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 		if (rem->r_offset != 0)
 			rds_stats_inc(s_page_remainder_hit);
 
-		rem->r_offset += bytes;
-		if (rem->r_offset == PAGE_SIZE) {
+		rem->r_offset += ALIGN(bytes, 8);
+		if (rem->r_offset >= PAGE_SIZE) {
 			__free_page(rem->r_page);
 			rem->r_page = NULL;
 		}
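
The rds/page.c change works because the allocator hands out 8-byte-aligned chunks: the cursor must advance by the rounded-up size, and can then overshoot the page end, which is why == becomes >=. The rounding itself is ordinary power-of-two arithmetic; a small self-check, assuming the kernel's ALIGN() semantics:

#include <assert.h>

/* Round x up to the next multiple of the power-of-two a,
 * the same idea as the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	assert(ALIGN_UP(0, 8) == 0);
	assert(ALIGN_UP(1, 8) == 8);
	assert(ALIGN_UP(8, 8) == 8);
	assert(ALIGN_UP(13, 8) == 16);
	/* a cursor advanced by ALIGN_UP(bytes, 8) can land past the
	 * page boundary, hence the >= PAGE_SIZE test in the hunk */
	return 0;
}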
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c35024207..80742edea96f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	if (validate)
 		skb = validate_xmit_skb_list(skb, dev);
 
-	if (skb) {
+	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
 		HARD_TX_UNLOCK(dev, txq);
+	} else {
+		spin_lock(root_lock);
+		return qdisc_qlen(q);
 	}
 	spin_lock(root_lock);
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 97745351d58c..9844fe573029 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -705,7 +705,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
 	/* Check whether this chunk and all the rest of pending data will fit
 	 * or delay in hopes of bundling a full sized packet.
 	 */
-	if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+	if (chunk->skb->len + q->out_qlen >
+	    transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
 		/* Enough data queued to fill a packet */
 		return SCTP_XMIT_OK;
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8d3d3625130e..084718f9b3da 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 				 * sender MUST assure that at least one T3-rtx
 				 * timer is running.
 				 */
-				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-					sctp_transport_reset_timers(transport);
+				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+					sctp_transport_reset_t3_rtx(transport);
+					transport->last_time_sent = jiffies;
+				}
 			}
 			break;
 
@@ -924,8 +926,10 @@
 			error = sctp_outq_flush_rtx(q, packet,
 						    rtx_timeout, &start_timer);
 
-			if (start_timer)
-				sctp_transport_reset_timers(transport);
+			if (start_timer) {
+				sctp_transport_reset_t3_rtx(transport);
+				transport->last_time_sent = jiffies;
+			}
 
 			/* This can happen on COOKIE-ECHO resend. Only
 			 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			list_add_tail(&chunk->transmitted_list,
 				      &transport->transmitted);
 
-			sctp_transport_reset_timers(transport);
+			sctp_transport_reset_t3_rtx(transport);
+			transport->last_time_sent = jiffies;
 
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7f0bf798205b..56f364d8f932 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
 			return SCTP_ERROR_RSRC_LOW;
 
 		/* Start the heartbeat timer. */
-		if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
-			sctp_transport_hold(peer);
+		sctp_transport_reset_hb_timer(peer);
 		asoc->new_transport = peer;
 		break;
 	case SCTP_PARAM_DEL_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7fe56d0acabf..41b081a64752 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 			     sctp_cmd_seq_t *commands,
 			     gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t);
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct sock *sk = asoc->base.sk;
 	struct net *net = sock_net(sk);
+	u32 elapsed, timeout;
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@
 		goto out_unlock;
 	}
 
+	/* Check if we should still send the heartbeat or reschedule */
+	elapsed = jiffies - transport->last_time_sent;
+	timeout = sctp_transport_timeout(transport);
+	if (elapsed < timeout) {
+		elapsed = timeout - elapsed;
+		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+			sctp_transport_hold(transport);
+		goto out_unlock;
+	}
+
 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
 			   asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 					     0);
 
 		/* Update the hb timer to resend a heartbeat every rto */
-		sctp_cmd_hb_timer_update(commands, transport);
+		sctp_transport_reset_hb_timer(transport);
 	}
 
 	if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
 	 * hold a reference on the transport to make sure none of
 	 * the needed data structures go away.
 	 */
-	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-			sctp_transport_hold(t);
-	}
+	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+		sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t)
-{
-	/* Update the heartbeat timer. */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of an HEARTBEAT ACK. */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 				  struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
 	/* Update the heartbeat timer. */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
+	sctp_transport_reset_hb_timer(t);
 
 	if (was_unconfirmed && asoc->peer.transport_count == 1)
 		sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
 		case SCTP_CMD_HB_TIMER_UPDATE:
 			t = cmd->obj.transport;
-			sctp_cmd_hb_timer_update(commands, t);
+			sctp_transport_reset_hb_timer(t);
 			break;
 
 		case SCTP_CMD_HB_TIMERS_STOP:
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 9b6b48c7524e..81b86678be4d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
 	if (!mod_timer(&transport->T3_rtx_timer,
 		       jiffies + transport->rto))
 		sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+	unsigned long expires;
 
 	/* When a data chunk is sent, reset the heartbeat interval. */
-	if (!mod_timer(&transport->hb_timer,
-		       sctp_transport_timeout(transport)))
-		sctp_transport_hold(transport);
+	expires = jiffies + sctp_transport_timeout(transport);
+	if (time_before(transport->hb_timer.expires, expires) &&
+	    !mod_timer(&transport->hb_timer,
+		       expires + prandom_u32_max(transport->rto)))
+		sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
 	/* RTO + timer slack +/- 50% of RTO */
-	unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+	unsigned long timeout = trans->rto >> 1;
 
 	if (trans->state != SCTP_UNCONFIRMED &&
 	    trans->state != SCTP_PF)
 		timeout += trans->hbinterval;
 
-	return timeout + jiffies;
+	return timeout;
 }
 
 /* Reset transport variables to their initial values */
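
Taken together, the sctp hunks stop rearming the heartbeat timer on every DATA chunk: senders merely record last_time_sent, and when the timer fires, the handler checks whether a full interval has really elapsed and pushes the expiry forward if not. A minimal sketch of that elapsed-versus-timeout test, using seconds in place of jiffies (names are illustrative):

#include <stdbool.h>
#include <time.h>

/* hypothetical transport state, mirroring last_time_sent above */
struct xport {
	time_t last_send;
	time_t interval;
};

/* Called when the heartbeat timer fires: returns true if a probe is
 * really due; otherwise reports how long to re-arm the timer for. */
static bool heartbeat_due(const struct xport *t, time_t now, time_t *rearm)
{
	time_t elapsed = now - t->last_send;

	if (elapsed < t->interval) {
		*rearm = t->interval - elapsed;	/* reschedule, don't probe */
		return false;
	}
	return true;
}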
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8c6bc795f060..15612ffa8d57 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1728,8 +1728,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
 		return 0;
 	}
 
-	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
-	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+	first = snd_buf->page_base >> PAGE_SHIFT;
+	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
 	rqstp->rq_enc_pages_num = last - first + 1 + 1;
 	rqstp->rq_enc_pages
 		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
@@ -1775,10 +1775,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 	status = alloc_enc_pages(rqstp);
 	if (status)
 		return status;
-	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+	first = snd_buf->page_base >> PAGE_SHIFT;
 	inpages = snd_buf->pages + first;
 	snd_buf->pages = rqstp->rq_enc_pages;
-	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+	snd_buf->page_base -= first << PAGE_SHIFT;
 	/*
 	 * Give the tail its own page, in case we need extra space in the
 	 * head when wrapping:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d94a8e1e9f05..244245bcbbd2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -78,6 +78,7 @@ krb5_encrypt(
 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);
 
+	skcipher_request_set_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -115,6 +116,7 @@ krb5_decrypt(
 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);
 
+	skcipher_request_set_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -465,7 +467,7 @@ encryptor(struct scatterlist *sg, void *data)
 	page_pos = desc->pos - outbuf->head[0].iov_len;
 	if (page_pos >= 0 && page_pos < outbuf->page_len) {
 		/* pages are not in place: */
-		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
 		in_page = desc->pages[i];
 	} else {
 		in_page = sg_page(sg);
@@ -946,7 +948,8 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
 		return PTR_ERR(hmac);
 	}
 
-	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+		       GFP_KERNEL);
 	if (!desc) {
 		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
 			__func__, kctx->gk5e->cksum_name);
@@ -1012,7 +1015,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
 		return PTR_ERR(hmac);
 	}
 
-	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+		       GFP_KERNEL);
 	if (!desc) {
 		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
 			__func__, kctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 71341ccb9890..65427492b1c9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -451,7 +451,8 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
 		goto out_err_free_hmac;
 
 
-	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+		       GFP_KERNEL);
 	if (!desc) {
 		dprintk("%s: failed to allocate hash descriptor for '%s'\n",
 			__func__, ctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 765088e4ad84..a737c2da0837 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -79,9 +79,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
 	len -= buf->head[0].iov_len;
 	if (len <= buf->page_len) {
 		unsigned int last = (buf->page_base + len - 1)
-					>>PAGE_CACHE_SHIFT;
+					>>PAGE_SHIFT;
 		unsigned int offset = (buf->page_base + len - 1)
-					& (PAGE_CACHE_SIZE - 1);
+					& (PAGE_SIZE - 1);
 		ptr = kmap_atomic(buf->pages[last]);
 		pad = *(ptr + offset);
 		kunmap_atomic(ptr);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 008c25d1b9f9..553bf95f7003 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -881,7 +881,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
 	char *kaddr;
 	ssize_t ret = -ENOMEM;
 
-	if (count >= PAGE_CACHE_SIZE)
+	if (count >= PAGE_SIZE)
 		goto out_slow;
 
 	page = find_or_create_page(mapping, 0, GFP_KERNEL);
@@ -892,7 +892,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
 	ret = cache_do_downcall(kaddr, buf, count, cd);
 	kunmap(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return ret;
 out_slow:
 	return cache_slow_downcall(buf, count, cd);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 31789ef3e614..fc48eca21fd2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1390,8 +1390,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 	int err;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = RPCAUTH_GSSMAGIC;
 	sb->s_op = &s_ops;
 	sb->s_d_op = &simple_dentry_operations;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 2df87f78e518..de70c78025d7 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -96,8 +96,8 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
 	if (base || xdr->page_base) {
 		pglen -= base;
 		base += xdr->page_base;
-		ppage += base >> PAGE_CACHE_SHIFT;
-		base &= ~PAGE_CACHE_MASK;
+		ppage += base >> PAGE_SHIFT;
+		base &= ~PAGE_MASK;
 	}
 	do {
 		char *kaddr;
@@ -113,7 +113,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
 			}
 		}
 
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 		kaddr = kmap_atomic(*ppage);
 		if (base) {
 			len -= base;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4c1b53..6bdb3865212d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
  * Note: the addresses pgto_base and pgfrom_base are both calculated in
  *       the same way:
  *	if a memory area starts at byte 'base' in page 'pages[i]',
- *	then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ *	then its address is given as (i << PAGE_SHIFT) + base
  * Also note: pgfrom_base must be < pgto_base, but the memory areas
  * they point to may overlap.
  */
@@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
 	pgto_base += len;
 	pgfrom_base += len;
 
-	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
-	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
+	pgto = pages + (pgto_base >> PAGE_SHIFT);
+	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
 
-	pgto_base &= ~PAGE_CACHE_MASK;
-	pgfrom_base &= ~PAGE_CACHE_MASK;
+	pgto_base &= ~PAGE_MASK;
+	pgfrom_base &= ~PAGE_MASK;
 
 	do {
 		/* Are any pointers crossing a page boundary? */
 		if (pgto_base == 0) {
-			pgto_base = PAGE_CACHE_SIZE;
+			pgto_base = PAGE_SIZE;
 			pgto--;
 		}
 		if (pgfrom_base == 0) {
-			pgfrom_base = PAGE_CACHE_SIZE;
+			pgfrom_base = PAGE_SIZE;
 			pgfrom--;
 		}
 
@@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
 	char *vto;
 	size_t copy;
 
-	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
-	pgbase &= ~PAGE_CACHE_MASK;
+	pgto = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
 
 	for (;;) {
-		copy = PAGE_CACHE_SIZE - pgbase;
+		copy = PAGE_SIZE - pgbase;
 		if (copy > len)
 			copy = len;
 
@@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
 			break;
 
 		pgbase += copy;
-		if (pgbase == PAGE_CACHE_SIZE) {
+		if (pgbase == PAGE_SIZE) {
 			flush_dcache_page(*pgto);
 			pgbase = 0;
 			pgto++;
@@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 	char *vfrom;
 	size_t copy;
 
-	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
-	pgbase &= ~PAGE_CACHE_MASK;
+	pgfrom = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
 
 	do {
-		copy = PAGE_CACHE_SIZE - pgbase;
+		copy = PAGE_SIZE - pgbase;
 		if (copy > len)
 			copy = len;
 
@@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 		kunmap_atomic(vfrom);
 
 		pgbase += copy;
-		if (pgbase == PAGE_CACHE_SIZE) {
+		if (pgbase == PAGE_SIZE) {
 			pgbase = 0;
 			pgfrom++;
 		}
@@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
 	if (base < buf->page_len) {
 		subbuf->page_len = min(buf->page_len - base, len);
 		base += buf->page_base;
-		subbuf->page_base = base & ~PAGE_CACHE_MASK;
-		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
+		subbuf->page_base = base & ~PAGE_MASK;
+		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
 		len -= subbuf->page_len;
 		base = 0;
 	} else {
@@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
 		todo -= avail_here;
 
 		base += buf->page_base;
-		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
-		base &= ~PAGE_CACHE_MASK;
-		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+		ppages = buf->pages + (base >> PAGE_SHIFT);
+		base &= ~PAGE_MASK;
+		avail_page = min_t(unsigned int, PAGE_SIZE - base,
 				   avail_here);
 		c = kmap(*ppages) + base;
 
@@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
 		}
 
 			avail_page = min(avail_here,
-					 (unsigned int) PAGE_CACHE_SIZE);
+					 (unsigned int) PAGE_SIZE);
 		}
 		base = buf->page_len;  /* align to start of tail */
 	}
@@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 		if (page_len > len)
 			page_len = len;
 		len -= page_len;
-		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - page_offset;
+		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
+		i = (offset + buf->page_base) >> PAGE_SHIFT;
+		thislen = PAGE_SIZE - page_offset;
 		do {
 			if (thislen > page_len)
 				thislen = page_len;
@@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 			page_len -= thislen;
 			i++;
 			page_offset = 0;
-			thislen = PAGE_CACHE_SIZE;
+			thislen = PAGE_SIZE;
 		} while (page_len != 0);
 		offset = 0;
 	}
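
All of the PAGE_CACHE_* hunks above are a mechanical rename (the PAGE_CACHE_ macros were defined equal to their PAGE_ counterparts), but the arithmetic they wrap is the same throughout: a byte offset splits into a page index via >> PAGE_SHIFT and an intra-page offset via & ~PAGE_MASK, PAGE_MASK being the high bits. A minimal demonstration, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* high bits, as in the kernel */

int main(void)
{
	unsigned long base = 3 * PAGE_SIZE + 123;

	unsigned long page_index  = base >> PAGE_SHIFT;	/* which page */
	unsigned long page_offset = base & ~PAGE_MASK;	/* byte within it */

	printf("index %lu, offset %lu\n", page_index, page_offset); /* 3, 123 */
	return 0;
}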
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 2b9b98f1c2ff..b7e01d88bdc5 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev,
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
 			   err, attr->id);
+	if (attr->complete)
+		attr->complete(dev, err, attr->complete_priv);
 }
 
 static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 			   err, obj->id);
+	if (obj->complete)
+		obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
 			   err, obj->id);
+	if (obj->complete)
+		obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_del_defer(struct net_device *dev,
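
The three switchdev hunks add the same contract: a request deferred to process context may carry an optional completion callback plus an opaque pointer, and the callback is always invoked with the final error once the deferred work has run, success or failure. The shape of that contract in miniature (all names hypothetical):

/* a deferred request carrying an optional completion callback */
struct deferred_req {
	void (*complete)(int err, void *priv);	/* may be NULL */
	void *complete_priv;
};

static int do_operation(struct deferred_req *req)
{
	(void)req;
	return 0;	/* stand-in for the real attr/obj work */
}

/* runs later, in process context */
static void deferred_run(struct deferred_req *req)
{
	int err = do_operation(req);

	/* report the outcome even on error, so the submitter
	 * waiting on the callback is never left hanging */
	if (req->complete)
		req->complete(err, req->complete_priv);
}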
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
 	if (err)
 		goto out_nametbl;
 
+	INIT_LIST_HEAD(&tn->dist_queue);
 	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5504d63503df..eff58dc53aa1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
 	spinlock_t nametbl_lock;
 	struct name_table *nametbl;
 
+	/* Name dist queue */
+	struct list_head dist_queue;
+
 	/* Topology subscription server */
 	struct tipc_server *topsrv;
 	atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ebe9d0ff6e9e..6b626a64b517 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
 	struct distr_item i;
 	u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
 	kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct distr_queue_item *e, *tmp;
+
+	spin_lock_bh(&tn->nametbl_lock);
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+		if (e->node != addr)
+			continue;
+		list_del(&e->next);
+		kfree(e);
+	}
+	spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
 	struct publication *publ, *tmp;
 
 	list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
 		tipc_publ_purge(net, publ, addr);
+	tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@
  * tipc_named_add_backlog - add a failed name table update to the backlog
  *
  */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+				   u32 type, u32 node)
 {
 	struct distr_queue_item *e;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	unsigned long now = get_jiffies_64();
 
 	e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@
 	e->node = node;
 	e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
 	memcpy(e, i, sizeof(*i));
-	list_add_tail(&e->next, &tipc_dist_queue);
+	list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@
 void tipc_named_process_backlog(struct net *net)
 {
 	struct distr_queue_item *e, *tmp;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	char addr[16];
 	unsigned long now = get_jiffies_64();
 
-	list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
 		if (time_after(e->expires, now)) {
 			if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
 				continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
 		node = msg_orignode(msg);
 		while (count--) {
 			if (!tipc_update_nametbl(net, item, node, mtype))
-				tipc_named_add_backlog(item, mtype, node);
+				tipc_named_add_backlog(net, item, mtype, node);
 			item++;
 		}
 		kfree_skb(skb);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb440e7..56214736fe88 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
 	 * qp_handle.
 	 */
 	if (vmci_handle_is_invalid(e_payload->handle) ||
-	    vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
 		return;
 
 	/* We don't ask for delayed CBs when we subscribe to this event (we
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
 	/* Retrieve the head sk_buff from the socket's receive queue. */
 	err = 0;
 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
-	if (err)
-		return err;
-
 	if (!skb)
-		return -EAGAIN;
+		return err;
 
 	dg = (struct vmci_datagram *)skb->data;
 	if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.4.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c924260b3d..056a7307862b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 	struct wireless_dev *wdev;
 	struct cfg80211_beacon_registration *reg, *tmp;
 
-	if (state != NETLINK_URELEASE)
+	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
 		return NOTIFY_DONE;
 
 	rcu_read_lock();
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 502c9fc8db85..b820cc96a3bc 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -76,16 +76,10 @@ HOSTLOADLIBES_offwaketime += -lelf
 HOSTLOADLIBES_spintest += -lelf
 HOSTLOADLIBES_map_perf_test += -lelf -lrt
 
-# point this to your LLVM backend with bpf support
-LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
-
-# asm/sysreg.h inline assmbly used by it is incompatible with llvm.
-# But, ehere is not easy way to fix it, so just exclude it since it is
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But, there is no easy way to fix it, so just exclude it since it is
 # useless for BPF samples.
 $(obj)/%.o: $(src)/%.c
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
 		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
-	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
+		-O2 -emit-llvm -c $< -o -| llc -march=bpf -filetype=obj -o $@
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 9363500131a7..7904a2a493de 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -82,6 +82,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
82#define PT_REGS_FP(x) ((x)->bp) 82#define PT_REGS_FP(x) ((x)->bp)
83#define PT_REGS_RC(x) ((x)->ax) 83#define PT_REGS_RC(x) ((x)->ax)
84#define PT_REGS_SP(x) ((x)->sp) 84#define PT_REGS_SP(x) ((x)->sp)
85#define PT_REGS_IP(x) ((x)->ip)
85 86
86#elif defined(__s390x__) 87#elif defined(__s390x__)
87 88
@@ -94,6 +95,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
94#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */ 95#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
95#define PT_REGS_RC(x) ((x)->gprs[2]) 96#define PT_REGS_RC(x) ((x)->gprs[2])
96#define PT_REGS_SP(x) ((x)->gprs[15]) 97#define PT_REGS_SP(x) ((x)->gprs[15])
98#define PT_REGS_IP(x) ((x)->ip)
97 99
98#elif defined(__aarch64__) 100#elif defined(__aarch64__)
99 101
@@ -106,6 +108,30 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
106#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */ 108#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
107#define PT_REGS_RC(x) ((x)->regs[0]) 109#define PT_REGS_RC(x) ((x)->regs[0])
108#define PT_REGS_SP(x) ((x)->sp) 110#define PT_REGS_SP(x) ((x)->sp)
111#define PT_REGS_IP(x) ((x)->pc)
112
113#elif defined(__powerpc__)
114
115#define PT_REGS_PARM1(x) ((x)->gpr[3])
116#define PT_REGS_PARM2(x) ((x)->gpr[4])
117#define PT_REGS_PARM3(x) ((x)->gpr[5])
118#define PT_REGS_PARM4(x) ((x)->gpr[6])
119#define PT_REGS_PARM5(x) ((x)->gpr[7])
120#define PT_REGS_RC(x) ((x)->gpr[3])
121#define PT_REGS_SP(x) ((x)->sp)
122#define PT_REGS_IP(x) ((x)->nip)
109 123
110#endif 124#endif
125
126#ifdef __powerpc__
127#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
128#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
129#else
130#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
131 bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
132#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
133 bpf_probe_read(&(ip), sizeof(ip), \
134 (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
135#endif
136
111#endif 137#endif
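[editor's note] The PT_REGS_IP() additions and the BPF_KPROBE_READ_RET_IP()/BPF_KRETPROBE_READ_RET_IP() pair keep the samples below arch-neutral: powerpc exposes the probed caller's address in the link register (ctx->link), while the other arches must bpf_probe_read() it from the stack or frame pointer. A userspace mock of the x86-style path; pt_regs and the helpers here are stand-ins, not the real BPF API:

    #include <stdio.h>
    #include <string.h>

    /* Mocked x86-64-ish pt_regs, just enough for the macros below. */
    struct pt_regs { long ip; long sp; };

    #define PT_REGS_IP(x) ((x)->ip)

    /* bpf_probe_read() stand-in: a plain memcpy in userspace. */
    #define probe_read(dst, sz, src) memcpy(dst, src, sz)

    /* On kprobe entry the return address sits where the stack pointer
     * points; powerpc would instead take it straight from ctx->link. */
    #define KPROBE_READ_RET_IP(ip, ctx) \
        ({ probe_read(&(ip), sizeof(ip), (void *)(ctx)->sp); })

    int main(void)
    {
        long ret_addr = 0x401234;
        struct pt_regs regs = { .ip = 0x400800, .sp = (long)&ret_addr };
        long ip = 0;

        KPROBE_READ_RET_IP(ip, &regs);
        printf("probed ip=%#lx, caller=%#lx\n", PT_REGS_IP(&regs), ip);
        return 0;
    }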
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 95af56ec5739..3147377e8fd3 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -17,6 +17,7 @@
17#include <linux/bpf.h> 17#include <linux/bpf.h>
18#include <string.h> 18#include <string.h>
19#include <time.h> 19#include <time.h>
20#include <sys/resource.h>
20#include "libbpf.h" 21#include "libbpf.h"
21#include "bpf_load.h" 22#include "bpf_load.h"
22 23
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
index 4b27619d91a4..ce0167d09cdc 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest_kern.c
@@ -34,7 +34,7 @@ struct bpf_map_def SEC("maps") stackmap = {
34#define PROG(foo) \ 34#define PROG(foo) \
35int foo(struct pt_regs *ctx) \ 35int foo(struct pt_regs *ctx) \
36{ \ 36{ \
37 long v = ctx->ip, *val; \ 37 long v = PT_REGS_IP(ctx), *val; \
38\ 38\
39 val = bpf_map_lookup_elem(&my_map, &v); \ 39 val = bpf_map_lookup_elem(&my_map, &v); \
40 bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \ 40 bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index 09c1adc27d42..6d6eefd0d465 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
27 long init_val = 1; 27 long init_val = 1;
28 long *value; 28 long *value;
29 29
30 /* x64/s390x specific: read ip of kfree_skb caller. 30 /* read ip of kfree_skb caller.
31 * non-portable version of __builtin_return_address(0) 31 * non-portable version of __builtin_return_address(0)
32 */ 32 */
33 bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx)); 33 BPF_KPROBE_READ_RET_IP(loc, ctx);
34 34
35 value = bpf_map_lookup_elem(&my_map, &loc); 35 value = bpf_map_lookup_elem(&my_map, &loc);
36 if (value) 36 if (value)
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index ac4671420cf1..6dd8e384de96 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -40,7 +40,7 @@ int bpf_prog2(struct pt_regs *ctx)
40 long ip = 0; 40 long ip = 0;
41 41
42 /* get ip address of kmem_cache_alloc_node() caller */ 42 /* get ip address of kmem_cache_alloc_node() caller */
43 bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); 43 BPF_KRETPROBE_READ_RET_IP(ip, ctx);
44 44
45 struct pair v = { 45 struct pair v = {
46 .val = bpf_ktime_get_ns(), 46 .val = bpf_ktime_get_ns(),
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index e000f44e37b8..c1b7ef3e24c1 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -650,7 +650,7 @@ int main(int argc, char **argv)
650 } 650 }
651 651
652 hdr = fopen(headername, "w"); 652 hdr = fopen(headername, "w");
653 if (!out) { 653 if (!hdr) {
654 perror(headername); 654 perror(headername);
655 exit(1); 655 exit(1);
656 } 656 }
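[editor's note] The asn1_compiler change is a one-identifier copy-paste fix: after fopen(headername) the code tested the earlier `out` handle, so a failed header open sailed past the check and crashed later. The corrected pattern, reproduced standalone:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        FILE *out = fopen("/tmp/out.c", "w");
        if (!out) {
            perror("/tmp/out.c");
            exit(1);
        }

        FILE *hdr = fopen("/nonexistent-dir/out.h", "w");
        if (!hdr) {            /* the fix: test hdr, not out */
            perror("out.h");
            fclose(out);
            exit(1);
        }

        fclose(hdr);
        fclose(out);
        return 0;
    }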
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 023cc4cad5c1..626f3bb24c55 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
104 */ 104 */
105void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus) 105void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
106{ 106{
107 struct hdac_stream *s; 107 struct hdac_stream *s, *_s;
108 struct hdac_ext_stream *stream; 108 struct hdac_ext_stream *stream;
109 struct hdac_bus *bus = ebus_to_hbus(ebus); 109 struct hdac_bus *bus = ebus_to_hbus(ebus);
110 110
111 while (!list_empty(&bus->stream_list)) { 111 list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
112 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
113 stream = stream_to_hdac_ext_stream(s); 112 stream = stream_to_hdac_ext_stream(s);
114 snd_hdac_ext_stream_decouple(ebus, stream, false); 113 snd_hdac_ext_stream_decouple(ebus, stream, false);
115 list_del(&s->list); 114 list_del(&s->list);
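[editor's note] The free-all loop above becomes list_for_each_entry_safe(), whose second cursor caches the next node before the body deletes and frees the current one. A self-contained analog with a minimal list implementation; the real macros live in include/linux/list.h:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* Deletion-safe iteration: cache the next entry before the body
     * may free the current one. */
    #define list_for_each_entry_safe(pos, n, head, member)              \
        for (pos = container_of((head)->next, typeof(*pos), member),    \
             n = container_of(pos->member.next, typeof(*pos), member);  \
             &pos->member != (head);                                    \
             pos = n, n = container_of(n->member.next, typeof(*n), member))

    struct stream { int id; struct list_head list; };

    int main(void)
    {
        struct list_head bus = { &bus, &bus };
        struct stream *s, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
            s = malloc(sizeof(*s));
            s->id = i;
            list_add_tail(&s->list, &bus);
        }
        list_for_each_entry_safe(s, tmp, &bus, list) {
            printf("freeing stream %d\n", s->id);
            list_del(&s->list);
            free(s);    /* safe: tmp was cached beforehand */
        }
        return 0;
    }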
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index d1a4d6973330..03c9872c31cf 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -299,13 +299,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
299int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid, 299int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
300 int parm) 300 int parm)
301{ 301{
302 int val; 302 unsigned int cmd, val;
303 303
304 if (codec->regmap) 304 cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
305 regcache_cache_bypass(codec->regmap, true); 305 if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
306 val = snd_hdac_read_parm(codec, nid, parm); 306 return -1;
307 if (codec->regmap)
308 regcache_cache_bypass(codec->regmap, false);
309 return val; 307 return val;
310} 308}
311EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached); 309EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index fb96aead8257..607bbeaebddf 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -20,6 +20,7 @@
20#include <sound/core.h> 20#include <sound/core.h>
21#include <sound/hdaudio.h> 21#include <sound/hdaudio.h>
22#include <sound/hda_i915.h> 22#include <sound/hda_i915.h>
23#include <sound/hda_register.h>
23 24
24static struct i915_audio_component *hdac_acomp; 25static struct i915_audio_component *hdac_acomp;
25 26
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
97} 98}
98EXPORT_SYMBOL_GPL(snd_hdac_display_power); 99EXPORT_SYMBOL_GPL(snd_hdac_display_power);
99 100
101#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
102 ((pci)->device == 0x0c0c) || \
103 ((pci)->device == 0x0d0c) || \
104 ((pci)->device == 0x160c))
105
100/** 106/**
101 * snd_hdac_get_display_clk - Get CDCLK in kHz 107 * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
102 * @bus: HDA core bus 108 * @bus: HDA core bus
103 * 109 *
104 * This function is supposed to be used only by a HD-audio controller 110 * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
 105 * driver that needs the interaction with i915 graphics. 111 * depend on the GPU. Two Extended Mode registers EM4 (M value) and EM5 (N value)
112 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
113 * BCLK = CDCLK * M / N
114 * The values will be lost when the display power well is disabled and need to
115 * be restored to avoid abnormal playback speed.
106 * 116 *
 107 * This function queries CDCLK value in kHz from the graphics driver and 117 * Call this function at initialization and when changing the power well,
 108 * returns the value. A negative code is returned in error. 118 * as well as from the ELD notifier on hotplug.
109 */ 119 */
110int snd_hdac_get_display_clk(struct hdac_bus *bus) 120void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
111{ 121{
112 struct i915_audio_component *acomp = bus->audio_component; 122 struct i915_audio_component *acomp = bus->audio_component;
123 struct pci_dev *pci = to_pci_dev(bus->dev);
124 int cdclk_freq;
125 unsigned int bclk_m, bclk_n;
126
127 if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
128 return; /* only for i915 binding */
129 if (!CONTROLLER_IN_GPU(pci))
130 return; /* only HSW/BDW */
131
132 cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
133 switch (cdclk_freq) {
134 case 337500:
135 bclk_m = 16;
136 bclk_n = 225;
137 break;
138
139 case 450000:
140 default: /* default CDCLK 450MHz */
141 bclk_m = 4;
142 bclk_n = 75;
143 break;
144
145 case 540000:
146 bclk_m = 4;
147 bclk_n = 90;
148 break;
149
150 case 675000:
151 bclk_m = 8;
152 bclk_n = 225;
153 break;
154 }
113 155
114 if (!acomp || !acomp->ops) 156 snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
115 return -ENODEV; 157 snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
116
117 return acomp->ops->get_cdclk_freq(acomp->dev);
118} 158}
119EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); 159EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
120 160
121/* There is a fixed mapping between audio pin node and display port 161/* There is a fixed mapping between audio pin node and display port
122 * on current Intel platforms: 162 * on current Intel platforms:
@@ -267,6 +307,18 @@ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops
267} 307}
268EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier); 308EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
269 309
310/* check whether intel graphics is present */
311static bool i915_gfx_present(void)
312{
313 static struct pci_device_id ids[] = {
314 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
315 .class = PCI_BASE_CLASS_DISPLAY << 16,
316 .class_mask = 0xff << 16 },
317 {}
318 };
319 return pci_dev_present(ids);
320}
321
270/** 322/**
271 * snd_hdac_i915_init - Initialize i915 audio component 323 * snd_hdac_i915_init - Initialize i915 audio component
272 * @bus: HDA core bus 324 * @bus: HDA core bus
@@ -286,6 +338,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
286 struct i915_audio_component *acomp; 338 struct i915_audio_component *acomp;
287 int ret; 339 int ret;
288 340
341 if (!i915_gfx_present())
342 return -ENODEV;
343
289 acomp = kzalloc(sizeof(*acomp), GFP_KERNEL); 344 acomp = kzalloc(sizeof(*acomp), GFP_KERNEL);
290 if (!acomp) 345 if (!acomp)
291 return -ENOMEM; 346 return -ENOMEM;
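[editor's note] All four M/N pairs in snd_hdac_i915_set_bclk() resolve to the same 24 MHz link clock, assuming the switch values are CDCLK in kHz as the comment suggests; BCLK = CDCLK * M / N. A quick arithmetic check:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* cdclk (kHz), M, N - pairs from snd_hdac_i915_set_bclk() */
        const struct { long cdclk, m, n; } tab[] = {
            { 337500, 16, 225 },
            { 450000,  4,  75 },
            { 540000,  4,  90 },
            { 675000,  8, 225 },
        };
        unsigned i;

        for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
            long bclk = tab[i].cdclk * tab[i].m / tab[i].n;
            printf("CDCLK %6ld kHz -> BCLK %ld kHz\n", tab[i].cdclk, bclk);
            assert(bclk == 24000);      /* 24 MHz in every case */
        }
        return 0;
    }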
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index bdbcd6b75ff6..87041ddd29cb 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -453,14 +453,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
453EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); 453EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
454 454
455static int reg_raw_read(struct hdac_device *codec, unsigned int reg, 455static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
456 unsigned int *val) 456 unsigned int *val, bool uncached)
457{ 457{
458 if (!codec->regmap) 458 if (uncached || !codec->regmap)
459 return hda_reg_read(codec, reg, val); 459 return hda_reg_read(codec, reg, val);
460 else 460 else
461 return regmap_read(codec->regmap, reg, val); 461 return regmap_read(codec->regmap, reg, val);
462} 462}
463 463
464static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
465 unsigned int reg, unsigned int *val,
466 bool uncached)
467{
468 int err;
469
470 err = reg_raw_read(codec, reg, val, uncached);
471 if (err == -EAGAIN) {
472 err = snd_hdac_power_up_pm(codec);
473 if (!err)
474 err = reg_raw_read(codec, reg, val, uncached);
475 snd_hdac_power_down_pm(codec);
476 }
477 return err;
478}
479
464/** 480/**
465 * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt 481 * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
466 * @codec: the codec object 482 * @codec: the codec object
@@ -472,19 +488,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
472int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 488int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
473 unsigned int *val) 489 unsigned int *val)
474{ 490{
475 int err; 491 return __snd_hdac_regmap_read_raw(codec, reg, val, false);
476
477 err = reg_raw_read(codec, reg, val);
478 if (err == -EAGAIN) {
479 err = snd_hdac_power_up_pm(codec);
480 if (!err)
481 err = reg_raw_read(codec, reg, val);
482 snd_hdac_power_down_pm(codec);
483 }
484 return err;
485} 492}
486EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw); 493EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
487 494
495/* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the
496 * cache but always via hda verbs.
497 */
498int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
499 unsigned int reg, unsigned int *val)
500{
501 return __snd_hdac_regmap_read_raw(codec, reg, val, true);
502}
503
488/** 504/**
489 * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt 505 * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
490 * @codec: the codec object 506 * @codec: the codec object
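[editor's note] Paired with the hdac_device.c hunk earlier, this funnels the cached and uncached raw reads through one helper: a bool picks the backend and the -EAGAIN retry (power the codec up, read again, power back down) is written once instead of per entry point. A stubbed sketch of that shape; every function here is a stand-in:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the hda verb and regmap backends. */
    static int hw_read(unsigned reg, unsigned *val)     { *val = reg ^ 1; return 0; }
    static int cached_read(unsigned reg, unsigned *val) { *val = reg; return 0; }
    static int power_up(void)    { return 0; }
    static void power_down(void) { }

    static int raw_read(unsigned reg, unsigned *val, bool uncached)
    {
        return uncached ? hw_read(reg, val) : cached_read(reg, val);
    }

    /* One retry path shared by both entry points, mirroring
     * __snd_hdac_regmap_read_raw(). */
    static int read_raw(unsigned reg, unsigned *val, bool uncached)
    {
        int err = raw_read(reg, val, uncached);

        if (err == -EAGAIN) {
            err = power_up();
            if (!err)
                err = raw_read(reg, val, uncached);
            power_down();
        }
        return err;
    }

    int main(void)
    {
        unsigned v;

        read_raw(0x20, &v, false);
        printf("cached:   %#x\n", v);
        read_raw(0x20, &v, true);
        printf("uncached: %#x\n", v);
        return 0;
    }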
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 7b248cdf06e2..fdcfa29e2205 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -591,7 +591,7 @@ static int sscape_upload_microcode(struct snd_card *card, int version)
591 } 591 }
592 err = upload_dma_data(sscape, init_fw->data, init_fw->size); 592 err = upload_dma_data(sscape, init_fw->data, init_fw->size);
593 if (err == 0) 593 if (err == 0)
594 snd_printk(KERN_INFO "sscape: MIDI firmware loaded %d KBs\n", 594 snd_printk(KERN_INFO "sscape: MIDI firmware loaded %zu KBs\n",
595 init_fw->size >> 10); 595 init_fw->size >> 10);
596 596
597 release_firmware(init_fw); 597 release_firmware(init_fw);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 7ca5b89f088a..dfaf1a93fb8a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
826 bool allow_powerdown) 826 bool allow_powerdown)
827{ 827{
828 hda_nid_t nid, changed = 0; 828 hda_nid_t nid, changed = 0;
829 int i, state; 829 int i, state, power;
830 830
831 for (i = 0; i < path->depth; i++) { 831 for (i = 0; i < path->depth; i++) {
832 nid = path->path[i]; 832 nid = path->path[i];
@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
838 state = AC_PWRST_D0; 838 state = AC_PWRST_D0;
839 else 839 else
840 state = AC_PWRST_D3; 840 state = AC_PWRST_D3;
841 if (!snd_hda_check_power_state(codec, nid, state)) { 841 power = snd_hda_codec_read(codec, nid, 0,
842 AC_VERB_GET_POWER_STATE, 0);
843 if (power != (state | (state << 4))) {
842 snd_hda_codec_write(codec, nid, 0, 844 snd_hda_codec_write(codec, nid, 0,
843 AC_VERB_SET_POWER_STATE, state); 845 AC_VERB_SET_POWER_STATE, state);
844 changed = nid; 846 changed = nid;
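[editor's note] The path_power_update() change compares the raw GET_POWER_STATE response against `state | (state << 4)`: per the HD-audio response layout, the programmed state lives in the low nibble and the actual state in the nibble above it, so the write is skipped only when both already match. A small illustration under that assumption:

    #include <stdio.h>

    #define AC_PWRST_D0 0x0
    #define AC_PWRST_D3 0x3

    /* Response layout: bits 3:0 = setting, bits 7:4 = actual.
     * Fully settled means both nibbles match the target. */
    static int settled(int response, int state)
    {
        return response == (state | (state << 4));
    }

    int main(void)
    {
        printf("%d\n", settled(0x33, AC_PWRST_D3)); /* 1: set+act = D3 */
        printf("%d\n", settled(0x03, AC_PWRST_D3)); /* 0: still in transition */
        printf("%d\n", settled(0x00, AC_PWRST_D0)); /* 1 */
        return 0;
    }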
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b680be0e937d..9a0d1445ca5c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
857#define azx_del_card_list(chip) /* NOP */ 857#define azx_del_card_list(chip) /* NOP */
858#endif /* CONFIG_PM */ 858#endif /* CONFIG_PM */
859 859
860/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
861 * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
862 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
863 * BCLK = CDCLK * M / N
864 * The values will be lost when the display power well is disabled and need to
865 * be restored to avoid abnormal playback speed.
866 */
867static void haswell_set_bclk(struct hda_intel *hda)
868{
869 struct azx *chip = &hda->chip;
870 int cdclk_freq;
871 unsigned int bclk_m, bclk_n;
872
873 if (!hda->need_i915_power)
874 return;
875
876 cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
877 switch (cdclk_freq) {
878 case 337500:
879 bclk_m = 16;
880 bclk_n = 225;
881 break;
882
883 case 450000:
884 default: /* default CDCLK 450MHz */
885 bclk_m = 4;
886 bclk_n = 75;
887 break;
888
889 case 540000:
890 bclk_m = 4;
891 bclk_n = 90;
892 break;
893
894 case 675000:
895 bclk_m = 8;
896 bclk_n = 225;
897 break;
898 }
899
900 azx_writew(chip, HSW_EM4, bclk_m);
901 azx_writew(chip, HSW_EM5, bclk_n);
902}
903
904#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) 860#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
905/* 861/*
906 * power management 862 * power management
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
958 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 914 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
959 && hda->need_i915_power) { 915 && hda->need_i915_power) {
960 snd_hdac_display_power(azx_bus(chip), true); 916 snd_hdac_display_power(azx_bus(chip), true);
961 haswell_set_bclk(hda); 917 snd_hdac_i915_set_bclk(azx_bus(chip));
962 } 918 }
963 if (chip->msi) 919 if (chip->msi)
964 if (pci_enable_msi(pci) < 0) 920 if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
1058 bus = azx_bus(chip); 1014 bus = azx_bus(chip);
1059 if (hda->need_i915_power) { 1015 if (hda->need_i915_power) {
1060 snd_hdac_display_power(bus, true); 1016 snd_hdac_display_power(bus, true);
1061 haswell_set_bclk(hda); 1017 snd_hdac_i915_set_bclk(bus);
1062 } else { 1018 } else {
1063 /* toggle codec wakeup bit for STATESTS read */ 1019 /* toggle codec wakeup bit for STATESTS read */
1064 snd_hdac_set_codec_wakeup(bus, true); 1020 snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
1796 /* initialize chip */ 1752 /* initialize chip */
1797 azx_init_pci(chip); 1753 azx_init_pci(chip);
1798 1754
1799 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 1755 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
1800 struct hda_intel *hda; 1756 snd_hdac_i915_set_bclk(bus);
1801
1802 hda = container_of(chip, struct hda_intel, chip);
1803 haswell_set_bclk(hda);
1804 }
1805 1757
1806 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); 1758 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
1807 1759
@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
2232 /* Broxton-P(Apollolake) */ 2184 /* Broxton-P(Apollolake) */
2233 { PCI_DEVICE(0x8086, 0x5a98), 2185 { PCI_DEVICE(0x8086, 0x5a98),
2234 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2186 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
2187 /* Broxton-T */
2188 { PCI_DEVICE(0x8086, 0x1a98),
2189 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
2235 /* Haswell */ 2190 /* Haswell */
2236 { PCI_DEVICE(0x8086, 0x0a0c), 2191 { PCI_DEVICE(0x8086, 0x0a0c),
2237 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 2192 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a47e8ae0eb30..80bbadc83721 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
361{ 361{
362 struct cs_spec *spec = codec->spec; 362 struct cs_spec *spec = codec->spec;
363 int err; 363 int err;
364 int i;
364 365
365 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0); 366 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
366 if (err < 0) 367 if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
370 if (err < 0) 371 if (err < 0)
371 return err; 372 return err;
372 373
 374 /* keep the ADCs powered up when dynamic ADC switching is in use */
375 if (spec->gen.dyn_adc_switch) {
376 unsigned int done = 0;
377 for (i = 0; i < spec->gen.input_mux.num_items; i++) {
378 int idx = spec->gen.dyn_adc_idx[i];
379 if (done & (1 << idx))
380 continue;
381 snd_hda_gen_fix_pin_power(codec,
382 spec->gen.adc_nids[idx]);
383 done |= 1 << idx;
384 }
385 }
386
373 return 0; 387 return 0;
374} 388}
375 389
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5af372d01834..1483f85999ec 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1396,7 +1396,6 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
1396 struct hda_codec *codec = per_pin->codec; 1396 struct hda_codec *codec = per_pin->codec;
1397 struct hdmi_spec *spec = codec->spec; 1397 struct hdmi_spec *spec = codec->spec;
1398 struct hdmi_eld *eld = &spec->temp_eld; 1398 struct hdmi_eld *eld = &spec->temp_eld;
1399 struct hdmi_eld *pin_eld = &per_pin->sink_eld;
1400 hda_nid_t pin_nid = per_pin->pin_nid; 1399 hda_nid_t pin_nid = per_pin->pin_nid;
1401 /* 1400 /*
1402 * Always execute a GetPinSense verb here, even when called from 1401 * Always execute a GetPinSense verb here, even when called from
@@ -1413,15 +1412,15 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
1413 present = snd_hda_pin_sense(codec, pin_nid); 1412 present = snd_hda_pin_sense(codec, pin_nid);
1414 1413
1415 mutex_lock(&per_pin->lock); 1414 mutex_lock(&per_pin->lock);
1416 pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); 1415 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
1417 if (pin_eld->monitor_present) 1416 if (eld->monitor_present)
1418 eld->eld_valid = !!(present & AC_PINSENSE_ELDV); 1417 eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
1419 else 1418 else
1420 eld->eld_valid = false; 1419 eld->eld_valid = false;
1421 1420
1422 codec_dbg(codec, 1421 codec_dbg(codec,
1423 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 1422 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
1424 codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid); 1423 codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
1425 1424
1426 if (eld->eld_valid) { 1425 if (eld->eld_valid) {
1427 if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer, 1426 if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
@@ -1441,7 +1440,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
1441 else 1440 else
1442 update_eld(codec, per_pin, eld); 1441 update_eld(codec, per_pin, eld);
1443 1442
1444 ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid; 1443 ret = !repoll || !eld->monitor_present || eld->eld_valid;
1445 1444
1446 jack = snd_hda_jack_tbl_get(codec, pin_nid); 1445 jack = snd_hda_jack_tbl_get(codec, pin_nid);
1447 if (jack) 1446 if (jack)
@@ -1859,6 +1858,8 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
1859 struct hdmi_spec *spec = codec->spec; 1858 struct hdmi_spec *spec = codec->spec;
1860 struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx); 1859 struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
1861 1860
1861 if (!per_pin)
1862 return;
1862 mutex_lock(&per_pin->lock); 1863 mutex_lock(&per_pin->lock);
1863 per_pin->chmap_set = true; 1864 per_pin->chmap_set = true;
1864 memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap)); 1865 memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
@@ -2231,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
2231 if (atomic_read(&(codec)->core.in_pm)) 2232 if (atomic_read(&(codec)->core.in_pm))
2232 return; 2233 return;
2233 2234
2235 snd_hdac_i915_set_bclk(&codec->bus->core);
2234 check_presence_and_report(codec, pin_nid); 2236 check_presence_and_report(codec, pin_nid);
2235} 2237}
2236 2238
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fefe83f2beab..ac4490a96863 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4760,6 +4760,7 @@ enum {
4760 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 4760 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
4761 ALC280_FIXUP_HP_HEADSET_MIC, 4761 ALC280_FIXUP_HP_HEADSET_MIC,
4762 ALC221_FIXUP_HP_FRONT_MIC, 4762 ALC221_FIXUP_HP_FRONT_MIC,
4763 ALC292_FIXUP_TPT460,
4763}; 4764};
4764 4765
4765static const struct hda_fixup alc269_fixups[] = { 4766static const struct hda_fixup alc269_fixups[] = {
@@ -5409,6 +5410,12 @@ static const struct hda_fixup alc269_fixups[] = {
5409 { } 5410 { }
5410 }, 5411 },
5411 }, 5412 },
5413 [ALC292_FIXUP_TPT460] = {
5414 .type = HDA_FIXUP_FUNC,
5415 .v.func = alc_fixup_tpt440_dock,
5416 .chained = true,
5417 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
5418 },
5412}; 5419};
5413 5420
5414static const struct snd_pci_quirk alc269_fixup_tbl[] = { 5421static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5442,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5442 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5449 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5443 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5450 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5444 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5451 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5452 SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5445 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK), 5453 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5446 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5454 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5447 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5455 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5563,7 +5571,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5563 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), 5571 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
5564 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 5572 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
5565 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5573 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
5566 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5574 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
5567 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 5575 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5568 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 5576 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5569 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 5577 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -5576,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5576 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5584 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5577 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5585 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5578 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5586 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5587 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5579 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5588 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5580 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5589 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5581 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5590 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5658,6 +5667,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5658 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"}, 5667 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
5659 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, 5668 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
5660 {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, 5669 {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
5670 {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
5661 {} 5671 {}
5662}; 5672};
5663#define ALC225_STANDARD_PINS \ 5673#define ALC225_STANDARD_PINS \
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index c5194f5b150a..d7e71f309299 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
1341 } 1341 }
1342 1342
1343 pcxhr_msg_thread(mgr); 1343 pcxhr_msg_thread(mgr);
1344 mutex_unlock(&mgr->lock);
1344 return IRQ_HANDLED; 1345 return IRQ_HANDLED;
1345} 1346}
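[editor's note] The pcxhr fix restores lock balance: the mutex taken earlier in the threaded IRQ handler must also be released on the IRQ_HANDLED return path, otherwise the next interrupt deadlocks. A pthread sketch of the balanced shape (compile with -lpthread; the locking scheme, not the driver, is what's modeled):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Shape of the fixed handler: the lock taken at entry is released
     * on the exit path too, so repeated invocations cannot deadlock. */
    static int threaded_irq(void)
    {
        pthread_mutex_lock(&lock);
        /* ... message handling would go here ... */
        pthread_mutex_unlock(&lock);   /* the line the patch adds */
        return 1;                      /* IRQ_HANDLED */
    }

    int main(void)
    {
        threaded_irq();
        threaded_irq();                /* would hang without the unlock */
        puts("both invocations completed");
        return 0;
    }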
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 649e92a252ae..7ef3a0c16478 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -629,6 +629,7 @@ config SND_SOC_RT5514
629 629
630config SND_SOC_RT5616 630config SND_SOC_RT5616
631 tristate "Realtek RT5616 CODEC" 631 tristate "Realtek RT5616 CODEC"
632 depends on I2C
632 633
633config SND_SOC_RT5631 634config SND_SOC_RT5631
634 tristate "Realtek ALC5631/RT5631 CODEC" 635 tristate "Realtek ALC5631/RT5631 CODEC"
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 92d22a018d68..83959312f7a0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec)
249} 249}
250EXPORT_SYMBOL_GPL(arizona_init_spk); 250EXPORT_SYMBOL_GPL(arizona_init_spk);
251 251
252int arizona_free_spk(struct snd_soc_codec *codec)
253{
254 struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
255 struct arizona *arizona = priv->arizona;
256
257 arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
258 arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
259
260 return 0;
261}
262EXPORT_SYMBOL_GPL(arizona_free_spk);
263
252static const struct snd_soc_dapm_route arizona_mono_routes[] = { 264static const struct snd_soc_dapm_route arizona_mono_routes[] = {
253 { "OUT1R", NULL, "OUT1L" }, 265 { "OUT1R", NULL, "OUT1L" },
254 { "OUT2R", NULL, "OUT2L" }, 266 { "OUT2R", NULL, "OUT2L" },
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1ea8e4ecf8d4..ce0531b8c632 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec);
307extern int arizona_init_gpio(struct snd_soc_codec *codec); 307extern int arizona_init_gpio(struct snd_soc_codec *codec);
308extern int arizona_init_mono(struct snd_soc_codec *codec); 308extern int arizona_init_mono(struct snd_soc_codec *codec);
309 309
310extern int arizona_free_spk(struct snd_soc_codec *codec);
311
310extern int arizona_init_dai(struct arizona_priv *priv, int dai); 312extern int arizona_init_dai(struct arizona_priv *priv, int dai);
311 313
312int arizona_set_output_mode(struct snd_soc_codec *codec, int output, 314int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 44c30fe3e315..287d13740be4 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0) 274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
275 pdata->sdout_share = val; 275 pdata->sdout_share = val;
276 276
277 of_property_read_u32(np, "cirrus,boost-manager", &val); 277 if (of_property_read_u32(np, "cirrus,boost-manager", &val))
278 val = -1u;
279
278 switch (val) { 280 switch (val) {
279 case CS35L32_BOOST_MGR_AUTO: 281 case CS35L32_BOOST_MGR_AUTO:
280 case CS35L32_BOOST_MGR_AUTO_AUDIO: 282 case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
282 case CS35L32_BOOST_MGR_FIXED: 284 case CS35L32_BOOST_MGR_FIXED:
283 pdata->boost_mng = val; 285 pdata->boost_mng = val;
284 break; 286 break;
287 case -1u:
285 default: 288 default:
286 dev_err(&i2c_client->dev, 289 dev_err(&i2c_client->dev,
287 "Wrong cirrus,boost-manager DT value %d\n", val); 290 "Wrong cirrus,boost-manager DT value %d\n", val);
288 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS; 291 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
289 } 292 }
290 293
291 of_property_read_u32(np, "cirrus,sdout-datacfg", &val); 294 if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
295 val = -1u;
292 switch (val) { 296 switch (val) {
293 case CS35L32_DATA_CFG_LR_VP: 297 case CS35L32_DATA_CFG_LR_VP:
294 case CS35L32_DATA_CFG_LR_STAT: 298 case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
296 case CS35L32_DATA_CFG_LR_VPSTAT: 300 case CS35L32_DATA_CFG_LR_VPSTAT:
297 pdata->sdout_datacfg = val; 301 pdata->sdout_datacfg = val;
298 break; 302 break;
303 case -1u:
299 default: 304 default:
300 dev_err(&i2c_client->dev, 305 dev_err(&i2c_client->dev,
301 "Wrong cirrus,sdout-datacfg DT value %d\n", val); 306 "Wrong cirrus,sdout-datacfg DT value %d\n", val);
302 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR; 307 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
303 } 308 }
304 309
305 of_property_read_u32(np, "cirrus,battery-threshold", &val); 310 if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
311 val = -1u;
306 switch (val) { 312 switch (val) {
307 case CS35L32_BATT_THRESH_3_1V: 313 case CS35L32_BATT_THRESH_3_1V:
308 case CS35L32_BATT_THRESH_3_2V: 314 case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
310 case CS35L32_BATT_THRESH_3_4V: 316 case CS35L32_BATT_THRESH_3_4V:
311 pdata->batt_thresh = val; 317 pdata->batt_thresh = val;
312 break; 318 break;
319 case -1u:
313 default: 320 default:
314 dev_err(&i2c_client->dev, 321 dev_err(&i2c_client->dev,
315 "Wrong cirrus,battery-threshold DT value %d\n", val); 322 "Wrong cirrus,battery-threshold DT value %d\n", val);
316 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V; 323 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
317 } 324 }
318 325
319 of_property_read_u32(np, "cirrus,battery-recovery", &val); 326 if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
327 val = -1u;
320 switch (val) { 328 switch (val) {
321 case CS35L32_BATT_RECOV_3_1V: 329 case CS35L32_BATT_RECOV_3_1V:
322 case CS35L32_BATT_RECOV_3_2V: 330 case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
326 case CS35L32_BATT_RECOV_3_6V: 334 case CS35L32_BATT_RECOV_3_6V:
327 pdata->batt_recov = val; 335 pdata->batt_recov = val;
328 break; 336 break;
337 case -1u:
329 default: 338 default:
330 dev_err(&i2c_client->dev, 339 dev_err(&i2c_client->dev,
331 "Wrong cirrus,battery-recovery DT value %d\n", val); 340 "Wrong cirrus,battery-recovery DT value %d\n", val);
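[editor's note] The cs35l32 hunks guard against of_property_read_u32() failing without touching *val: the old code switched on whatever was left in the variable whenever a DT property was absent. The fix parks -1u in val as an "absent" sentinel and gives it an explicit case. A userspace sketch with a stubbed reader:

    #include <errno.h>
    #include <stdio.h>

    /* Stub of of_property_read_u32(): fails without touching *val,
     * which is exactly the behavior the sentinel guards against. */
    static int read_u32(int present, unsigned *val)
    {
        if (!present)
            return -EINVAL;
        *val = 2;
        return 0;
    }

    int main(void)
    {
        unsigned val;

        if (read_u32(0, &val))   /* property missing */
            val = -1u;           /* sentinel: route to the fallback */

        switch (val) {
        case 0:
        case 1:
        case 2:
            printf("configured value %u\n", val);
            break;
        case -1u:                /* property absent: sentinel set above */
        default:                 /* present but out of range */
            printf("falling back to the default\n");
        }
        return 0;
    }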
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 576087bda330..00e9b6fc1b5c 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec)
1108 priv->core.arizona->dapm = NULL; 1108 priv->core.arizona->dapm = NULL;
1109 1109
1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
1111
1112 arizona_free_spk(codec);
1113
1111 return 0; 1114 return 0;
1112} 1115}
1113 1116
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 26f9459cb3bc..aaa038ffc8a5 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
1420} 1420}
1421 1421
1422#ifdef CONFIG_PM 1422#ifdef CONFIG_PM
1423static int hdmi_codec_resume(struct snd_soc_codec *codec) 1423static int hdmi_codec_prepare(struct device *dev)
1424{ 1424{
1425 struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec); 1425 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1426 struct hdac_device *hdac = &edev->hdac;
1427
1428 pm_runtime_get_sync(&edev->hdac.dev);
1429
1430 /*
1431 * Power down afg.
1432 * codec_read is preferred over codec_write to set the power state.
 1433 * This way the verb is sent to set the power state and a response
 1434 * is received. So setting the power state is ensured without a loop
1435 * to read the state.
1436 */
1437 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1438 AC_PWRST_D3);
1439
1440 return 0;
1441}
1442
1443static void hdmi_codec_complete(struct device *dev)
1444{
1445 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1426 struct hdac_hdmi_priv *hdmi = edev->private_data; 1446 struct hdac_hdmi_priv *hdmi = edev->private_data;
1427 struct hdac_hdmi_pin *pin; 1447 struct hdac_hdmi_pin *pin;
1428 struct hdac_device *hdac = &edev->hdac; 1448 struct hdac_device *hdac = &edev->hdac;
1429 struct hdac_bus *bus = hdac->bus;
1430 int err;
1431 unsigned long timeout;
1432
1433 hdac_hdmi_skl_enable_all_pins(&edev->hdac);
1434 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1435 1449
1436 /* Power up afg */ 1450 /* Power up afg */
1437 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) { 1451 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1438 1452 AC_PWRST_D0);
1439 snd_hdac_codec_write(hdac, hdac->afg, 0,
1440 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1441 1453
1442 /* Wait till power state is set to D0 */ 1454 hdac_hdmi_skl_enable_all_pins(&edev->hdac);
1443 timeout = jiffies + msecs_to_jiffies(1000); 1455 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1444 while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
1445 && time_before(jiffies, timeout)) {
1446 msleep(50);
1447 }
1448 }
1449 1456
1450 /* 1457 /*
1451 * As the ELD notify callback request is not entertained while the 1458 * As the ELD notify callback request is not entertained while the
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec)
1455 list_for_each_entry(pin, &hdmi->pin_list, head) 1462 list_for_each_entry(pin, &hdmi->pin_list, head)
1456 hdac_hdmi_present_sense(pin, 1); 1463 hdac_hdmi_present_sense(pin, 1);
1457 1464
1458 /* 1465 pm_runtime_put_sync(&edev->hdac.dev);
1459 * Codec power is turned ON during controller resume.
1460 * Turn it OFF here
1461 */
1462 err = snd_hdac_display_power(bus, false);
1463 if (err < 0) {
1464 dev_err(bus->dev,
1465 "Cannot turn OFF display power on i915, err: %d\n",
1466 err);
1467 return err;
1468 }
1469
1470 return 0;
1471} 1466}
1472#else 1467#else
1473#define hdmi_codec_resume NULL 1468#define hdmi_codec_prepare NULL
1469#define hdmi_codec_complete NULL
1474#endif 1470#endif
1475 1471
1476static struct snd_soc_codec_driver hdmi_hda_codec = { 1472static struct snd_soc_codec_driver hdmi_hda_codec = {
1477 .probe = hdmi_codec_probe, 1473 .probe = hdmi_codec_probe,
1478 .remove = hdmi_codec_remove, 1474 .remove = hdmi_codec_remove,
1479 .resume = hdmi_codec_resume,
1480 .idle_bias_off = true, 1475 .idle_bias_off = true,
1481}; 1476};
1482 1477
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1561 struct hdac_ext_device *edev = to_hda_ext_device(dev); 1556 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1562 struct hdac_device *hdac = &edev->hdac; 1557 struct hdac_device *hdac = &edev->hdac;
1563 struct hdac_bus *bus = hdac->bus; 1558 struct hdac_bus *bus = hdac->bus;
1564 unsigned long timeout;
1565 int err; 1559 int err;
1566 1560
1567 dev_dbg(dev, "Enter: %s\n", __func__); 1561 dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1570 if (!bus) 1564 if (!bus)
1571 return 0; 1565 return 0;
1572 1566
1573 /* Power down afg */ 1567 /*
1574 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) { 1568 * Power down afg.
1575 snd_hdac_codec_write(hdac, hdac->afg, 0, 1569 * codec_read is preferred over codec_write to set the power state.
1576 AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 1570 * This way verb is send to set the power state and response
1577 1571 * is received. So setting power state is ensured without using loop
1578 /* Wait till power state is set to D3 */ 1572 * to read the state.
1579 timeout = jiffies + msecs_to_jiffies(1000); 1573 */
1580 while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3) 1574 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1581 && time_before(jiffies, timeout)) { 1575 AC_PWRST_D3);
1582
1583 msleep(50);
1584 }
1585 }
1586
1587 err = snd_hdac_display_power(bus, false); 1576 err = snd_hdac_display_power(bus, false);
1588 if (err < 0) { 1577 if (err < 0) {
1589 dev_err(bus->dev, "Cannot turn on display power on i915\n"); 1578 dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1616 hdac_hdmi_skl_enable_dp12(&edev->hdac); 1605 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1617 1606
1618 /* Power up afg */ 1607 /* Power up afg */
1619 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) 1608 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1620 snd_hdac_codec_write(hdac, hdac->afg, 0, 1609 AC_PWRST_D0);
1621 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1622 1610
1623 return 0; 1611 return 0;
1624} 1612}
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1629 1617
1630static const struct dev_pm_ops hdac_hdmi_pm = { 1618static const struct dev_pm_ops hdac_hdmi_pm = {
1631 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) 1619 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
1620 .prepare = hdmi_codec_prepare,
1621 .complete = hdmi_codec_complete,
1632}; 1622};
1633 1623
1634static const struct hda_device_id hdmi_list[] = { 1624static const struct hda_device_id hdmi_list[] = {
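[editor's note] The recurring codec_read-for-codec_write swap in this file works because a read submits the verb and then waits for the codec's response, so the power-state transition is acknowledged by the time the call returns and the old check/msleep polling loop can go. A toy illustration of the two submission styles, with stubs:

    #include <stdio.h>

    static int hw_state;

    /* Fire-and-forget: the old code followed this with a poll loop
     * (check the state, msleep(50), repeat until settled or timeout). */
    static void codec_write(int state) { hw_state = state; }

    /* Read-style submission: the verb goes out and the call blocks for
     * the codec's response, so the state is acknowledged on return. */
    static int codec_read(int state) { hw_state = state; return hw_state; }

    int main(void)
    {
        int ack;

        codec_write(3);              /* D3: would need polling afterwards */
        ack = codec_read(0);         /* D0: synchronous */
        printf("acknowledged power state D%d without polling\n", ack);
        return 0;
    }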
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 1c8729984c2b..683769f0f246 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL, 343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
344 0), 344 0),
345 345
346 /* ADC for button press detection */ 346 /* ADC for button press detection. A dapm supply widget is used to
347 SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL, 347 * prevent dapm_power_widgets keeping the codec at SND_SOC_BIAS_ON
348 NAU8825_SAR_ADC_EN_SFT, 0), 348 * during suspend.
349 */
350 SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
351 NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
349 352
350 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0), 353 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
351 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0), 354 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap)
607 610
608static void nau8825_restart_jack_detection(struct regmap *regmap) 611static void nau8825_restart_jack_detection(struct regmap *regmap)
609{ 612{
613 /* Chip needs one FSCLK cycle in order to generate interrupts,
614 * as we cannot guarantee one will be provided by the system. Turning
615 * master mode on then off enables us to generate that FSCLK cycle
616 * with a minimum of contention on the clock bus.
617 */
618 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
619 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
620 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
621 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
622
610 /* this will restart the entire jack detection process including MIC/GND 623 /* this will restart the entire jack detection process including MIC/GND
611 * switching and create interrupts. We have to go from 0 to 1 and back 624 * switching and create interrupts. We have to go from 0 to 1 and back
612 * to 0 to restart. 625 * to 0 to restart.
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
728 struct regmap *regmap = nau8825->regmap; 741 struct regmap *regmap = nau8825->regmap;
729 int active_irq, clear_irq = 0, event = 0, event_mask = 0; 742 int active_irq, clear_irq = 0, event = 0, event_mask = 0;
730 743
731 regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq); 744 if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
745 dev_err(nau8825->dev, "failed to read irq status\n");
746 return IRQ_NONE;
747 }
732 748
733 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) == 749 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
734 NAU8825_JACK_EJECTION_DETECTED) { 750 NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
1141 return ret; 1157 return ret;
1142 } 1158 }
1143 } 1159 }
1144
1145 ret = regcache_sync(nau8825->regmap);
1146 if (ret) {
1147 dev_err(codec->dev,
1148 "Failed to sync cache: %d\n", ret);
1149 return ret;
1150 }
1151 } 1160 }
1152
1153 break; 1161 break;
1154 1162
1155 case SND_SOC_BIAS_OFF: 1163 case SND_SOC_BIAS_OFF:
1156 if (nau8825->mclk_freq) 1164 if (nau8825->mclk_freq)
1157 clk_disable_unprepare(nau8825->mclk); 1165 clk_disable_unprepare(nau8825->mclk);
1158
1159 regcache_mark_dirty(nau8825->regmap);
1160 break; 1166 break;
1161 } 1167 }
1162 return 0; 1168 return 0;
1163} 1169}
1164 1170
1171#ifdef CONFIG_PM
1172static int nau8825_suspend(struct snd_soc_codec *codec)
1173{
1174 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1175
1176 disable_irq(nau8825->irq);
1177 regcache_cache_only(nau8825->regmap, true);
1178 regcache_mark_dirty(nau8825->regmap);
1179
1180 return 0;
1181}
1182
1183static int nau8825_resume(struct snd_soc_codec *codec)
1184{
1185 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1186
1187 /* The chip may lose power and reset in S3. regcache_sync restores
1188 * register values including configurations for sysclk, irq, and
1189 * jack/button detection.
1190 */
1191 regcache_cache_only(nau8825->regmap, false);
1192 regcache_sync(nau8825->regmap);
1193
1194 /* Check the jack plug status directly. If the headset is unplugged
1195 * during S3 when the chip has no power, there will be no jack
1196 * detection irq even after the nau8825_restart_jack_detection below,
1197 * because the chip just thinks no headset has ever been plugged in.
1198 */
1199 if (!nau8825_is_jack_inserted(nau8825->regmap)) {
1200 nau8825_eject_jack(nau8825);
1201 snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
1202 }
1203
1204 enable_irq(nau8825->irq);
1205
1206 /* Run jack detection to check the type (OMTP or CTIA) of the headset
1207 * if there is one. This handles the case where a different type of
1208 * headset is plugged in during S3. This triggers an IRQ iff a headset
1209 * is already plugged in.
1210 */
1211 nau8825_restart_jack_detection(nau8825->regmap);
1212
1213 return 0;
1214}
1215#else
1216#define nau8825_suspend NULL
1217#define nau8825_resume NULL
1218#endif
1219
1165static struct snd_soc_codec_driver nau8825_codec_driver = { 1220static struct snd_soc_codec_driver nau8825_codec_driver = {
1166 .probe = nau8825_codec_probe, 1221 .probe = nau8825_codec_probe,
1167 .set_sysclk = nau8825_set_sysclk, 1222 .set_sysclk = nau8825_set_sysclk,
1168 .set_pll = nau8825_set_pll, 1223 .set_pll = nau8825_set_pll,
1169 .set_bias_level = nau8825_set_bias_level, 1224 .set_bias_level = nau8825_set_bias_level,
1170 .suspend_bias_off = true, 1225 .suspend_bias_off = true,
1226 .suspend = nau8825_suspend,
1227 .resume = nau8825_resume,
1171 1228
1172 .controls = nau8825_controls, 1229 .controls = nau8825_controls,
1173 .num_controls = ARRAY_SIZE(nau8825_controls), 1230 .num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825)
1277 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL, 1334 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
1278 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR); 1335 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
1279 1336
1280 /* Chip needs one FSCLK cycle in order to generate interrupts,
1281 * as we cannot guarantee one will be provided by the system. Turning
1282 * master mode on then off enables us to generate that FSCLK cycle
1283 * with a minimum of contention on the clock bus.
1284 */
1285 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
1286 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
1287 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
1288 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
1289
1290 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL, 1337 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
1291 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, 1338 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1292 "nau8825", nau8825); 1339 "nau8825", nau8825);
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client)
1354 return 0; 1401 return 0;
1355} 1402}
1356 1403
1357#ifdef CONFIG_PM_SLEEP
1358static int nau8825_suspend(struct device *dev)
1359{
1360 struct i2c_client *client = to_i2c_client(dev);
1361 struct nau8825 *nau8825 = dev_get_drvdata(dev);
1362
1363 disable_irq(client->irq);
1364 regcache_cache_only(nau8825->regmap, true);
1365 regcache_mark_dirty(nau8825->regmap);
1366
1367 return 0;
1368}
1369
1370static int nau8825_resume(struct device *dev)
1371{
1372 struct i2c_client *client = to_i2c_client(dev);
1373 struct nau8825 *nau8825 = dev_get_drvdata(dev);
1374
1375 regcache_cache_only(nau8825->regmap, false);
1376 regcache_sync(nau8825->regmap);
1377 enable_irq(client->irq);
1378
1379 return 0;
1380}
1381#endif
1382
1383static const struct dev_pm_ops nau8825_pm = {
1384 SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
1385};
1386
1387static const struct i2c_device_id nau8825_i2c_ids[] = { 1404static const struct i2c_device_id nau8825_i2c_ids[] = {
1388 { "nau8825", 0 }, 1405 { "nau8825", 0 },
1389 { } 1406 { }
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = {
1410 .name = "nau8825", 1427 .name = "nau8825",
1411 .of_match_table = of_match_ptr(nau8825_of_ids), 1428 .of_match_table = of_match_ptr(nau8825_of_ids),
1412 .acpi_match_table = ACPI_PTR(nau8825_acpi_match), 1429 .acpi_match_table = ACPI_PTR(nau8825_acpi_match),
1413 .pm = &nau8825_pm,
1414 }, 1430 },
1415 .probe = nau8825_i2c_probe, 1431 .probe = nau8825_i2c_probe,
1416 .remove = nau8825_i2c_remove, 1432 .remove = nau8825_i2c_remove,
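[editor's note] The relocated nau8825 suspend/resume follows the usual regcache dance: cache-only plus mark-dirty going down, cache-only off plus sync coming up, so every register the chip lost in S3 gets replayed before jack detection restarts. A mock register cache showing the flow; the cache here is a toy, not regmap:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NREGS 4

    static unsigned hw[NREGS];          /* pretend hardware registers */
    static unsigned cache[NREGS];
    static bool dirty[NREGS];
    static bool cache_only;

    static void reg_write(unsigned r, unsigned v)
    {
        cache[r] = v;
        if (cache_only)
            dirty[r] = true;            /* defer until sync */
        else
            hw[r] = v;
    }

    static void mark_dirty(void)
    {
        for (int i = 0; i < NREGS; i++)
            dirty[i] = true;            /* assume hw lost everything */
    }

    static void sync_cache(void)
    {
        for (int i = 0; i < NREGS; i++)
            if (dirty[i]) {
                hw[i] = cache[i];
                dirty[i] = false;
            }
    }

    int main(void)
    {
        reg_write(0, 0xaa);
        reg_write(1, 0x55);

        /* suspend: stop touching hardware, remember to replay */
        cache_only = true;
        mark_dirty();
        memset(hw, 0, sizeof(hw));      /* chip loses power in S3 */

        /* resume: replay the cache into the powered-up chip */
        cache_only = false;
        sync_cache();

        printf("restored: %#x %#x\n", hw[0], hw[1]);
        return 0;
    }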
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index e8b5ba04417a..09e8988bbb2d 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
359 359
360/* Interface data select */ 360/* Interface data select */
361static const char * const rt5640_data_select[] = { 361static const char * const rt5640_data_select[] = {
362 "Normal", "left copy to right", "right copy to left", "Swap"}; 362 "Normal", "Swap", "left copy to right", "right copy to left"};
363 363
364static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA, 364static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select); 365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 1761c3a98b76..58b664b06c16 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -443,39 +443,39 @@
443#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14) 443#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
444#define RT5640_IF1_DAC_SEL_SFT 14 444#define RT5640_IF1_DAC_SEL_SFT 14
445#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14) 445#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
446#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14) 446#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
447#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14) 447#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
448#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14) 448#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
449#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12) 449#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
450#define RT5640_IF1_ADC_SEL_SFT 12 450#define RT5640_IF1_ADC_SEL_SFT 12
451#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12) 451#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
452#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12) 452#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
453#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12) 453#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
454#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12) 454#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
455#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10) 455#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
456#define RT5640_IF2_DAC_SEL_SFT 10 456#define RT5640_IF2_DAC_SEL_SFT 10
457#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10) 457#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
458#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10) 458#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
459#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10) 459#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
460#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10) 460#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
461#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8) 461#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
462#define RT5640_IF2_ADC_SEL_SFT 8 462#define RT5640_IF2_ADC_SEL_SFT 8
463#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8) 463#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
464#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8) 464#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
465#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8) 465#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
466#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8) 466#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
467#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6) 467#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
468#define RT5640_IF3_DAC_SEL_SFT 6 468#define RT5640_IF3_DAC_SEL_SFT 6
469#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6) 469#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
470#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6) 470#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
471#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6) 471#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
472#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6) 472#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
473#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4) 473#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
474#define RT5640_IF3_ADC_SEL_SFT 4 474#define RT5640_IF3_ADC_SEL_SFT 4
475#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4) 475#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
476#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4) 476#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
477#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4) 477#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
478#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4) 478#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
479 479
480/* REC Left Mixer Control 1 (0x3b) */ 480/* REC Left Mixer Control 1 (0x3b) */
481#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13) 481#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
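The reordering above keeps the SOC_ENUM control texts and the _SEL_ register defines in lockstep: an enum kcontrol writes the index of the chosen text straight into the register field, so "Swap" must sit at index 1 to produce the value the codec expects. A minimal standalone sketch of that mapping; set_if1_dac_sel() is an illustrative helper, not driver API:

    #include <stdio.h>

    /* Values mirror the reordered control texts:
     * 0 = Normal, 1 = Swap, 2 = left copy to right, 3 = right copy to left */
    enum if1_dac_sel { SEL_NOR, SEL_SWAP, SEL_L2R, SEL_R2L };

    #define IF1_DAC_SEL_SFT  14
    #define IF1_DAC_SEL_MASK (0x3 << IF1_DAC_SEL_SFT)

    /* The enum control writes the chosen text index into the 2-bit field. */
    static unsigned int set_if1_dac_sel(unsigned int reg, enum if1_dac_sel sel)
    {
            return (reg & ~IF1_DAC_SEL_MASK) |
                   ((unsigned int)sel << IF1_DAC_SEL_SFT);
    }

    int main(void)
    {
            printf("0x%04x\n", set_if1_dac_sel(0x0000, SEL_SWAP)); /* 0x4000 */
            return 0;
    }
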
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index a8b3e3f701f9..1bae17ee8817 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe:
1955static int wm5102_codec_remove(struct snd_soc_codec *codec) 1955static int wm5102_codec_remove(struct snd_soc_codec *codec)
1956{ 1956{
1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); 1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
1958 struct arizona *arizona = priv->core.arizona;
1958 1959
1959 wm_adsp2_codec_remove(&priv->core.adsp[0], codec); 1960 wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
1960 1961
1961 priv->core.arizona->dapm = NULL; 1962 priv->core.arizona->dapm = NULL;
1962 1963
1964 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
1965
1966 arizona_free_spk(codec);
1967
1963 return 0; 1968 return 0;
1964} 1969}
1965 1970
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 83ba70fe16e6..2728ac545ffe 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
2298 2298
2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
2300 2300
2301 arizona_free_spk(codec);
2302
2301 return 0; 2303 return 0;
2302} 2304}
2303 2305
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 88223608a33f..720a14e0687d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
2471 break; 2471 break;
2472 default: 2472 default:
2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n"); 2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
2474 dspclk = wm8962->sysclk; 2474 dspclk = wm8962->sysclk_rate;
2475 } 2475 }
2476 2476
2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk); 2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 52d766efe14f..6b0785b5a5c5 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
1072 1072
1073 priv->core.arizona->dapm = NULL; 1073 priv->core.arizona->dapm = NULL;
1074 1074
1075 arizona_free_spk(codec);
1076
1075 return 0; 1077 return 0;
1076} 1078}
1077 1079
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 012396074a8a..449f66636205 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
1324 1324
1325 priv->core.arizona->dapm = NULL; 1325 priv->core.arizona->dapm = NULL;
1326 1326
1327 arizona_free_spk(codec);
1328
1327 return 0; 1329 return 0;
1328} 1330}
1329 1331
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3e6c2300457..1120f4f4d011 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE
163 tristate 163 tristate
164 select SND_HDA_EXT_CORE 164 select SND_HDA_EXT_CORE
165 select SND_SOC_TOPOLOGY 165 select SND_SOC_TOPOLOGY
166 select SND_HDA_I915
167 select SND_SOC_INTEL_SST 166 select SND_SOC_INTEL_SST
168 167
169config SND_SOC_INTEL_SKL_RT286_MACH 168config SND_SOC_INTEL_SKL_RT286_MACH
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index ac60f1301e21..91565229d074 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1345 return 0; 1345 return 0;
1346 1346
1347 /* wait for pause to complete before we reset the stream */ 1347 /* wait for pause to complete before we reset the stream */
1348 while (stream->running && tries--) 1348 while (stream->running && --tries)
1349 msleep(1); 1349 msleep(1);
1350 if (!tries) { 1350 if (!tries) {
1351 dev_err(hsw->dev, "error: reset stream %d still running\n", 1351 dev_err(hsw->dev, "error: reset stream %d still running\n",
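The one-character change above is the whole fix: with the post-decrement form, a timed-out loop exits with tries == -1, so the "if (!tries)" error report can never trigger; with pre-decrement it exits at exactly 0. A standalone sketch of the two behaviours, assuming the stream never goes idle:

    #include <stdio.h>

    int main(void)
    {
            int running = 1;    /* pretend the stream never stops */
            int tries;

            tries = 10;
            while (running && tries--)
                    ;           /* msleep(1) in the driver */
            printf("post-decrement exits with tries = %d\n", tries); /* -1 */

            tries = 10;
            while (running && --tries)
                    ;
            printf("pre-decrement exits with tries = %d\n", tries);  /* 0 */

            /* Only the second form lets "if (!tries)" report the timeout. */
            return 0;
    }
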
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index a5267e8a96e0..2962ef22fc84 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp)
336 skl_ipc_int_disable(dsp); 336 skl_ipc_int_disable(dsp);
337 337
338 free_irq(dsp->irq, dsp); 338 free_irq(dsp->irq, dsp);
339 dsp->cl_dev.ops.cl_cleanup_controller(dsp);
340 skl_cldma_int_disable(dsp);
341 skl_ipc_op_int_disable(dsp);
342 skl_ipc_int_disable(dsp);
343
339 skl_dsp_disable_core(dsp); 344 skl_dsp_disable_core(dsp);
340} 345}
341EXPORT_SYMBOL_GPL(skl_dsp_free); 346EXPORT_SYMBOL_GPL(skl_dsp_free);
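The added calls make the free path quiesce the code-loader DMA and mask every interrupt source before the core loses power, i.e. teardown now runs as the reverse of bring-up. A hedged sketch of that ordering; the stub names below are illustrative stand-ins, not the driver's real API:

    struct dsp { int irq; };

    static void dma_cleanup(struct dsp *d)     { (void)d; /* stop transfers */ }
    static void dma_int_disable(struct dsp *d) { (void)d; /* mask DMA IRQs  */ }
    static void ipc_int_disable(struct dsp *d) { (void)d; /* mask IPC IRQs  */ }
    static void core_power_off(struct dsp *d)  { (void)d; /* cut core power */ }

    /* Teardown as the reverse of bring-up: quiesce DMA, silence every
     * interrupt source, and only then power the core down. */
    static void dsp_free(struct dsp *d)
    {
            dma_cleanup(d);
            dma_int_disable(d);
            ipc_int_disable(d);
            core_power_off(d);
    }

    int main(void)
    {
            struct dsp d = { 0 };

            dsp_free(&d);
            return 0;
    }
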
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 545b4e77b8aa..cdb78b7e5a14 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
239{ 239{
240 int multiplier = 1; 240 int multiplier = 1;
241 struct skl_module_fmt *in_fmt, *out_fmt; 241 struct skl_module_fmt *in_fmt, *out_fmt;
242 int in_rate, out_rate;
242 243
243 244
244 /* Since fixups is applied to pin 0 only, ibs, obs needs 245 /* Since fixups is applied to pin 0 only, ibs, obs needs
@@ -249,15 +250,24 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
249 250
250 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 251 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
251 multiplier = 5; 252 multiplier = 5;
252 mcfg->ibs = (in_fmt->s_freq / 1000) * 253
253 (mcfg->in_fmt->channels) * 254 if (in_fmt->s_freq % 1000)
254 (mcfg->in_fmt->bit_depth >> 3) * 255 in_rate = (in_fmt->s_freq / 1000) + 1;
255 multiplier; 256 else
256 257 in_rate = (in_fmt->s_freq / 1000);
257 mcfg->obs = (mcfg->out_fmt->s_freq / 1000) * 258
258 (mcfg->out_fmt->channels) * 259 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
259 (mcfg->out_fmt->bit_depth >> 3) * 260 (mcfg->in_fmt->bit_depth >> 3) *
260 multiplier; 261 multiplier;
262
263 if (mcfg->out_fmt->s_freq % 1000)
264 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
265 else
266 out_rate = (mcfg->out_fmt->s_freq / 1000);
267
268 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
269 (mcfg->out_fmt->bit_depth >> 3) *
270 multiplier;
261} 271}
262 272
263static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 273static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
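The open-coded "+ 1 when s_freq % 1000 is nonzero" above is ceiling division: for 44.1 kHz the per-millisecond rate becomes 45 frames instead of a truncated 44, so ibs/obs no longer come out one frame short. The kernel's DIV_ROUND_UP() macro expresses the same rounding; a standalone check:

    #include <stdio.h>

    /* Same rounding as "if (s_freq % 1000) rate = s_freq / 1000 + 1" */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int s_freq = 44100, channels = 2, bit_depth = 16;
            unsigned int multiplier = 1;

            unsigned int rate = DIV_ROUND_UP(s_freq, 1000);  /* 45, not 44 */
            unsigned int ibs = rate * channels * (bit_depth >> 3) * multiplier;

            printf("ibs = %u bytes per ms\n", ibs);          /* 180 */
            return 0;
    }
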
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
485 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 495 if (!skl_is_pipe_mcps_avail(skl, mconfig))
486 return -ENOMEM; 496 return -ENOMEM;
487 497
498 skl_tplg_alloc_pipe_mcps(skl, mconfig);
499
488 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { 500 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
489 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 501 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
490 mconfig->id.module_id, mconfig->guid); 502 mconfig->id.module_id, mconfig->guid);
491 if (ret < 0) 503 if (ret < 0)
492 return ret; 504 return ret;
505
506 mconfig->m_state = SKL_MODULE_LOADED;
493 } 507 }
494 508
495 /* update blob if blob is null for be with default value */ 509 /* update blob if blob is null for be with default value */
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
509 ret = skl_tplg_set_module_params(w, ctx); 523 ret = skl_tplg_set_module_params(w, ctx);
510 if (ret < 0) 524 if (ret < 0)
511 return ret; 525 return ret;
512 skl_tplg_alloc_pipe_mcps(skl, mconfig);
513 } 526 }
514 527
515 return 0; 528 return 0;
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
524 list_for_each_entry(w_module, &pipe->w_list, node) { 537 list_for_each_entry(w_module, &pipe->w_list, node) {
525 mconfig = w_module->w->priv; 538 mconfig = w_module->w->priv;
526 539
527 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod) 540 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
541 mconfig->m_state > SKL_MODULE_UNINIT)
528 return ctx->dsp->fw_ops.unload_mod(ctx->dsp, 542 return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
529 mconfig->id.module_id); 543 mconfig->id.module_id);
530 } 544 }
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
558 if (!skl_is_pipe_mem_avail(skl, mconfig)) 572 if (!skl_is_pipe_mem_avail(skl, mconfig))
559 return -ENOMEM; 573 return -ENOMEM;
560 574
575 skl_tplg_alloc_pipe_mem(skl, mconfig);
576 skl_tplg_alloc_pipe_mcps(skl, mconfig);
577
561 /* 578 /*
562 * Create a list of modules for pipe. 579 * Create a list of modules for pipe.
563 * This list contains modules from source to sink 580 * This list contains modules from source to sink
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
601 src_module = dst_module; 618 src_module = dst_module;
602 } 619 }
603 620
604 skl_tplg_alloc_pipe_mem(skl, mconfig);
605 skl_tplg_alloc_pipe_mcps(skl, mconfig);
606
607 return 0; 621 return 0;
608} 622}
609 623
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index de3c401284d9..d2d923002d5c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -274,10 +274,10 @@ struct skl_pipe {
274 274
275enum skl_module_state { 275enum skl_module_state {
276 SKL_MODULE_UNINIT = 0, 276 SKL_MODULE_UNINIT = 0,
277 SKL_MODULE_INIT_DONE = 1, 277 SKL_MODULE_LOADED = 1,
278 SKL_MODULE_LOADED = 2, 278 SKL_MODULE_INIT_DONE = 2,
279 SKL_MODULE_UNLOADED = 3, 279 SKL_MODULE_BIND_DONE = 3,
280 SKL_MODULE_BIND_DONE = 4 280 SKL_MODULE_UNLOADED = 4,
281}; 281};
282 282
283struct skl_module_cfg { 283struct skl_module_cfg {
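The renumbering above is not cosmetic: the states now follow the module lifecycle, which lets range checks such as the new "m_state > SKL_MODULE_UNINIT" guard in skl_tplg_unload_pipe_modules() skip modules that never finished loading. A standalone sketch of the ordinal comparison:

    #include <stdio.h>

    enum skl_module_state {
            SKL_MODULE_UNINIT = 0,
            SKL_MODULE_LOADED,
            SKL_MODULE_INIT_DONE,
            SKL_MODULE_BIND_DONE,
            SKL_MODULE_UNLOADED,
    };

    /* With lifecycle ordering, one comparison filters out modules that
     * never made it past load, so they are not spuriously unloaded. */
    static int needs_unload(enum skl_module_state s)
    {
            return s > SKL_MODULE_UNINIT;
    }

    int main(void)
    {
            printf("%d %d\n", needs_unload(SKL_MODULE_UNINIT),
                   needs_unload(SKL_MODULE_LOADED));  /* prints: 0 1 */
            return 0;
    }
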
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index ab5e25aaeee3..3982f5536f2d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev)
222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci); 222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
223 struct skl *skl = ebus_to_skl(ebus); 223 struct skl *skl = ebus_to_skl(ebus);
224 struct hdac_bus *bus = ebus_to_hbus(ebus); 224 struct hdac_bus *bus = ebus_to_hbus(ebus);
225 int ret = 0;
225 226
226 /* 227 /*
227 * Do not suspend if streams which are marked ignore suspend are 228 * Do not suspend if streams which are marked ignore suspend are
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev)
232 enable_irq_wake(bus->irq); 233 enable_irq_wake(bus->irq);
233 pci_save_state(pci); 234 pci_save_state(pci);
234 pci_disable_device(pci); 235 pci_disable_device(pci);
235 return 0;
236 } else { 236 } else {
237 return _skl_suspend(ebus); 237 ret = _skl_suspend(ebus);
238 if (ret < 0)
239 return ret;
240 }
241
242 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
243 ret = snd_hdac_display_power(bus, false);
244 if (ret < 0)
245 dev_err(bus->dev,
246 "Cannot turn OFF display power on i915\n");
238 } 247 }
248
249 return ret;
239} 250}
240 251
241static int skl_resume(struct device *dev) 252static int skl_resume(struct device *dev)
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus)
316 327
317 if (bus->irq >= 0) 328 if (bus->irq >= 0)
318 free_irq(bus->irq, (void *)bus); 329 free_irq(bus->irq, (void *)bus);
319 if (bus->remap_addr)
320 iounmap(bus->remap_addr);
321
322 snd_hdac_bus_free_stream_pages(bus); 330 snd_hdac_bus_free_stream_pages(bus);
323 snd_hdac_stream_free_all(ebus); 331 snd_hdac_stream_free_all(ebus);
324 snd_hdac_link_free_all(ebus); 332 snd_hdac_link_free_all(ebus);
333
334 if (bus->remap_addr)
335 iounmap(bus->remap_addr);
336
325 pci_release_regions(skl->pci); 337 pci_release_regions(skl->pci);
326 pci_disable_device(skl->pci); 338 pci_disable_device(skl->pci);
327 339
328 snd_hdac_ext_bus_exit(ebus); 340 snd_hdac_ext_bus_exit(ebus);
329 341
342 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
343 snd_hdac_i915_exit(&ebus->bus);
330 return 0; 344 return 0;
331} 345}
332 346
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci)
719 if (skl->tplg) 733 if (skl->tplg)
720 release_firmware(skl->tplg); 734 release_firmware(skl->tplg);
721 735
722 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
723 snd_hdac_i915_exit(&ebus->bus);
724
725 if (pci_dev_run_wake(pci)) 736 if (pci_dev_run_wake(pci))
726 pm_runtime_get_noresume(&pci->dev); 737 pm_runtime_get_noresume(&pci->dev);
727 pci_dev_put(pci); 738
739 /* codec removal, invoke bus_device_remove */
740 snd_hdac_ext_bus_device_remove(ebus);
741
728 skl_platform_unregister(&pci->dev); 742 skl_platform_unregister(&pci->dev);
729 skl_free_dsp(skl); 743 skl_free_dsp(skl);
730 skl_machine_device_unregister(skl); 744 skl_machine_device_unregister(skl);
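IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI), used in the suspend and free paths above, evaluates to 1 when the option is built in or modular and 0 otherwise, so the i915 display-power branch compiles away on kernels without HDMI audio while still being parsed and type-checked. A userspace stand-in for the pattern; the real kernel macro is derived from the CONFIG_* symbols themselves:

    #include <stdio.h>

    #define CONFIG_MY_FEATURE 1          /* pretend Kconfig set this */
    #define IS_ENABLED(opt)   (opt)      /* simplified stand-in */

    static int display_power(int on)
    {
            printf("display power %s\n", on ? "on" : "off");
            return 0;
    }

    int main(void)
    {
            /* Unlike an #ifdef block, the branch below is always compiled
             * and type-checked; it is merely dead-code-eliminated when
             * the option is off. */
            if (IS_ENABLED(CONFIG_MY_FEATURE))
                    display_power(0);
            return 0;
    }
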
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 801ae1a81dfd..c4464858bf01 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
2188 int count = 0; 2188 int count = 0;
2189 char *state = "not set"; 2189 char *state = "not set";
2190 2190
2191 /* card won't be set for the dummy component, as a spot fix
2192 * we're checking for that case specifically here but in future
2193 * we will ensure that the dummy component looks like others.
2194 */
2195 if (!cmpnt->card)
2196 return 0;
2197
2191 list_for_each_entry(w, &cmpnt->card->widgets, list) { 2198 list_for_each_entry(w, &cmpnt->card->widgets, list) {
2192 if (w->dapm != dapm) 2199 if (w->dapm != dapm)
2193 continue; 2200 continue;
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index d14bf411515b..a452ad7cec40 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -15,7 +15,6 @@ config SND_USB_AUDIO
15 select SND_RAWMIDI 15 select SND_RAWMIDI
16 select SND_PCM 16 select SND_PCM
17 select BITREVERSE 17 select BITREVERSE
18 select SND_USB_AUDIO_USE_MEDIA_CONTROLLER if MEDIA_CONTROLLER && (MEDIA_SUPPORT=y || MEDIA_SUPPORT=SND_USB_AUDIO)
19 help 18 help
20 Say Y here to include support for USB audio and USB MIDI 19 Say Y here to include support for USB audio and USB MIDI
21 devices. 20 devices.
@@ -23,9 +22,6 @@ config SND_USB_AUDIO
23 To compile this driver as a module, choose M here: the module 22 To compile this driver as a module, choose M here: the module
24 will be called snd-usb-audio. 23 will be called snd-usb-audio.
25 24
26config SND_USB_AUDIO_USE_MEDIA_CONTROLLER
27 bool
28
29config SND_USB_UA101 25config SND_USB_UA101
30 tristate "Edirol UA-101/UA-1000 driver" 26 tristate "Edirol UA-101/UA-1000 driver"
31 select SND_PCM 27 select SND_PCM
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 8dca3c407f5a..2d2d122b069f 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -15,8 +15,6 @@ snd-usb-audio-objs := card.o \
15 quirks.o \ 15 quirks.o \
16 stream.o 16 stream.o
17 17
18snd-usb-audio-$(CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER) += media.o
19
20snd-usbmidi-lib-objs := midi.o 18snd-usbmidi-lib-objs := midi.o
21 19
22# Toplevel Module Dependency 20# Toplevel Module Dependency
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 63244bbba8c7..3fc63583a537 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -66,7 +66,6 @@
66#include "format.h" 66#include "format.h"
67#include "power.h" 67#include "power.h"
68#include "stream.h" 68#include "stream.h"
69#include "media.h"
70 69
71MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 70MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
72MODULE_DESCRIPTION("USB Audio"); 71MODULE_DESCRIPTION("USB Audio");
@@ -612,11 +611,6 @@ static int usb_audio_probe(struct usb_interface *intf,
612 if (err < 0) 611 if (err < 0)
613 goto __error; 612 goto __error;
614 613
615 if (quirk->media_device) {
616 /* don't want to fail when media_snd_device_create() fails */
617 media_snd_device_create(chip, intf);
618 }
619
620 usb_chip[chip->index] = chip; 614 usb_chip[chip->index] = chip;
621 chip->num_interfaces++; 615 chip->num_interfaces++;
622 usb_set_intfdata(intf, chip); 616 usb_set_intfdata(intf, chip);
@@ -673,14 +667,6 @@ static void usb_audio_disconnect(struct usb_interface *intf)
673 list_for_each(p, &chip->midi_list) { 667 list_for_each(p, &chip->midi_list) {
674 snd_usbmidi_disconnect(p); 668 snd_usbmidi_disconnect(p);
675 } 669 }
676 /*
677 * Nice to check quirk && quirk->media_device
678 * need some special handlings. Doesn't look like
679 * we have access to quirk here
680 * Acceses mixer_list
681 */
682 media_snd_device_delete(chip);
683
684 /* release mixer resources */ 670 /* release mixer resources */
685 list_for_each_entry(mixer, &chip->mixer_list, list) { 671 list_for_each_entry(mixer, &chip->mixer_list, list) {
686 snd_usb_mixer_disconnect(mixer); 672 snd_usb_mixer_disconnect(mixer);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 34a0898e2238..71778ca4b26a 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -105,8 +105,6 @@ struct snd_usb_endpoint {
105 struct list_head list; 105 struct list_head list;
106}; 106};
107 107
108struct media_ctl;
109
110struct snd_usb_substream { 108struct snd_usb_substream {
111 struct snd_usb_stream *stream; 109 struct snd_usb_stream *stream;
112 struct usb_device *dev; 110 struct usb_device *dev;
@@ -158,7 +156,6 @@ struct snd_usb_substream {
158 } dsd_dop; 156 } dsd_dop;
159 157
160 bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */ 158 bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
161 struct media_ctl *media_ctl;
162}; 159};
163 160
164struct snd_usb_stream { 161struct snd_usb_stream {
diff --git a/sound/usb/media.c b/sound/usb/media.c
deleted file mode 100644
index 93a50d01490c..000000000000
--- a/sound/usb/media.c
+++ /dev/null
@@ -1,318 +0,0 @@
1/*
2 * media.c - Media Controller specific ALSA driver code
3 *
4 * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
5 * Copyright (c) 2016 Samsung Electronics Co., Ltd.
6 *
7 * This file is released under the GPLv2.
8 */
9
10/*
11 * This file adds Media Controller support to ALSA driver
12 * to use the Media Controller API to share tuner with DVB
13 * and V4L2 drivers that control media device. Media device
14 * is created based on existing quirks framework. Using this
15 * approach, the media controller API usage can be added for
16 * a specific device.
17*/
18
19#include <linux/init.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/slab.h>
23#include <linux/usb.h>
24
25#include <sound/pcm.h>
26#include <sound/core.h>
27
28#include "usbaudio.h"
29#include "card.h"
30#include "mixer.h"
31#include "media.h"
32
33static int media_snd_enable_source(struct media_ctl *mctl)
34{
35 if (mctl && mctl->media_dev->enable_source)
36 return mctl->media_dev->enable_source(&mctl->media_entity,
37 &mctl->media_pipe);
38 return 0;
39}
40
41static void media_snd_disable_source(struct media_ctl *mctl)
42{
43 if (mctl && mctl->media_dev->disable_source)
44 mctl->media_dev->disable_source(&mctl->media_entity);
45}
46
47int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
48 int stream)
49{
50 struct media_device *mdev;
51 struct media_ctl *mctl;
52 struct device *pcm_dev = &pcm->streams[stream].dev;
53 u32 intf_type;
54 int ret = 0;
55 u16 mixer_pad;
56 struct media_entity *entity;
57
58 mdev = subs->stream->chip->media_dev;
59 if (!mdev)
60 return -ENODEV;
61
62 if (subs->media_ctl)
63 return 0;
64
65 /* allocate media_ctl */
66 mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
67 if (!mctl)
68 return -ENOMEM;
69
70 mctl->media_dev = mdev;
71 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
72 intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK;
73 mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK;
74 mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE;
75 mixer_pad = 1;
76 } else {
77 intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE;
78 mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE;
79 mctl->media_pad.flags = MEDIA_PAD_FL_SINK;
80 mixer_pad = 2;
81 }
82 mctl->media_entity.name = pcm->name;
83 media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad);
84 ret = media_device_register_entity(mctl->media_dev,
85 &mctl->media_entity);
86 if (ret)
87 goto free_mctl;
88
89 mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0,
90 MAJOR(pcm_dev->devt),
91 MINOR(pcm_dev->devt));
92 if (!mctl->intf_devnode) {
93 ret = -ENOMEM;
94 goto unregister_entity;
95 }
96 mctl->intf_link = media_create_intf_link(&mctl->media_entity,
97 &mctl->intf_devnode->intf,
98 MEDIA_LNK_FL_ENABLED);
99 if (!mctl->intf_link) {
100 ret = -ENOMEM;
101 goto devnode_remove;
102 }
103
104 /* create link between mixer and audio */
105 media_device_for_each_entity(entity, mdev) {
106 switch (entity->function) {
107 case MEDIA_ENT_F_AUDIO_MIXER:
108 ret = media_create_pad_link(entity, mixer_pad,
109 &mctl->media_entity, 0,
110 MEDIA_LNK_FL_ENABLED);
111 if (ret)
112 goto remove_intf_link;
113 break;
114 }
115 }
116
117 subs->media_ctl = mctl;
118 return 0;
119
120remove_intf_link:
121 media_remove_intf_link(mctl->intf_link);
122devnode_remove:
123 media_devnode_remove(mctl->intf_devnode);
124unregister_entity:
125 media_device_unregister_entity(&mctl->media_entity);
126free_mctl:
127 kfree(mctl);
128 return ret;
129}
130
131void media_snd_stream_delete(struct snd_usb_substream *subs)
132{
133 struct media_ctl *mctl = subs->media_ctl;
134
135 if (mctl && mctl->media_dev) {
136 struct media_device *mdev;
137
138 mdev = subs->stream->chip->media_dev;
139 if (mdev && media_devnode_is_registered(&mdev->devnode)) {
140 media_devnode_remove(mctl->intf_devnode);
141 media_device_unregister_entity(&mctl->media_entity);
142 media_entity_cleanup(&mctl->media_entity);
143 }
144 kfree(mctl);
145 subs->media_ctl = NULL;
146 }
147}
148
149int media_snd_start_pipeline(struct snd_usb_substream *subs)
150{
151 struct media_ctl *mctl = subs->media_ctl;
152
153 if (mctl)
154 return media_snd_enable_source(mctl);
155 return 0;
156}
157
158void media_snd_stop_pipeline(struct snd_usb_substream *subs)
159{
160 struct media_ctl *mctl = subs->media_ctl;
161
162 if (mctl)
163 media_snd_disable_source(mctl);
164}
165
166int media_snd_mixer_init(struct snd_usb_audio *chip)
167{
168 struct device *ctl_dev = &chip->card->ctl_dev;
169 struct media_intf_devnode *ctl_intf;
170 struct usb_mixer_interface *mixer;
171 struct media_device *mdev = chip->media_dev;
172 struct media_mixer_ctl *mctl;
173 u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL;
174 int ret;
175
176 if (!mdev)
177 return -ENODEV;
178
179 ctl_intf = chip->ctl_intf_media_devnode;
180 if (!ctl_intf) {
181 ctl_intf = media_devnode_create(mdev, intf_type, 0,
182 MAJOR(ctl_dev->devt),
183 MINOR(ctl_dev->devt));
184 if (!ctl_intf)
185 return -ENOMEM;
186 chip->ctl_intf_media_devnode = ctl_intf;
187 }
188
189 list_for_each_entry(mixer, &chip->mixer_list, list) {
190
191 if (mixer->media_mixer_ctl)
192 continue;
193
194 /* allocate media_mixer_ctl */
195 mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
196 if (!mctl)
197 return -ENOMEM;
198
199 mctl->media_dev = mdev;
200 mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER;
201 mctl->media_entity.name = chip->card->mixername;
202 mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK;
203 mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE;
204 mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE;
205 media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX,
206 mctl->media_pad);
207 ret = media_device_register_entity(mctl->media_dev,
208 &mctl->media_entity);
209 if (ret) {
210 kfree(mctl);
211 return ret;
212 }
213
214 mctl->intf_link = media_create_intf_link(&mctl->media_entity,
215 &ctl_intf->intf,
216 MEDIA_LNK_FL_ENABLED);
217 if (!mctl->intf_link) {
218 media_device_unregister_entity(&mctl->media_entity);
219 media_entity_cleanup(&mctl->media_entity);
220 kfree(mctl);
221 return -ENOMEM;
222 }
223 mctl->intf_devnode = ctl_intf;
224 mixer->media_mixer_ctl = mctl;
225 }
226 return 0;
227}
228
229static void media_snd_mixer_delete(struct snd_usb_audio *chip)
230{
231 struct usb_mixer_interface *mixer;
232 struct media_device *mdev = chip->media_dev;
233
234 if (!mdev)
235 return;
236
237 list_for_each_entry(mixer, &chip->mixer_list, list) {
238 struct media_mixer_ctl *mctl;
239
240 mctl = mixer->media_mixer_ctl;
241 if (!mixer->media_mixer_ctl)
242 continue;
243
244 if (media_devnode_is_registered(&mdev->devnode)) {
245 media_device_unregister_entity(&mctl->media_entity);
246 media_entity_cleanup(&mctl->media_entity);
247 }
248 kfree(mctl);
249 mixer->media_mixer_ctl = NULL;
250 }
251 if (media_devnode_is_registered(&mdev->devnode))
252 media_devnode_remove(chip->ctl_intf_media_devnode);
253 chip->ctl_intf_media_devnode = NULL;
254}
255
256int media_snd_device_create(struct snd_usb_audio *chip,
257 struct usb_interface *iface)
258{
259 struct media_device *mdev;
260 struct usb_device *usbdev = interface_to_usbdev(iface);
261 int ret;
262
263 mdev = media_device_get_devres(&usbdev->dev);
264 if (!mdev)
265 return -ENOMEM;
266 if (!mdev->dev) {
267 /* register media device */
268 mdev->dev = &usbdev->dev;
269 if (usbdev->product)
270 strlcpy(mdev->model, usbdev->product,
271 sizeof(mdev->model));
272 if (usbdev->serial)
273 strlcpy(mdev->serial, usbdev->serial,
274 sizeof(mdev->serial));
275 strcpy(mdev->bus_info, usbdev->devpath);
276 mdev->hw_revision = le16_to_cpu(usbdev->descriptor.bcdDevice);
277 media_device_init(mdev);
278 }
279 if (!media_devnode_is_registered(&mdev->devnode)) {
280 ret = media_device_register(mdev);
281 if (ret) {
282 dev_err(&usbdev->dev,
283 "Couldn't register media device. Error: %d\n",
284 ret);
285 return ret;
286 }
287 }
288
289 /* save media device - avoid lookups */
290 chip->media_dev = mdev;
291
292 /* Create media entities for mixer and control dev */
293 ret = media_snd_mixer_init(chip);
294 if (ret) {
295 dev_err(&usbdev->dev,
296 "Couldn't create media mixer entities. Error: %d\n",
297 ret);
298
299 /* clear saved media_dev */
300 chip->media_dev = NULL;
301
302 return ret;
303 }
304 return 0;
305}
306
307void media_snd_device_delete(struct snd_usb_audio *chip)
308{
309 struct media_device *mdev = chip->media_dev;
310
311 media_snd_mixer_delete(chip);
312
313 if (mdev) {
314 if (media_devnode_is_registered(&mdev->devnode))
315 media_device_unregister(mdev);
316 chip->media_dev = NULL;
317 }
318}
diff --git a/sound/usb/media.h b/sound/usb/media.h
deleted file mode 100644
index 1dcdcdc5f7aa..000000000000
--- a/sound/usb/media.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * media.h - Media Controller specific ALSA driver code
3 *
4 * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
5 * Copyright (c) 2016 Samsung Electronics Co., Ltd.
6 *
7 * This file is released under the GPLv2.
8 */
9
10/*
11 * This file adds Media Controller support to ALSA driver
12 * to use the Media Controller API to share tuner with DVB
13 * and V4L2 drivers that control media device. Media device
14 * is created based on existing quirks framework. Using this
15 * approach, the media controller API usage can be added for
16 * a specific device.
17*/
18#ifndef __MEDIA_H
19
20#ifdef CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER
21
22#include <media/media-device.h>
23#include <media/media-entity.h>
24#include <sound/asound.h>
25
26struct media_ctl {
27 struct media_device *media_dev;
28 struct media_entity media_entity;
29 struct media_intf_devnode *intf_devnode;
30 struct media_link *intf_link;
31 struct media_pad media_pad;
32 struct media_pipeline media_pipe;
33};
34
35/*
36 * One source pad each for SNDRV_PCM_STREAM_CAPTURE and
37 * SNDRV_PCM_STREAM_PLAYBACK. One for sink pad to link
38 * to AUDIO Source
39*/
40#define MEDIA_MIXER_PAD_MAX (SNDRV_PCM_STREAM_LAST + 2)
41
42struct media_mixer_ctl {
43 struct media_device *media_dev;
44 struct media_entity media_entity;
45 struct media_intf_devnode *intf_devnode;
46 struct media_link *intf_link;
47 struct media_pad media_pad[MEDIA_MIXER_PAD_MAX];
48 struct media_pipeline media_pipe;
49};
50
51int media_snd_device_create(struct snd_usb_audio *chip,
52 struct usb_interface *iface);
53void media_snd_device_delete(struct snd_usb_audio *chip);
54int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
55 int stream);
56void media_snd_stream_delete(struct snd_usb_substream *subs);
57int media_snd_start_pipeline(struct snd_usb_substream *subs);
58void media_snd_stop_pipeline(struct snd_usb_substream *subs);
59#else
60static inline int media_snd_device_create(struct snd_usb_audio *chip,
61 struct usb_interface *iface)
62 { return 0; }
63static inline void media_snd_device_delete(struct snd_usb_audio *chip) { }
64static inline int media_snd_stream_init(struct snd_usb_substream *subs,
65 struct snd_pcm *pcm, int stream)
66 { return 0; }
67static inline void media_snd_stream_delete(struct snd_usb_substream *subs) { }
68static inline int media_snd_start_pipeline(struct snd_usb_substream *subs)
69 { return 0; }
70static inline void media_snd_stop_pipeline(struct snd_usb_substream *subs) { }
71#endif
72#endif /* __MEDIA_H */
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index f3789446ab9c..3417ef347e40 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -3,8 +3,6 @@
3 3
4#include <sound/info.h> 4#include <sound/info.h>
5 5
6struct media_mixer_ctl;
7
8struct usb_mixer_interface { 6struct usb_mixer_interface {
9 struct snd_usb_audio *chip; 7 struct snd_usb_audio *chip;
10 struct usb_host_interface *hostif; 8 struct usb_host_interface *hostif;
@@ -24,7 +22,6 @@ struct usb_mixer_interface {
24 struct urb *rc_urb; 22 struct urb *rc_urb;
25 struct usb_ctrlrequest *rc_setup_packet; 23 struct usb_ctrlrequest *rc_setup_packet;
26 u8 rc_buffer[6]; 24 u8 rc_buffer[6];
27 struct media_mixer_ctl *media_mixer_ctl;
28}; 25};
29 26
30#define MAX_CHANNELS 16 /* max logical channels */ 27#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index ddca6547399b..1f8fb0d904e0 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
349}; 349};
350 350
351/* 351/*
352 * Dell usb dock with ALC4020 codec had a firmware problem where it got
353 * screwed up when zero volume is passed; just skip it as a workaround
354 */
355static const struct usbmix_name_map dell_alc4020_map[] = {
356 { 16, NULL },
357 { 19, NULL },
358 { 0 }
359};
360
361/*
352 * Control map entries 362 * Control map entries
353 */ 363 */
354 364
@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
431 .map = aureon_51_2_map, 441 .map = aureon_51_2_map,
432 }, 442 },
433 { 443 {
444 .id = USB_ID(0x0bda, 0x4014),
445 .map = dell_alc4020_map,
446 },
447 {
434 .id = USB_ID(0x0dba, 0x1000), 448 .id = USB_ID(0x0dba, 0x1000),
435 .map = mbox1_map, 449 .map = mbox1_map,
436 }, 450 },
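In these quirk tables a map entry with a non-NULL string renames a mixer unit, while a NULL name, as in dell_alc4020_map above, tells the mixer code to skip that unit ID entirely, hiding the volume controls the dock's firmware mishandles. A simplified standalone model of the lookup; the struct and function names here are illustrative:

    #include <stdio.h>
    #include <stddef.h>

    struct name_map {
            int id;
            const char *name;    /* NULL means: suppress this control */
    };

    static const struct name_map dell_alc4020_map[] = {
            { 16, NULL },
            { 19, NULL },
            { 0 }                /* zero id terminates the table */
    };

    static int control_is_blocked(const struct name_map *map, int unit_id)
    {
            for (; map->id; map++)
                    if (map->id == unit_id)
                            return map->name == NULL;
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", control_is_blocked(dell_alc4020_map, 16),
                   control_is_blocked(dell_alc4020_map, 2)); /* 1 0 */
            return 0;
    }
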
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 0e4e0640c504..44d178ee9177 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -35,7 +35,6 @@
35#include "pcm.h" 35#include "pcm.h"
36#include "clock.h" 36#include "clock.h"
37#include "power.h" 37#include "power.h"
38#include "media.h"
39 38
40#define SUBSTREAM_FLAG_DATA_EP_STARTED 0 39#define SUBSTREAM_FLAG_DATA_EP_STARTED 0
41#define SUBSTREAM_FLAG_SYNC_EP_STARTED 1 40#define SUBSTREAM_FLAG_SYNC_EP_STARTED 1
@@ -718,14 +717,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
718 struct audioformat *fmt; 717 struct audioformat *fmt;
719 int ret; 718 int ret;
720 719
721 ret = media_snd_start_pipeline(subs);
722 if (ret)
723 return ret;
724
725 ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, 720 ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
726 params_buffer_bytes(hw_params)); 721 params_buffer_bytes(hw_params));
727 if (ret < 0) 722 if (ret < 0)
728 goto err_ret; 723 return ret;
729 724
730 subs->pcm_format = params_format(hw_params); 725 subs->pcm_format = params_format(hw_params);
731 subs->period_bytes = params_period_bytes(hw_params); 726 subs->period_bytes = params_period_bytes(hw_params);
@@ -739,27 +734,22 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
739 dev_dbg(&subs->dev->dev, 734 dev_dbg(&subs->dev->dev,
740 "cannot set format: format = %#x, rate = %d, channels = %d\n", 735 "cannot set format: format = %#x, rate = %d, channels = %d\n",
741 subs->pcm_format, subs->cur_rate, subs->channels); 736 subs->pcm_format, subs->cur_rate, subs->channels);
742 ret = -EINVAL; 737 return -EINVAL;
743 goto err_ret;
744 } 738 }
745 739
746 ret = snd_usb_lock_shutdown(subs->stream->chip); 740 ret = snd_usb_lock_shutdown(subs->stream->chip);
747 if (ret < 0) 741 if (ret < 0)
748 goto err_ret; 742 return ret;
749 ret = set_format(subs, fmt); 743 ret = set_format(subs, fmt);
750 snd_usb_unlock_shutdown(subs->stream->chip); 744 snd_usb_unlock_shutdown(subs->stream->chip);
751 if (ret < 0) 745 if (ret < 0)
752 goto err_ret; 746 return ret;
753 747
754 subs->interface = fmt->iface; 748 subs->interface = fmt->iface;
755 subs->altset_idx = fmt->altset_idx; 749 subs->altset_idx = fmt->altset_idx;
756 subs->need_setup_ep = true; 750 subs->need_setup_ep = true;
757 751
758 return 0; 752 return 0;
759
760err_ret:
761 media_snd_stop_pipeline(subs);
762 return ret;
763} 753}
764 754
765/* 755/*
@@ -771,7 +761,6 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
771{ 761{
772 struct snd_usb_substream *subs = substream->runtime->private_data; 762 struct snd_usb_substream *subs = substream->runtime->private_data;
773 763
774 media_snd_stop_pipeline(subs);
775 subs->cur_audiofmt = NULL; 764 subs->cur_audiofmt = NULL;
776 subs->cur_rate = 0; 765 subs->cur_rate = 0;
777 subs->period_bytes = 0; 766 subs->period_bytes = 0;
@@ -1232,7 +1221,6 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
1232 struct snd_usb_stream *as = snd_pcm_substream_chip(substream); 1221 struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
1233 struct snd_pcm_runtime *runtime = substream->runtime; 1222 struct snd_pcm_runtime *runtime = substream->runtime;
1234 struct snd_usb_substream *subs = &as->substream[direction]; 1223 struct snd_usb_substream *subs = &as->substream[direction];
1235 int ret;
1236 1224
1237 subs->interface = -1; 1225 subs->interface = -1;
1238 subs->altset_idx = 0; 1226 subs->altset_idx = 0;
@@ -1246,12 +1234,7 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
1246 subs->dsd_dop.channel = 0; 1234 subs->dsd_dop.channel = 0;
1247 subs->dsd_dop.marker = 1; 1235 subs->dsd_dop.marker = 1;
1248 1236
1249 ret = setup_hw_info(runtime, subs); 1237 return setup_hw_info(runtime, subs);
1250 if (ret == 0)
1251 ret = media_snd_stream_init(subs, as->pcm, direction);
1252 if (ret)
1253 snd_usb_autosuspend(subs->stream->chip);
1254 return ret;
1255} 1238}
1256 1239
1257static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) 1240static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
@@ -1260,7 +1243,6 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
1260 struct snd_usb_substream *subs = &as->substream[direction]; 1243 struct snd_usb_substream *subs = &as->substream[direction];
1261 1244
1262 stop_endpoints(subs, true); 1245 stop_endpoints(subs, true);
1263 media_snd_stop_pipeline(subs);
1264 1246
1265 if (subs->interface >= 0 && 1247 if (subs->interface >= 0 &&
1266 !snd_usb_lock_shutdown(subs->stream->chip)) { 1248 !snd_usb_lock_shutdown(subs->stream->chip)) {
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 9d087b19c70c..c60a776e815d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2886,7 +2886,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2886 .product_name = pname, \ 2886 .product_name = pname, \
2887 .ifnum = QUIRK_ANY_INTERFACE, \ 2887 .ifnum = QUIRK_ANY_INTERFACE, \
2888 .type = QUIRK_AUDIO_ALIGN_TRANSFER, \ 2888 .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
2889 .media_device = 1, \
2890 } \ 2889 } \
2891} 2890}
2892 2891
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6178bb5d0731..0adfd9537cf7 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1134,9 +1134,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1134 case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */ 1134 case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
1135 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ 1135 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
1136 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ 1136 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
1137 case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
1137 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ 1138 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
1138 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1139 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1139 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1140 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
1141 case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
1140 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ 1142 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
1141 return true; 1143 return true;
1142 } 1144 }
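The quirk list keys on USB_ID(), which packs the 16-bit vendor and product IDs into one 32-bit value, so a single switch statement can match devices like the Plantronics BT-300 added above. A standalone round-trip of the packing, with macros modelled on usbaudio.h:

    #include <stdio.h>

    /* Packing used by the quirk tables, modelled on usbaudio.h */
    #define USB_ID(vendor, product) (((vendor) << 16) | (product))
    #define USB_ID_VENDOR(id)       ((id) >> 16)
    #define USB_ID_PRODUCT(id)      ((id) & 0xffff)

    int main(void)
    {
            unsigned int id = USB_ID(0x047F, 0x0415); /* Plantronics BT-300 */

            printf("%04x:%04x\n", USB_ID_VENDOR(id), USB_ID_PRODUCT(id));
            return 0;
    }
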
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 6fe7f210bd4e..8e9548bc1f1a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -36,7 +36,6 @@
36#include "format.h" 36#include "format.h"
37#include "clock.h" 37#include "clock.h"
38#include "stream.h" 38#include "stream.h"
39#include "media.h"
40 39
41/* 40/*
42 * free a substream 41 * free a substream
@@ -53,7 +52,6 @@ static void free_substream(struct snd_usb_substream *subs)
53 kfree(fp); 52 kfree(fp);
54 } 53 }
55 kfree(subs->rate_list.list); 54 kfree(subs->rate_list.list);
56 media_snd_stream_delete(subs);
57} 55}
58 56
59 57
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index a161c7c1b126..b665d85555cb 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -30,9 +30,6 @@
30 * 30 *
31 */ 31 */
32 32
33struct media_device;
34struct media_intf_devnode;
35
36struct snd_usb_audio { 33struct snd_usb_audio {
37 int index; 34 int index;
38 struct usb_device *dev; 35 struct usb_device *dev;
@@ -63,8 +60,6 @@ struct snd_usb_audio {
63 bool autoclock; /* from the 'autoclock' module param */ 60 bool autoclock; /* from the 'autoclock' module param */
64 61
65 struct usb_host_interface *ctrl_intf; /* the audio control interface */ 62 struct usb_host_interface *ctrl_intf; /* the audio control interface */
66 struct media_device *media_dev;
67 struct media_intf_devnode *ctl_intf_media_devnode;
68}; 63};
69 64
70#define usb_audio_err(chip, fmt, args...) \ 65#define usb_audio_err(chip, fmt, args...) \
@@ -115,7 +110,6 @@ struct snd_usb_audio_quirk {
115 const char *product_name; 110 const char *product_name;
116 int16_t ifnum; 111 int16_t ifnum;
117 uint16_t type; 112 uint16_t type;
118 bool media_device;
119 const void *data; 113 const void *data;
120}; 114};
121 115
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 5a95896105bc..55a60d331f47 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -299,18 +299,38 @@ they mean, and suggestions for how to fix them.
299Errors in .c files 299Errors in .c files
300------------------ 300------------------
301 301
302If you're getting an objtool error in a compiled .c file, chances are 3021. c_file.o: warning: objtool: funcA() falls through to next function funcB()
303the file uses an asm() statement which has a "call" instruction. An
304asm() statement with a call instruction must declare the use of the
305stack pointer in its output operand. For example, on x86_64:
306 303
307 register void *__sp asm("rsp"); 304 This means that funcA() doesn't end with a return instruction or an
308 asm volatile("call func" : "+r" (__sp)); 305 unconditional jump, and that objtool has determined that the function
306 can fall through into the next function. There could be different
307 reasons for this:
309 308
310Otherwise the stack frame may not get created before the call. 309 1) funcA()'s last instruction is a call to a "noreturn" function like
310 panic(). In this case the noreturn function needs to be added to
311 objtool's hard-coded global_noreturns array. Feel free to bug the
312 objtool maintainer, or you can submit a patch.
311 313
312Another possible cause for errors in C code is if the Makefile removes 314 2) funcA() uses the unreachable() annotation in a section of code
313-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options. 315 that is actually reachable.
316
317 3) If funcA() calls an inline function, the object code for funcA()
318 might be corrupt due to a gcc bug. For more details, see:
319 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
320
3212. If you're getting any other objtool error in a compiled .c file, it
322 may be because the file uses an asm() statement which has a "call"
323 instruction. An asm() statement with a call instruction must declare
324 the use of the stack pointer in its output operand. For example, on
325 x86_64:
326
327 register void *__sp asm("rsp");
328 asm volatile("call func" : "+r" (__sp));
329
330 Otherwise the stack frame may not get created before the call.
331
3323. Another possible cause for errors in C code is if the Makefile removes
333 -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
314 334
315Also see the above section for .S file errors for more information what 335Also see the above section for .S file errors for more information what
316the individual error messages mean. 336the individual error messages mean.
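A minimal illustration of case 1 from the renumbered list above: when a function ends in a call to a noreturn function, gcc emits no trailing ret, and objtool reports a fall-through unless the callee is known to it. The names below are hypothetical:

    /* my_fatal() never returns, so gcc emits no ret at the end of
     * funcA(); unless my_fatal() is in objtool's global_noreturns
     * list, funcA() appears to fall through to the next function. */
    void my_fatal(const char *msg) __attribute__((noreturn));

    int funcA(int err)
    {
            if (!err)
                    return 0;
            my_fatal("unrecoverable");   /* last instruction: a call */
    }
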
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 7515cb2e879a..e8a1e69eb92c 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -54,6 +54,7 @@ struct instruction {
54 struct symbol *call_dest; 54 struct symbol *call_dest;
55 struct instruction *jump_dest; 55 struct instruction *jump_dest;
56 struct list_head alts; 56 struct list_head alts;
57 struct symbol *func;
57}; 58};
58 59
59struct alternative { 60struct alternative {
@@ -66,6 +67,7 @@ struct objtool_file {
66 struct list_head insn_list; 67 struct list_head insn_list;
67 DECLARE_HASHTABLE(insn_hash, 16); 68 DECLARE_HASHTABLE(insn_hash, 16);
68 struct section *rodata, *whitelist; 69 struct section *rodata, *whitelist;
70 bool ignore_unreachables, c_file;
69}; 71};
70 72
71const char *objname; 73const char *objname;
@@ -228,7 +230,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
228 } 230 }
229 } 231 }
230 232
231 if (insn->type == INSN_JUMP_DYNAMIC) 233 if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
232 /* sibling call */ 234 /* sibling call */
233 return 0; 235 return 0;
234 } 236 }
@@ -248,6 +250,7 @@ static int dead_end_function(struct objtool_file *file, struct symbol *func)
248static int decode_instructions(struct objtool_file *file) 250static int decode_instructions(struct objtool_file *file)
249{ 251{
250 struct section *sec; 252 struct section *sec;
253 struct symbol *func;
251 unsigned long offset; 254 unsigned long offset;
252 struct instruction *insn; 255 struct instruction *insn;
253 int ret; 256 int ret;
@@ -281,6 +284,21 @@ static int decode_instructions(struct objtool_file *file)
281 hash_add(file->insn_hash, &insn->hash, insn->offset); 284 hash_add(file->insn_hash, &insn->hash, insn->offset);
282 list_add_tail(&insn->list, &file->insn_list); 285 list_add_tail(&insn->list, &file->insn_list);
283 } 286 }
287
288 list_for_each_entry(func, &sec->symbol_list, list) {
289 if (func->type != STT_FUNC)
290 continue;
291
292 if (!find_insn(file, sec, func->offset)) {
293 WARN("%s(): can't find starting instruction",
294 func->name);
295 return -1;
296 }
297
298 func_for_each_insn(file, func, insn)
299 if (!insn->func)
300 insn->func = func;
301 }
284 } 302 }
285 303
286 return 0; 304 return 0;
@@ -664,13 +682,40 @@ static int add_func_switch_tables(struct objtool_file *file,
664 text_rela->addend); 682 text_rela->addend);
665 683
666 /* 684 /*
667 * TODO: Document where this is needed, or get rid of it.
668 *
669 * rare case: jmpq *[addr](%rip) 685 * rare case: jmpq *[addr](%rip)
686 *
687 * This check is for a rare gcc quirk, currently only seen in
688 * three driver functions in the kernel, only with certain
689 * obscure non-distro configs.
690 *
691 * As part of an optimization, gcc makes a copy of an existing
692 * switch jump table, modifies it, and then hard-codes the jump
693 * (albeit with an indirect jump) to use a single entry in the
694 * table. The rest of the jump table and some of its jump
695 * targets remain as dead code.
696 *
697 * In such a case we can just crudely ignore all unreachable
698 * instruction warnings for the entire object file. Ideally we
699 * would just ignore them for the function, but that would
700 * require redesigning the code quite a bit. And honestly
701 * that's just not worth doing: unreachable instruction
702 * warnings are of questionable value anyway, and this is such
703 * a rare issue.
704 *
705 * kbuild reports:
706 * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com
707 * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com
708 * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com
709 *
710 * gcc bug:
711 * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604
670 */ 712 */
671 if (!rodata_rela) 713 if (!rodata_rela) {
672 rodata_rela = find_rela_by_dest(file->rodata, 714 rodata_rela = find_rela_by_dest(file->rodata,
673 text_rela->addend + 4); 715 text_rela->addend + 4);
716 if (rodata_rela)
717 file->ignore_unreachables = true;
718 }
674 719
675 if (!rodata_rela) 720 if (!rodata_rela)
676 continue; 721 continue;
@@ -732,9 +777,6 @@ static int decode_sections(struct objtool_file *file)
732{ 777{
733 int ret; 778 int ret;
734 779
735 file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard");
736 file->rodata = find_section_by_name(file->elf, ".rodata");
737
738 ret = decode_instructions(file); 780 ret = decode_instructions(file);
739 if (ret) 781 if (ret)
740 return ret; 782 return ret;
@@ -799,6 +841,7 @@ static int validate_branch(struct objtool_file *file,
799 struct alternative *alt; 841 struct alternative *alt;
800 struct instruction *insn; 842 struct instruction *insn;
801 struct section *sec; 843 struct section *sec;
844 struct symbol *func = NULL;
802 unsigned char state; 845 unsigned char state;
803 int ret; 846 int ret;
804 847
@@ -813,6 +856,16 @@ static int validate_branch(struct objtool_file *file,
813 } 856 }
814 857
815 while (1) { 858 while (1) {
859 if (file->c_file && insn->func) {
860 if (func && func != insn->func) {
861 WARN("%s() falls through to next function %s()",
862 func->name, insn->func->name);
863 return 1;
864 }
865
866 func = insn->func;
867 }
868
816 if (insn->visited) { 869 if (insn->visited) {
817 if (frame_state(insn->state) != frame_state(state)) { 870 if (frame_state(insn->state) != frame_state(state)) {
818 WARN_FUNC("frame pointer state mismatch", 871 WARN_FUNC("frame pointer state mismatch",
@@ -823,13 +876,6 @@ static int validate_branch(struct objtool_file *file,
823 return 0; 876 return 0;
824 } 877 }
825 878
826 /*
827 * Catch a rare case where a noreturn function falls through to
828 * the next function.
829 */
830 if (is_fentry_call(insn) && (state & STATE_FENTRY))
831 return 0;
832
833 insn->visited = true; 879 insn->visited = true;
834 insn->state = state; 880 insn->state = state;
835 881
@@ -1035,12 +1081,8 @@ static int validate_functions(struct objtool_file *file)
1035 continue; 1081 continue;
1036 1082
1037 insn = find_insn(file, sec, func->offset); 1083 insn = find_insn(file, sec, func->offset);
1038 if (!insn) { 1084 if (!insn)
1039 WARN("%s(): can't find starting instruction",
1040 func->name);
1041 warnings++;
1042 continue; 1085 continue;
1043 }
1044 1086
1045 ret = validate_branch(file, insn, 0); 1087 ret = validate_branch(file, insn, 0);
1046 warnings += ret; 1088 warnings += ret;
@@ -1056,13 +1098,14 @@ static int validate_functions(struct objtool_file *file)
1056 if (insn->visited) 1098 if (insn->visited)
1057 continue; 1099 continue;
1058 1100
1059 if (!ignore_unreachable_insn(func, insn) &&
1060 !warnings) {
1061 WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
1062 warnings++;
1063 }
1064
1065 insn->visited = true; 1101 insn->visited = true;
1102
1103 if (file->ignore_unreachables || warnings ||
1104 ignore_unreachable_insn(func, insn))
1105 continue;
1106
1107 WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
1108 warnings++;
1066 } 1109 }
1067 } 1110 }
1068 } 1111 }
@@ -1133,6 +1176,10 @@ int cmd_check(int argc, const char **argv)
1133 1176
1134 INIT_LIST_HEAD(&file.insn_list); 1177 INIT_LIST_HEAD(&file.insn_list);
1135 hash_init(file.insn_hash); 1178 hash_init(file.insn_hash);
1179 file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
1180 file.rodata = find_section_by_name(file.elf, ".rodata");
1181 file.ignore_unreachables = false;
1182 file.c_file = find_section_by_name(file.elf, ".comment");
1136 1183
1137 ret = decode_sections(&file); 1184 ret = decode_sections(&file);
1138 if (ret < 0) 1185 if (ret < 0)
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 407f11b97c8d..617578440989 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1130,7 +1130,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1130 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n", 1130 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
1131 ret); 1131 ret);
1132 1132
1133 if (pt->synth_opts.callchain) 1133 if (pt->synth_opts.last_branch)
1134 intel_pt_reset_last_branch_rb(ptq); 1134 intel_pt_reset_last_branch_rb(ptq);
1135 1135
1136 return ret; 1136 return ret;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 20a257a12ea5..acbf7ff2ee6e 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -66,6 +66,8 @@ unsigned int do_slm_cstates;
66unsigned int use_c1_residency_msr; 66unsigned int use_c1_residency_msr;
67unsigned int has_aperf; 67unsigned int has_aperf;
68unsigned int has_epb; 68unsigned int has_epb;
69unsigned int do_irtl_snb;
70unsigned int do_irtl_hsw;
69unsigned int units = 1000000; /* MHz etc */ 71unsigned int units = 1000000; /* MHz etc */
70unsigned int genuine_intel; 72unsigned int genuine_intel;
71unsigned int has_invariant_tsc; 73unsigned int has_invariant_tsc;
@@ -187,7 +189,7 @@ struct pkg_data {
187 unsigned long long pkg_any_core_c0; 189 unsigned long long pkg_any_core_c0;
188 unsigned long long pkg_any_gfxe_c0; 190 unsigned long long pkg_any_gfxe_c0;
189 unsigned long long pkg_both_core_gfxe_c0; 191 unsigned long long pkg_both_core_gfxe_c0;
190 unsigned long long gfx_rc6_ms; 192 long long gfx_rc6_ms;
191 unsigned int gfx_mhz; 193 unsigned int gfx_mhz;
192 unsigned int package_id; 194 unsigned int package_id;
193 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ 195 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
@@ -621,8 +623,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
621 outp += sprintf(outp, "%8d", p->pkg_temp_c); 623 outp += sprintf(outp, "%8d", p->pkg_temp_c);
622 624
623 /* GFXrc6 */ 625 /* GFXrc6 */
624 if (do_gfx_rc6_ms) 626 if (do_gfx_rc6_ms) {
625 outp += sprintf(outp, "%8.2f", 100.0 * p->gfx_rc6_ms / 1000.0 / interval_float); 627 if (p->gfx_rc6_ms == -1) { /* detect counter reset */
628 outp += sprintf(outp, " ***.**");
629 } else {
630 outp += sprintf(outp, "%8.2f",
631 p->gfx_rc6_ms / 10.0 / interval_float);
632 }
633 }
626 634
627 /* GFXMHz */ 635 /* GFXMHz */
628 if (do_gfx_mhz) 636 if (do_gfx_mhz)
@@ -766,7 +774,12 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
766 old->pc10 = new->pc10 - old->pc10; 774 old->pc10 = new->pc10 - old->pc10;
767 old->pkg_temp_c = new->pkg_temp_c; 775 old->pkg_temp_c = new->pkg_temp_c;
768 776
769 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; 777 /* flag an error when rc6 counter resets/wraps */
778 if (old->gfx_rc6_ms > new->gfx_rc6_ms)
779 old->gfx_rc6_ms = -1;
780 else
781 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
782
770 old->gfx_mhz = new->gfx_mhz; 783 old->gfx_mhz = new->gfx_mhz;
771 784
772 DELTA_WRAP32(new->energy_pkg, old->energy_pkg); 785 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
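Making gfx_rc6_ms signed gives delta_package() an in-band marker: a monotonic counter can never yield a negative delta, so -1 unambiguously means "counter reset", which format_counters() renders as ***.** instead of a bogus percentage. A standalone sketch of the wrap-safe delta and its display:

    #include <stdio.h>

    static long long rc6_delta(long long old_ms, long long new_ms)
    {
            if (old_ms > new_ms)
                    return -1;           /* counter reset/wrapped */
            return new_ms - old_ms;
    }

    int main(void)
    {
            double interval_sec = 1.0;
            long long d = rc6_delta(5000, 1200); /* GPU reset the counter */

            if (d == -1)
                    printf("  ***.**\n");
            else    /* ms / 10.0 / sec == percent residency */
                    printf("%8.2f\n", d / 10.0 / interval_sec);
            return 0;
    }
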
@@ -1296,6 +1309,7 @@ int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S,
1296int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1309int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1297int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1310int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1298int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1311int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1312int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1299 1313
1300 1314
1301static void 1315static void
@@ -1579,6 +1593,47 @@ dump_config_tdp(void)
1579 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 1593 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
1580 fprintf(outf, ")\n"); 1594 fprintf(outf, ")\n");
1581} 1595}
1596
1597unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
1598
1599void print_irtl(void)
1600{
1601 unsigned long long msr;
1602
1603 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
1604 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
1605 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1606 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1607
1608 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
1609 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
1610 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1611 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1612
1613 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
1614 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
1615 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1616 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1617
1618 if (!do_irtl_hsw)
1619 return;
1620
1621 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
1622 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
1623 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1624 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1625
1626 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
1627 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
1628 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1629 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1630
1631 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
1632 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
1633 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1634 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1635
1636}
1582void free_fd_percpu(void) 1637void free_fd_percpu(void)
1583{ 1638{
1584 int i; 1639 int i;
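Each MSR_PKGCn_IRTL register decoded above packs a 10-bit interval in bits 9:0, a time-unit selector that indexes irtl_time_units[] in the bits above it, and a valid flag in bit 15. A worked standalone decode of an example value, 0x8842: valid bit set, unit index 2 (1024 ns), interval 0x42 = 66, giving 67584 ns:

    #include <stdio.h>

    static unsigned int irtl_time_units[] = {
            1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

    int main(void)
    {
            unsigned long long msr = 0x8842;        /* example value */

            unsigned int interval = msr & 0x3FF;        /* bits 9:0    */
            unsigned int unit = (msr >> 10) & 0x3;      /* unit select */
            int valid = !!(msr & (1 << 15));            /* bit 15      */

            printf("%svalid, %u ns\n", valid ? "" : "NOT",
                   interval * irtl_time_units[unit]);   /* 66 * 1024 */
            return 0;
    }
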
@@ -2144,6 +2199,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 	case 0x56:	/* BDX-DE */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x55:	/* SKX */
 		pkg_cstate_limits = hsw_pkg_cstate_limits;
 		break;
 	case 0x37:	/* BYT */
@@ -2156,6 +2214,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 	case 0x57:	/* PHI */
 		pkg_cstate_limits = phi_pkg_cstate_limits;
 		break;
+	case 0x5C:	/* BXT */
+		pkg_cstate_limits = bxt_pkg_cstate_limits;
+		break;
 	default:
 		return 0;
 	}
@@ -2248,6 +2309,9 @@ int has_config_tdp(unsigned int family, unsigned int model)
 	case 0x56:	/* BDX-DE */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x55:	/* SKX */
 
 	case 0x57:	/* Knights Landing */
 		return 1;
@@ -2585,13 +2649,19 @@ void rapl_probe(unsigned int family, unsigned int model)
 	case 0x47:	/* BDW */
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
 		break;
+	case 0x5C:	/* BXT */
+		do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
+		break;
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
 		break;
 	case 0x3F:	/* HSX */
 	case 0x4F:	/* BDX */
 	case 0x56:	/* BDX-DE */
+	case 0x55:	/* SKX */
 	case 0x57:	/* KNL */
 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
 		break;
@@ -2871,6 +2941,10 @@ int has_snb_msrs(unsigned int family, unsigned int model)
 	case 0x56:	/* BDX-DE */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x55:	/* SKX */
+	case 0x5C:	/* BXT */
 		return 1;
 	}
 	return 0;
@@ -2879,9 +2953,14 @@ int has_snb_msrs(unsigned int family, unsigned int model)
 /*
  * HSW adds support for additional MSRs:
  *
  * MSR_PKG_C8_RESIDENCY		0x00000630
  * MSR_PKG_C9_RESIDENCY		0x00000631
  * MSR_PKG_C10_RESIDENCY	0x00000632
+ *
+ * MSR_PKGC8_IRTL		0x00000633
+ * MSR_PKGC9_IRTL		0x00000634
+ * MSR_PKGC10_IRTL		0x00000635
+ *
  */
 int has_hsw_msrs(unsigned int family, unsigned int model)
 {
@@ -2893,6 +2972,9 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
 	case 0x3D:	/* BDW */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x5C:	/* BXT */
 		return 1;
 	}
 	return 0;
@@ -2914,6 +2996,8 @@ int has_skl_msrs(unsigned int family, unsigned int model)
 	switch (model) {
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
 		return 1;
 	}
 	return 0;
@@ -3187,7 +3271,7 @@ void process_cpuid()
 	if (debug)
 		decode_misc_enable_msr();
 
-	if (max_level >= 0x7) {
+	if (max_level >= 0x7 && debug) {
 		int has_sgx;
 
 		ecx = 0;
@@ -3221,7 +3305,15 @@ void process_cpuid()
 	switch(model) {
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
-		crystal_hz = 24000000;	/* 24 MHz */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+		crystal_hz = 24000000;	/* 24.0 MHz */
+		break;
+	case 0x55:	/* SKX */
+		crystal_hz = 25000000;	/* 25.0 MHz */
+		break;
+	case 0x5C:	/* BXT */
+		crystal_hz = 19200000;	/* 19.2 MHz */
 		break;
 	default:
 		crystal_hz = 0;
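These crystal_hz values feed turbostat's TSC-frequency computation, which multiplies the crystal rate by the TSC/crystal ratio reported in CPUID.15H (ebx/eax). A minimal sketch of that arithmetic; the ratio values 2 and 250 below are made up for illustration, not from any particular part:

#include <stdio.h>

int main(void)
{
	unsigned int eax_crystal = 2, ebx_tsc = 250;	/* hypothetical CPUID.15H ratio */
	unsigned int crystal_hz = 24000000;		/* 24.0 MHz, as for SKL/KBL above */
	unsigned long long tsc_hz;

	/* TSC frequency = crystal frequency * (ebx / eax) */
	tsc_hz = (unsigned long long)crystal_hz * ebx_tsc / eax_crystal;
	printf("TSC: %.6f GHz\n", tsc_hz / 1e9);
	return 0;
}

With the 24 MHz crystal and a 250/2 ratio this yields 3.000000 GHz.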
@@ -3254,11 +3346,13 @@ void process_cpuid()
 
 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
 	do_snb_cstates = has_snb_msrs(family, model);
+	do_irtl_snb = has_snb_msrs(family, model);
 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
 	do_pc3 = (pkg_cstate_limit >= PCL__3);
 	do_pc6 = (pkg_cstate_limit >= PCL__6);
 	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
 	do_c8_c9_c10 = has_hsw_msrs(family, model);
+	do_irtl_hsw = has_hsw_msrs(family, model);
 	do_skl_residency = has_skl_msrs(family, model);
 	do_slm_cstates = is_slm(family, model);
 	do_knl_cstates = is_knl(family, model);
@@ -3564,6 +3658,9 @@ void turbostat_init()
 
 	if (debug)
 		for_all_cpus(print_thermal, ODD_COUNTERS);
+
+	if (debug && do_irtl_snb)
+		print_irtl();
 }
 
 int fork_it(char **argv)
@@ -3629,7 +3726,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-	fprintf(outf, "turbostat version 4.11 27 Feb 2016"
+	fprintf(outf, "turbostat version 4.12 5 Apr 2016"
 		" - Len Brown <lenb@kernel.org>\n");
 }
3635 3732
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 69bb3fc38fb2..0840684deb7d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,3 +3,4 @@ psock_fanout
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c658792d47b4..0e5340742620 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644
index 000000000000..90958aaaafb9
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time. If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet. However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s). This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection").
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+	struct sockaddr_storage addr;
+	struct sockaddr_in *addr4;
+	struct sockaddr_in6 *addr6;
+	int opt, i;
+
+	switch (family) {
+	case AF_INET:
+		addr4 = (struct sockaddr_in *)&addr;
+		addr4->sin_family = AF_INET;
+		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+		addr4->sin_port = htons(PORT);
+		break;
+	case AF_INET6:
+		addr6 = (struct sockaddr_in6 *)&addr;
+		addr6->sin6_family = AF_INET6;
+		addr6->sin6_addr = in6addr_any;
+		addr6->sin6_port = htons(PORT);
+		break;
+	default:
+		error(1, 0, "Unsupported family %d", family);
+	}
+
+	for (i = 0; i < count; ++i) {
+		rcv_fds[i] = socket(family, proto, 0);
+		if (rcv_fds[i] < 0)
+			error(1, errno, "failed to create receive socket");
+
+		opt = 1;
+		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+			       sizeof(opt)))
+			error(1, errno, "failed to set SO_REUSEPORT");
+
+		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+			error(1, errno, "failed to bind receive socket");
+
+		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+			error(1, errno, "failed to listen on receive port");
+	}
+}
+
+static void send_from_v4(int proto)
+{
+	struct sockaddr_in saddr, daddr;
+	int fd;
+
+	saddr.sin_family = AF_INET;
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = 0;
+
+	daddr.sin_family = AF_INET;
+	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	daddr.sin_port = htons(PORT);
+
+	fd = socket(AF_INET, proto, 0);
+	if (fd < 0)
+		error(1, errno, "failed to create send socket");
+
+	if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+		error(1, errno, "failed to bind send socket");
+
+	if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+		error(1, errno, "failed to connect send socket");
+
+	if (send(fd, "a", 1, 0) < 0)
+		error(1, errno, "failed to send message");
+
+	close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+	struct epoll_event ev;
+	int i, fd;
+	char buf[8];
+
+	i = epoll_wait(epfd, &ev, 1, -1);
+	if (i < 0)
+		error(1, errno, "epoll_wait failed");
+
+	if (proto == SOCK_STREAM) {
+		fd = accept(ev.data.fd, NULL, NULL);
+		if (fd < 0)
+			error(1, errno, "failed to accept");
+		i = recv(fd, buf, sizeof(buf), 0);
+		close(fd);
+	} else {
+		i = recv(ev.data.fd, buf, sizeof(buf), 0);
+	}
+
+	if (i < 0)
+		error(1, errno, "failed to recv");
+
+	return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+	struct epoll_event ev;
+	int epfd, i, test_fd;
+	uint16_t test_family;
+	socklen_t len = sizeof(test_family);	/* must be initialised for getsockopt() */
+
+	epfd = epoll_create(1);
+	if (epfd < 0)
+		error(1, errno, "failed to create epoll");
+
+	ev.events = EPOLLIN;
+	for (i = 0; i < count; ++i) {
+		ev.data.fd = rcv_fds[i];
+		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+			error(1, errno, "failed to register sock epoll");
+	}
+
+	send_from_v4(proto);
+
+	test_fd = receive_once(epfd, proto);
+	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+		error(1, errno, "failed to read socket domain");
+	if (test_family != AF_INET)
+		error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+		      test_family);
+
+	close(epfd);
+}
+
+int main(void)
+{
+	int rcv_fds[32], i;
+
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	/* NOTE: UDP socket lookups traverse a different code path when there
+	 * are > 10 sockets in a group.
+	 */
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "SUCCESS\n");
+	return 0;
+}
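Given the Makefile pattern rule above, the new test can be built and run on its own from the selftests tree, for example:

	make -C tools/testing/selftests/net reuseport_dualstack
	./tools/testing/selftests/net/reuseport_dualstack

It binds all receive sockets to port 8888 on the wildcard address, so the run assumes that port is free on the host.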
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index b9453b838162..150829dd7998 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1497,15 +1497,15 @@ TEST_F(TRACE_syscall, syscall_dropped)
 #define SECCOMP_SET_MODE_FILTER 1
 #endif
 
-#ifndef SECCOMP_FLAG_FILTER_TSYNC
-#define SECCOMP_FLAG_FILTER_TSYNC 1
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
 #endif
 
 #ifndef seccomp
-int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
+int seccomp(unsigned int op, unsigned int flags, void *args)
 {
 	errno = 0;
-	return syscall(__NR_seccomp, op, flags, filter);
+	return syscall(__NR_seccomp, op, flags, args);
 }
 #endif
 
@@ -1613,7 +1613,7 @@ TEST(TSYNC_first)
 		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &prog);
 	ASSERT_NE(ENOSYS, errno) {
 		TH_LOG("Kernel does not support seccomp syscall!");
@@ -1831,7 +1831,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
 		self->sibling_count++;
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Could install filter on all threads!");
@@ -1892,7 +1892,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
 		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_NE(ENOSYS, errno) {
 		TH_LOG("Kernel does not support seccomp syscall!");
@@ -1940,7 +1940,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
 		self->sibling_count++;
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(self->sibling[0].system_tid, ret) {
 		TH_LOG("Did not fail on diverged sibling.");
@@ -1992,7 +1992,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(ret, self->sibling[0].system_tid) {
 		TH_LOG("Did not fail on diverged sibling.");
@@ -2021,7 +2021,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 	/* Switch to the remaining sibling */
 	sib = !sib;
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Expected the remaining sibling to sync");
@@ -2044,7 +2044,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 	while (!kill(self->sibling[sib].system_tid, 0))
 		sleep(0.1);
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret);	/* just us chickens */
 }
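For reference, the corrected names match the UAPI spelling in <linux/seccomp.h>. A minimal, self-contained sketch of installing a synchronizing filter with the renamed flag (an allow-everything filter, purely illustrative; assumes kernel headers new enough to define SECCOMP_FILTER_FLAG_TSYNC):

#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sock_filter insn = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
	struct sock_fprog prog = { .len = 1, .filter = &insn };

	/* Required before installing a filter without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("prctl");

	/* TSYNC applies the filter to every thread in the process. */
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_TSYNC, &prog))
		perror("seccomp");
	return 0;
}

On success seccomp() returns 0 and the filter is attached to all threads, which is exactly what the TSYNC tests above exercise.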
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a9ad4fe3f68f..9aaa35dd9144 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -91,6 +91,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
 	vcpu->arch.timer_cpu.armed = false;
 
+	WARN_ON(!kvm_timer_should_fire(vcpu));
+
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
 	 * the timer has expired when entering the guest.
@@ -98,10 +100,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	kvm_vcpu_kick(vcpu);
 }
 
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+	cycle_t cval, now;
+
+	cval = vcpu->arch.timer_cpu.cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	if (now < cval) {
+		u64 ns;
+
+		ns = cyclecounter_cyc2ns(timecounter->cc,
+					 cval - now,
+					 timecounter->mask,
+					 &timecounter->frac);
+		return ns;
+	}
+
+	return 0;
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_cpu *timer;
+	struct kvm_vcpu *vcpu;
+	u64 ns;
+
 	timer = container_of(hrt, struct arch_timer_cpu, timer);
+	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+	/*
+	 * Check that the timer has really expired from the guest's
+	 * PoV (NTP on the host may have forced it to expire
+	 * early). If we should have slept longer, restart it.
+	 */
+	ns = kvm_timer_compute_delta(vcpu);
+	if (unlikely(ns)) {
+		hrtimer_forward_now(hrt, ns_to_ktime(ns));
+		return HRTIMER_RESTART;
+	}
+
 	queue_work(wqueue, &timer->expired);
 	return HRTIMER_NORESTART;
 }
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	u64 ns;
-	cycle_t cval, now;
 
 	BUG_ON(timer_is_armed(timer));
 
@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 		return;
 
 	/* The timer has not yet expired, schedule a background timer */
-	cval = timer->cntv_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-	ns = cyclecounter_cyc2ns(timecounter->cc,
-				 cval - now,
-				 timecounter->mask,
-				 &timecounter->frac);
-	timer_arm(timer, ns);
+	timer_arm(timer, kvm_timer_compute_delta(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
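The expiry path above relies on standard hrtimer semantics: an expiry callback may push its own deadline forward with hrtimer_forward_now() and return HRTIMER_RESTART to stay armed. A condensed sketch of that shape (remaining_ns, wq and work are hypothetical stand-ins, not kernel symbols):

static enum hrtimer_restart expire(struct hrtimer *hrt)
{
	u64 ns = remaining_ns();	/* hypothetical: time still to sleep */

	if (ns) {
		/* Fired early (e.g. NTP sped up the host clock): re-arm. */
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}
	queue_work(wq, &work);		/* hypothetical work hand-off */
	return HRTIMER_NORESTART;
}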
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index b5754c6c5508..575c7aa30d7e 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
 		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 		reg &= kvm_pmu_valid_counter_mask(vcpu);
+	}
 
 	return reg;
 }