author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-04-06 22:01:47 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-04-06 22:01:47 -0400
commit		053f78d359953be40043972c98e16b3f1cd9fc27 (patch)
tree		80185e1554da6362dd3ca411a3e724864c59dc05
parent		1bb025f6db789ea0bb674eaed15ee843ef0b2e88 (diff)
parent		3d085c7413d32bb6895e5b9b5ee6a7d2180159c5 (diff)
Merge tag 'lkdtm-4.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into char-misc-linus
Kees briefly writes: fixes some possible memory allocation leaks on error paths
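For context, the bug class these lkdtm fixes address is the classic error-path leak: an early return after a failed follow-up step leaves an earlier allocation unfreed. Below is a minimal, hypothetical sketch of the pattern and its fix — function and variable names are invented, not taken from drivers/misc/lkdtm.c:

#include <linux/errno.h>
#include <linux/slab.h>

static int example_subinit(void)
{
	return 0;	/* stand-in for some follow-up initialization step */
}

static int example_setup(size_t len)
{
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (example_subinit() < 0) {
		/* the fix: free the earlier allocation on this error
		 * path instead of returning with buf still live */
		kfree(buf);
		return -EINVAL;
	}

	/* ... use buf ... */
	kfree(buf);
	return 0;
}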
-rw-r--r--Documentation/devicetree/bindings/clock/qca,ath79-pll.txt6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt12
-rw-r--r--Documentation/filesystems/cramfs.txt2
-rw-r--r--Documentation/filesystems/tmpfs.txt2
-rw-r--r--Documentation/filesystems/vfs.txt4
-rw-r--r--MAINTAINERS12
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arm/kvm/arm.c52
-rw-r--r--arch/arm/mm/flush.c4
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/include/asm/sysreg.h3
-rw-r--r--arch/arm64/kvm/hyp/s2-setup.c6
-rw-r--r--arch/mips/alchemy/common/dbdma.c4
-rw-r--r--arch/mips/alchemy/devboards/db1000.c18
-rw-r--r--arch/mips/alchemy/devboards/db1550.c4
-rw-r--r--arch/mips/ath79/clock.c44
-rw-r--r--arch/mips/bcm47xx/sprom.c4
-rw-r--r--arch/mips/boot/compressed/Makefile7
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c14
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c2
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/configs/ci20_defconfig14
-rw-r--r--arch/mips/dec/int-handler.S2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/include/asm/cpu-info.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-generic/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h2
-rw-r--r--arch/mips/include/asm/mips-cm.h2
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h2
-rw-r--r--arch/mips/include/asm/pci/bridge.h18
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h2
-rw-r--r--arch/mips/include/asm/sgiarcs.h4
-rw-r--r--arch/mips/include/asm/sn/ioc3.h2
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h2
-rw-r--r--arch/mips/include/asm/uaccess.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c12
-rw-r--r--arch/mips/kernel/module-rela.c19
-rw-r--r--arch/mips/kernel/module.c19
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/mips/kernel/pm-cps.c2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smp.c12
-rw-r--r--arch/mips/kernel/traps.c46
-rw-r--r--arch/mips/kernel/unaligned.c51
-rw-r--r--arch/mips/kvm/tlb.c2
-rw-r--r--arch/mips/kvm/trap_emul.c2
-rw-r--r--arch/mips/math-emu/ieee754dp.c6
-rw-r--r--arch/mips/math-emu/ieee754sp.c6
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c5
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/parisc/kernel/cache.c2
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c4
-rw-r--r--arch/s390/hypfs/inode.c4
-rw-r--r--arch/s390/mm/gmap.c4
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/kernel/setup.c37
-rw-r--r--arch/x86/kvm/hyperv.c5
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/mmu.c12
-rw-r--r--arch/x86/kvm/x86.c20
-rw-r--r--arch/x86/xen/apic.c12
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--block/bio.c12
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-settings.c12
-rw-r--r--block/blk-sysfs.c8
-rw-r--r--block/cfq-iosched.c2
-rw-r--r--block/compat_ioctl.c4
-rw-r--r--block/ioctl.c4
-rw-r--r--block/partition-generic.c8
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c52
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c63
-rw-r--r--drivers/media/usb/au0828/au0828.h9
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/lkdtm.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/nvdimm/btt.c2
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c35
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c14
-rw-r--r--drivers/pinctrl/sh-pfc/core.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-omap2-mcspi.c62
-rw-r--r--drivers/spi/spi-rockchip.c16
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c16
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c10
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h10
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c30
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c44
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/xen/events/events_base.c28
-rw-r--r--fs/9p/vfs_addr.c18
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/9p/vfs_super.c2
-rw-r--r--fs/affs/file.c26
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/file.c4
-rw-r--r--fs/afs/mntpt.c6
-rw-r--r--fs/afs/super.c4
-rw-r--r--fs/afs/write.c26
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/btrfs/check-integrity.c64
-rw-r--r--fs/btrfs/compression.c84
-rw-r--r--fs/btrfs/disk-io.c14
-rw-r--r--fs/btrfs/extent-tree.c4
-rw-r--r--fs/btrfs/extent_io.c266
-rw-r--r--fs/btrfs/extent_io.h6
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c40
-rw-r--r--fs/btrfs/free-space-cache.c30
-rw-r--r--fs/btrfs/inode-map.c10
-rw-r--r--fs/btrfs/inode.c104
-rw-r--r--fs/btrfs/ioctl.c84
-rw-r--r--fs/btrfs/lzo.c32
-rw-r--r--fs/btrfs/raid56.c28
-rw-r--r--fs/btrfs/reada.c30
-rw-r--r--fs/btrfs/relocation.c16
-rw-r--r--fs/btrfs/scrub.c24
-rw-r--r--fs/btrfs/send.c16
-rw-r--r--fs/btrfs/struct-funcs.c4
-rw-r--r--fs/btrfs/tests/extent-io-tests.c44
-rw-r--r--fs/btrfs/tests/free-space-tests.c2
-rw-r--r--fs/btrfs/volumes.c14
-rw-r--r--fs/btrfs/zlib.c38
-rw-r--r--fs/buffer.c100
-rw-r--r--fs/cachefiles/rdwr.c38
-rw-r--r--fs/ceph/addr.c114
-rw-r--r--fs/ceph/caps.c2
-rw-r--r--fs/ceph/dir.c4
-rw-r--r--fs/ceph/file.c32
-rw-r--r--fs/ceph/inode.c6
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/super.c8
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/cifssmb.c16
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c96
-rw-r--r--fs/cifs/inode.c10
-rw-r--r--fs/configfs/mount.c4
-rw-r--r--fs/cramfs/README26
-rw-r--r--fs/cramfs/inode.c32
-rw-r--r--fs/crypto/crypto.c8
-rw-r--r--fs/dax.c34
-rw-r--r--fs/direct-io.c26
-rw-r--r--fs/dlm/lowcomms.c8
-rw-r--r--fs/ecryptfs/crypto.c22
-rw-r--r--fs/ecryptfs/inode.c8
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/main.c8
-rw-r--r--fs/ecryptfs/mmap.c44
-rw-r--r--fs/ecryptfs/read_write.c14
-rw-r--r--fs/efivarfs/super.c4
-rw-r--r--fs/exofs/dir.c30
-rw-r--r--fs/exofs/inode.c34
-rw-r--r--fs/exofs/namei.c4
-rw-r--r--fs/ext2/dir.c36
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext4/crypto.c8
-rw-r--r--fs/ext4/dir.c4
-rw-r--r--fs/ext4/ext4.h4
-rw-r--r--fs/ext4/file.c4
-rw-r--r--fs/ext4/inline.c18
-rw-r--r--fs/ext4/inode.c118
-rw-r--r--fs/ext4/mballoc.c40
-rw-r--r--fs/ext4/move_extent.c16
-rw-r--r--fs/ext4/page-io.c4
-rw-r--r--fs/ext4/readpage.c12
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/ext4/symlink.c4
-rw-r--r--fs/f2fs/data.c52
-rw-r--r--fs/f2fs/debug.c6
-rw-r--r--fs/f2fs/dir.c4
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/file.c74
-rw-r--r--fs/f2fs/inline.c10
-rw-r--r--fs/f2fs/namei.c16
-rw-r--r--fs/f2fs/node.c10
-rw-r--r--fs/f2fs/recovery.c2
-rw-r--r--fs/f2fs/segment.c16
-rw-r--r--fs/f2fs/super.c108
-rw-r--r--fs/freevxfs/vxfs_immed.c4
-rw-r--r--fs/freevxfs/vxfs_lookup.c12
-rw-r--r--fs/freevxfs/vxfs_subr.c2
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fscache/page.c10
-rw-r--r--fs/fuse/dev.c26
-rw-r--r--fs/fuse/file.c72
-rw-r--r--fs/fuse/inode.c16
-rw-r--r--fs/gfs2/aops.c44
-rw-r--r--fs/gfs2/bmap.c12
-rw-r--r--fs/gfs2/file.c16
-rw-r--r--fs/gfs2/meta_io.c4
-rw-r--r--fs/gfs2/quota.c14
-rw-r--r--fs/gfs2/rgrp.c5
-rw-r--r--fs/hfs/bnode.c12
-rw-r--r--fs/hfs/btree.c20
-rw-r--r--fs/hfs/inode.c8
-rw-r--r--fs/hfsplus/bitmap.c2
-rw-r--r--fs/hfsplus/bnode.c90
-rw-r--r--fs/hfsplus/btree.c22
-rw-r--r--fs/hfsplus/inode.c8
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hfsplus/xattr.c6
-rw-r--r--fs/hostfs/hostfs_kern.c18
-rw-r--r--fs/hugetlbfs/inode.c10
-rw-r--r--fs/isofs/compress.c36
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/jffs2/debug.c8
-rw-r--r--fs/jffs2/file.c23
-rw-r--r--fs/jffs2/fs.c8
-rw-r--r--fs/jffs2/gc.c8
-rw-r--r--fs/jffs2/nodelist.c8
-rw-r--r--fs/jffs2/write.c7
-rw-r--r--fs/jfs/jfs_metapage.c42
-rw-r--r--fs/jfs/jfs_metapage.h4
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/kernfs/mount.c4
-rw-r--r--fs/libfs.c24
-rw-r--r--fs/logfs/dev_bdev.c2
-rw-r--r--fs/logfs/dev_mtd.c10
-rw-r--r--fs/logfs/dir.c12
-rw-r--r--fs/logfs/file.c26
-rw-r--r--fs/logfs/readwrite.c20
-rw-r--r--fs/logfs/segment.c28
-rw-r--r--fs/logfs/super.c16
-rw-r--r--fs/minix/dir.c18
-rw-r--r--fs/minix/namei.c4
-rw-r--r--fs/mpage.c22
-rw-r--r--fs/ncpfs/dir.c10
-rw-r--r--fs/ncpfs/ncplib_kernel.h2
-rw-r--r--fs/nfs/blocklayout/blocklayout.c24
-rw-r--r--fs/nfs/blocklayout/blocklayout.h4
-rw-r--r--fs/nfs/client.c8
-rw-r--r--fs/nfs/dir.c4
-rw-r--r--fs/nfs/direct.c8
-rw-r--r--fs/nfs/file.c20
-rw-r--r--fs/nfs/internal.h6
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/objlayout/objio_osd.c2
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs.c6
-rw-r--r--fs/nfs/read.c16
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nilfs2/bmap.c2
-rw-r--r--fs/nilfs2/btnode.c10
-rw-r--r--fs/nilfs2/dir.c32
-rw-r--r--fs/nilfs2/gcinode.c2
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/nilfs2/mdt.c14
-rw-r--r--fs/nilfs2/namei.c4
-rw-r--r--fs/nilfs2/page.c18
-rw-r--r--fs/nilfs2/recovery.c4
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/ntfs/aops.c50
-rw-r--r--fs/ntfs/aops.h4
-rw-r--r--fs/ntfs/attrib.c28
-rw-r--r--fs/ntfs/bitmap.c10
-rw-r--r--fs/ntfs/compress.c77
-rw-r--r--fs/ntfs/dir.c56
-rw-r--r--fs/ntfs/file.c56
-rw-r--r--fs/ntfs/index.c14
-rw-r--r--fs/ntfs/inode.c12
-rw-r--r--fs/ntfs/lcnalloc.c6
-rw-r--r--fs/ntfs/logfile.c16
-rw-r--r--fs/ntfs/mft.c34
-rw-r--r--fs/ntfs/ntfs.h2
-rw-r--r--fs/ntfs/super.c72
-rw-r--r--fs/ocfs2/alloc.c28
-rw-r--r--fs/ocfs2/aops.c50
-rw-r--r--fs/ocfs2/cluster/heartbeat.c10
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/file.c14
-rw-r--r--fs/ocfs2/mmap.c6
-rw-r--r--fs/ocfs2/ocfs2.h20
-rw-r--r--fs/ocfs2/quota_global.c11
-rw-r--r--fs/ocfs2/refcounttree.c24
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/orangefs/inode.c10
-rw-r--r--fs/orangefs/orangefs-bufmap.c4
-rw-r--r--fs/orangefs/orangefs-utils.c2
-rw-r--r--fs/pipe.c6
-rw-r--r--fs/proc/task_mmu.c2
-rw-r--r--fs/proc/vmcore.c4
-rw-r--r--fs/pstore/inode.c4
-rw-r--r--fs/qnx6/dir.c16
-rw-r--r--fs/qnx6/inode.c4
-rw-r--r--fs/qnx6/qnx6.h2
-rw-r--r--fs/quota/dquot.c13
-rw-r--r--fs/ramfs/inode.c4
-rw-r--r--fs/reiserfs/file.c4
-rw-r--r--fs/reiserfs/inode.c44
-rw-r--r--fs/reiserfs/ioctl.c4
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/tail_conversion.c4
-rw-r--r--fs/reiserfs/xattr.c18
-rw-r--r--fs/splice.c32
-rw-r--r--fs/squashfs/block.c4
-rw-r--r--fs/squashfs/cache.c18
-rw-r--r--fs/squashfs/decompressor.c2
-rw-r--r--fs/squashfs/file.c24
-rw-r--r--fs/squashfs/file_direct.c22
-rw-r--r--fs/squashfs/lz4_wrapper.c8
-rw-r--r--fs/squashfs/lzo_wrapper.c8
-rw-r--r--fs/squashfs/page_actor.c4
-rw-r--r--fs/squashfs/page_actor.h2
-rw-r--r--fs/squashfs/super.c2
-rw-r--r--fs/squashfs/symlink.c6
-rw-r--r--fs/squashfs/xz_wrapper.c4
-rw-r--r--fs/squashfs/zlib_wrapper.c4
-rw-r--r--fs/sync.c4
-rw-r--r--fs/sysv/dir.c18
-rw-r--r--fs/sysv/namei.c4
-rw-r--r--fs/ubifs/file.c54
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/udf/file.c6
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/ufs/balloc.c6
-rw-r--r--fs/ufs/dir.c32
-rw-r--r--fs/ufs/inode.c4
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--fs/ufs/util.c4
-rw-r--r--fs/ufs/util.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c4
-rw-r--r--fs/xfs/xfs_aops.c22
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_mount.h4
-rw-r--r--fs/xfs/xfs_pnfs.c4
-rw-r--r--fs/xfs/xfs_super.c8
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/ceph/libceph.h4
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/nfs_page.h6
-rw-r--r--include/linux/nilfs2_fs.h4
-rw-r--r--include/linux/pagemap.h32
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/swap.h4
-rw-r--r--ipc/mqueue.c4
-rw-r--r--kernel/events/uprobes.c8
-rw-r--r--mm/fadvise.c8
-rw-r--r--mm/filemap.c126
-rw-r--r--mm/gup.c2
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/madvise.c6
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c55
-rw-r--r--mm/mincore.c8
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page-writeback.c12
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/readahead.c20
-rw-r--r--mm/rmap.c2
-rw-r--r--mm/shmem.c130
-rw-r--r--mm/swap.c14
-rw-r--r--mm/swap_state.c12
-rw-r--r--mm/swapfile.c12
-rw-r--r--mm/truncate.c40
-rw-r--r--mm/userfaultfd.c4
-rw-r--r--mm/zswap.c4
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/pagelist.c4
-rw-r--r--net/ceph/pagevec.c30
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c4
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/socklib.c6
-rw-r--r--net/sunrpc/xdr.c50
-rw-r--r--sound/usb/Kconfig4
-rw-r--r--sound/usb/Makefile2
-rw-r--r--sound/usb/card.c14
-rw-r--r--sound/usb/card.h3
-rw-r--r--sound/usb/media.c318
-rw-r--r--sound/usb/media.h72
-rw-r--r--sound/usb/mixer.h3
-rw-r--r--sound/usb/pcm.c28
-rw-r--r--sound/usb/quirks-table.h1
-rw-r--r--sound/usb/stream.c2
-rw-r--r--sound/usb/usbaudio.h6
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c22
508 files changed, 3542 insertions, 3836 deletions
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
index e0fc2c11dd00..241fb0545b9e 100644
--- a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
+++ b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
 The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following
+- compatible: has to be "qca,<soctype>-pll" and one of the following
   fallbacks:
   - "qca,ar7100-pll"
   - "qca,ar7240-pll"
@@ -21,8 +21,8 @@ Optional properties:
 
 Example:
 
-	memory-controller@18050000 {
-		compatible = "qca,ar9132-ppl", "qca,ar9130-pll";
+	pll-controller@18050000 {
+		compatible = "qca,ar9132-pll", "qca,ar9130-pll";
 		reg = <0x18050000 0x20>;
 
 		clock-names = "ref";
diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
index 08a4a32c8eb0..0326154c7925 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
@@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
 mfio81 dreq0, mips_trace_data, eth_debug
 mfio82 dreq1, mips_trace_data, eth_debug
 mfio83 mips_pll_lock, mips_trace_data, usb_debug
-mfio84 sys_pll_lock, mips_trace_data, usb_debug
-mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug
-mfio86 bt_pll_lock, mips_trace_data, sdhost_debug
-mfio87 rpu_v_pll_lock, dreq2, socif_debug
-mfio88 rpu_l_pll_lock, dreq3, socif_debug
-mfio89 audio_pll_lock, dreq4, dreq5
+mfio84 audio_pll_lock, mips_trace_data, usb_debug
+mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
+mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
+mfio87 sys_pll_lock, dreq2, socif_debug
+mfio88 wifi_pll_lock, dreq3, socif_debug
+mfio89 bt_pll_lock, dreq4, dreq5
 tck
 trstn
 tdi
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0ab957..4006298f6707 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
 which the timestamp reverts to 1970, i.e. moves backwards in time.
 
 Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
 == 4096. At least the latter of these is a bug, but it hasn't been
 decided what the best fix is. For the moment if you have larger pages
 you can just change the #define in mkcramfs.c, so long as you don't
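The #define referred to is the block-size constant in the mkcramfs sources; in typical versions it looks like the line below (illustrative only — the exact macro name can differ between mkcramfs versions, so check your copy):

#define PAGE_CACHE_SIZE (4096)	/* must match the kernel's 4096-byte PAGE_SIZE */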
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e1505f17..d9c11d25bf02 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
            default is half of your physical RAM without swap. If you
            oversize your tmpfs instances the machine will deadlock
            since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
 nr_inodes: The maximum number of inodes for this instance. The default
            is half of the number of your physical RAM pages, or (on a
            machine with highmem) the number of lowmem RAM pages,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b02a7d598258..4164bd6397a2 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -708,9 +708,9 @@ struct address_space_operations {
 	from the address space. This generally corresponds to either a
 	truncation, punch hole or a complete invalidation of the address
 	space (in the latter case 'offset' will always be 0 and 'length'
-	will be PAGE_CACHE_SIZE). Any private data associated with the page
+	will be PAGE_SIZE). Any private data associated with the page
 	should be updated to reflect this truncation. If offset is 0 and
-	length is PAGE_CACHE_SIZE, then the private data should be released,
+	length is PAGE_SIZE, then the private data should be released,
 	because the page must be able to be completely discarded. This may
 	be done by calling the ->releasepage function, but in this case the
 	release MUST succeed.
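As a minimal, hypothetical sketch of that contract (names are invented and a real filesystem's ->invalidatepage does more; shown only to make the offset/length semantics concrete):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical per-page bookkeeping; a real filesystem has its own. */
struct example_private {
	unsigned int valid_from, valid_len;
};

static void example_trim_private(struct example_private *priv,
				 unsigned int offset, unsigned int length)
{
	/* shrink the tracked range; details are filesystem-specific */
}

static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	struct example_private *priv =
		(struct example_private *)page_private(page);

	if (offset == 0 && length == PAGE_SIZE) {
		/* whole page invalidated: release the private data so the
		 * page can be completely discarded */
		ClearPagePrivate(page);
		set_page_private(page, 0);
		kfree(priv);
	} else {
		/* truncation or hole punch within the page: only update
		 * the bookkeeping to reflect the invalidated range */
		example_trim_private(priv, offset, length);
	}
}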
diff --git a/MAINTAINERS b/MAINTAINERS
index 1c32f8a3d6c4..40eb1dbe2ae5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8712,6 +8712,8 @@ F: drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
+M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -12205,9 +12207,9 @@ S: Maintained
 F:	drivers/media/tuners/tuner-xc2028.*
 
 XEN HYPERVISOR INTERFACE
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:	Boris Ostrovsky <boris.ostrovsky@oracle.com>
 M:	David Vrabel <david.vrabel@citrix.com>
+M:	Juergen Gross <jgross@suse.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
 S:	Supported
@@ -12219,16 +12221,16 @@ F: include/xen/
 F:	include/uapi/xen/
 
 XEN HYPERVISOR ARM
-M:	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S:	Supported
+S:	Maintained
 F:	arch/arm/xen/
 F:	arch/arm/include/asm/xen/
 
 XEN HYPERVISOR ARM64
-M:	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S:	Supported
+S:	Maintained
 F:	arch/arm64/xen/
 F:	arch/arm64/include/asm/xen/
 
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d7709e3930a3..9e5eddbb856f 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
 
 	/* kernel reading from page with U-mapping */
 	phys_addr_t paddr = (unsigned long)page_address(page);
-	unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+	unsigned long vaddr = page->index << PAGE_SHIFT;
 
 	if (addr_not_cache_congruent(paddr, vaddr))
 		__flush_dcache_page(paddr, vaddr);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 6accd66d26f0..b5384311dec4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
 	kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reinit(void)
+{
+	if (is_kernel_in_hyp_mode()) {
+		/*
+		 * cpu_init_stage2() is safe to call even if the PM
+		 * event was cancelled before the CPU was reset.
+		 */
+		cpu_init_stage2(NULL);
+	} else {
+		if (__hyp_get_vectors() == hyp_default_vectors)
+			cpu_init_hyp_mode(NULL);
+	}
+}
+
 static int hyp_init_cpu_notify(struct notifier_block *self,
 			       unsigned long action, void *cpu)
 {
 	switch (action) {
 	case CPU_STARTING:
 	case CPU_STARTING_FROZEN:
-		if (__hyp_get_vectors() == hyp_default_vectors)
-			cpu_init_hyp_mode(NULL);
-		break;
+		cpu_hyp_reinit();
 	}
 
 	return NOTIFY_OK;
@@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    unsigned long cmd,
 				    void *v)
 {
-	if (cmd == CPU_PM_EXIT &&
-	    __hyp_get_vectors() == hyp_default_vectors) {
-		cpu_init_hyp_mode(NULL);
+	if (cmd == CPU_PM_EXIT) {
+		cpu_hyp_reinit();
 		return NOTIFY_OK;
 	}
 
@@ -1128,6 +1139,22 @@ static int init_subsystems(void)
 	int err;
 
 	/*
+	 * Register CPU Hotplug notifier
+	 */
+	cpu_notifier_register_begin();
+	err = __register_cpu_notifier(&hyp_init_cpu_nb);
+	cpu_notifier_register_done();
+	if (err) {
+		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
+		return err;
+	}
+
+	/*
+	 * Register CPU lower-power notifier
+	 */
+	hyp_cpu_pm_init();
+
+	/*
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
@@ -1270,19 +1297,6 @@ static int init_hyp_mode(void)
 	free_boot_hyp_pgd();
 #endif
 
-	cpu_notifier_register_begin();
-
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-
-	cpu_notifier_register_done();
-
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-		goto out_err;
-	}
-
-	hyp_cpu_pm_init();
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d0ba3551d49a..3cced8455727 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(page_to_pfn(page),
-				page->index << PAGE_CACHE_SHIFT);
+				page->index << PAGE_SHIFT);
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	 * data in the current VM view associated with this page.
 	 * - aliasing VIPT: we only need to find one mapping of this page.
 	 */
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0e391dbfc420..4150fd8bae01 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -124,7 +124,9 @@
 #define VTCR_EL2_SL0_LVL1	(1 << 6)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
-#define VTCR_EL2_VS		19
+#define VTCR_EL2_VS_SHIFT	19
+#define VTCR_EL2_VS_8BIT	(0 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_VS_16BIT	(1 << VTCR_EL2_VS_SHIFT)
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1a78d6e2a78b..12874164b0ae 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -141,6 +141,9 @@
 #define ID_AA64MMFR1_VMIDBITS_SHIFT	4
 #define ID_AA64MMFR1_HADBS_SHIFT	0
 
+#define ID_AA64MMFR1_VMIDBITS_8		0
+#define ID_AA64MMFR1_VMIDBITS_16	2
+
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_UAO_SHIFT		4
 
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index bfc54fd82797..5a9f3bf542b0 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -36,8 +36,10 @@ void __hyp_text __init_stage2_translation(void)
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
 	 * bit in VTCR_EL2.
 	 */
-	tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
-	val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
+	val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
+		VTCR_EL2_VS_16BIT :
+		VTCR_EL2_VS_8BIT;
 
 	write_sysreg(val, vtcr_el2);
 }
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 745695db5ba0..f2f264b5aafe 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	au1x_dma_chan_t *cp;
 
 	/*
-	 * We do the intialization on the first channel allocation.
+	 * We do the initialization on the first channel allocation.
 	 * We have to wait because of the interrupt handler initialization
 	 * which can't be done successfully during board set up.
 	 */
@@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
 	dp->dscr_source1 = dscr->dscr_source1;
 	dp->dscr_cmd1 = dscr->dscr_cmd1;
 	nbytes = dscr->dscr_cmd1;
-	/* Allow the caller to specifiy if an interrupt is generated */
+	/* Allow the caller to specify if an interrupt is generated */
 	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
 	ctp->chan_ptr->ddma_dbell = 0;
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index bdeed9d13c6f..433c4b9a9f0a 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
 	if (board == BCSR_WHOAMI_DB1500) {
 		c0 = AU1500_GPIO2_INT;
 		c1 = AU1500_GPIO5_INT;
-		d0 = AU1500_GPIO0_INT;
-		d1 = AU1500_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3;	/* GPIO number, NOT irq! */
 		s0 = AU1500_GPIO1_INT;
 		s1 = AU1500_GPIO4_INT;
 	} else if (board == BCSR_WHOAMI_DB1100) {
 		c0 = AU1100_GPIO2_INT;
 		c1 = AU1100_GPIO5_INT;
-		d0 = AU1100_GPIO0_INT;
-		d1 = AU1100_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3;	/* GPIO number, NOT irq! */
 		s0 = AU1100_GPIO1_INT;
 		s1 = AU1100_GPIO4_INT;
 
@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
 	} else if (board == BCSR_WHOAMI_DB1000) {
 		c0 = AU1000_GPIO2_INT;
 		c1 = AU1000_GPIO5_INT;
-		d0 = AU1000_GPIO0_INT;
-		d1 = AU1000_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3;	/* GPIO number, NOT irq! */
 		s0 = AU1000_GPIO1_INT;
 		s1 = AU1000_GPIO4_INT;
 		platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
 	} else if ((board == BCSR_WHOAMI_PB1500) ||
 		   (board == BCSR_WHOAMI_PB1500R2)) {
 		c0 = AU1500_GPIO203_INT;
-		d0 = AU1500_GPIO201_INT;
+		d0 = 1;	/* GPIO number, NOT irq! */
 		s0 = AU1500_GPIO202_INT;
 		twosocks = 0;
 		flashsize = 64;
@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
 	 */
 	} else if (board == BCSR_WHOAMI_PB1100) {
 		c0 = AU1100_GPIO11_INT;
-		d0 = AU1100_GPIO9_INT;
+		d0 = 9;	/* GPIO number, NOT irq! */
 		s0 = AU1100_GPIO10_INT;
 		twosocks = 0;
 		flashsize = 64;
@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
 	} else
 		return 0; /* unknown board, no further dev setup to do */
 
-	irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
 	irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
 	irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
 
@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
 			c0, d0, /*s0*/0, 0, 0);
 
 	if (twosocks) {
-		irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
 		irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
 		irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
 
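The switch from IRQ constants to raw GPIO numbers above assumes the PCMCIA socket driver now derives the card-detect IRQ itself (drivers/pcmcia/db1xxx_ss.c is also touched in this series, per the diffstat). A hypothetical consumer-side sketch — names invented, not copied from db1xxx_ss.c:

#include <linux/gpio.h>
#include <linux/irq.h>

static int example_setup_carddetect(unsigned int gpio_cd)
{
	/* turn the card-detect GPIO number back into an IRQ */
	int irq = gpio_to_irq(gpio_cd);

	if (irq < 0)
		return irq;
	irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
	return irq;
}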
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index b518f029f5e7..1c01d6eadb08 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
 		AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
 		AU1000_PCMCIA_IO_PHYS_ADDR,
 		AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
-		AU1550_GPIO3_INT, AU1550_GPIO0_INT,
+		AU1550_GPIO3_INT, 0,
 		/*AU1550_GPIO21_INT*/0, 0, 0);
 
 	db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
 		AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
 		AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
 		AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
-		AU1550_GPIO5_INT, AU1550_GPIO1_INT,
+		AU1550_GPIO5_INT, 1,
 		/*AU1550_GPIO22_INT*/0, 0, 1);
 
 	platform_device_register(&db1550_nand_dev);
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index eb5117ced95a..618dfd735eed 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -26,8 +26,7 @@
 #include "common.h"
 
 #define AR71XX_BASE_FREQ	40000000
-#define AR724X_BASE_FREQ	5000000
-#define AR913X_BASE_FREQ	5000000
+#define AR724X_BASE_FREQ	40000000
 
 static struct clk *clks[3];
 static struct clk_onecell_data clk_data = {
@@ -103,8 +102,8 @@ static void __init ar724x_clocks_init(void)
 	div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
 	freq = div * ref_rate;
 
-	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
-	freq *= div;
+	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
+	freq /= div;
 
 	cpu_rate = freq;
 
@@ -123,39 +122,6 @@ static void __init ar724x_clocks_init(void)
 	clk_add_alias("uart", NULL, "ahb", NULL);
 }
 
-static void __init ar913x_clocks_init(void)
-{
-	unsigned long ref_rate;
-	unsigned long cpu_rate;
-	unsigned long ddr_rate;
-	unsigned long ahb_rate;
-	u32 pll;
-	u32 freq;
-	u32 div;
-
-	ref_rate = AR913X_BASE_FREQ;
-	pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
-
-	div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
-	freq = div * ref_rate;
-
-	cpu_rate = freq;
-
-	div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
-	ddr_rate = freq / div;
-
-	div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
-	ahb_rate = cpu_rate / div;
-
-	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
-
-	clk_add_alias("wdt", NULL, "ahb", NULL);
-	clk_add_alias("uart", NULL, "ahb", NULL);
-}
-
 static void __init ar933x_clocks_init(void)
 {
 	unsigned long ref_rate;
@@ -443,10 +409,8 @@ void __init ath79_clocks_init(void)
 {
 	if (soc_is_ar71xx())
 		ar71xx_clocks_init();
-	else if (soc_is_ar724x())
+	else if (soc_is_ar724x() || soc_is_ar913x())
 		ar724x_clocks_init();
-	else if (soc_is_ar913x())
-		ar913x_clocks_init();
 	else if (soc_is_ar933x())
 		ar933x_clocks_init();
 	else if (soc_is_ar934x())
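With the corrected AR724x handling above, the CPU clock works out as

	f_cpu = (f_ref * FB) / (2 * REF_DIV)

so with the new 40 MHz base frequency and illustrative register values of FB = 20 and REF_DIV = 1 (example numbers, not read from real hardware), f_cpu = 40 MHz * 20 / (2 * 1) = 400 MHz, which matches typical AR724x parts. The old code both used a 5 MHz base frequency and multiplied by the reference divider instead of dividing by it.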
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 959c145a0a2c..ca7ad131d057 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -714,11 +714,11 @@ void bcm47xx_sprom_register_fallbacks(void)
714{ 714{
715#if defined(CONFIG_BCM47XX_SSB) 715#if defined(CONFIG_BCM47XX_SSB)
716 if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb)) 716 if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
717 pr_warn("Failed to registered ssb SPROM handler\n"); 717 pr_warn("Failed to register ssb SPROM handler\n");
718#endif 718#endif
719 719
720#if defined(CONFIG_BCM47XX_BCMA) 720#if defined(CONFIG_BCM47XX_BCMA)
721 if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma)) 721 if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
722 pr_warn("Failed to registered bcma SPROM handler\n"); 722 pr_warn("Failed to register bcma SPROM handler\n");
723#endif 723#endif
724} 724}
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 4eff1ef02eff..309d2ad67e4d 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -39,10 +39,11 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)	+= $(obj)/uart-alchemy.o
 endif
 
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
 
-$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
-$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
 	$(call cmd,shipped)
 
 targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index adb33e355043..56035e5b7008 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -82,7 +82,7 @@
 	};
 
 	gisb-arb@400000 {
-		compatible = "brcm,bcm7400-gisb-arb";
+		compatible = "brcm,bcm7435-gisb-arb";
 		reg = <0x400000 0xdc>;
 		native-endian;
 		interrupt-parent = <&sun_l2_intc>;
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 3ad4ba9b12fd..3c2ed9ee5b2f 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -83,7 +83,7 @@
 	};
 
 	pll: pll-controller@18050000 {
-		compatible = "qca,ar9132-ppl",
+		compatible = "qca,ar9132-pll",
 			     "qca,ar9130-pll";
 		reg = <0x18050000 0x20>;
 
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index e535ee3c26a4..4f1540e5f963 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -18,7 +18,7 @@
 		reg = <0x0 0x2000000>;
 	};
 
-	extosc: oscillator {
+	extosc: ref {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <40000000>;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
index e59d1b79f24c..2f415d9d0f3c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	gmx_rx_int_en.s.pause_drp = 1;
 	/* Skipping gmx_rx_int_en.s.reserved_16_18 */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -89,7 +89,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	/*gmx_rx_int_en.s.phy_spd = 1; */
 	/*gmx_rx_int_en.s.phy_link = 1; */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	/*gmx_rx_int_en.s.phy_spd = 1; */
 	/*gmx_rx_int_en.s.phy_link = 1; */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	/*gmx_rx_int_en.s.phy_spd = 1; */
 	/*gmx_rx_int_en.s.phy_link = 1; */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	/*gmx_rx_int_en.s.phy_spd = 1; */
 	/*gmx_rx_int_en.s.phy_link = 1; */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	/*gmx_rx_int_en.s.phy_spd = 1; */
 	/*gmx_rx_int_en.s.phy_link = 1; */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
 	gmx_rx_int_en.s.pause_drp = 1;
 	/* Skipping gmx_rx_int_en.s.reserved_16_18 */
 	/*gmx_rx_int_en.s.ifgerr = 1; */
-	/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+	/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 	/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 	/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 	/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 87be167a7a6a..676fab50dd2b 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void)
 	/*
 	 * Set the size of the PKO command buffers to an odd number of
 	 * 64bit words. This allows the normal two word send to stay
-	 * aligned and never span a comamnd word buffer.
+	 * aligned and never span a command word buffer.
 	 */
 	config.u64 = 0;
 	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index b7fa9ae28c36..42412ba0f3bf 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -331,7 +331,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
 	}
 
 	if (!(avail_coremask & (1 << coreid))) {
-		/* core not available, assume, that catched by simple-executive */
+		/* core not available, assume, that caught by simple-executive */
 		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
 		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
 	}
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 4e36b6e1869c..43e0ba24470c 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
 CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_SCHED=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y
 # CONFIG_ALLOW_DEV_COREDUMP is not set
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
 CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
@@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y
103# CONFIG_PROC_PAGE_MONITOR is not set 107# CONFIG_PROC_PAGE_MONITOR is not set
104CONFIG_TMPFS=y 108CONFIG_TMPFS=y
105CONFIG_CONFIGFS_FS=y 109CONFIG_CONFIGFS_FS=y
106# CONFIG_MISC_FILESYSTEMS is not set 110CONFIG_UBIFS_FS=y
107# CONFIG_NETWORK_FILESYSTEMS is not set 111# CONFIG_NETWORK_FILESYSTEMS is not set
108CONFIG_NLS=y 112CONFIG_NLS=y
109CONFIG_NLS_CODEPAGE_437=y 113CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 8c6f508e59de..d7b99180c6e1 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -5,7 +5,7 @@
5 * Written by Ralf Baechle and Andreas Busse, modified for DECstation 5 * Written by Ralf Baechle and Andreas Busse, modified for DECstation
6 * support by Paul Antoine and Harald Koerfgen. 6 * support by Paul Antoine and Harald Koerfgen.
7 * 7 *
8 * completly rewritten: 8 * completely rewritten:
9 * Copyright (C) 1998 Harald Koerfgen 9 * Copyright (C) 1998 Harald Koerfgen
10 * 10 *
11 * Rewritten extensively for controller-driven IRQ support 11 * Rewritten extensively for controller-driven IRQ support
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 5537b94572b2..0d75b5a0bad4 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -9,7 +9,7 @@
9 * PROM library functions for acquiring/using memory descriptors given to us 9 * PROM library functions for acquiring/using memory descriptors given to us
10 * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set 10 * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
11 * because on some machines like SGI IP27 the ARC memory configuration data 11 * because on some machines like SGI IP27 the ARC memory configuration data
12 * completly bogus and alternate easier to use mechanisms are available. 12 * completely bogus and alternate easier to use mechanisms are available.
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index e7dc785a91ca..af12c1f9f1a8 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -102,7 +102,7 @@ extern void cpu_probe(void);
102extern void cpu_report(void); 102extern void cpu_report(void);
103 103
104extern const char *__cpu_name[]; 104extern const char *__cpu_name[];
105#define cpu_name_string() __cpu_name[smp_processor_id()] 105#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
106 106
107struct seq_file; 107struct seq_file;
108struct notifier_block; 108struct notifier_block;
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index cf92fe733995..c4873e8594ef 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -141,7 +141,7 @@ octeon_main_processor:
141.endm 141.endm
142 142
143/* 143/*
144 * Do SMP slave processor setup necessary before we can savely execute C code. 144 * Do SMP slave processor setup necessary before we can safely execute C code.
145 */ 145 */
146 .macro smp_slave_setup 146 .macro smp_slave_setup
147 .endm 147 .endm
diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
index 13b0751b010a..a229297c880b 100644
--- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
@@ -16,7 +16,7 @@
16 .endm 16 .endm
17 17
18/* 18/*
19 * Do SMP slave processor setup necessary before we can savely execute C code. 19 * Do SMP slave processor setup necessary before we can safely execute C code.
20 */ 20 */
21 .macro smp_slave_setup 21 .macro smp_slave_setup
22 .endm 22 .endm
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index cf4384bfa846..b0b7261ff3ad 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -11,7 +11,7 @@
11#define __ASM_MACH_IP27_IRQ_H 11#define __ASM_MACH_IP27_IRQ_H
12 12
13/* 13/*
14 * A hardwired interrupt number is completly stupid for this system - a 14 * A hardwired interrupt number is completely stupid for this system - a
 15 * large configuration might have thousands if not tens of thousands of 15 * large configuration might have thousands if not tens of thousands of
16 * interrupts. 16 * interrupts.
17 */ 17 */
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index b087cb83da3a..f992c1db876b 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -81,7 +81,7 @@
81 .endm 81 .endm
82 82
83/* 83/*
84 * Do SMP slave processor setup necessary before we can savely execute C code. 84 * Do SMP slave processor setup necessary before we can safely execute C code.
85 */ 85 */
86 .macro smp_slave_setup 86 .macro smp_slave_setup
87 GET_NASID_ASM t1 87 GET_NASID_ASM t1
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index bf8c3e1860e7..7c7708a23baa 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -27,7 +27,7 @@ enum jz_gpio_function {
27 27
28/* 28/*
29 Usually a driver for a SoC component has to request several gpio pins and 29 Usually a driver for a SoC component has to request several gpio pins and
30 configure them as funcion pins. 30 configure them as function pins.
31 jz_gpio_bulk_request can be used to ease this process. 31 jz_gpio_bulk_request can be used to ease this process.
32 Usually one would do something like: 32 Usually one would do something like:
33 33
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b196825a1de9..d4635391c36a 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base;
28 * This function returns the physical base address of the Coherence Manager 28 * This function returns the physical base address of the Coherence Manager
29 * global control block, or 0 if no Coherence Manager is present. It provides 29 * global control block, or 0 if no Coherence Manager is present. It provides
30 * a default implementation which reads the CMGCRBase register where available, 30 * a default implementation which reads the CMGCRBase register where available,
31 * and may be overriden by platforms which determine this address in a 31 * and may be overridden by platforms which determine this address in a
32 * different way by defining a function with the same prototype except for the 32 * different way by defining a function with the same prototype except for the
33 * name mips_cm_phys_base (without underscores). 33 * name mips_cm_phys_base (without underscores).
34 */ 34 */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
index 1f6ea8352ca9..20621e1ca238 100644
--- a/arch/mips/include/asm/mips-r2-to-r6-emul.h
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -79,7 +79,7 @@ struct r2_decoder_table {
79}; 79};
80 80
81 81
82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
83 const char *str); 83 const char *str);
84 84
85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR 85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index f7dd17d0dc22..f4f1996e0fac 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -33,7 +33,7 @@
33/* Packet buffers */ 33/* Packet buffers */
34#define CVMX_FPA_PACKET_POOL (0) 34#define CVMX_FPA_PACKET_POOL (0)
35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE 35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
36/* Work queue entrys */ 36/* Work queue entries */
37#define CVMX_FPA_WQE_POOL (1) 37#define CVMX_FPA_WQE_POOL (1)
38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE 38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
39/* PKO queue command buffers */ 39/* PKO queue command buffers */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 19e139c9f337..3e982e0c397e 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -189,7 +189,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
189static inline void *cvmx_phys_to_ptr(uint64_t physical_address) 189static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
190{ 190{
191 if (sizeof(void *) == 8) { 191 if (sizeof(void *) == 8) {
192 /* Just set the top bit, avoiding any TLB uglyness */ 192 /* Just set the top bit, avoiding any TLB ugliness */
193 return CASTPTR(void, 193 return CASTPTR(void,
194 CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 194 CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
195 physical_address)); 195 physical_address));
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 8d7a63b52ac7..3206245d1ed6 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s {
269 union { 269 union {
270 u32 cmd_word; 270 u32 cmd_word;
271 struct { 271 struct {
272 u32 didn:4, /* Destination ID */ 272 u32 didn:4, /* Destination ID */
273 sidn:4, /* Source ID */ 273 sidn:4, /* Source ID */
274 pactyp:4, /* Packet type */ 274 pactyp:4, /* Packet type */
275 tnum:5, /* Trans Number */ 275 tnum:5, /* Trans Number */
276 coh:1, /* Coh Transacti */ 276 coh:1, /* Coh Transaction */
277 ds:2, /* Data size */ 277 ds:2, /* Data size */
278 gbr:1, /* GBR enable */ 278 gbr:1, /* GBR enable */
279 vbpm:1, /* VBPM message */ 279 vbpm:1, /* VBPM message */
280 error:1, /* Error occurred */ 280 error:1, /* Error occurred */
281 barr:1, /* Barrier op */ 281 barr:1, /* Barrier op */
282 rsvd:8; 282 rsvd:8;
283 } berr_st; 283 } berr_st;
284 } berr_un; 284 } berr_un;
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 59920b345942..4a9c99050c13 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -147,7 +147,7 @@ struct hpc3_ethregs {
147#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */ 147#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
148#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */ 148#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
149#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */ 149#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
150#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */ 150#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
151 151
152 u32 _unused2[0x1000/4 - 8]; /* padding */ 152 u32 _unused2[0x1000/4 - 8]; /* padding */
153 153
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 26ddfff28c8e..105a9479ac5f 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -144,7 +144,7 @@ struct linux_tinfo {
144struct linux_vdirent { 144struct linux_vdirent {
145 ULONG namelen; 145 ULONG namelen;
146 unsigned char attr; 146 unsigned char attr;
147 char fname[32]; /* XXX imperical, should be a define */ 147 char fname[32]; /* XXX empirical, should be a define */
148}; 148};
149 149
150/* Other stuff for files. */ 150/* Other stuff for files. */
@@ -179,7 +179,7 @@ struct linux_finfo {
179 enum linux_devtypes dtype; 179 enum linux_devtypes dtype;
180 unsigned long namelen; 180 unsigned long namelen;
181 unsigned char attr; 181 unsigned char attr;
182 char name[32]; /* XXX imperical, should be define */ 182 char name[32]; /* XXX empirical, should be define */
183}; 183};
184 184
185/* This describes the vector containing function pointers to the ARC 185/* This describes the vector containing function pointers to the ARC
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index e33f0363235b..feb385180f87 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -355,7 +355,7 @@ struct ioc3_etxd {
355#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */ 355#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
356#define SSCR_RESET 0x80000000 /* reset DMA channels */ 356#define SSCR_RESET 0x80000000 /* reset DMA channels */
357 357
358/* all producer/comsumer pointers are the same bitfield */ 358/* all producer/consumer pointers are the same bitfield */
359#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */ 359#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
360#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */ 360#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
361#define PROD_CONS_PTR_OFF 3 361#define PROD_CONS_PTR_OFF 3
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 5998b13e9764..57ece90f8cf1 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -628,7 +628,7 @@ typedef union h1_icrbb_u {
628/* 628/*
629 * Values for field imsgtype 629 * Values for field imsgtype
630 */ 630 */
631#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ 631#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
632#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ 632#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
633#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */ 633#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
634#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ 634#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 095ecafe6bd3..7f109d4f64a4 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -95,7 +95,7 @@ static inline bool eva_kernel_access(void)
95} 95}
96 96
97/* 97/*
 98 * Is an address valid? This does a straighforward calculation rather 98 * Is an address valid? This does a straightforward calculation rather
99 * than tests. 99 * than tests.
100 * 100 *
101 * Address valid if: 101 * Address valid if:
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3129795de940..24ad815c7f38 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,16 +381,18 @@
381#define __NR_membarrier (__NR_Linux + 358) 381#define __NR_membarrier (__NR_Linux + 358)
382#define __NR_mlock2 (__NR_Linux + 359) 382#define __NR_mlock2 (__NR_Linux + 359)
383#define __NR_copy_file_range (__NR_Linux + 360) 383#define __NR_copy_file_range (__NR_Linux + 360)
384#define __NR_preadv2 (__NR_Linux + 361)
385#define __NR_pwritev2 (__NR_Linux + 362)
384 386
385/* 387/*
386 * Offset of the last Linux o32 flavoured syscall 388 * Offset of the last Linux o32 flavoured syscall
387 */ 389 */
388#define __NR_Linux_syscalls 360 390#define __NR_Linux_syscalls 362
389 391
390#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 392#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
391 393
392#define __NR_O32_Linux 4000 394#define __NR_O32_Linux 4000
393#define __NR_O32_Linux_syscalls 360 395#define __NR_O32_Linux_syscalls 362
394 396
395#if _MIPS_SIM == _MIPS_SIM_ABI64 397#if _MIPS_SIM == _MIPS_SIM_ABI64
396 398
@@ -719,16 +721,18 @@
719#define __NR_membarrier (__NR_Linux + 318) 721#define __NR_membarrier (__NR_Linux + 318)
720#define __NR_mlock2 (__NR_Linux + 319) 722#define __NR_mlock2 (__NR_Linux + 319)
721#define __NR_copy_file_range (__NR_Linux + 320) 723#define __NR_copy_file_range (__NR_Linux + 320)
724#define __NR_preadv2 (__NR_Linux + 321)
725#define __NR_pwritev2 (__NR_Linux + 322)
722 726
723/* 727/*
724 * Offset of the last Linux 64-bit flavoured syscall 728 * Offset of the last Linux 64-bit flavoured syscall
725 */ 729 */
726#define __NR_Linux_syscalls 320 730#define __NR_Linux_syscalls 322
727 731
728#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 732#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
729 733
730#define __NR_64_Linux 5000 734#define __NR_64_Linux 5000
731#define __NR_64_Linux_syscalls 320 735#define __NR_64_Linux_syscalls 322
732 736
733#if _MIPS_SIM == _MIPS_SIM_NABI32 737#if _MIPS_SIM == _MIPS_SIM_NABI32
734 738
@@ -1061,15 +1065,17 @@
1061#define __NR_membarrier (__NR_Linux + 322) 1065#define __NR_membarrier (__NR_Linux + 322)
1062#define __NR_mlock2 (__NR_Linux + 323) 1066#define __NR_mlock2 (__NR_Linux + 323)
1063#define __NR_copy_file_range (__NR_Linux + 324) 1067#define __NR_copy_file_range (__NR_Linux + 324)
1068#define __NR_preadv2 (__NR_Linux + 325)
1069#define __NR_pwritev2 (__NR_Linux + 326)
1064 1070
1065/* 1071/*
1066 * Offset of the last N32 flavoured syscall 1072 * Offset of the last N32 flavoured syscall
1067 */ 1073 */
1068#define __NR_Linux_syscalls 324 1074#define __NR_Linux_syscalls 326
1069 1075
1070#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1076#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1071 1077
1072#define __NR_N32_Linux 6000 1078#define __NR_N32_Linux 6000
1073#define __NR_N32_Linux_syscalls 324 1079#define __NR_N32_Linux_syscalls 326
1074 1080
1075#endif /* _UAPI_ASM_UNISTD_H */ 1081#endif /* _UAPI_ASM_UNISTD_H */
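
The hunks above reserve o32, 64 and n32 syscall numbers for preadv2/pwritev2 and bump each ABI's __NR_*_syscalls count in lockstep; the tables in scall*-o32/64/n32.S below wire the entries up. For illustration, a minimal userspace caller — assuming a glibc new enough (2.26+) to expose the preadv2(2) wrapper; older systems would go through syscall(2) with the numbers from this header instead:

/* Minimal sketch: read the start of a file with preadv2(2).
 * With flags = 0 this behaves like preadv(); the RWF_* flags are
 * where preadv2() adds value (per-call I/O hints).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
	int fd = open("/etc/hostname", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	n = preadv2(fd, &iov, 1, 0, 0);	/* offset 0, no flags */
	if (n < 0) {
		perror("preadv2");
		close(fd);
		return EXIT_FAILURE;
	}

	buf[n] = '\0';
	printf("read %zd bytes: %s", n, buf);
	close(fd);
	return EXIT_SUCCESS;
}
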
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f43d4e..760217bbb2fa 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -24,7 +24,7 @@ static char *cm2_tr[8] = {
24 "0x04", "cpc", "0x06", "0x07" 24 "0x04", "cpc", "0x06", "0x07"
25}; 25};
26 26
27/* CM3 Tag ECC transation type */ 27/* CM3 Tag ECC transaction type */
28static char *cm3_tr[16] = { 28static char *cm3_tr[16] = {
29 [0x0] = "ReqNoData", 29 [0x0] = "ReqNoData",
30 [0x1] = "0x1", 30 [0x1] = "0x1",
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 1f5aac7f9ec3..3fff89ae760b 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -940,42 +940,42 @@ repeat:
940 switch (rt) { 940 switch (rt) {
941 case tgei_op: 941 case tgei_op:
942 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) 942 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
943 do_trap_or_bp(regs, 0, "TGEI"); 943 do_trap_or_bp(regs, 0, 0, "TGEI");
944 944
945 MIPS_R2_STATS(traps); 945 MIPS_R2_STATS(traps);
946 946
947 break; 947 break;
948 case tgeiu_op: 948 case tgeiu_op:
949 if (regs->regs[rs] >= MIPSInst_UIMM(inst)) 949 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
950 do_trap_or_bp(regs, 0, "TGEIU"); 950 do_trap_or_bp(regs, 0, 0, "TGEIU");
951 951
952 MIPS_R2_STATS(traps); 952 MIPS_R2_STATS(traps);
953 953
954 break; 954 break;
955 case tlti_op: 955 case tlti_op:
956 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) 956 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
957 do_trap_or_bp(regs, 0, "TLTI"); 957 do_trap_or_bp(regs, 0, 0, "TLTI");
958 958
959 MIPS_R2_STATS(traps); 959 MIPS_R2_STATS(traps);
960 960
961 break; 961 break;
962 case tltiu_op: 962 case tltiu_op:
963 if (regs->regs[rs] < MIPSInst_UIMM(inst)) 963 if (regs->regs[rs] < MIPSInst_UIMM(inst))
964 do_trap_or_bp(regs, 0, "TLTIU"); 964 do_trap_or_bp(regs, 0, 0, "TLTIU");
965 965
966 MIPS_R2_STATS(traps); 966 MIPS_R2_STATS(traps);
967 967
968 break; 968 break;
969 case teqi_op: 969 case teqi_op:
970 if (regs->regs[rs] == MIPSInst_SIMM(inst)) 970 if (regs->regs[rs] == MIPSInst_SIMM(inst))
971 do_trap_or_bp(regs, 0, "TEQI"); 971 do_trap_or_bp(regs, 0, 0, "TEQI");
972 972
973 MIPS_R2_STATS(traps); 973 MIPS_R2_STATS(traps);
974 974
975 break; 975 break;
976 case tnei_op: 976 case tnei_op:
977 if (regs->regs[rs] != MIPSInst_SIMM(inst)) 977 if (regs->regs[rs] != MIPSInst_SIMM(inst))
978 do_trap_or_bp(regs, 0, "TNEI"); 978 do_trap_or_bp(regs, 0, 0, "TNEI");
979 979
980 MIPS_R2_STATS(traps); 980 MIPS_R2_STATS(traps);
981 981
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 2b70723071c3..9083d63b765c 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -109,9 +109,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
109 struct module *me) 109 struct module *me)
110{ 110{
111 Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; 111 Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
112 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
112 Elf_Sym *sym; 113 Elf_Sym *sym;
113 u32 *location; 114 u32 *location;
114 unsigned int i; 115 unsigned int i, type;
115 Elf_Addr v; 116 Elf_Addr v;
116 int res; 117 int res;
117 118
@@ -134,9 +135,21 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
134 return -ENOENT; 135 return -ENOENT;
135 } 136 }
136 137
137 v = sym->st_value + rel[i].r_addend; 138 type = ELF_MIPS_R_TYPE(rel[i]);
139
140 if (type < ARRAY_SIZE(reloc_handlers_rela))
141 handler = reloc_handlers_rela[type];
142 else
143 handler = NULL;
138 144
139 res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); 145 if (!handler) {
146 pr_err("%s: Unknown relocation type %u\n",
147 me->name, type);
148 return -EINVAL;
149 }
150
151 v = sym->st_value + rel[i].r_addend;
152 res = handler(me, location, v);
140 if (res) 153 if (res)
141 return res; 154 return res;
142 } 155 }
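
This hunk, and the matching one in module.c below, replace an unchecked indexed call through reloc_handlers_rela[] with a range check plus a NULL-slot check, so a corrupt or not-yet-supported relocation type fails with -EINVAL instead of jumping through a bogus pointer. A standalone sketch of the same table-dispatch hardening, with hypothetical handler names:

/* Sketch: bounds- and NULL-checked dispatch through a sparse table. */
#include <stdio.h>
#include <stddef.h>

typedef int (*op_handler)(int arg);

static int op_add_one(int arg) { return arg + 1; }
static int op_double(int arg)  { return arg * 2; }

static const op_handler handlers[] = {
	[0] = op_add_one,
	[1] = NULL,		/* reserved/unimplemented slot */
	[2] = op_double,
};

static int dispatch(unsigned int type, int arg, int *out)
{
	op_handler handler = NULL;

	/* Check the index before loading from the table, then check the
	 * slot itself: designated-initializer tables may contain NULLs. */
	if (type < sizeof(handlers) / sizeof(handlers[0]))
		handler = handlers[type];

	if (!handler) {
		fprintf(stderr, "unknown op type %u\n", type);
		return -1;
	}

	*out = handler(arg);
	return 0;
}

int main(void)
{
	int out;

	if (dispatch(2, 21, &out) == 0)
		printf("op 2 -> %d\n", out);	/* 42 */
	if (dispatch(7, 21, &out) != 0)
		printf("op 7 rejected\n");
	return 0;
}
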
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 1833f5171ccd..f9b2936d598d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -197,9 +197,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
197 struct module *me) 197 struct module *me)
198{ 198{
199 Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; 199 Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
200 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
200 Elf_Sym *sym; 201 Elf_Sym *sym;
201 u32 *location; 202 u32 *location;
202 unsigned int i; 203 unsigned int i, type;
203 Elf_Addr v; 204 Elf_Addr v;
204 int res; 205 int res;
205 206
@@ -223,9 +224,21 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
223 return -ENOENT; 224 return -ENOENT;
224 } 225 }
225 226
226 v = sym->st_value; 227 type = ELF_MIPS_R_TYPE(rel[i]);
228
229 if (type < ARRAY_SIZE(reloc_handlers_rel))
230 handler = reloc_handlers_rel[type];
231 else
232 handler = NULL;
227 233
228 res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); 234 if (!handler) {
235 pr_err("%s: Unknown relocation type %u\n",
236 me->name, type);
237 return -EINVAL;
238 }
239
240 v = sym->st_value;
241 res = handler(me, location, v);
229 if (res) 242 if (res)
230 return res; 243 return res;
231 } 244 }
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d7b8dd43147a..9bc1191b1ab0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -530,7 +530,7 @@ static void mipspmu_enable(struct pmu *pmu)
530 530
531/* 531/*
532 * MIPS performance counters can be per-TC. The control registers can 532 * MIPS performance counters can be per-TC. The control registers can
533 * not be directly accessed accross CPUs. Hence if we want to do global 533 * not be directly accessed across CPUs. Hence if we want to do global
534 * control, we need cross CPU calls. on_each_cpu() can help us, but we 534 * control, we need cross CPU calls. on_each_cpu() can help us, but we
535 * can not make sure this function is called with interrupts enabled. So 535 * can not make sure this function is called with interrupts enabled. So
536 * here we pause local counters and then grab a rwlock and leave the 536 * here we pause local counters and then grab a rwlock and leave the
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289977cc..fa3f9ebad8f4 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -472,7 +472,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
472 /* 472 /*
473 * Disable all but self interventions. The load from COHCTL is defined 473 * Disable all but self interventions. The load from COHCTL is defined
474 * by the interAptiv & proAptiv SUMs as ensuring that the operation 474 * by the interAptiv & proAptiv SUMs as ensuring that the operation
475 * resulting from the preceeding store is complete. 475 * resulting from the preceding store is complete.
476 */ 476 */
477 uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); 477 uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
478 uasm_i_sw(&p, t0, 0, r_pcohctl); 478 uasm_i_sw(&p, t0, 0, r_pcohctl);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eddd5fd6fdfa..92880cee449e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -615,7 +615,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
615 * allows us to only worry about whether an FP mode switch is in 615 * allows us to only worry about whether an FP mode switch is in
616 * progress when FP is first used in a tasks time slice. Pretty much all 616 * progress when FP is first used in a tasks time slice. Pretty much all
617 * of the mode switch overhead can thus be confined to cases where mode 617 * of the mode switch overhead can thus be confined to cases where mode
618 * switches are actually occuring. That is, to here. However for the 618 * switches are actually occurring. That is, to here. However for the
619 * thread performing the mode switch it may take a while... 619 * thread performing the mode switch it may take a while...
620 */ 620 */
621 if (num_online_cpus() > 1) { 621 if (num_online_cpus() > 1) {
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a56317444bda..d01fe53a6638 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -596,3 +596,5 @@ EXPORT(sys_call_table)
596 PTR sys_membarrier 596 PTR sys_membarrier
597 PTR sys_mlock2 597 PTR sys_mlock2
598 PTR sys_copy_file_range /* 4360 */ 598 PTR sys_copy_file_range /* 4360 */
599 PTR sys_preadv2
600 PTR sys_pwritev2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b2dc14610d0..6b73ecc02597 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -434,4 +434,6 @@ EXPORT(sys_call_table)
434 PTR sys_membarrier 434 PTR sys_membarrier
435 PTR sys_mlock2 435 PTR sys_mlock2
436 PTR sys_copy_file_range /* 5320 */ 436 PTR sys_copy_file_range /* 5320 */
437 PTR sys_preadv2
438 PTR sys_pwritev2
437 .size sys_call_table,.-sys_call_table 439 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2bf5c8593d91..71f99d5f7a06 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -424,4 +424,6 @@ EXPORT(sysn32_call_table)
424 PTR sys_membarrier 424 PTR sys_membarrier
425 PTR sys_mlock2 425 PTR sys_mlock2
426 PTR sys_copy_file_range 426 PTR sys_copy_file_range
427 PTR compat_sys_preadv2 /* 6325 */
428 PTR compat_sys_pwritev2
427 .size sysn32_call_table,.-sysn32_call_table 429 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index c5b759e584c7..91b43eea2d5a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -579,4 +579,6 @@ EXPORT(sys32_call_table)
579 PTR sys_membarrier 579 PTR sys_membarrier
580 PTR sys_mlock2 580 PTR sys_mlock2
581 PTR sys_copy_file_range /* 4360 */ 581 PTR sys_copy_file_range /* 4360 */
582 PTR compat_sys_preadv2
583 PTR compat_sys_pwritev2
582 .size sys32_call_table,.-sys32_call_table 584 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 37708d9af638..27cb638f0824 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -243,6 +243,18 @@ static int __init mips_smp_ipi_init(void)
243 struct irq_domain *ipidomain; 243 struct irq_domain *ipidomain;
244 struct device_node *node; 244 struct device_node *node;
245 245
246 /*
247 * In some cases like qemu-malta, it is desired to try SMP with
248 * a single core. Qemu-malta has no GIC, so an attempt to set any IPIs
249 * would cause a BUG_ON() to be triggered since there's no ipidomain.
250 *
 251 * Since IPIs aren't really required for a single-core system, skip the
 252 * initialisation, which should generally keep any such configurations
 253 * happy and only fail hard when trying to truly run SMP.
254 */
255 if (cpumask_weight(cpu_possible_mask) == 1)
256 return 0;
257
246 node = of_irq_find_parent(of_root); 258 node = of_irq_find_parent(of_root);
247 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); 259 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
248 260
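
The guard added above returns from mips_smp_ipi_init() before any IPI domain lookup when only one CPU is possible, so a GIC-less qemu-malta never reaches the BUG_ON(). The shape of the check in isolation — a popcount over a possible-CPU mask standing in for cpumask_weight(); the mask values are hypothetical and __builtin_popcountll is a GCC/Clang builtin:

/* Sketch: skip multi-CPU-only setup when only one CPU is possible. */
#include <stdio.h>
#include <stdint.h>

static int ipi_init(uint64_t possible_mask)
{
	/* Analogue of cpumask_weight(cpu_possible_mask) == 1. */
	if (__builtin_popcountll(possible_mask) == 1) {
		printf("single CPU possible, skipping IPI setup\n");
		return 0;
	}

	printf("setting up IPIs for %d CPUs\n",
	       __builtin_popcountll(possible_mask));
	return 0;
}

int main(void)
{
	ipi_init(0x1);	/* uniprocessor: skip */
	ipi_init(0xf);	/* four CPUs: proceed */
	return 0;
}
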
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bf14da9f3e33..ae0c89d23ad7 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -56,6 +56,7 @@
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/ptrace.h> 57#include <asm/ptrace.h>
58#include <asm/sections.h> 58#include <asm/sections.h>
59#include <asm/siginfo.h>
59#include <asm/tlbdebug.h> 60#include <asm/tlbdebug.h>
60#include <asm/traps.h> 61#include <asm/traps.h>
61#include <asm/uaccess.h> 62#include <asm/uaccess.h>
@@ -871,7 +872,7 @@ out:
871 exception_exit(prev_state); 872 exception_exit(prev_state);
872} 873}
873 874
874void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 875void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
875 const char *str) 876 const char *str)
876{ 877{
877 siginfo_t info = { 0 }; 878 siginfo_t info = { 0 };
@@ -928,7 +929,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
928 default: 929 default:
929 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 930 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
930 die_if_kernel(b, regs); 931 die_if_kernel(b, regs);
931 force_sig(SIGTRAP, current); 932 if (si_code) {
933 info.si_signo = SIGTRAP;
934 info.si_code = si_code;
935 force_sig_info(SIGTRAP, &info, current);
936 } else {
937 force_sig(SIGTRAP, current);
938 }
932 } 939 }
933} 940}
934 941
@@ -1012,7 +1019,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
1012 break; 1019 break;
1013 } 1020 }
1014 1021
1015 do_trap_or_bp(regs, bcode, "Break"); 1022 do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1016 1023
1017out: 1024out:
1018 set_fs(seg); 1025 set_fs(seg);
@@ -1054,7 +1061,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
1054 tcode = (opcode >> 6) & ((1 << 10) - 1); 1061 tcode = (opcode >> 6) & ((1 << 10) - 1);
1055 } 1062 }
1056 1063
1057 do_trap_or_bp(regs, tcode, "Trap"); 1064 do_trap_or_bp(regs, tcode, 0, "Trap");
1058 1065
1059out: 1066out:
1060 set_fs(seg); 1067 set_fs(seg);
@@ -1115,19 +1122,7 @@ no_r2_instr:
1115 if (unlikely(compute_return_epc(regs) < 0)) 1122 if (unlikely(compute_return_epc(regs) < 0))
1116 goto out; 1123 goto out;
1117 1124
1118 if (get_isa16_mode(regs->cp0_epc)) { 1125 if (!get_isa16_mode(regs->cp0_epc)) {
1119 unsigned short mmop[2] = { 0 };
1120
1121 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1122 status = SIGSEGV;
1123 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1124 status = SIGSEGV;
1125 opcode = mmop[0];
1126 opcode = (opcode << 16) | mmop[1];
1127
1128 if (status < 0)
1129 status = simulate_rdhwr_mm(regs, opcode);
1130 } else {
1131 if (unlikely(get_user(opcode, epc) < 0)) 1126 if (unlikely(get_user(opcode, epc) < 0))
1132 status = SIGSEGV; 1127 status = SIGSEGV;
1133 1128
@@ -1142,6 +1137,18 @@ no_r2_instr:
1142 1137
1143 if (status < 0) 1138 if (status < 0)
1144 status = simulate_fp(regs, opcode, old_epc, old31); 1139 status = simulate_fp(regs, opcode, old_epc, old31);
1140 } else if (cpu_has_mmips) {
1141 unsigned short mmop[2] = { 0 };
1142
1143 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1144 status = SIGSEGV;
1145 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1146 status = SIGSEGV;
1147 opcode = mmop[0];
1148 opcode = (opcode << 16) | mmop[1];
1149
1150 if (status < 0)
1151 status = simulate_rdhwr_mm(regs, opcode);
1145 } 1152 }
1146 1153
1147 if (status < 0) 1154 if (status < 0)
@@ -1492,6 +1499,7 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
1492 */ 1499 */
1493asmlinkage void do_watch(struct pt_regs *regs) 1500asmlinkage void do_watch(struct pt_regs *regs)
1494{ 1501{
1502 siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1495 enum ctx_state prev_state; 1503 enum ctx_state prev_state;
1496 u32 cause; 1504 u32 cause;
1497 1505
@@ -1512,7 +1520,7 @@ asmlinkage void do_watch(struct pt_regs *regs)
1512 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { 1520 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1513 mips_read_watch_registers(); 1521 mips_read_watch_registers();
1514 local_irq_enable(); 1522 local_irq_enable();
1515 force_sig(SIGTRAP, current); 1523 force_sig_info(SIGTRAP, &info, current);
1516 } else { 1524 } else {
1517 mips_clear_watch_registers(); 1525 mips_clear_watch_registers();
1518 local_irq_enable(); 1526 local_irq_enable();
@@ -2214,7 +2222,7 @@ void __init trap_init(void)
2214 2222
2215 /* 2223 /*
2216 * Copy the generic exception handlers to their final destination. 2224 * Copy the generic exception handlers to their final destination.
2217 * This will be overriden later as suitable for a particular 2225 * This will be overridden later as suitable for a particular
2218 * configuration. 2226 * configuration.
2219 */ 2227 */
2220 set_handler(0x180, &except_vec3_generic, 0x80); 2228 set_handler(0x180, &except_vec3_generic, 0x80);
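
The traps.c changes above thread an si_code through do_trap_or_bp() and do_watch() so that a SIGTRAP carries TRAP_BRKPT for a breakpoint or TRAP_HWBKPT for a watchpoint, letting a debugger distinguish the two from siginfo rather than seeing a bare signal. On the receiving side that field is visible through an SA_SIGINFO handler; a minimal sketch (the signal here is self-raised, so si_code will report an SI_* origin rather than a kernel TRAP_* value):

/* Sketch: inspecting siginfo.si_code for SIGTRAP via SA_SIGINFO. */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void trap_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	/* A kernel-generated breakpoint would report TRAP_BRKPT here,
	 * a watchpoint TRAP_HWBKPT; raise(3) reports SI_USER/SI_TKILL. */
	printf("SIGTRAP: si_code=%d (TRAP_BRKPT=%d, TRAP_HWBKPT=%d)\n",
	       info->si_code, TRAP_BRKPT, TRAP_HWBKPT);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = trap_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGTRAP, &sa, NULL) < 0) {
		perror("sigaction");
		return EXIT_FAILURE;
	}

	raise(SIGTRAP);
	return EXIT_SUCCESS;
}
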
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 490cea569d57..5c62065cbf22 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
885{ 885{
886 union mips_instruction insn; 886 union mips_instruction insn;
887 unsigned long value; 887 unsigned long value;
888 unsigned int res; 888 unsigned int res, preempted;
889 unsigned long origpc; 889 unsigned long origpc;
890 unsigned long orig31; 890 unsigned long orig31;
891 void __user *fault_addr = NULL; 891 void __user *fault_addr = NULL;
@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1226 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) 1226 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
1227 goto sigbus; 1227 goto sigbus;
1228 1228
1229 /* 1229 do {
1230 * Disable preemption to avoid a race between copying 1230 /*
1231 * state from userland, migrating to another CPU and 1231 * If we have live MSA context keep track of
1232 * updating the hardware vector register below. 1232 * whether we get preempted in order to avoid
1233 */ 1233 * the register context we load being clobbered
1234 preempt_disable(); 1234 * by the live context as it's saved during
1235 1235 * preemption. If we don't have live context
1236 res = __copy_from_user_inatomic(fpr, addr, 1236 * then it can't be saved to clobber the value
1237 sizeof(*fpr)); 1237 * we load.
1238 if (res) 1238 */
1239 goto fault; 1239 preempted = test_thread_flag(TIF_USEDMSA);
1240 1240
1241 /* 1241 res = __copy_from_user_inatomic(fpr, addr,
1242 * Update the hardware register if it is in use by the 1242 sizeof(*fpr));
1243 * task in this quantum, in order to avoid having to 1243 if (res)
1244 * save & restore the whole vector context. 1244 goto fault;
1245 */
1246 if (test_thread_flag(TIF_USEDMSA))
1247 write_msa_wr(wd, fpr, df);
1248 1245
1249 preempt_enable(); 1246 /*
1247 * Update the hardware register if it is in use
1248 * by the task in this quantum, in order to
1249 * avoid having to save & restore the whole
1250 * vector context.
1251 */
1252 preempt_disable();
1253 if (test_thread_flag(TIF_USEDMSA)) {
1254 write_msa_wr(wd, fpr, df);
1255 preempted = 0;
1256 }
1257 preempt_enable();
1258 } while (preempted);
1250 break; 1259 break;
1251 1260
1252 case msa_st_op: 1261 case msa_st_op:
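
The rewritten MSA load path above replaces "hold preemption off around the whole copy" with an optimistic scheme: sample TIF_USEDMSA, perform the user copy (which may fault and so cannot run with preemption disabled throughout), then redo the whole sequence if live MSA context existed and could have been saved over the freshly loaded value. The same sample/work/validate/retry shape appears in seqlock-style readers; a userspace sketch with an atomic generation counter standing in for the "live context may clobber us" condition:

/* Sketch: optimistic read with retry, keyed on a generation counter. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int generation;	/* bumped by a hypothetical writer */
static int shared_value = 42;

static int read_consistent(void)
{
	unsigned int gen;
	int v;

	do {
		/* Sample the interference indicator first... */
		gen = atomic_load(&generation);
		/* ...do the work that must not be clobbered mid-way... */
		v = shared_value;
		/* ...and retry if a writer ran in between. */
	} while (atomic_load(&generation) != gen);

	return v;
}

int main(void)
{
	printf("value = %d\n", read_consistent());
	return 0;
}
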
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a08c43946247..e0e1d0a611fc 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -632,7 +632,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
632 632
633 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); 633 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
634 634
635 /* Alocate new kernel and user ASIDs if needed */ 635 /* Allocate new kernel and user ASIDs if needed */
636 636
637 local_irq_save(flags); 637 local_irq_save(flags);
638 638
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index ad988000563f..c4038d2a724c 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -500,7 +500,7 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
500 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); 500 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
501 501
502 /* 502 /*
503 * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) 503 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
504 */ 504 */
505 kvm_write_c0_guest_intctl(cop0, 0xFC000000); 505 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
506 506
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index ad3c73436777..47d26c805eac 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -97,7 +97,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
97{ 97{
98 assert(xm); /* we don't gen exact zeros (probably should) */ 98 assert(xm); /* we don't gen exact zeros (probably should) */
99 99
100 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */ 100 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */
101 assert(xm & (DP_HIDDEN_BIT << 3)); 101 assert(xm & (DP_HIDDEN_BIT << 3));
102 102
103 if (xe < DP_EMIN) { 103 if (xe < DP_EMIN) {
@@ -165,7 +165,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
165 /* strip grs bits */ 165 /* strip grs bits */
166 xm >>= 3; 166 xm >>= 3;
167 167
168 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 168 assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
169 assert(xe >= DP_EMIN); 169 assert(xe >= DP_EMIN);
170 170
171 if (xe > DP_EMAX) { 171 if (xe > DP_EMAX) {
@@ -198,7 +198,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
198 ieee754_setcx(IEEE754_UNDERFLOW); 198 ieee754_setcx(IEEE754_UNDERFLOW);
199 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); 199 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
200 } else { 200 } else {
201 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 201 assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
202 assert(xm & DP_HIDDEN_BIT); 202 assert(xm & DP_HIDDEN_BIT);
203 203
204 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 204 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index def00ffc50fc..e0b2c450b963 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -97,7 +97,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
97{ 97{
98 assert(xm); /* we don't gen exact zeros (probably should) */ 98 assert(xm); /* we don't gen exact zeros (probably should) */
99 99
100 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */ 100 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */
101 assert(xm & (SP_HIDDEN_BIT << 3)); 101 assert(xm & (SP_HIDDEN_BIT << 3));
102 102
103 if (xe < SP_EMIN) { 103 if (xe < SP_EMIN) {
@@ -163,7 +163,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
163 /* strip grs bits */ 163 /* strip grs bits */
164 xm >>= 3; 164 xm >>= 3;
165 165
166 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 166 assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
167 assert(xe >= SP_EMIN); 167 assert(xe >= SP_EMIN);
168 168
169 if (xe > SP_EMAX) { 169 if (xe > SP_EMAX) {
@@ -196,7 +196,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
196 ieee754_setcx(IEEE754_UNDERFLOW); 196 ieee754_setcx(IEEE754_UNDERFLOW);
197 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); 197 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
198 } else { 198 } else {
199 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 199 assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
200 assert(xm & SP_HIDDEN_BIT); 200 assert(xm & SP_HIDDEN_BIT);
201 201
202 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); 202 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index dc7c5a5214a9..026cb59a914d 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void)
158 return 1; 158 return 1;
159} 159}
160 160
161/* XXX Check with wje if the Indy caches can differenciate between 161/* XXX Check with wje if the Indy caches can differentiate between
162 writeback + invalidate and just invalidate. */ 162 writeback + invalidate and just invalidate. */
163static struct bcache_ops indy_sc_ops = { 163static struct bcache_ops indy_sc_ops = {
164 .bc_enable = indy_sc_enable, 164 .bc_enable = indy_sc_enable,
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5037d5868cef..c17d7627f872 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -19,6 +19,7 @@
19#include <asm/cpu.h> 19#include <asm/cpu.h>
20#include <asm/cpu-type.h> 20#include <asm/cpu-type.h>
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#include <asm/hazards.h>
22#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
23#include <asm/pgtable.h> 24#include <asm/pgtable.h>
24#include <asm/tlb.h> 25#include <asm/tlb.h>
@@ -486,6 +487,10 @@ static void r4k_tlb_configure(void)
486 * be set to fixed-size pages. 487 * be set to fixed-size pages.
487 */ 488 */
488 write_c0_pagemask(PM_DEFAULT_MASK); 489 write_c0_pagemask(PM_DEFAULT_MASK);
490 back_to_back_c0_hazard();
491 if (read_c0_pagemask() != PM_DEFAULT_MASK)
492 panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
493
489 write_c0_wired(0); 494 write_c0_wired(0);
490 if (current_cpu_type() == CPU_R10000 || 495 if (current_cpu_type() == CPU_R10000 ||
491 current_cpu_type() == CPU_R12000 || 496 current_cpu_type() == CPU_R12000 ||
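
The tlb-r4k.c hunk above writes PM_DEFAULT_MASK to c0_pagemask, waits out the CP0 hazard with back_to_back_c0_hazard(), and reads the register back: an MMU that cannot represent the configured PAGE_SIZE silently keeps a different mask, and panicking at boot beats corrupting memory later. The write-then-verify idiom in isolation, against a simulated register that only accepts some values (names and limits hypothetical):

/* Sketch: write a "register", read it back, fail hard on mismatch. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static uint32_t fake_pagemask_reg;

/* Simulated hardware that only supports masks up to 0x1fff. */
static void reg_write(uint32_t val)
{
	fake_pagemask_reg = (val <= 0x1fff) ? val : 0;
}

static uint32_t reg_read(void)
{
	/* A real CP0 write needs a hazard barrier before the read-back. */
	return fake_pagemask_reg;
}

static void configure(uint32_t wanted)
{
	reg_write(wanted);
	if (reg_read() != wanted) {
		fprintf(stderr, "hardware rejected mask 0x%x\n", wanted);
		exit(EXIT_FAILURE);
	}
	printf("mask 0x%x accepted\n", wanted);
}

int main(void)
{
	configure(0x1fff);	/* supported */
	configure(0xffff);	/* unsupported: exits with an error */
	return 0;
}
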
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5a04b6f5c6fb..84c6e3fda84a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -12,7 +12,7 @@
12 * Copyright (C) 2011 MIPS Technologies, Inc. 12 * Copyright (C) 2011 MIPS Technologies, Inc.
13 * 13 *
14 * ... and the days got worse and worse and now you see 14 * ... and the days got worse and worse and now you see
15 * I've gone completly out of my mind. 15 * I've gone completely out of my mind.
16 * 16 *
17 * They're coming to take me a away haha 17 * They're coming to take me a away haha
18 * they're coming to take me a away hoho hihi haha 18 * they're coming to take me a away hoho hihi haha
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 8d0eb2643248..f1f88291451e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2000 by Silicon Graphics, Inc. 7 * Copyright (C) 2000 by Silicon Graphics, Inc.
8 * Copyright (C) 2004 by Christoph Hellwig 8 * Copyright (C) 2004 by Christoph Hellwig
9 * 9 *
10 * On SGI IP27 the ARC memory configuration data is completly bogus but 10 * On SGI IP27 the ARC memory configuration data is completely bogus but
11 * alternate easier to use mechanisms are available. 11 * alternate easier to use mechanisms are available.
12 */ 12 */
13#include <linux/init.h> 13#include <linux/init.h>
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 91c2a39cd5aa..67001277256c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
319 if (!mapping) 319 if (!mapping)
320 return; 320 return;
321 321
322 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 322 pgoff = page->index;
323 323
324 /* We have carefully arranged in arch_get_unmapped_area() that 324 /* We have carefully arranged in arch_get_unmapped_area() that
325 * *any* mappings of a file are always congruently mapped (whether 325 * *any* mappings of a file are always congruently mapped (whether
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b96877..6b3e7c6ee096 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -22,7 +22,7 @@
22#include <linux/swap.h> 22#include <linux/swap.h>
23#include <linux/unistd.h> 23#include <linux/unistd.h>
24#include <linux/nodemask.h> /* for node_online_map */ 24#include <linux/nodemask.h> /* for node_online_map */
25#include <linux/pagemap.h> /* for release_pages and page_cache_release */ 25#include <linux/pagemap.h> /* for release_pages */
26#include <linux/compat.h> 26#include <linux/compat.h>
27 27
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dfa863876778..6ca5f0525e57 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
732 return -ENOMEM; 732 return -ENOMEM;
733 733
734 sb->s_maxbytes = MAX_LFS_FILESIZE; 734 sb->s_maxbytes = MAX_LFS_FILESIZE;
735 sb->s_blocksize = PAGE_CACHE_SIZE; 735 sb->s_blocksize = PAGE_SIZE;
736 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 736 sb->s_blocksize_bits = PAGE_SHIFT;
737 sb->s_magic = SPUFS_MAGIC; 737 sb->s_magic = SPUFS_MAGIC;
738 sb->s_op = &s_ops; 738 sb->s_op = &s_ops;
739 sb->s_fs_info = info; 739 sb->s_fs_info = info;
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 0f3da2cb2bd6..255c7eec4481 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
278 sbi->uid = current_uid(); 278 sbi->uid = current_uid();
279 sbi->gid = current_gid(); 279 sbi->gid = current_gid();
280 sb->s_fs_info = sbi; 280 sb->s_fs_info = sbi;
281 sb->s_blocksize = PAGE_CACHE_SIZE; 281 sb->s_blocksize = PAGE_SIZE;
282 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 282 sb->s_blocksize_bits = PAGE_SHIFT;
283 sb->s_magic = HYPFS_MAGIC; 283 sb->s_magic = HYPFS_MAGIC;
284 sb->s_op = &hypfs_s_ops; 284 sb->s_op = &hypfs_s_ops;
285 if (hypfs_parse_options(data, sb)) 285 if (hypfs_parse_options(data, sb))
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 69247b4dcc43..cace818d86eb 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -23,7 +23,7 @@
23/** 23/**
24 * gmap_alloc - allocate a guest address space 24 * gmap_alloc - allocate a guest address space
25 * @mm: pointer to the parent mm_struct 25 * @mm: pointer to the parent mm_struct
26 * @limit: maximum size of the gmap address space 26 * @limit: maximum address of the gmap address space
27 * 27 *
28 * Returns a guest address space structure. 28 * Returns a guest address space structure.
29 */ 29 */
@@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
292 if ((from | to | len) & (PMD_SIZE - 1)) 292 if ((from | to | len) & (PMD_SIZE - 1))
293 return -EINVAL; 293 return -EINVAL;
294 if (len == 0 || from + len < from || to + len < to || 294 if (len == 0 || from + len < from || to + len < to ||
295 from + len > TASK_MAX_SIZE || to + len > gmap->asce_end) 295 from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
296 return -EINVAL; 296 return -EINVAL;
297 297
298 flush = 0; 298 flush = 0;
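
The gmap_map_segment() fix above treats TASK_MAX_SIZE and gmap->asce_end as inclusive maximum addresses (note the @limit kerneldoc change in the first hunk): comparing `from + len`, the first byte past the range, against an inclusive limit wrongly rejects a range whose last byte sits exactly at the limit. Comparing the last byte, `from + len - 1`, gets the boundary right once the existing wraparound check has run. A small demonstration with a hypothetical limit:

/* Sketch: inclusive-limit range check, comparing the last byte. */
#include <stdio.h>
#include <stdint.h>

#define LIMIT 0xffffULL	/* hypothetical maximum valid address (inclusive) */

static int range_ok(uint64_t from, uint64_t len)
{
	if (len == 0 || from + len < from)	/* empty or wrapping range */
		return 0;
	/* Wrong:  from + len > LIMIT      rejects ranges ending at LIMIT.
	 * Right:  from + len - 1 > LIMIT  checks the last byte itself. */
	return from + len - 1 <= LIMIT;
}

int main(void)
{
	/* Range whose last byte is exactly LIMIT: must be accepted. */
	printf("end-at-limit: %s\n",
	       range_ok(0xf000, 0x1000) ? "ok" : "rejected");
	/* One byte past the limit: must be rejected. */
	printf("past-limit:   %s\n",
	       range_ok(0xf000, 0x1001) ? "ok" : "rejected");
	return 0;
}
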
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f62a9f37f79f..b7e394485a5f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,7 +43,7 @@
43 43
44#define KVM_PIO_PAGE_OFFSET 1 44#define KVM_PIO_PAGE_OFFSET 1
45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2 45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
46#define KVM_HALT_POLL_NS_DEFAULT 500000 46#define KVM_HALT_POLL_NS_DEFAULT 400000
47 47
48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
49 49
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2367ae07eb76..319b08a5b6ed 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -146,31 +146,6 @@ int default_check_phys_apicid_present(int phys_apicid)
146 146
147struct boot_params boot_params; 147struct boot_params boot_params;
148 148
149/*
150 * Machine setup..
151 */
152static struct resource data_resource = {
153 .name = "Kernel data",
154 .start = 0,
155 .end = 0,
156 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
157};
158
159static struct resource code_resource = {
160 .name = "Kernel code",
161 .start = 0,
162 .end = 0,
163 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
164};
165
166static struct resource bss_resource = {
167 .name = "Kernel bss",
168 .start = 0,
169 .end = 0,
170 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
171};
172
173
174#ifdef CONFIG_X86_32 149#ifdef CONFIG_X86_32
175/* cpu data as detected by the assembly code in head.S */ 150/* cpu data as detected by the assembly code in head.S */
176struct cpuinfo_x86 new_cpu_data = { 151struct cpuinfo_x86 new_cpu_data = {
@@ -949,13 +924,6 @@ void __init setup_arch(char **cmdline_p)
949 924
950 mpx_mm_init(&init_mm); 925 mpx_mm_init(&init_mm);
951 926
952 code_resource.start = __pa_symbol(_text);
953 code_resource.end = __pa_symbol(_etext)-1;
954 data_resource.start = __pa_symbol(_etext);
955 data_resource.end = __pa_symbol(_edata)-1;
956 bss_resource.start = __pa_symbol(__bss_start);
957 bss_resource.end = __pa_symbol(__bss_stop)-1;
958
959#ifdef CONFIG_CMDLINE_BOOL 927#ifdef CONFIG_CMDLINE_BOOL
960#ifdef CONFIG_CMDLINE_OVERRIDE 928#ifdef CONFIG_CMDLINE_OVERRIDE
961 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 929 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
@@ -1019,11 +987,6 @@ void __init setup_arch(char **cmdline_p)
1019 987
1020 x86_init.resources.probe_roms(); 988 x86_init.resources.probe_roms();
1021 989
1022 /* after parse_early_param, so could debug it */
1023 insert_resource(&iomem_resource, &code_resource);
1024 insert_resource(&iomem_resource, &data_resource);
1025 insert_resource(&iomem_resource, &bss_resource);
1026
1027 e820_add_kernel_range(); 990 e820_add_kernel_range();
1028 trim_bios_range(); 991 trim_bios_range();
1029#ifdef CONFIG_X86_32 992#ifdef CONFIG_X86_32
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5ff3485acb60..01bd7b7a6866 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1116 break; 1116 break;
1117 case HVCALL_POST_MESSAGE: 1117 case HVCALL_POST_MESSAGE:
1118 case HVCALL_SIGNAL_EVENT: 1118 case HVCALL_SIGNAL_EVENT:
1119 /* don't bother userspace if it has no way to handle it */
1120 if (!vcpu_to_synic(vcpu)->active) {
1121 res = HV_STATUS_INVALID_HYPERCALL_CODE;
1122 break;
1123 }
1119 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 1124 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1120 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; 1125 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1121 vcpu->run->hyperv.u.hcall.input = param; 1126 vcpu->run->hyperv.u.hcall.input = param;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 443d2a57ad3d..1a2da0e5a373 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1369 1369
1370 hrtimer_start(&apic->lapic_timer.timer, 1370 hrtimer_start(&apic->lapic_timer.timer,
1371 ktime_add_ns(now, apic->lapic_timer.period), 1371 ktime_add_ns(now, apic->lapic_timer.period),
1372 HRTIMER_MODE_ABS); 1372 HRTIMER_MODE_ABS_PINNED);
1373 1373
1374 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" 1374 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
1375 PRIx64 ", " 1375 PRIx64 ", "
@@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1402 expire = ktime_add_ns(now, ns); 1402 expire = ktime_add_ns(now, ns);
1403 expire = ktime_sub_ns(expire, lapic_timer_advance_ns); 1403 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1404 hrtimer_start(&apic->lapic_timer.timer, 1404 hrtimer_start(&apic->lapic_timer.timer,
1405 expire, HRTIMER_MODE_ABS); 1405 expire, HRTIMER_MODE_ABS_PINNED);
1406 } else 1406 } else
1407 apic_timer_expired(apic); 1407 apic_timer_expired(apic);
1408 1408
@@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
1868 apic->vcpu = vcpu; 1868 apic->vcpu = vcpu;
1869 1869
1870 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, 1870 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
1871 HRTIMER_MODE_ABS); 1871 HRTIMER_MODE_ABS_PINNED);
1872 apic->lapic_timer.timer.function = apic_timer_fn; 1872 apic->lapic_timer.timer.function = apic_timer_fn;
1873 1873
1874 /* 1874 /*
@@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2003 2003
2004 timer = &vcpu->arch.apic->lapic_timer.timer; 2004 timer = &vcpu->arch.apic->lapic_timer.timer;
2005 if (hrtimer_cancel(timer)) 2005 if (hrtimer_cancel(timer))
2006 hrtimer_start_expires(timer, HRTIMER_MODE_ABS); 2006 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
2007} 2007}
2008 2008
2009/* 2009/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d097ef1..1ff4dbb73fb7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
557 !is_writable_pte(new_spte)) 557 !is_writable_pte(new_spte))
558 ret = true; 558 ret = true;
559 559
560 if (!shadow_accessed_mask) 560 if (!shadow_accessed_mask) {
561 /*
562 * We don't set page dirty when dropping non-writable spte.
563 * So do it now if the new spte is becoming non-writable.
564 */
565 if (ret)
566 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
561 return ret; 567 return ret;
568 }
562 569
563 /* 570 /*
564 * Flush TLB when accessed/dirty bits are changed in the page tables, 571 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
605 612
606 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 613 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
607 kvm_set_pfn_accessed(pfn); 614 kvm_set_pfn_accessed(pfn);
608 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) 615 if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
616 PT_WRITABLE_MASK))
609 kvm_set_pfn_dirty(pfn); 617 kvm_set_pfn_dirty(pfn);
610 return 1; 618 return 1;
611} 619}
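
The mmu.c hunks above cover EPT configurations without a hardware dirty bit: when shadow_dirty_mask is zero, writability of the old SPTE stands in for "may be dirty", so `old_spte & (shadow_dirty_mask ? shadow_dirty_mask : PT_WRITABLE_MASK)` tests whichever bit is meaningful on the running hardware. The selection idiom by itself, with hypothetical bit values:

/* Sketch: test a hardware status bit, falling back to a proxy bit
 * when the hardware does not implement the real one. */
#include <stdio.h>
#include <stdint.h>

#define WRITABLE_BIT 0x2ULL	/* hypothetical "writable" PTE bit */

static int was_dirty(uint64_t pte, uint64_t hw_dirty_mask)
{
	/* No hardware dirty bit? A writable page must be assumed dirty. */
	return (pte & (hw_dirty_mask ? hw_dirty_mask : WRITABLE_BIT)) != 0;
}

int main(void)
{
	uint64_t pte = WRITABLE_BIT;	/* writable, dirty bit clear */

	printf("hw dirty bit: %d\n", was_dirty(pte, 0x40));	/* 0: trust hw */
	printf("no dirty bit: %d\n", was_dirty(pte, 0));	/* 1: assume */
	return 0;
}
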
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 742d0f7d3556..0a2c70e43bc8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
6095 } 6095 }
6096 6096
6097 /* try to inject new event if pending */ 6097 /* try to inject new event if pending */
6098 if (vcpu->arch.nmi_pending) { 6098 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
6099 if (kvm_x86_ops->nmi_allowed(vcpu)) { 6099 --vcpu->arch.nmi_pending;
6100 --vcpu->arch.nmi_pending; 6100 vcpu->arch.nmi_injected = true;
6101 vcpu->arch.nmi_injected = true; 6101 kvm_x86_ops->set_nmi(vcpu);
6102 kvm_x86_ops->set_nmi(vcpu);
6103 }
6104 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 6102 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
6105 /* 6103 /*
6106 * Because interrupts can be injected asynchronously, we are 6104 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6569 if (inject_pending_event(vcpu, req_int_win) != 0) 6567 if (inject_pending_event(vcpu, req_int_win) != 0)
6570 req_immediate_exit = true; 6568 req_immediate_exit = true;
6571 /* enable NMI/IRQ window open exits if needed */ 6569 /* enable NMI/IRQ window open exits if needed */
6572 else if (vcpu->arch.nmi_pending) 6570 else {
6573 kvm_x86_ops->enable_nmi_window(vcpu); 6571 if (vcpu->arch.nmi_pending)
6574 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) 6572 kvm_x86_ops->enable_nmi_window(vcpu);
6575 kvm_x86_ops->enable_irq_window(vcpu); 6573 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6574 kvm_x86_ops->enable_irq_window(vcpu);
6575 }
6576 6576
6577 if (kvm_lapic_enabled(vcpu)) { 6577 if (kvm_lapic_enabled(vcpu)) {
6578 update_cr8_intercept(vcpu); 6578 update_cr8_intercept(vcpu);
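
The two x86.c hunks make NMI and interrupt injection independent. Before, a pending NMI took the whole branch even when nmi_allowed() said it could not be delivered, so an injectable IRQ sitting behind it was never injected and never got its exit window opened; after, the NMI branch is taken only when the NMI can really go in, and vcpu_enter_guest() may request both the NMI window and the IRQ window in one pass. Sketch of the before/after injection logic (inject_nmi/inject_irq are hypothetical shorthands):

    /* Before: a blocked NMI shadowed any injectable IRQ. */
    if (vcpu->arch.nmi_pending) {
            if (kvm_x86_ops->nmi_allowed(vcpu))
                    inject_nmi(vcpu);       /* else: nothing injected at all */
    } else if (kvm_cpu_has_injectable_intr(vcpu)) {
            inject_irq(vcpu);
    }

    /* After: only take the NMI branch when delivery is actually possible. */
    if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu))
            inject_nmi(vcpu);
    else if (kvm_cpu_has_injectable_intr(vcpu))
            inject_irq(vcpu);
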
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index abf4901c917b..db52a7fafcc2 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -66,7 +66,7 @@ static u32 xen_apic_read(u32 reg)
66 66
67 ret = HYPERVISOR_platform_op(&op); 67 ret = HYPERVISOR_platform_op(&op);
68 if (ret) 68 if (ret)
69 return 0; 69 op.u.pcpu_info.apic_id = BAD_APICID;
70 70
71 return op.u.pcpu_info.apic_id << 24; 71 return op.u.pcpu_info.apic_id << 24;
72} 72}
@@ -142,6 +142,14 @@ static void xen_silent_inquire(int apicid)
142{ 142{
143} 143}
144 144
145static int xen_cpu_present_to_apicid(int cpu)
146{
147 if (cpu_present(cpu))
148 return xen_get_apic_id(xen_apic_read(APIC_ID));
149 else
150 return BAD_APICID;
151}
152
145static struct apic xen_pv_apic = { 153static struct apic xen_pv_apic = {
146 .name = "Xen PV", 154 .name = "Xen PV",
147 .probe = xen_apic_probe_pv, 155 .probe = xen_apic_probe_pv,
@@ -162,7 +170,7 @@ static struct apic xen_pv_apic = {
162 170
163 .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */ 171 .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
164 .setup_apic_routing = NULL, 172 .setup_apic_routing = NULL,
165 .cpu_present_to_apicid = default_cpu_present_to_apicid, 173 .cpu_present_to_apicid = xen_cpu_present_to_apicid,
166 .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */ 174 .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */
167 .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */ 175 .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */
168 .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */ 176 .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */
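
Two related fixes in xen/apic.c: a failed hypercall in xen_apic_read() now yields BAD_APICID instead of 0, because 0 is a valid APIC ID and returning it silently mapped every error onto CPU 0; and the PV APIC gets its own cpu_present_to_apicid op that asks the hypervisor rather than the native ACPI-derived tables, which are meaningless under PV. The error-signalling idea, as a sketch with a hypothetical hypercall wrapper:

    /* Signal failure with an out-of-band sentinel, never with a valid ID. */
    static u32 read_apic_id_or_bad(void)
    {
            u32 id;

            if (query_hypervisor_apic_id(&id))  /* hypothetical wrapper */
                    return BAD_APICID;          /* 0 would alias a real ID */
            return id;
    }
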
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3c6d17fd423a..719cf291dcdf 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -545,6 +545,8 @@ static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
545 * data back is to call: 545 * data back is to call:
546 */ 546 */
547 tick_nohz_idle_enter(); 547 tick_nohz_idle_enter();
548
549 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
548} 550}
549 551
550#else /* !CONFIG_HOTPLUG_CPU */ 552#else /* !CONFIG_HOTPLUG_CPU */
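
The xen/smp.c fix matters for CPU hotplug: when an offlined vCPU is brought back, xen_play_dead() returns from the hypervisor and must park the CPU in the scheduler's idle loop; falling off the end instead leaves it running in stale context. cpu_startup_entry(CPUHP_AP_ONLINE_IDLE) is the standard way for a secondary CPU to rejoin as idle. Shape of such a handler, sketched with a hypothetical hypercall helper:

    static void play_dead(void)
    {
            tell_hypervisor_cpu_is_down();  /* hypothetical; returns on re-plug */
            /* Back from the dead: rejoin as an idle CPU. */
            tick_nohz_idle_enter();
            cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
    }
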
diff --git a/block/bio.c b/block/bio.c
index f124a0a624fc..807d25e466ec 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1339 * release the pages we didn't map into the bio, if any 1339 * release the pages we didn't map into the bio, if any
1340 */ 1340 */
1341 while (j < page_limit) 1341 while (j < page_limit)
1342 page_cache_release(pages[j++]); 1342 put_page(pages[j++]);
1343 } 1343 }
1344 1344
1345 kfree(pages); 1345 kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1365 for (j = 0; j < nr_pages; j++) { 1365 for (j = 0; j < nr_pages; j++) {
1366 if (!pages[j]) 1366 if (!pages[j])
1367 break; 1367 break;
1368 page_cache_release(pages[j]); 1368 put_page(pages[j]);
1369 } 1369 }
1370 out: 1370 out:
1371 kfree(pages); 1371 kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
1385 if (bio_data_dir(bio) == READ) 1385 if (bio_data_dir(bio) == READ)
1386 set_page_dirty_lock(bvec->bv_page); 1386 set_page_dirty_lock(bvec->bv_page);
1387 1387
1388 page_cache_release(bvec->bv_page); 1388 put_page(bvec->bv_page);
1389 } 1389 }
1390 1390
1391 bio_put(bio); 1391 bio_put(bio);
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
1615 * the BIO and the offending pages and re-dirty the pages in process context. 1615 * the BIO and the offending pages and re-dirty the pages in process context.
1616 * 1616 *
1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1618 * here on. It will run one page_cache_release() against each page and will 1618 * here on. It will run one put_page() against each page and will run one
1619 * run one bio_put() against the BIO. 1619 * bio_put() against the BIO.
1620 */ 1620 */
1621 1621
1622static void bio_dirty_fn(struct work_struct *work); 1622static void bio_dirty_fn(struct work_struct *work);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
1658 struct page *page = bvec->bv_page; 1658 struct page *page = bvec->bv_page;
1659 1659
1660 if (PageDirty(page) || PageCompound(page)) { 1660 if (PageDirty(page) || PageCompound(page)) {
1661 page_cache_release(page); 1661 put_page(page);
1662 bvec->bv_page = NULL; 1662 bvec->bv_page = NULL;
1663 } else { 1663 } else {
1664 nr_clean_pages++; 1664 nr_clean_pages++;
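
The bio.c hunks belong to a tree-wide rename: page_cache_release() had been a plain alias for put_page() ever since the page cache stopped using a separate reference scheme, and this kernel finally drops the alias (together with PAGE_CACHE_SIZE, below). The pairing rule is unchanged: every page pinned via get_user_pages() is dropped with put_page(), preceded by set_page_dirty_lock() if the device may have written into it. Sketch, assuming kernel context and hypothetical npages/pages/data_dir:

    /* Release pages pinned for I/O, marking read targets dirty first. */
    for (i = 0; i < npages; i++) {
            if (data_dir == READ)           /* device wrote into the page */
                    set_page_dirty_lock(pages[i]);
            put_page(pages[i]);
    }
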
diff --git a/block/blk-core.c b/block/blk-core.c
index 827f8badd143..b60537b2c35b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
706 goto fail_id; 706 goto fail_id;
707 707
708 q->backing_dev_info.ra_pages = 708 q->backing_dev_info.ra_pages =
709 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 709 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
710 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; 710 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
711 q->backing_dev_info.name = "block"; 711 q->backing_dev_info.name = "block";
712 q->node = node_id; 712 q->node = node_id;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666aafd1..331e4eee0dda 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
239 struct queue_limits *limits = &q->limits; 239 struct queue_limits *limits = &q->limits;
240 unsigned int max_sectors; 240 unsigned int max_sectors;
241 241
242 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { 242 if ((max_hw_sectors << 9) < PAGE_SIZE) {
243 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 243 max_hw_sectors = 1 << (PAGE_SHIFT - 9);
244 printk(KERN_INFO "%s: set to minimum %d\n", 244 printk(KERN_INFO "%s: set to minimum %d\n",
245 __func__, max_hw_sectors); 245 __func__, max_hw_sectors);
246 } 246 }
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
329 **/ 329 **/
330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) 330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
331{ 331{
332 if (max_size < PAGE_CACHE_SIZE) { 332 if (max_size < PAGE_SIZE) {
333 max_size = PAGE_CACHE_SIZE; 333 max_size = PAGE_SIZE;
334 printk(KERN_INFO "%s: set to minimum %d\n", 334 printk(KERN_INFO "%s: set to minimum %d\n",
335 __func__, max_size); 335 __func__, max_size);
336 } 336 }
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
760 **/ 760 **/
761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) 761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
762{ 762{
763 if (mask < PAGE_CACHE_SIZE - 1) { 763 if (mask < PAGE_SIZE - 1) {
764 mask = PAGE_CACHE_SIZE - 1; 764 mask = PAGE_SIZE - 1;
765 printk(KERN_INFO "%s: set to minimum %lx\n", 765 printk(KERN_INFO "%s: set to minimum %lx\n",
766 __func__, mask); 766 __func__, mask);
767 } 767 }
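
This and the surrounding block-layer hunks are mechanical substitutions: PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT had been equal to PAGE_SIZE and PAGE_SHIFT for years, so the duplicate names were removed. The shift arithmetic they feed is worth spelling out: with 2^PAGE_SHIFT-byte pages there are 1 << (PAGE_SHIFT - 9) 512-byte sectors and 1 << (PAGE_SHIFT - 10) kilobytes per page. A sketch of the conversions, for the common PAGE_SHIFT == 12 (4 KiB pages) and hypothetical ra_pages/sector:

    unsigned long sectors_per_page = 1UL << (PAGE_SHIFT - 9); /* 4096/512 = 8 */
    unsigned long ra_kb = ra_pages << (PAGE_SHIFT - 10);      /* pages -> KB */
    pgoff_t index = sector >> (PAGE_SHIFT - 9);               /* sector -> page */
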
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index dd93763057ce..995b58d46ed1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
76static ssize_t queue_ra_show(struct request_queue *q, char *page) 76static ssize_t queue_ra_show(struct request_queue *q, char *page)
77{ 77{
78 unsigned long ra_kb = q->backing_dev_info.ra_pages << 78 unsigned long ra_kb = q->backing_dev_info.ra_pages <<
79 (PAGE_CACHE_SHIFT - 10); 79 (PAGE_SHIFT - 10);
80 80
81 return queue_var_show(ra_kb, (page)); 81 return queue_var_show(ra_kb, (page));
82} 82}
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
90 if (ret < 0) 90 if (ret < 0)
91 return ret; 91 return ret;
92 92
93 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); 93 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
94 94
95 return ret; 95 return ret;
96} 96}
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
117 if (blk_queue_cluster(q)) 117 if (blk_queue_cluster(q))
118 return queue_var_show(queue_max_segment_size(q), (page)); 118 return queue_var_show(queue_max_segment_size(q), (page));
119 119
120 return queue_var_show(PAGE_CACHE_SIZE, (page)); 120 return queue_var_show(PAGE_SIZE, (page));
121} 121}
122 122
123static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 123static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
198{ 198{
199 unsigned long max_sectors_kb, 199 unsigned long max_sectors_kb,
200 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, 200 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
201 page_kb = 1 << (PAGE_CACHE_SHIFT - 10); 201 page_kb = 1 << (PAGE_SHIFT - 10);
202 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); 202 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
203 203
204 if (ret < 0) 204 if (ret < 0)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e3c591dd8f19..4a349787bc62 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4075 * idle timer unplug to continue working. 4075 * idle timer unplug to continue working.
4076 */ 4076 */
4077 if (cfq_cfqq_wait_request(cfqq)) { 4077 if (cfq_cfqq_wait_request(cfqq)) {
4078 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 4078 if (blk_rq_bytes(rq) > PAGE_SIZE ||
4079 cfqd->busy_queues > 1) { 4079 cfqd->busy_queues > 1) {
4080 cfq_del_timer(cfqd, cfqq); 4080 cfq_del_timer(cfqd, cfqq);
4081 cfq_clear_cfqq_wait_request(cfqq); 4081 cfq_clear_cfqq_wait_request(cfqq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f678c733df40..556826ac7cb4 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
710 return -EINVAL; 710 return -EINVAL;
711 bdi = blk_get_backing_dev_info(bdev); 711 bdi = blk_get_backing_dev_info(bdev);
712 return compat_put_long(arg, 712 return compat_put_long(arg,
713 (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 713 (bdi->ra_pages * PAGE_SIZE) / 512);
714 case BLKROGET: /* compatible */ 714 case BLKROGET: /* compatible */
715 return compat_put_int(arg, bdev_read_only(bdev) != 0); 715 return compat_put_int(arg, bdev_read_only(bdev) != 0);
716 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ 716 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
729 if (!capable(CAP_SYS_ADMIN)) 729 if (!capable(CAP_SYS_ADMIN))
730 return -EACCES; 730 return -EACCES;
731 bdi = blk_get_backing_dev_info(bdev); 731 bdi = blk_get_backing_dev_info(bdev);
732 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 732 bdi->ra_pages = (arg * 512) / PAGE_SIZE;
733 return 0; 733 return 0;
734 case BLKGETSIZE: 734 case BLKGETSIZE:
735 size = i_size_read(bdev->bd_inode); 735 size = i_size_read(bdev->bd_inode);
diff --git a/block/ioctl.c b/block/ioctl.c
index d8996bbd7f12..4ff1f92f89ca 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
550 if (!arg) 550 if (!arg)
551 return -EINVAL; 551 return -EINVAL;
552 bdi = blk_get_backing_dev_info(bdev); 552 bdi = blk_get_backing_dev_info(bdev);
553 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 553 return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
554 case BLKROGET: 554 case BLKROGET:
555 return put_int(arg, bdev_read_only(bdev) != 0); 555 return put_int(arg, bdev_read_only(bdev) != 0);
556 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ 556 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
578 if(!capable(CAP_SYS_ADMIN)) 578 if(!capable(CAP_SYS_ADMIN))
579 return -EACCES; 579 return -EACCES;
580 bdi = blk_get_backing_dev_info(bdev); 580 bdi = blk_get_backing_dev_info(bdev);
581 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 581 bdi->ra_pages = (arg * 512) / PAGE_SIZE;
582 return 0; 582 return 0;
583 case BLKBSZSET: 583 case BLKBSZSET:
584 return blkdev_bszset(bdev, mode, argp); 584 return blkdev_bszset(bdev, mode, argp);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5d8701941054..2c6ae2aed2c4 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
566{ 566{
567 struct address_space *mapping = bdev->bd_inode->i_mapping; 567 struct address_space *mapping = bdev->bd_inode->i_mapping;
568 568
569 return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), 569 return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
570 NULL); 570 NULL);
571} 571}
572 572
573unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) 573unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
584 if (PageError(page)) 584 if (PageError(page))
585 goto fail; 585 goto fail;
586 p->v = page; 586 p->v = page;
587 return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9); 587 return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
588fail: 588fail:
589 page_cache_release(page); 589 put_page(page);
590 } 590 }
591 p->v = NULL; 591 p->v = NULL;
592 return NULL; 592 return NULL;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
397 WARN_ON(d->flags & DEVFL_UP); 397 WARN_ON(d->flags & DEVFL_UP);
398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); 398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
399 q->backing_dev_info.name = "aoe"; 399 q->backing_dev_info.name = "aoe";
400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; 400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
401 d->bufpool = mp; 401 d->bufpool = mp;
402 d->blkq = gd->queue = q; 402 d->blkq = gd->queue = q;
403 q->queuedata = d; 403 q->queuedata = d;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f7ecc287d733..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
374 struct page *page, int rw) 374 struct page *page, int rw)
375{ 375{
376 struct brd_device *brd = bdev->bd_disk->private_data; 376 struct brd_device *brd = bdev->bd_disk->private_data;
377 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); 377 int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
378 page_endio(page, rw & WRITE, err); 378 page_endio(page, rw & WRITE, err);
379 return err; 379 return err;
380} 380}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4cad75..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@ struct bm_extent {
1327#endif 1327#endif
1328#endif 1328#endif
1329 1329
1330/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 1330/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
1331 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 1331 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
1332 * Since we may live in a mixed-platform cluster, 1332 * Since we may live in a mixed-platform cluster,
1333 * we limit us to a platform agnostic constant here for now. 1333 * we limit us to a platform agnostic constant here for now.
1334 * A followup commit may allow even bigger BIO sizes, 1334 * A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 226eb0c9f0fb..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
1178 blk_queue_max_hw_sectors(q, max_hw_sectors); 1178 blk_queue_max_hw_sectors(q, max_hw_sectors);
1179 /* This is the workaround for "bio would need to, but cannot, be split" */ 1179 /* This is the workaround for "bio would need to, but cannot, be split" */
1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1181 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 1181 blk_queue_segment_boundary(q, PAGE_SIZE-1);
1182 1182
1183 if (b) { 1183 if (b) {
1184 struct drbd_connection *connection = first_peer_device(device)->connection; 1184 struct drbd_connection *connection = first_peer_device(device)->connection;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f1a55d1888cb..6f3369de232f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -622,7 +622,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
622 set_page_dirty(page); 622 set_page_dirty(page);
623 623
624 mark_page_accessed(page); 624 mark_page_accessed(page);
625 page_cache_release(page); 625 put_page(page);
626 } 626 }
627 627
628 sg_free_table(ttm->sg); 628 sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
481 481
482 release: 482 release:
483 for_each_sg(sgt->sgl, sg, num, i) 483 for_each_sg(sgt->sgl, sg, num, i)
484 page_cache_release(sg_page(sg)); 484 put_page(sg_page(sg));
485 free_table: 485 free_table:
486 sg_free_table(sgt); 486 sg_free_table(sgt);
487 free_sgt: 487 free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
502 if (dobj->obj.filp) { 502 if (dobj->obj.filp) {
503 struct scatterlist *sg; 503 struct scatterlist *sg;
504 for_each_sg(sgt->sgl, sg, sgt->nents, i) 504 for_each_sg(sgt->sgl, sg, sgt->nents, i)
505 page_cache_release(sg_page(sg)); 505 put_page(sg_page(sg));
506 } 506 }
507 507
508 sg_free_table(sgt); 508 sg_free_table(sgt);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
534 534
535fail: 535fail:
536 while (i--) 536 while (i--)
537 page_cache_release(pages[i]); 537 put_page(pages[i]);
538 538
539 drm_free_large(pages); 539 drm_free_large(pages);
540 return ERR_CAST(p); 540 return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
569 mark_page_accessed(pages[i]); 569 mark_page_accessed(pages[i]);
570 570
571 /* Undo the reference we took when populating the table */ 571 /* Undo the reference we took when populating the table */
572 page_cache_release(pages[i]); 572 put_page(pages[i]);
573 } 573 }
574 574
575 drm_free_large(pages); 575 drm_free_large(pages);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
220 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to 220 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
221 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon. 221 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
222 */ 222 */
223static int __deprecated 223static int
224i2c_dp_aux_add_bus(struct i2c_adapter *adapter) 224i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
225{ 225{
226 int error; 226 int error;
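
The gma500 hunk just drops the __deprecated marker from i2c_dp_aux_add_bus(): the attribute makes every call site emit a build warning, and the FIXME comment above already records that the driver should move to drm_dp_helper.c, so the warning was pure noise. For reference, a sketch of what the attribute does (hypothetical function):

    /* Callers of old_helper() get a compile-time deprecation warning
     * (when the kernel is built with deprecation warnings enabled). */
    static int __deprecated old_helper(void)
    {
            return 0;
    }
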
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3ac589e..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 drm_clflush_virt_range(vaddr, PAGE_SIZE); 177 drm_clflush_virt_range(vaddr, PAGE_SIZE);
178 kunmap_atomic(src); 178 kunmap_atomic(src);
179 179
180 page_cache_release(page); 180 put_page(page);
181 vaddr += PAGE_SIZE; 181 vaddr += PAGE_SIZE;
182 } 182 }
183 183
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
243 set_page_dirty(page); 243 set_page_dirty(page);
244 if (obj->madv == I915_MADV_WILLNEED) 244 if (obj->madv == I915_MADV_WILLNEED)
245 mark_page_accessed(page); 245 mark_page_accessed(page);
246 page_cache_release(page); 246 put_page(page);
247 vaddr += PAGE_SIZE; 247 vaddr += PAGE_SIZE;
248 } 248 }
249 obj->dirty = 0; 249 obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2206 if (obj->madv == I915_MADV_WILLNEED) 2206 if (obj->madv == I915_MADV_WILLNEED)
2207 mark_page_accessed(page); 2207 mark_page_accessed(page);
2208 2208
2209 page_cache_release(page); 2209 put_page(page);
2210 } 2210 }
2211 obj->dirty = 0; 2211 obj->dirty = 0;
2212 2212
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2346err_pages: 2346err_pages:
2347 sg_mark_end(sg); 2347 sg_mark_end(sg);
2348 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2348 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2349 page_cache_release(sg_page_iter_page(&sg_iter)); 2349 put_page(sg_page_iter_page(&sg_iter));
2350 sg_free_table(st); 2350 sg_free_table(st);
2351 kfree(st); 2351 kfree(st);
2352 2352
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6be40f3ba2c7..18ba8139e922 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
683 set_page_dirty(page); 683 set_page_dirty(page);
684 684
685 mark_page_accessed(page); 685 mark_page_accessed(page);
686 page_cache_release(page); 686 put_page(page);
687 } 687 }
688 obj->dirty = 0; 688 obj->dirty = 0;
689 689
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c008312e1bcd..7dddfdce85e6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -615,7 +615,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
615 set_page_dirty(page); 615 set_page_dirty(page);
616 616
617 mark_page_accessed(page); 617 mark_page_accessed(page);
618 page_cache_release(page); 618 put_page(page);
619 } 619 }
620 620
621 sg_free_table(ttm->sg); 621 sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
311 goto out_err; 311 goto out_err;
312 312
313 copy_highpage(to_page, from_page); 313 copy_highpage(to_page, from_page);
314 page_cache_release(from_page); 314 put_page(from_page);
315 } 315 }
316 316
317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) 317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
361 copy_highpage(to_page, from_page); 361 copy_highpage(to_page, from_page);
362 set_page_dirty(to_page); 362 set_page_dirty(to_page);
363 mark_page_accessed(to_page); 363 mark_page_accessed(to_page);
364 page_cache_release(to_page); 364 put_page(to_page);
365 } 365 }
366 366
367 ttm_tt_unpopulate(ttm); 367 ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index e797dfc07ae3..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
188 if (NULL != (page = vsg->pages[i])) { 188 if (NULL != (page = vsg->pages[i])) {
189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
190 SetPageDirty(page); 190 SetPageDirty(page);
191 page_cache_release(page); 191 put_page(page);
192 } 192 }
193 } 193 }
194 case dr_via_pages_alloc: 194 case dr_via_pages_alloc:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 92745d755272..38f917a6c778 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1992/** 1992/**
1993 * i40iw_get_dst_ipv6 1993 * i40iw_get_dst_ipv6
1994 */ 1994 */
1995#if IS_ENABLED(CONFIG_IPV6)
1996static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, 1995static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
1997 struct sockaddr_in6 *dst_addr) 1996 struct sockaddr_in6 *dst_addr)
1998{ 1997{
@@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2008 dst = ip6_route_output(&init_net, NULL, &fl6); 2007 dst = ip6_route_output(&init_net, NULL, &fl6);
2009 return dst; 2008 return dst;
2010} 2009}
2011#endif
2012 2010
2013/** 2011/**
2014 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address 2012 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2016 * @dst_ip: remote ip address 2014 * @dst_ip: remote ip address
2017 * @arpindex: if there is an arp entry 2015 * @arpindex: if there is an arp entry
2018 */ 2016 */
2019#if IS_ENABLED(CONFIG_IPV6)
2020static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, 2017static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2021 u32 *src, 2018 u32 *src,
2022 u32 *dest, 2019 u32 *dest,
@@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2089 dst_release(dst); 2086 dst_release(dst);
2090 return rc; 2087 return rc;
2091} 2088}
2092#endif
2093 2089
2094/** 2090/**
2095 * i40iw_ipv4_is_loopback - check if loopback 2091 * i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
2190 cm_info->loc_addr[0], 2186 cm_info->loc_addr[0],
2191 cm_info->rem_addr[0], 2187 cm_info->rem_addr[0],
2192 oldarpindex); 2188 oldarpindex);
2193#if IS_ENABLED(CONFIG_IPV6) 2189 else if (IS_ENABLED(CONFIG_IPV6))
2194 else
2195 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, 2190 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2196 cm_info->loc_addr, 2191 cm_info->loc_addr,
2197 cm_info->rem_addr, 2192 cm_info->rem_addr,
2198 oldarpindex); 2193 oldarpindex);
2199#endif 2194 else
2195 arpindex = -EINVAL;
2200 } 2196 }
2201 if (arpindex < 0) { 2197 if (arpindex < 0) {
2202 i40iw_pr_err("cm_node arpindex\n"); 2198 i40iw_pr_err("cm_node arpindex\n");
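
The i40iw change trades preprocessor guards for a C-level test: IS_ENABLED(CONFIG_IPV6) is a compile-time constant, so the IPv6 helpers are now always compiled (catching bit-rot even in CONFIG_IPV6=n builds) while the dead branch is eliminated by the compiler, with -EINVAL as the fallback when IPv6 is configured out. The pattern works whenever the callee still compiles without the option, as these static functions do. Sketch with a hypothetical resolver:

    /* Prefer a constant-folded branch over #ifdef where the code compiles. */
    if (IS_ENABLED(CONFIG_IPV6))
            arpindex = resolve_neigh_ipv6(iwdev, src, dst); /* hypothetical */
    else
            arpindex = -EINVAL;     /* IPv6 compiled out */
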
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f16c818ad2e6..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); 776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, 777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
778 unsigned long end); 778 unsigned long end);
779int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
780 u8 port, struct ifla_vf_info *info);
781int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
782 u8 port, int state);
783int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
784 u8 port, struct ifla_vf_stats *stats);
785int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
786 u64 guid, int type);
787
788#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 779#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
789static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) 780static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
790{ 781{
@@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
801 792
802#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 793#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
803 794
795int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
796 u8 port, struct ifla_vf_info *info);
797int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
798 u8 port, int state);
799int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
800 u8 port, struct ifla_vf_stats *stats);
801int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
802 u64 guid, int type);
803
804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, 804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
805 int index); 805 int index);
806 806
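
The mlx5_ib.h fix is a declaration-placement bug: the VF management prototypes (mlx5_ib_get_vf_config() and friends) sat inside the CONFIG_INFINIBAND_ON_DEMAND_PAGING block even though the functions are built unconditionally, so an ODP-disabled build saw no declarations for them. Moving them after the #endif restores the usual layout, sketched here with a hypothetical ODP-only helper:

    #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
    int odp_only_helper(struct mlx5_ib_dev *dev);           /* conditional */
    #else
    static inline int odp_only_helper(struct mlx5_ib_dev *dev) { return 0; }
    #endif

    /* Unconditional API: declared outside the guard. */
    int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
                              u8 port, struct ifla_vf_info *info);
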
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7df6b4f1548a..bef71751aade 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
322{ 322{
323 ClearPagePrivate(page); 323 ClearPagePrivate(page);
324 set_page_private(page, 0); 324 set_page_private(page, 0);
325 page_cache_release(page); 325 put_page(page);
326} 326}
327static void free_buffers(struct page *page) 327static void free_buffers(struct page *page)
328{ 328{
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
228 "au8522", 0x8e >> 1, NULL); 228 "au8522", 0x8e >> 1, NULL);
229 if (sd == NULL) 229 if (sd == NULL)
230 pr_err("analog subdev registration failed\n"); 230 pr_err("analog subdev registration failed\n");
231#ifdef CONFIG_MEDIA_CONTROLLER
232 if (sd)
233 dev->decoder = &sd->entity;
234#endif
235 } 231 }
236 232
237 /* Setup tuners */ 233 /* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 5dc82e8c8670..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
137#ifdef CONFIG_MEDIA_CONTROLLER 137#ifdef CONFIG_MEDIA_CONTROLLER
138 if (dev->media_dev && 138 if (dev->media_dev &&
139 media_devnode_is_registered(&dev->media_dev->devnode)) { 139 media_devnode_is_registered(&dev->media_dev->devnode)) {
140 /* clear enable_source, disable_source */
141 dev->media_dev->source_priv = NULL;
142 dev->media_dev->enable_source = NULL;
143 dev->media_dev->disable_source = NULL;
144
140 media_device_unregister(dev->media_dev); 145 media_device_unregister(dev->media_dev);
141 media_device_cleanup(dev->media_dev); 146 media_device_cleanup(dev->media_dev);
147 kfree(dev->media_dev);
142 dev->media_dev = NULL; 148 dev->media_dev = NULL;
143 } 149 }
144#endif 150#endif
@@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
166 Set the status so poll routines can check and avoid 172 Set the status so poll routines can check and avoid
167 access after disconnect. 173 access after disconnect.
168 */ 174 */
169 dev->dev_state = DEV_DISCONNECTED; 175 set_bit(DEV_DISCONNECTED, &dev->dev_state);
170 176
171 au0828_rc_unregister(dev); 177 au0828_rc_unregister(dev);
172 /* Digital TV */ 178 /* Digital TV */
@@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev,
192#ifdef CONFIG_MEDIA_CONTROLLER 198#ifdef CONFIG_MEDIA_CONTROLLER
193 struct media_device *mdev; 199 struct media_device *mdev;
194 200
195 mdev = media_device_get_devres(&udev->dev); 201 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
196 if (!mdev) 202 if (!mdev)
197 return -ENOMEM; 203 return -ENOMEM;
198 204
@@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev,
456{ 462{
457#ifdef CONFIG_MEDIA_CONTROLLER 463#ifdef CONFIG_MEDIA_CONTROLLER
458 int ret; 464 int ret;
459 struct media_entity *entity, *demod = NULL, *tuner = NULL; 465 struct media_entity *entity, *demod = NULL;
466 struct media_link *link;
460 467
461 if (!dev->media_dev) 468 if (!dev->media_dev)
462 return 0; 469 return 0;
@@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev,
482 } 489 }
483 490
484 /* 491 /*
485 * Find tuner and demod to disable the link between 492 * Find tuner, decoder and demod.
486 * the two to avoid disable step when tuner is requested 493 *
487 * by video or audio. Note that this step can't be done 494 * The tuner and decoder should be cached, as they'll be used by
488 * until dvb graph is created during dvb register. 495 * au0828_enable_source.
496 *
497 * It also needs to disable the link between tuner and
498 * decoder/demod, to avoid disable step when tuner is requested
499 * by video or audio. Note that this step can't be done until dvb
500 * graph is created during dvb register.
489 */ 501 */
490 media_device_for_each_entity(entity, dev->media_dev) { 502 media_device_for_each_entity(entity, dev->media_dev) {
491 if (entity->function == MEDIA_ENT_F_DTV_DEMOD) 503 switch (entity->function) {
504 case MEDIA_ENT_F_TUNER:
505 dev->tuner = entity;
506 break;
507 case MEDIA_ENT_F_ATV_DECODER:
508 dev->decoder = entity;
509 break;
510 case MEDIA_ENT_F_DTV_DEMOD:
492 demod = entity; 511 demod = entity;
493 else if (entity->function == MEDIA_ENT_F_TUNER) 512 break;
494 tuner = entity; 513 }
495 } 514 }
496 /* Disable link between tuner and demod */
497 if (tuner && demod) {
498 struct media_link *link;
499 515
500 list_for_each_entry(link, &demod->links, list) { 516 /* Disable link between tuner->demod and/or tuner->decoder */
501 if (link->sink->entity == demod && 517 if (dev->tuner) {
502 link->source->entity == tuner) { 518 list_for_each_entry(link, &dev->tuner->links, list) {
519 if (demod && link->sink->entity == demod)
520 media_entity_setup_link(link, 0);
521 if (dev->decoder && link->sink->entity == dev->decoder)
503 media_entity_setup_link(link, 0); 522 media_entity_setup_link(link, 0);
504 }
505 } 523 }
506 } 524 }
507 525
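
The au0828-core changes fix the media_device's ownership and teardown order: the structure is now allocated with kzalloc() and freed explicitly rather than obtained as a devres of the USB device, whose lifetime the driver does not control; the enable_source/disable_source hooks are cleared before media_device_unregister() so nothing calls into a half-torn-down device; and the tuner/decoder entities are cached at registration time for later use by the source handlers. The teardown ordering is the part worth imitating (kernel context assumed):

    /* Detach callbacks first, then unregister, then free. */
    mdev->source_priv = NULL;
    mdev->enable_source = NULL;
    mdev->disable_source = NULL;
    media_device_unregister(mdev);
    media_device_cleanup(mdev);
    kfree(mdev);
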
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
130 bool first = true; 130 bool first = true;
131 131
132 /* do nothing if device is disconnected */ 132 /* do nothing if device is disconnected */
133 if (ir->dev->dev_state == DEV_DISCONNECTED) 133 if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
134 return 0; 134 return 0;
135 135
136 /* Check IR int */ 136 /* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
260 cancel_delayed_work_sync(&ir->work); 260 cancel_delayed_work_sync(&ir->work);
261 261
262 /* do nothing if device is disconnected */ 262 /* do nothing if device is disconnected */
263 if (ir->dev->dev_state != DEV_DISCONNECTED) { 263 if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
264 /* Disable IR */ 264 /* Disable IR */
265 au8522_rc_clear(ir, 0xe0, 1 << 4); 265 au8522_rc_clear(ir, 0xe0, 1 << 4);
266 } 266 }
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13f6dab9ccc2..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
106 106
107static int check_dev(struct au0828_dev *dev) 107static int check_dev(struct au0828_dev *dev)
108{ 108{
109 if (dev->dev_state & DEV_DISCONNECTED) { 109 if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
110 pr_info("v4l2 ioctl: device not present\n"); 110 pr_info("v4l2 ioctl: device not present\n");
111 return -ENODEV; 111 return -ENODEV;
112 } 112 }
113 113
114 if (dev->dev_state & DEV_MISCONFIGURED) { 114 if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
115 pr_info("v4l2 ioctl: device is misconfigured; " 115 pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
116 "close and open it again\n");
117 return -EIO; 116 return -EIO;
118 } 117 }
119 return 0; 118 return 0;
@@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
521 if (!dev) 520 if (!dev)
522 return 0; 521 return 0;
523 522
524 if ((dev->dev_state & DEV_DISCONNECTED) || 523 if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
525 (dev->dev_state & DEV_MISCONFIGURED)) 524 test_bit(DEV_MISCONFIGURED, &dev->dev_state))
526 return 0; 525 return 0;
527 526
528 if (urb->status < 0) { 527 if (urb->status < 0) {
@@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
824 int ret = 0; 823 int ret = 0;
825 824
826 dev->stream_state = STREAM_INTERRUPT; 825 dev->stream_state = STREAM_INTERRUPT;
827 if (dev->dev_state == DEV_DISCONNECTED) 826 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
828 return -ENODEV; 827 return -ENODEV;
829 else if (ret) { 828 else if (ret) {
830 dev->dev_state = DEV_MISCONFIGURED; 829 set_bit(DEV_MISCONFIGURED, &dev->dev_state);
831 dprintk(1, "%s device is misconfigured!\n", __func__); 830 dprintk(1, "%s device is misconfigured!\n", __func__);
832 return ret; 831 return ret;
833 } 832 }
@@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
1026 int ret; 1025 int ret;
1027 1026
1028 dprintk(1, 1027 dprintk(1,
1029 "%s called std_set %d dev_state %d stream users %d users %d\n", 1028 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1030 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1029 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1031 dev->streaming_users, dev->users); 1030 dev->streaming_users, dev->users);
1032 1031
@@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
1045 au0828_analog_stream_enable(dev); 1044 au0828_analog_stream_enable(dev);
1046 au0828_analog_stream_reset(dev); 1045 au0828_analog_stream_reset(dev);
1047 dev->stream_state = STREAM_OFF; 1046 dev->stream_state = STREAM_OFF;
1048 dev->dev_state |= DEV_INITIALIZED; 1047 set_bit(DEV_INITIALIZED, &dev->dev_state);
1049 } 1048 }
1050 dev->users++; 1049 dev->users++;
1051 mutex_unlock(&dev->lock); 1050 mutex_unlock(&dev->lock);
@@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
1059 struct video_device *vdev = video_devdata(filp); 1058 struct video_device *vdev = video_devdata(filp);
1060 1059
1061 dprintk(1, 1060 dprintk(1,
1062 "%s called std_set %d dev_state %d stream users %d users %d\n", 1061 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1063 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1062 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1064 dev->streaming_users, dev->users); 1063 dev->streaming_users, dev->users);
1065 1064
@@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp)
1075 del_timer_sync(&dev->vbi_timeout); 1074 del_timer_sync(&dev->vbi_timeout);
1076 } 1075 }
1077 1076
1078 if (dev->dev_state == DEV_DISCONNECTED) 1077 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
1079 goto end; 1078 goto end;
1080 1079
1081 if (dev->users == 1) { 1080 if (dev->users == 1) {
@@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
1135 .type = V4L2_TUNER_ANALOG_TV, 1134 .type = V4L2_TUNER_ANALOG_TV,
1136 }; 1135 };
1137 1136
1138 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1137 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1139 dev->std_set_in_tuner_core, dev->dev_state); 1138 dev->std_set_in_tuner_core, dev->dev_state);
1140 1139
1141 if (dev->std_set_in_tuner_core) 1140 if (dev->std_set_in_tuner_core)
@@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
1207 struct video_device *vdev = video_devdata(file); 1206 struct video_device *vdev = video_devdata(file);
1208 struct au0828_dev *dev = video_drvdata(file); 1207 struct au0828_dev *dev = video_drvdata(file);
1209 1208
1210 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1209 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1211 dev->std_set_in_tuner_core, dev->dev_state); 1210 dev->std_set_in_tuner_core, dev->dev_state);
1212 1211
1213 strlcpy(cap->driver, "au0828", sizeof(cap->driver)); 1212 strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1250{ 1249{
1251 struct au0828_dev *dev = video_drvdata(file); 1250 struct au0828_dev *dev = video_drvdata(file);
1252 1251
1253 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1252 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1254 dev->std_set_in_tuner_core, dev->dev_state); 1253 dev->std_set_in_tuner_core, dev->dev_state);
1255 1254
1256 f->fmt.pix.width = dev->width; 1255 f->fmt.pix.width = dev->width;
@@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1269{ 1268{
1270 struct au0828_dev *dev = video_drvdata(file); 1269 struct au0828_dev *dev = video_drvdata(file);
1271 1270
1272 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1271 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1273 dev->std_set_in_tuner_core, dev->dev_state); 1272 dev->std_set_in_tuner_core, dev->dev_state);
1274 1273
1275 return au0828_set_format(dev, VIDIOC_TRY_FMT, f); 1274 return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1281 struct au0828_dev *dev = video_drvdata(file); 1280 struct au0828_dev *dev = video_drvdata(file);
1282 int rc; 1281 int rc;
1283 1282
1284 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1283 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1285 dev->std_set_in_tuner_core, dev->dev_state); 1284 dev->std_set_in_tuner_core, dev->dev_state);
1286 1285
1287 rc = check_dev(dev); 1286 rc = check_dev(dev);
@@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
1303{ 1302{
1304 struct au0828_dev *dev = video_drvdata(file); 1303 struct au0828_dev *dev = video_drvdata(file);
1305 1304
1306 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1305 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1307 dev->std_set_in_tuner_core, dev->dev_state); 1306 dev->std_set_in_tuner_core, dev->dev_state);
1308 1307
1309 if (norm == dev->std) 1308 if (norm == dev->std)
@@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
1335{ 1334{
1336 struct au0828_dev *dev = video_drvdata(file); 1335 struct au0828_dev *dev = video_drvdata(file);
1337 1336
1338 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1337 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1339 dev->std_set_in_tuner_core, dev->dev_state); 1338 dev->std_set_in_tuner_core, dev->dev_state);
1340 1339
1341 *norm = dev->std; 1340 *norm = dev->std;
@@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
1357 [AU0828_VMUX_DVB] = "DVB", 1356 [AU0828_VMUX_DVB] = "DVB",
1358 }; 1357 };
1359 1358
1360 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1359 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1361 dev->std_set_in_tuner_core, dev->dev_state); 1360 dev->std_set_in_tuner_core, dev->dev_state);
1362 1361
1363 tmp = input->index; 1362 tmp = input->index;
@@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
1387{ 1386{
1388 struct au0828_dev *dev = video_drvdata(file); 1387 struct au0828_dev *dev = video_drvdata(file);
1389 1388
1390 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1389 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1391 dev->std_set_in_tuner_core, dev->dev_state); 1390 dev->std_set_in_tuner_core, dev->dev_state);
1392 1391
1393 *i = dev->ctrl_input; 1392 *i = dev->ctrl_input;
@@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
1398{ 1397{
1399 int i; 1398 int i;
1400 1399
1401 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1400 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1402 dev->std_set_in_tuner_core, dev->dev_state); 1401 dev->std_set_in_tuner_core, dev->dev_state);
1403 1402
1404 switch (AUVI_INPUT(index).type) { 1403 switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
1496{ 1495{
1497 struct au0828_dev *dev = video_drvdata(file); 1496 struct au0828_dev *dev = video_drvdata(file);
1498 1497
1499 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1498 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1500 dev->std_set_in_tuner_core, dev->dev_state); 1499 dev->std_set_in_tuner_core, dev->dev_state);
1501 1500
1502 a->index = dev->ctrl_ainput; 1501 a->index = dev->ctrl_ainput;
@@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
1516 if (a->index != dev->ctrl_ainput) 1515 if (a->index != dev->ctrl_ainput)
1517 return -EINVAL; 1516 return -EINVAL;
1518 1517
1519 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1518 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1520 dev->std_set_in_tuner_core, dev->dev_state); 1519 dev->std_set_in_tuner_core, dev->dev_state);
1521 return 0; 1520 return 0;
1522} 1521}
@@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
1534 if (ret) 1533 if (ret)
1535 return ret; 1534 return ret;
1536 1535
1537 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1536 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1538 dev->std_set_in_tuner_core, dev->dev_state); 1537 dev->std_set_in_tuner_core, dev->dev_state);
1539 1538
1540 strcpy(t->name, "Auvitek tuner"); 1539 strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
1554 if (t->index != 0) 1553 if (t->index != 0)
1555 return -EINVAL; 1554 return -EINVAL;
1556 1555
1557 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1556 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1558 dev->std_set_in_tuner_core, dev->dev_state); 1557 dev->std_set_in_tuner_core, dev->dev_state);
1559 1558
1560 au0828_init_tuner(dev); 1559 au0828_init_tuner(dev);
@@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
1576 1575
1577 if (freq->tuner != 0) 1576 if (freq->tuner != 0)
1578 return -EINVAL; 1577 return -EINVAL;
1579 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1578 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1580 dev->std_set_in_tuner_core, dev->dev_state); 1579 dev->std_set_in_tuner_core, dev->dev_state);
1581 freq->frequency = dev->ctrl_freq; 1580 freq->frequency = dev->ctrl_freq;
1582 return 0; 1581 return 0;
@@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1591 if (freq->tuner != 0) 1590 if (freq->tuner != 0)
1592 return -EINVAL; 1591 return -EINVAL;
1593 1592
1594 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1593 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1595 dev->std_set_in_tuner_core, dev->dev_state); 1594 dev->std_set_in_tuner_core, dev->dev_state);
1596 1595
1597 au0828_init_tuner(dev); 1596 au0828_init_tuner(dev);
@@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
1617{ 1616{
1618 struct au0828_dev *dev = video_drvdata(file); 1617 struct au0828_dev *dev = video_drvdata(file);
1619 1618
1620 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1619 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1621 dev->std_set_in_tuner_core, dev->dev_state); 1620 dev->std_set_in_tuner_core, dev->dev_state);
1622 1621
1623 format->fmt.vbi.samples_per_line = dev->vbi_width; 1622 format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
1643 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1642 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1644 return -EINVAL; 1643 return -EINVAL;
1645 1644
1646 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1645 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1647 dev->std_set_in_tuner_core, dev->dev_state); 1646 dev->std_set_in_tuner_core, dev->dev_state);
1648 1647
1649 cc->bounds.left = 0; 1648 cc->bounds.left = 0;
@@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
1665{ 1664{
1666 struct au0828_dev *dev = video_drvdata(file); 1665 struct au0828_dev *dev = video_drvdata(file);
1667 1666
1668 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1667 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1669 dev->std_set_in_tuner_core, dev->dev_state); 1668 dev->std_set_in_tuner_core, dev->dev_state);
1670 1669
1671 reg->val = au0828_read(dev, reg->reg); 1670 reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
1678{ 1677{
1679 struct au0828_dev *dev = video_drvdata(file); 1678 struct au0828_dev *dev = video_drvdata(file);
1680 1679
1681 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1680 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1682 dev->std_set_in_tuner_core, dev->dev_state); 1681 dev->std_set_in_tuner_core, dev->dev_state);
1683 1682
1684 return au0828_writereg(dev, reg->reg, reg->val); 1683 return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ff7f8510fb77..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 23
24#include <linux/bitops.h>
24#include <linux/usb.h> 25#include <linux/usb.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
26#include <linux/i2c-algo-bit.h> 27#include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
121 122
122/* device state */ 123/* device state */
123enum au0828_dev_state { 124enum au0828_dev_state {
124 DEV_INITIALIZED = 0x01, 125 DEV_INITIALIZED = 0,
125 DEV_DISCONNECTED = 0x02, 126 DEV_DISCONNECTED = 1,
126 DEV_MISCONFIGURED = 0x04 127 DEV_MISCONFIGURED = 2
127}; 128};
128 129
129struct au0828_dev; 130struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
247 int input_type; 248 int input_type;
248 int std_set_in_tuner_core; 249 int std_set_in_tuner_core;
249 unsigned int ctrl_input; 250 unsigned int ctrl_input;
250 enum au0828_dev_state dev_state; 251 unsigned long dev_state; /* bits defined by enum au0828_dev_state */
251 enum au0828_stream_state stream_state; 252 enum au0828_stream_state stream_state;
252 wait_queue_head_t open; 253 wait_queue_head_t open;
253 254
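
The au0828.h hunk is the key to all the dev_state churn above: the enum values change from masks (0x01, 0x02, 0x04) to bit numbers (0, 1, 2) because set_bit()/clear_bit()/test_bit() take bit indices and operate atomically on an unsigned long, whereas the old dev->dev_state |= FLAG read-modify-write could lose a concurrent update; it is also why every dprintk format switches from %d to %ld. A self-contained sketch of the conversion:

    /* Bit numbers plus atomic bitops replace a racy flags field. */
    enum dev_state_bits {           /* bit indices, not masks */
            DEV_INITIALIZED  = 0,
            DEV_DISCONNECTED = 1,
            DEV_MISCONFIGURED = 2,
    };
    unsigned long dev_state;        /* operand for set_bit()/test_bit() */

    set_bit(DEV_DISCONNECTED, &dev_state);          /* atomic RMW */
    if (test_bit(DEV_DISCONNECTED, &dev_state))
            return -ENODEV;
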
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index df4c052c6bd6..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
349 349
350 if (dma->pages) { 350 if (dma->pages) {
351 for (i = 0; i < dma->nr_pages; i++) 351 for (i = 0; i < dma->nr_pages; i++)
352 page_cache_release(dma->pages[i]); 352 put_page(dma->pages[i]);
353 kfree(dma->pages); 353 kfree(dma->pages);
354 dma->pages = NULL; 354 dma->pages = NULL;
355 } 355 }
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
116{ 116{
117 struct inode *root; 117 struct inode *root;
118 118
119 sb->s_blocksize = PAGE_CACHE_SIZE; 119 sb->s_blocksize = PAGE_SIZE;
120 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 120 sb->s_blocksize_bits = PAGE_SHIFT;
121 sb->s_magic = IBMASMFS_MAGIC; 121 sb->s_magic = IBMASMFS_MAGIC;
122 sb->s_op = &ibmasmfs_s_ops; 122 sb->s_op = &ibmasmfs_s_ops;
123 sb->s_time_gran = 1; 123 sb->s_time_gran = 1;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b8fbb0..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
458 break; 458 break;
459 459
460 val = kmalloc(len, GFP_KERNEL); 460 val = kmalloc(len, GFP_KERNEL);
461 if (!val) 461 if (!val) {
462 kfree(base);
462 break; 463 break;
464 }
463 465
464 *val = 0x12345678; 466 *val = 0x12345678;
465 base[offset] = *val; 467 base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
498 } 500 }
499 case CT_READ_BUDDY_AFTER_FREE: { 501 case CT_READ_BUDDY_AFTER_FREE: {
500 unsigned long p = __get_free_page(GFP_KERNEL); 502 unsigned long p = __get_free_page(GFP_KERNEL);
501 int saw, *val = kmalloc(1024, GFP_KERNEL); 503 int saw, *val;
502 int *base; 504 int *base;
503 505
504 if (!p) 506 if (!p)
505 break; 507 break;
506 508
507 if (!val) 509 val = kmalloc(1024, GFP_KERNEL);
510 if (!val) {
511 free_page(p);
508 break; 512 break;
513 }
509 514
510 base = (int *)p; 515 base = (int *)p;
511 516
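
These lkdtm hunks are what the merge was tagged for: both error paths allocated one object, failed a second allocation, and broke out of the switch without releasing the first, leaking it. The fix frees base (or the page p) before bailing. The general shape of the pattern, as a sketch:

    int *base, *val;

    base = kmalloc(len, GFP_KERNEL);
    if (!base)
            return;

    val = kmalloc(len, GFP_KERNEL);
    if (!val) {
            kfree(base);    /* would otherwise leak on this path */
            return;
    }
    /* ... use base and val ... */
    kfree(val);
    kfree(base);
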
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
728 if (dirty) 728 if (dirty)
729 set_page_dirty(pages[i]); 729 set_page_dirty(pages[i]);
730 730
731 page_cache_release(pages[i]); 731 put_page(pages[i]);
732 pages[i] = NULL; 732 pages[i] = NULL;
733 } 733 }
734} 734}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d94607611d8..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
356 * They have to set these according to their abilities. 356 * They have to set these according to their abilities.
357 */ 357 */
358 host->max_segs = 1; 358 host->max_segs = 1;
359 host->max_seg_size = PAGE_CACHE_SIZE; 359 host->max_seg_size = PAGE_SIZE;
360 360
361 host->max_req_size = PAGE_CACHE_SIZE; 361 host->max_req_size = PAGE_SIZE;
362 host->max_blk_size = 512; 362 host->max_blk_size = 512;
363 host->max_blk_count = PAGE_CACHE_SIZE / 512; 363 host->max_blk_count = PAGE_SIZE / 512;
364 364
365 return host; 365 return host;
366} 366}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8d870ce9f944..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1513 mmc->caps |= pd->caps; 1513 mmc->caps |= pd->caps;
1514 mmc->max_segs = 32; 1514 mmc->max_segs = 32;
1515 mmc->max_blk_size = 512; 1515 mmc->max_blk_size = 512;
1516 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1516 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1518 mmc->max_seg_size = mmc->max_req_size; 1518 mmc->max_seg_size = mmc->max_req_size;
1519 1519
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 675435873823..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		}
 	}
 
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
 			  (align & PAGE_MASK))) || !multiple) {
 		ret = -EINVAL;
 		goto pio;
@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		}
 	}
 
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
 			  (align & PAGE_MASK))) || !multiple) {
 		ret = -EINVAL;
 		goto pio;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 03f6e74c1906..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 	mmc->caps2 |= pdata->capabilities2;
 	mmc->max_segs = 32;
 	mmc->max_blk_size = 512;
-	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
 		mmc->max_segs;
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
 	mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b2752fe711f2..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
 	/* Set .max_segs to some random number. Feel free to adjust. */
 	mmc->max_segs = 32;
 	mmc->max_blk_size = 512;
-	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
 	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
 	/*
 	 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 			break;
 		}
 
-		page_cache_release(page);
+		put_page(page);
 		pages--;
 		index++;
 	}
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 			return PTR_ERR(page);
 
 		memcpy(buf, page_address(page) + offset, cpylen);
-		page_cache_release(page);
+		put_page(page);
 
 		if (retlen)
 			*retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
 			unlock_page(page);
 			balance_dirty_pages_ratelimited(mapping);
 		}
-		page_cache_release(page);
+		put_page(page);
 
 		if (retlen)
 			*retlen += cpylen;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
 	int i;
 
 	for (i = 0; i < ns->held_cnt; i++)
-		page_cache_release(ns->held_pages[i]);
+		put_page(ns->held_pages[i]);
 }
 
 /* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
 	struct page *page;
 	struct address_space *mapping = file->f_mapping;
 
-	start_index = pos >> PAGE_CACHE_SHIFT;
-	end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+	start_index = pos >> PAGE_SHIFT;
+	end_index = (pos + count - 1) >> PAGE_SHIFT;
 	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
 		return -EINVAL;
 	ns->held_cnt = 0;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index c32cbb593600..f068b6513cd2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, 0);
 	return 0;
 }
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index cc31c6f1f88e..12c86fa80c5f 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -151,7 +151,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 	int rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
 	if (rw & WRITE)
 		wmb_pmem();
 
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac6300c79..a0e5260bd006 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *root_inode;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = OPROFILEFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_time_gran = 1;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
 	int	stschg_irq;	/* card-status-change irq */
 	int	card_irq;	/* card irq */
 	int	eject_irq;	/* db1200/pb1200 have these */
+	int	insert_gpio;	/* db1000 carddetect gpio */
 
 #define BOARD_TYPE_DEFAULT	0	/* most boards */
 #define BOARD_TYPE_DB1200	1	/* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
 /* carddetect gpio: low-active */
 static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
 {
-	return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+	return !gpio_get_value(sock->insert_gpio);
 }
 
 static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
 	sock->card_irq = r ? r->start : 0;
 
-	/* insert: irq which triggers on card insertion/ejection */
+	/* insert: irq which triggers on card insertion/ejection
+	 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+	 */
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
 	sock->insert_irq = r ? r->start : -1;
+	if (sock->board_type == BOARD_TYPE_DEFAULT) {
+		sock->insert_gpio = r ? r->start : -1;
+		sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+	}
 
 	/* stschg: irq which trigger on card status change (optional) */
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 46210512d8ec..9cfa544072b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
 
 	if (of_property_read_bool(dev_np, "fsl,input-sel")) {
 		np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
-		if (np) {
-			ipctl->input_sel_base = of_iomap(np, 0);
-			if (IS_ERR(ipctl->input_sel_base)) {
-				of_node_put(np);
-				dev_err(&pdev->dev,
-					"iomuxc input select base address not found\n");
-				return PTR_ERR(ipctl->input_sel_base);
-			}
-		} else {
+		if (!np) {
 			dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
 			return -EINVAL;
 		}
+
+		ipctl->input_sel_base = of_iomap(np, 0);
 		of_node_put(np);
+		if (!ipctl->input_sel_base) {
+			dev_err(&pdev->dev,
+				"iomuxc input select base address not found\n");
+			return -ENOMEM;
+		}
 	}
 
 	imx_pinctrl_desc.name = dev_name(&pdev->dev);
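
The bug being fixed: of_iomap() returns NULL on failure rather than an ERR_PTR() value, so the old IS_ERR()/PTR_ERR() pair could never trigger and a NULL base could escape. A hedged sketch of the corrected idiom (hypothetical fragment, assumes <linux/of_address.h>):

	np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
	if (!np)
		return -EINVAL;

	base = of_iomap(np, 0);	/* void __iomem *, NULL on failure */
	of_node_put(np);	/* drop the ref whether or not the map worked */
	if (!base)
		return -ENOMEM;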
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85536b467c25..6c2c816f8e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
 	spin_unlock(&pctrl->lock);
 }
 
+static void intel_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct intel_community *community;
+	unsigned pin = irqd_to_hwirq(d);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pctrl->lock, flags);
+
+	community = intel_get_community(pctrl, pin);
+	if (community) {
+		unsigned padno = pin_to_padno(community, pin);
+		unsigned gpp_size = community->gpp_size;
+		unsigned gpp_offset = padno % gpp_size;
+		unsigned gpp = padno / gpp_size;
+		u32 value;
+
+		/* Clear interrupt status first to avoid unexpected interrupt */
+		writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+
+		value = readl(community->regs + community->ie_offset + gpp * 4);
+		value |= BIT(gpp_offset);
+		writel(value, community->regs + community->ie_offset + gpp * 4);
+	}
+
+	spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
 static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
 		value |= PADCFG0_RXINV;
 	} else if (type & IRQ_TYPE_EDGE_RISING) {
 		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
-	} else if (type & IRQ_TYPE_LEVEL_LOW) {
-		value |= PADCFG0_RXINV;
+	} else if (type & IRQ_TYPE_LEVEL_MASK) {
+		if (type & IRQ_TYPE_LEVEL_LOW)
+			value |= PADCFG0_RXINV;
 	} else {
 		value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
 	}
@@ -852,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
 	.name = "intel-gpio",
+	.irq_enable = intel_gpio_irq_enable,
 	.irq_ack = intel_gpio_irq_ack,
 	.irq_mask = intel_gpio_irq_mask,
 	.irq_unmask = intel_gpio_irq_unmask,
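
For reference, the level trigger types in include/linux/irq.h are bit flags (values paraphrased from memory, treat as an assumption):

	IRQ_TYPE_LEVEL_HIGH = 0x00000004,
	IRQ_TYPE_LEVEL_LOW  = 0x00000008,
	IRQ_TYPE_LEVEL_MASK = IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH,

so testing IRQ_TYPE_LEVEL_MASK first, as the hunk above does, catches both level polarities and only then decides whether RX inversion is needed; the old code silently fell into the disabled branch for IRQ_TYPE_LEVEL_HIGH.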
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352406108fa0..c8969dd49449 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
 		int val;
 
 		if (pull)
-			pullidx = data_out ? 1 : 2;
+			pullidx = data_out ? 2 : 1;
 
 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
 			   gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736cb1a6..2673cd9d106e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
469 "mfio83", 469 "mfio83",
470}; 470};
471 471
472static const char * const pistachio_sys_pll_lock_groups[] = { 472static const char * const pistachio_audio_pll_lock_groups[] = {
473 "mfio84", 473 "mfio84",
474}; 474};
475 475
476static const char * const pistachio_wifi_pll_lock_groups[] = { 476static const char * const pistachio_rpu_v_pll_lock_groups[] = {
477 "mfio85", 477 "mfio85",
478}; 478};
479 479
480static const char * const pistachio_bt_pll_lock_groups[] = { 480static const char * const pistachio_rpu_l_pll_lock_groups[] = {
481 "mfio86", 481 "mfio86",
482}; 482};
483 483
484static const char * const pistachio_rpu_v_pll_lock_groups[] = { 484static const char * const pistachio_sys_pll_lock_groups[] = {
485 "mfio87", 485 "mfio87",
486}; 486};
487 487
488static const char * const pistachio_rpu_l_pll_lock_groups[] = { 488static const char * const pistachio_wifi_pll_lock_groups[] = {
489 "mfio88", 489 "mfio88",
490}; 490};
491 491
492static const char * const pistachio_audio_pll_lock_groups[] = { 492static const char * const pistachio_bt_pll_lock_groups[] = {
493 "mfio89", 493 "mfio89",
494}; 494};
495 495
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
 	PISTACHIO_FUNCTION_DREQ4,
 	PISTACHIO_FUNCTION_DREQ5,
 	PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
 	PISTACHIO_FUNCTION_SYS_PLL_LOCK,
 	PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
 	PISTACHIO_FUNCTION_BT_PLL_LOCK,
-	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
-	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
-	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
 	PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
 	PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
 	PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
 	FUNCTION(dreq4),
 	FUNCTION(dreq5),
 	FUNCTION(mips_pll_lock),
+	FUNCTION(audio_pll_lock),
+	FUNCTION(rpu_v_pll_lock),
+	FUNCTION(rpu_l_pll_lock),
 	FUNCTION(sys_pll_lock),
 	FUNCTION(wifi_pll_lock),
 	FUNCTION(bt_pll_lock),
-	FUNCTION(rpu_v_pll_lock),
-	FUNCTION(rpu_l_pll_lock),
-	FUNCTION(audio_pll_lock),
 	FUNCTION(debug_raw_cca_ind),
 	FUNCTION(debug_ed_sec20_cca_ind),
 	FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b78140a..a13f2b6f6fc0 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
 	return 0;
 }
 
+/*
+ * gpiolib gpiod_to_irq callback function.
+ * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
+ */
+static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
+	int i;
+
+	for (i = 0; i < info->num_exin; i++)
+		if (info->exin[i] == offset)
+			return ltq_eiu_get_irq(i);
+
+	return -1;
+}
+
 static struct gpio_chip xway_chip = {
 	.label = "gpio-xway",
 	.direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
 	.set = xway_gpio_set,
 	.request = gpiochip_generic_request,
 	.free = gpiochip_generic_free,
+	.to_irq = xway_gpio_to_irq,
 	.base = -1,
 };
 
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b5d81ced6ce6..b68ae424cee2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99);
 		.pins = gpio##id##_pins,		\
 		.npins = (unsigned)ARRAY_SIZE(gpio##id##_pins),	\
 		.funcs = (int[]){			\
-			qca_mux_NA, /* gpio mode */	\
+			qca_mux_gpio, /* gpio mode */	\
 			qca_mux_##f1,			\
 			qca_mux_##f2,			\
 			qca_mux_##f3,			\
@@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99);
 			qca_mux_##f14			\
 		},					\
 		.nfuncs = 15,				\
-		.ctl_reg = 0x1000 + 0x10 * id,		\
-		.io_reg = 0x1004 + 0x10 * id,		\
-		.intr_cfg_reg = 0x1008 + 0x10 * id,	\
-		.intr_status_reg = 0x100c + 0x10 * id,	\
-		.intr_target_reg = 0x400 + 0x4 * id,	\
+		.ctl_reg = 0x0 + 0x1000 * id,		\
+		.io_reg = 0x4 + 0x1000 * id,		\
+		.intr_cfg_reg = 0x8 + 0x1000 * id,	\
+		.intr_status_reg = 0xc + 0x1000 * id,	\
+		.intr_target_reg = 0x8 + 0x1000 * id,	\
 		.mux_bit = 2,				\
 		.pull_bit = 0,				\
 		.drv_bit = 6,				\
@@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
 	.nfunctions = ARRAY_SIZE(ipq4019_functions),
 	.groups = ipq4019_groups,
 	.ngroups = ARRAY_SIZE(ipq4019_groups),
-	.ngpios = 70,
+	.ngpios = 100,
 };
 
 static int ipq4019_pinctrl_probe(struct platform_device *pdev)
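
A quick worked example of the corrected TLMM layout (plain C, offsets taken from the hunk above): each GPIO now owns a 0x1000-byte register block with small fixed offsets inside it, instead of sharing a 0x10 stride.

	#include <stdio.h>

	int main(void)
	{
		for (int id = 0; id < 3; id++)
			printf("gpio%d: ctl=%#07x io=%#07x intr_cfg=%#07x intr_status=%#07x\n",
			       id, 0x0 + 0x1000 * id, 0x4 + 0x1000 * id,
			       0x8 + 0x1000 * id, 0xc + 0x1000 * id);
		return 0;
	}

gpio0 therefore starts at 0x0, gpio1 at 0x1000, and so on up to gpio99, which matches the .ngpios bump from 70 to 100.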
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index dc3609f0c60b..ee0c1f2567d9 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	pinctrl_provide_dummies();
+	/* Enable dummy states for those platforms without pinctrl support */
+	if (!of_have_populated_dt())
+		pinctrl_provide_dummies();
 
 	ret = sh_pfc_init_ranges(pfc);
 	if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
 	.pins = sun8i_a33_pins,
 	.npins = ARRAY_SIZE(sun8i_a33_pins),
 	.irq_banks = 2,
+	.irq_bank_base = 1,
 };
 
 static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 12a1dfabb1af..3b017dbd289c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+	u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
 	unsigned long flags;
 	u32 regval;
@@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+	u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+					      pctl->desc->irq_bank_base);
 	u8 status_idx = sunxi_irq_status_offset(d->hwirq);
 
 	/* Clear the IRQ */
@@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
 	unsigned long flags;
 	u32 val;
@@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
 	unsigned long flags;
 	u32 val;
@@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
 	if (bank == pctl->desc->irq_banks)
 		return;
 
-	reg = sunxi_irq_status_reg_from_bank(bank);
+	reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
 	val = readl(pctl->membase + reg);
 
 	if (val) {
@@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
 
 	for (i = 0; i < pctl->desc->irq_banks; i++) {
 		/* Mask and clear all IRQs before registering a handler */
-		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+						pctl->desc->irq_bank_base));
 		writel(0xffffffff,
-		       pctl->membase + sunxi_irq_status_reg_from_bank(i));
+		       pctl->membase + sunxi_irq_status_reg_from_bank(i,
+						pctl->desc->irq_bank_base));
 
 		irq_set_chained_handler_and_data(pctl->irq[i],
 						 sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
 	int npins;
 	unsigned pin_base;
 	unsigned irq_banks;
+	unsigned irq_bank_base;
 	bool irq_read_needs_mux;
 };
 
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
 	return pin_num * PULL_PINS_BITS;
 }
 
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 	u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
 
-	return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
 }
 
 static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
 	return irq_num * IRQ_CFG_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
 {
-	return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+	return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 
-	return sunxi_irq_ctrl_reg_from_bank(bank);
+	return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
 	return irq_num * IRQ_CTRL_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
 {
-	return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+	return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 
-	return sunxi_irq_status_reg_from_bank(bank);
+	return sunxi_irq_status_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_status_offset(u16 irq)
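
A worked example of the new (bank_base + bank) addressing (plain C; the IRQ_CTRL_REG and IRQ_MEM_SIZE values are assumptions mirroring pinctrl-sunxi.h, treat them as illustrative only):

	#include <stdio.h>

	#define IRQ_CTRL_REG	0x210	/* assumed */
	#define IRQ_MEM_SIZE	0x20	/* assumed */

	static unsigned int ctrl_reg(unsigned int bank, unsigned int bank_base)
	{
		return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
	}

	int main(void)
	{
		/* A33: irq_banks = 2, irq_bank_base = 1 (see the hunks above) */
		for (unsigned int bank = 0; bank < 2; bank++)
			printf("logical bank %u -> ctrl reg %#x\n",
			       bank, ctrl_reg(bank, 1));
		return 0;
	}

With bank_base = 0 the old and new formulas agree, so only the A33, whose first IRQ-capable bank is hardware bank 1, changes behavior.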
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a5457ac9cdb..1bd0753f678a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2891,7 +2891,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	if (sdkp->opt_xfer_blocks &&
 	    sdkp->opt_xfer_blocks <= dev_max &&
 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
 		rw_max = q->limits.io_opt =
 			sdkp->opt_xfer_blocks * sdp->sector_size;
 	else
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71c5138ddf94..dbf1882cfbac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
  out_unmap:
 	if (res > 0) {
 		for (j=0; j < res; j++)
-			page_cache_release(pages[j]);
+			put_page(pages[j]);
 		res = 0;
 	}
 	kfree(pages);
@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
 		/* FIXME: cache flush missing for rw==READ
 		 * FIXME: call the correct reference counting function
 		 */
-		page_cache_release(page);
+		put_page(page);
 	}
 	kfree(STbp->mapped_pages);
 	STbp->mapped_pages = NULL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e7a19be87c38..50769078e72e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 			 struct spi_transfer *transfer)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
-	unsigned int bpw = transfer->bits_per_word;
+	unsigned int bpw;
 
 	if (!master->dma_rx)
 		return false;
 
+	if (!transfer)
+		return false;
+
+	bpw = transfer->bits_per_word;
 	if (!bpw)
 		bpw = spi->bits_per_word;
 
@@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
 static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 				 struct spi_imx_config *config)
 {
-	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
 	u32 clk = config->speed_hz, delay, reg;
+	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
 	/*
 	 * The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 
 	if (config->mode & SPI_CPHA)
 		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+	else
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
 
 	if (config->mode & SPI_CPOL) {
 		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
 		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+	} else {
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
 	}
 	if (config->mode & SPI_CS_HIGH)
 		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+	else
+		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
 
 	if (spi_imx->usedma)
 		ctrl |= MX51_ECSPI_CTRL_SMC;
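
The essence of this hunk: the config register is shared by all chip selects, so it must be read back and each mode bit both set and cleared; a write-only |= pattern makes bits sticky across transfers. A tiny host-side demo of the failure mode (plain C; the bit position is a stand-in for MX51_ECSPI_CONFIG_SCLKPHA(cs), not the real layout):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t apply_cpha(uint32_t cfg, unsigned int cs, int cpha)
	{
		uint32_t bit = 1u << cs;	/* stand-in for the real bit macro */

		if (cpha)
			cfg |= bit;
		else
			cfg &= ~bit;		/* the clear the old code lacked */
		return cfg;
	}

	int main(void)
	{
		uint32_t cfg = 0;

		cfg = apply_cpha(cfg, 0, 1);	/* CS0 transfer with CPHA=1 */
		cfg = apply_cpha(cfg, 0, 0);	/* next transfer: CPHA=0 */
		printf("cfg = %#x\n", cfg);	/* 0x0: mode bit properly dropped */
		return 0;
	}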
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8bef46..43a02e377b3b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
 
 	if (mcspi_dma->dma_tx) {
 		struct dma_async_tx_descriptor *tx;
-		struct scatterlist sg;
 
 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
 
-		sg_init_table(&sg, 1);
-		sg_dma_address(&sg) = xfer->tx_dma;
-		sg_dma_len(&sg) = xfer->len;
-
-		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
-			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+					     xfer->tx_sg.nents, DMA_MEM_TO_DEV,
+					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (tx) {
 			tx->callback = omap2_mcspi_tx_callback;
 			tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 
 	if (mcspi_dma->dma_rx) {
 		struct dma_async_tx_descriptor *tx;
-		struct scatterlist sg;
 
 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
 
 		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
 			dma_count -= es;
 
-		sg_init_table(&sg, 1);
-		sg_dma_address(&sg) = xfer->rx_dma;
-		sg_dma_len(&sg) = dma_count;
-
-		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
-				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
-				DMA_CTRL_ACK);
+		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
+					     xfer->rx_sg.nents, DMA_DEV_TO_MEM,
+					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (tx) {
 			tx->callback = omap2_mcspi_rx_callback;
 			tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 	omap2_mcspi_set_dma_req(spi, 1, 1);
 
 	wait_for_completion(&mcspi_dma->dma_rx_completion);
-	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
-			 DMA_FROM_DEVICE);
 
 	if (mcspi->fifo_depth > 0)
 		return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 
 	if (tx != NULL) {
 		wait_for_completion(&mcspi_dma->dma_tx_completion);
-		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
-				 DMA_TO_DEVICE);
 
 		if (mcspi->fifo_depth > 0) {
 			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
 		gpio_free(spi->cs_gpio);
 }
 
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	if (xfer->len < DMA_MIN_BYTES)
+		return false;
+
+	return true;
+}
+
 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
 				struct spi_device *spi, struct spi_transfer *t)
 {
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
 		return -EINVAL;
 	}
 
-	if (len < DMA_MIN_BYTES)
-		goto skip_dma_map;
-
-	if (mcspi_dma->dma_tx && tx_buf != NULL) {
-		t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
-				len, DMA_TO_DEVICE);
-		if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
-			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
-					'T', len);
-			return -EINVAL;
-		}
-	}
-	if (mcspi_dma->dma_rx && rx_buf != NULL) {
-		t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
-				DMA_FROM_DEVICE);
-		if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
-			dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
-					'R', len);
-			if (tx_buf != NULL)
-				dma_unmap_single(mcspi->dev, t->tx_dma,
-						len, DMA_TO_DEVICE);
-			return -EINVAL;
-		}
-	}
-
-skip_dma_map:
 	return omap2_mcspi_work_one(mcspi, spi, t);
 }
 
1299 1270
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
 	master->transfer_one = omap2_mcspi_transfer_one;
 	master->set_cs = omap2_mcspi_set_cs;
 	master->cleanup = omap2_mcspi_cleanup;
+	master->can_dma = omap2_mcspi_can_dma;
 	master->dev.of_node = node;
 	master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
 	master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 8f50a4020f6f..6c6c0013ec7a 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
 	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
 		rs->speed = MAX_SCLK_OUT;
 
-	/* the minimum divsor is 2 */
+	/* the minimum divisor is 2 */
 	if (rs->max_freq < 2 * rs->speed) {
 		clk_set_rate(rs->spiclk, 2 * rs->speed);
 		rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	master->transfer_one = rockchip_spi_transfer_one;
 	master->handle_err = rockchip_spi_handle_err;
 
-	rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
-	if (IS_ERR_OR_NULL(rs->dma_tx.ch)) {
+	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
+	if (IS_ERR(rs->dma_tx.ch)) {
 		/* Check tx to see if we need defer probing driver */
 		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
 			ret = -EPROBE_DEFER;
 			goto err_get_fifo_len;
 		}
 		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+		rs->dma_tx.ch = NULL;
 	}
 
-	rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
-	if (!rs->dma_rx.ch) {
-		if (rs->dma_tx.ch) {
+	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
+	if (IS_ERR(rs->dma_rx.ch)) {
+		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
 			dma_release_channel(rs->dma_tx.ch);
 			rs->dma_tx.ch = NULL;
+			ret = -EPROBE_DEFER;
+			goto err_get_fifo_len;
 		}
 		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+		rs->dma_rx.ch = NULL;
 	}
 
 	if (rs->dma_tx.ch && rs->dma_rx.ch) {
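
The API contract this conversion relies on: dma_request_slave_channel() returns NULL on any failure, swallowing -EPROBE_DEFER, while dma_request_chan() returns an ERR_PTR() the caller can inspect. A hedged sketch of the resulting idiom (hypothetical helper, not code from the patch; assumes <linux/dmaengine.h>):

	static struct dma_chan *request_chan_or_pio(struct device *dev,
						    const char *name)
	{
		struct dma_chan *chan = dma_request_chan(dev, name);

		if (IS_ERR(chan)) {
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return chan;	/* propagate: provider not ready */
			dev_warn(dev, "no %s DMA channel, falling back to PIO\n",
				 name);
			chan = NULL;		/* PIO fallback */
		}
		return chan;
	}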
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index de2f2f90d799..0239b45eed92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
 	struct spi_master *master =
 		container_of(work, struct spi_master, pump_messages);
 
-	__spi_pump_messages(master, true, false);
+	__spi_pump_messages(master, true, master->bus_lock_flag);
 }
 
 static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-	return __spi_sync(spi, message, 0);
+	return __spi_sync(spi, message, spi->master->bus_lock_flag);
 }
 EXPORT_SYMBOL_GPL(spi_sync);
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index dab486261154..13335437c69c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
 } while (0)
 
 #ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_SHIFT) /* 2 pages */
 #endif
 
 #define LIBCFS_ALLOC_PRE(size, mask)					\
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79e5ec8..837eb22749c3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
 #include "../libcfs_cpu.h"
 #endif
 
-#define CFS_PAGE_MASK		(~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK		(~((__u64)PAGE_SIZE-1))
 #define page_index(p)		((p)->index)
 
 #define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
 #define NUM_CACHEPAGES \
-	min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+	min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
 #else
 #define NUM_CACHEPAGES totalram_pages
 #endif
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c341c5..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@ typedef struct {
 	/**
 	 * Starting offset of the fragment within the page. Note that the
 	 * end of the fragment must not pass the end of the page; i.e.,
-	 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+	 * kiov_len + kiov_offset <= PAGE_SIZE.
 	 */
 	unsigned int kiov_offset;
 } lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 3e1f24e77f64..d4ce06d0aeeb 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 
 	for (nob = i = 0; i < niov; i++) {
 		if ((kiov[i].kiov_offset && i > 0) ||
-		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
 			return NULL;
 
 		pages[i] = kiov[i].kiov_page;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c90e5102fe06..c3d628bac5b8 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
 		max = TCD_MAX_PAGES;
 	} else {
 		max = max / num_possible_cpus();
-		max <<= (20 - PAGE_CACHE_SHIFT);
+		max <<= (20 - PAGE_SHIFT);
 	}
 	rc = cfs_tracefile_init(max);
 
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index ec3bc04bd89f..244eb89eef68 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
 	if (tcd->tcd_cur_pages > 0) {
 		__LASSERT(!list_empty(&tcd->tcd_pages));
 		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
-		if (tage->used + len <= PAGE_CACHE_SIZE)
+		if (tage->used + len <= PAGE_SIZE)
 			return tage;
 	}
 
@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
 	 * from here: this will lead to infinite recursion.
 	 */
 
-	if (len > PAGE_CACHE_SIZE) {
+	if (len > PAGE_SIZE) {
 		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
 		return NULL;
 	}
@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 	for (i = 0; i < 2; i++) {
 		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
 		if (!tage) {
-			if (needed + known_size > PAGE_CACHE_SIZE)
+			if (needed + known_size > PAGE_SIZE)
 				mask |= D_ERROR;
 
 			cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 		string_buf = (char *)page_address(tage->page) +
 			     tage->used + known_size;
 
-		max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+		max_nob = PAGE_SIZE - tage->used - known_size;
 		if (max_nob <= 0) {
 			printk(KERN_EMERG "negative max_nob: %d\n",
 			       max_nob);
@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 	__LASSERT(debug_buf == string_buf);
 
 	tage->used += needed;
-	__LASSERT(tage->used <= PAGE_CACHE_SIZE);
+	__LASSERT(tage->used <= PAGE_SIZE);
 
 console:
 	if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
 
 int cfs_trace_allocate_string_buffer(char **str, int nob)
 {
-	if (nob > 2 * PAGE_CACHE_SIZE)	/* string must be "sensible" */
+	if (nob > 2 * PAGE_SIZE)	/* string must be "sensible" */
 		return -EINVAL;
 
 	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
 	}
 
 	mb /= num_possible_cpus();
-	pages = mb << (20 - PAGE_CACHE_SHIFT);
+	pages = mb << (20 - PAGE_SHIFT);
 
 	cfs_tracefile_write_lock();
 
@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
 
 	cfs_tracefile_read_unlock();
 
-	return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
 }
 
 static int tracefiled(void *arg)
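
All of the tracefile conversions above rely on the same shift arithmetic: with PAGE_SHIFT bits per page and 20 bits per megabyte, mb << (20 - PAGE_SHIFT) converts megabytes to pages, and the opposite shift converts back. A runnable check, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

	int main(void)
	{
		unsigned long mb = 5;
		unsigned long pages = mb << (20 - PAGE_SHIFT);

		printf("%lu MB = %lu pages\n", mb, pages);	/* 1280 */
		printf("round trip = %lu MB\n", pages >> (20 - PAGE_SHIFT));
		return 0;
	}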
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 4c77f9044dd3..ac84e7f4c859 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
 extern int libcfs_panic_in_progress;
 int cfs_trace_max_debug_mb(void);
 
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 #define CFS_TRACEFILE_SIZE (500 << 20)
 
@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
 /*
  * Private declare for tracefile
  */
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do { \
 do {									\
 	__LASSERT(tage);						\
 	__LASSERT(tage->page);						\
-	__LASSERT(tage->used <= PAGE_CACHE_SIZE);			\
+	__LASSERT(tage->used <= PAGE_SIZE);				\
 	__LASSERT(page_count(tage->page) > 0);				\
 } while (0)
 
263 263
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index c74514f99f90..75d31217bf92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 		for (i = 0; i < (int)niov; i++) {
 			/* We take the page pointer on trust */
 			if (lmd->md_iov.kiov[i].kiov_offset +
-			    lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+			    lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
 				return -EINVAL; /* invalid length */
 
 			total_length += lmd->md_iov.kiov[i].kiov_len;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 0009a8de77d5..f19aa9320e34 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
 		if (len <= frag_len) {
 			dst->kiov_len = len;
 			LASSERT(dst->kiov_offset + dst->kiov_len
-					<= PAGE_CACHE_SIZE);
+					<= PAGE_SIZE);
 			return niov;
 		}
 
 		dst->kiov_len = frag_len;
-		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
 
 		len -= frag_len;
 		dst++;
@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
 	rbp = &the_lnet.ln_rtrpools[cpt][0];
 
 	LASSERT(msg->msg_len <= LNET_MTU);
-	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
 		rbp++;
 		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
 	}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index cc0c2753dd63..891fd59401d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
 	nalloc = 16;	/* first guess at max interfaces */
 	toobig = 0;
 	for (;;) {
-		if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+		if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
 			toobig = 1;
-			nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
+			nalloc = PAGE_SIZE / sizeof(*ifr);
 			CWARN("Too many interfaces: only enumerating first %d\n",
 			      nalloc);
 		}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 61459cf9d58f..b01dc424c514 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -27,8 +27,8 @@
 #define LNET_NRB_SMALL_PAGES	1
 #define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
 #define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES	((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
-				 PAGE_CACHE_SHIFT)
+#define LNET_NRB_LARGE_PAGES	((LNET_MTU + PAGE_SIZE - 1) >> \
+				 PAGE_SHIFT)
 
 static char *forwarding = "";
 module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 			return NULL;
 		}
 
-		rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
 		rb->rb_kiov[i].kiov_offset = 0;
 		rb->rb_kiov[i].kiov_page = page;
 	}
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index eebc92412061..dcb6e506f592 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
 		 * NB: this is not going to work for variable page size,
 		 * but we have to keep it for compatibility
 		 */
-		len = npg * PAGE_CACHE_SIZE;
+		len = npg * PAGE_SIZE;
 
 	} else {
 		test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		len = breq->blk_len;
-		npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	if (npg > LNET_MAX_IOV || npg <= 0)
@@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 
 	if (pattern == LST_BRW_CHECK_SIMPLE) {
 		memcpy(addr, &magic, BRW_MSIZE);
-		addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+		addr += PAGE_SIZE - BRW_MSIZE;
 		memcpy(addr, &magic, BRW_MSIZE);
 		return;
 	}
 
 	if (pattern == LST_BRW_CHECK_FULL) {
-		for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+		for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
 			memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
 		return;
 	}
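
As an aside, the two patterns above stamp a page either at its edges or throughout; a minimal userspace sketch of the same stamping scheme (PAGE_SZ and MSIZE are illustrative stand-ins for PAGE_SIZE and BRW_MSIZE):

#include <string.h>

#define PAGE_SZ 4096			/* stand-in for PAGE_SIZE */
#define MSIZE   8			/* stand-in for BRW_MSIZE */

/* LST_BRW_CHECK_SIMPLE: magic in the first and last MSIZE bytes only */
static void fill_simple(char *addr, unsigned long long magic)
{
	memcpy(addr, &magic, MSIZE);
	memcpy(addr + PAGE_SZ - MSIZE, &magic, MSIZE);
}

/* LST_BRW_CHECK_FULL: magic repeated across the whole page */
static void fill_full(char *addr, unsigned long long magic)
{
	int i;

	for (i = 0; i < PAGE_SZ / MSIZE; i++)
		memcpy(addr + i * MSIZE, &magic, MSIZE);
}

brw_check_page() below reads the same offsets back and bails out via bad_data on the first mismatch.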
@@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
 		if (data != magic)
 			goto bad_data;
 
-		addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+		addr += PAGE_SIZE - BRW_MSIZE;
 		data = *((__u64 *)addr);
 		if (data != magic)
 			goto bad_data;
@@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
 	}
 
 	if (pattern == LST_BRW_CHECK_FULL) {
-		for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+		for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
 			data = *(((__u64 *)addr) + i);
 			if (data != magic)
 				goto bad_data;
@@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		npg = breq->blk_npg;
-		len = npg * PAGE_CACHE_SIZE;
+		len = npg * PAGE_SIZE;
 
 	} else {
 		test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		len = breq->blk_len;
-		npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
 			reply->brw_status = EINVAL;
 			return 0;
 		}
-		npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+		npg = reqst->brw_len >> PAGE_SHIFT;
 
 	} else {
-		npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 5c7cb72eac9a..79ee6c0bf7c1 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
 	if (args->lstio_tes_param &&
 	    (args->lstio_tes_param_len <= 0 ||
 	     args->lstio_tes_param_len >
-	     PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+	     PAGE_SIZE - sizeof(lstcon_test_t)))
 		return -EINVAL;
 
 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
 
 	opc = data->ioc_u32[0];
 
-	if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+	if (data->ioc_plen1 > PAGE_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(buf, data->ioc_plen1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index bcd78888f9cc..35a227d0c657 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
 	test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
 
 	brq->blk_opc = param->blk_opc;
-	brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
-			PAGE_CACHE_SIZE;
+	brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+			PAGE_SIZE;
 	brq->blk_flags = param->blk_flags;
 
 	return 0;
@@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 	if (transop == LST_TRANS_TSBCLIADD) {
 		npg = sfw_id_pages(test->tes_span);
 		nob = !(feats & LST_FEAT_BULK_LEN) ?
-		      npg * PAGE_CACHE_SIZE :
+		      npg * PAGE_SIZE :
 		      sizeof(lnet_process_id_packed_t) * test->tes_span;
 	}
 
@@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 			LASSERT(nob > 0);
 
 			len = !(feats & LST_FEAT_BULK_LEN) ?
-			      PAGE_CACHE_SIZE :
-			      min_t(int, nob, PAGE_CACHE_SIZE);
+			      PAGE_SIZE :
+			      min_t(int, nob, PAGE_SIZE);
 			nob -= len;
 
 			bulk->bk_iovs[i].kiov_offset = 0;
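
The loop above carves a bulk of nob bytes into page-sized kiov fragments; a hypothetical userspace rendering of the same chunking, assuming a 4 KB page:

#include <stdio.h>

#define PAGE_SZ 4096	/* assumed page size */

int main(void)
{
	int nob = 10000;	/* arbitrary bulk length */
	int i = 0;

	while (nob > 0) {
		int len = nob < PAGE_SZ ? nob : PAGE_SZ;	/* min_t() */

		printf("kiov[%d]: offset 0, len %d\n", i++, len);
		nob -= len;
	}
	/* 10000 bytes -> fragments of 4096, 4096 and 1808 */
	return 0;
}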
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 926c3970c498..e2c532399366 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
 		int len;
 
 		if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
-			len = npg * PAGE_CACHE_SIZE;
+			len = npg * PAGE_SIZE;
 
 		} else {
 			len = sizeof(lnet_process_id_packed_t) *
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 69be7d6f48fa..7d7748d96332 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
 static int
 srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
 {
-	nob = min_t(int, nob, PAGE_CACHE_SIZE);
+	nob = min_t(int, nob, PAGE_SIZE);
 
 	LASSERT(nob > 0);
 	LASSERT(i >= 0 && i < bk->bk_niov);
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 288522d4d7b9..e689ca1846e1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,10 +390,10 @@ typedef struct sfw_test_instance {
 	} tsi_u;
 } sfw_test_instance_t;
 
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
 #define SFW_MAX_CONCUR		LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE		(PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_ID_PER_PAGE		(PAGE_SIZE / sizeof(lnet_process_id_packed_t))
 #define SFW_MAX_NDESTS		(LNET_MAX_IOV * SFW_ID_PER_PAGE)
 #define sfw_id_pages(n)		(((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
 
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99e1fb4..c6c7f54637fb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 		return;
 
 	if (PagePrivate(page))
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 
 	cancel_dirty_page(page);
 	ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b13a305..242bb1ef6245 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@ struct lu_context_key {
 	{							\
 		type *value;					\
 								\
-		CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value));	\
+		CLASSERT(PAGE_SIZE >= sizeof (*value));		\
 								\
 		value = kzalloc(sizeof(*value), GFP_NOFS);	\
 		if (!value)					\
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index da8bc6eadd13..5aae1d06a5fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different from PAGE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this there isn't any way to find where the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
 */
 #define LU_PAGE_SHIFT 12
 #define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
 #define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))
 
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
 
 /** @} lu_dir */
 
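To make the constants above concrete: LU_PAGE_SIZE is pinned at 4 KB, so LU_PAGE_COUNT is how many lu_dirpages fit in one VM page. A small sketch of the arithmetic (the 64 KB case is an assumed example of a large-page host):

#include <assert.h>

#define LU_PAGE_SHIFT 12

/* lu_dirpages per VM page: 1 << (PAGE_SHIFT - LU_PAGE_SHIFT) */
static unsigned long lu_page_count(unsigned long page_shift)
{
	return 1UL << (page_shift - LU_PAGE_SHIFT);
}

int main(void)
{
	assert(lu_page_count(12) == 1);		/* 4 KB host: one per page */
	assert(lu_page_count(16) == 16);	/* 64 KB host: sixteen per page */
	return 0;
}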
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index df94f9f3bef2..af77eb359c43 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
 		if (cli->cl_max_mds_easize < body->max_mdsize) {
 			cli->cl_max_mds_easize = body->max_mdsize;
 			cli->cl_default_mds_easize =
-				min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
+				min_t(__u32, body->max_mdsize, PAGE_SIZE);
 		}
 		if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
 			cli->cl_max_mds_cookiesize = body->max_cookiesize;
 			cli->cl_default_mds_cookiesize =
-				min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
+				min_t(__u32, body->max_cookiesize, PAGE_SIZE);
 		}
 	}
 }
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 4fa1a18b7d15..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -99,21 +99,21 @@
 */
 #define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
 #define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
 
 #define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
 #define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)
 
 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
 #  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
 # endif
 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 #  error "PTLRPC_MAX_BRW_SIZE too big"
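
The guards above work because every operand is a preprocessor-time constant; the same pattern in isolation (MY_PAGES and MY_SIZE are hypothetical names, not part of the tree):

#define MY_SHIFT 12
#define MY_PAGES 256
#define MY_SIZE  (MY_PAGES << MY_SHIFT)

/* x & (x - 1) is zero only when x is a power of two */
#if ((MY_PAGES & (MY_PAGES - 1)) != 0)
# error "MY_PAGES isn't a power of two"
#endif
#if (MY_SIZE != (MY_PAGES * (1 << MY_SHIFT)))
# error "MY_SIZE isn't MY_PAGES * page size"
#endif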
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4a0f2e8b19f6..4264d97650ec 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@ struct client_obd {
 	int cl_grant_shrink_interval; /* seconds */
 
 	/* A chunk is an optimal size used by osc_extent to determine
-	 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+	 * the extent size. A chunk is max(PAGE_SIZE, OST block size)
 	 */
 	int cl_chunkbits;
 	int cl_chunk;
@@ -1318,7 +1318,7 @@ bad_format:
 
 static inline int cli_brw_size(struct obd_device *obd)
 {
-	return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
 }
 
 #endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 225262fa67b6..f8ee3a3254ba 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
 
 #ifdef POISON_BULK
 #define POISON_PAGE(page, val) do {		\
-	memset(kmap(page), val, PAGE_CACHE_SIZE);	\
+	memset(kmap(page), val, PAGE_SIZE);	\
 	kunmap(page);				\
 } while (0)
 #else
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index aced41ab93a1..96141d17d07f 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
 		 * --bug 17336
 		 */
 		loff_t size = cl_isize_read(inode);
-		loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+		loff_t cur_index = start >> PAGE_SHIFT;
 		loff_t size_index = (size - 1) >>
-				    PAGE_CACHE_SHIFT;
+				    PAGE_SHIFT;
 
 		if ((size == 0 && cur_index != 0) ||
 		    size_index < cur_index)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b586d5a88d00..7dd7df59aa1f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	cli->cl_avail_grant = 0;
 	/* FIXME: Should limit this for the sum of all cl_dirty_max. */
 	cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-	if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
-		cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+	if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
+		cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
 	INIT_LIST_HEAD(&cli->cl_cache_waiters);
 	INIT_LIST_HEAD(&cli->cl_loi_ready_list);
 	INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	 * In the future this should likely be increased. LU-1431
 	 */
 	cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
-					  LNET_MTU >> PAGE_CACHE_SHIFT);
+					  LNET_MTU >> PAGE_SHIFT);
 
 	if (!strcmp(name, LUSTRE_MDC_NAME)) {
 		cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 2;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 3;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 4;
 	} else {
 		cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
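
Every totalram_pages comparison above leans on one identity: with 2^PAGE_SHIFT-byte pages, 1 MB equals 1 << (20 - PAGE_SHIFT) pages. A worked sketch assuming 4 KB pages (PAGE_SHIFT == 12):

#include <assert.h>

#define PG_SHIFT 12UL	/* assumed PAGE_SHIFT */

int main(void)
{
	unsigned long totalram_pages = 32768;	/* pretend 128 MB of RAM */

	/* pages -> MB: 32768 >> 8 == 128, so this host gets 2 RPCs in flight */
	assert((totalram_pages >> (20 - PG_SHIFT)) == 128);

	/* MB -> pages: 40 MB of readahead is 40 << 8 == 10240 pages */
	assert((40UL << (20 - PG_SHIFT)) == 10240);
	return 0;
}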
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3e937b050203..b913ba9cf97c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
 /*
  * 50 ldlm locks for 1MB of RAM.
  */
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
 
 /*
  * Maximal possible grant step plan in %.
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c7904a96f9af..74e193e52cd6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
 {
 	int avail;
 
-	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
 	if (likely(avail >= 0))
 		avail /= (int)sizeof(struct lustre_handle);
 	else
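
A worked instance of the capacity computation above, under assumed sizes (the LDLM_MAXREQSIZE and lustre_handle values here are illustrative only):

#include <assert.h>

int main(void)
{
	int page_size = 4096;		/* assumed PAGE_SIZE */
	int maxreqsize = 5 * 1024;	/* assumed LDLM_MAXREQSIZE */
	int handle_size = 8;		/* assumed sizeof(struct lustre_handle) */
	int req_size = 512;		/* space already consumed in the request */
	int avail;

	/* cap one request at a page less 512 bytes of slack, as above */
	avail = (maxreqsize < page_size - 512 ?
		 maxreqsize : page_size - 512) - req_size;
	if (avail >= 0)
		avail /= handle_size;
	assert(avail == 384);		/* 384 lock handles fit per request */
	return 0;
}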
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4e0a3e583330..e4c82883e580 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
 * a header lu_dirpage which describes the start/end hash, and whether this
 * page is empty (contains no dir entry) or hash collides with the next page.
 * After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
 *
 */
 
@@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 	struct page **page_pool;
 	struct page *page;
 	struct lu_dirpage *dp;
-	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
 	int nrdpgs = 0; /* number of pages read actually */
 	int npages;
 	int i;
@@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 		if (body->valid & OBD_MD_FLSIZE)
 			cl_isize_write(inode, body->size);
 
-		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
-			 >> PAGE_CACHE_SHIFT;
+		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
+			 >> PAGE_SHIFT;
 		SetPageUptodate(page0);
 	}
 	unlock_page(page0);
@@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 		page = page_pool[i];
 
 		if (rc < 0 || i >= nrdpgs) {
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
 			       offset, ret);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	if (page_pool != &page0)
@@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
 		truncate_complete_page(page->mapping, page);
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
 	if (found > 0 && !radix_tree_exceptional_entry(page)) {
 		struct lu_dirpage *dp;
 
-		page_cache_get(page);
+		get_page(page);
 		spin_unlock_irq(&mapping->tree_lock);
 		/*
 		 * In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
 			page = NULL;
 		}
 	} else {
-		page_cache_release(page);
+		put_page(page);
 		page = ERR_PTR(-EIO);
 	}
 
@@ -1507,7 +1506,7 @@ skip_lmm:
 		st.st_gid = body->gid;
 		st.st_rdev = body->rdev;
 		st.st_size = body->size;
-		st.st_blksize = PAGE_CACHE_SIZE;
+		st.st_blksize = PAGE_SIZE;
 		st.st_blocks = body->blocks;
 		st.st_atime = body->atime;
 		st.st_mtime = body->mtime;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e1572cb457b..e3c0f1dd4d31 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
 /* default to about 40meg of readahead on a given system. That much tied
  * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
  */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
 
 /* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
 
 enum ra_stat {
 	RA_STAT_HIT = 0,
@@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
 static inline void ll_invalidate_page(struct page *vmpage)
 {
 	struct address_space *mapping = vmpage->mapping;
-	loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+	loff_t offset = vmpage->index << PAGE_SHIFT;
 
 	LASSERT(PageLocked(vmpage));
 	if (!mapping)
 		return;
 
-	ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
 	truncate_complete_page(mapping, vmpage);
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6d6bb33e3655..b57a992688a8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
 
 	si_meminfo(&si);
 	pages = si.totalram - si.totalhigh;
-	if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
+	if (pages >> (20 - PAGE_SHIFT) < 512)
 		lru_page_max = pages / 2;
 	else
 		lru_page_max = (pages / 4) * 3;
@@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	    valid != CLIENT_CONNECT_MDT_REQD) {
 		char *buf;
 
-		buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!buf) {
 			err = -ENOMEM;
 			goto out_md_fid;
 		}
-		obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+		obd_connect_flags2str(buf, PAGE_SIZE,
 				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
 		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
 				   sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 		sbi->ll_md_brw_size = data->ocd_brw_size;
 	else
-		sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+		sbi->ll_md_brw_size = PAGE_SIZE;
 
 	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
 		LCONSOLE_INFO("Layout lock feature supported.\n");
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 69445a9f2011..5b484e62ffd0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
 		     size_t count)
 {
 	policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+				 (vma->vm_pgoff << PAGE_SHIFT);
 	policy->l_extent.end = (policy->l_extent.start + count - 1) |
 			       ~CFS_PAGE_MASK;
 }
@@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 		vmpage = vio->u.fault.ft_vmpage;
 		if (result != 0 && vmpage) {
-			page_cache_release(vmpage);
+			put_page(vmpage);
 			vmf->page = NULL;
 		}
 	}
@@ -360,7 +360,7 @@ restart:
 		lock_page(vmpage);
 		if (unlikely(!vmpage->mapping)) { /* unlucky */
 			unlock_page(vmpage);
-			page_cache_release(vmpage);
+			put_page(vmpage);
 			vmf->page = NULL;
 
 			if (!printed && ++count > 16) {
@@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 	LASSERTF(last > first, "last %llu first %llu\n", last, first);
 	if (mapping_mapped(mapping)) {
 		rc = 0;
-		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+		unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
 				    last - first + 1, 0);
 	}
 
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b725fc16cf49..f169c0db63b4 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 	bio_for_each_segment(bvec, bio, iter) {
 		BUG_ON(bvec.bv_offset != 0);
-		BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+		BUG_ON(bvec.bv_len != PAGE_SIZE);
 
 		pages[page_count] = bvec.bv_page;
 		offsets[page_count] = offset;
@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 		       (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
 		       page_count);
 
-	pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+	pvec->ldp_size = page_count << PAGE_SHIFT;
 	pvec->ldp_nr = page_count;
 
 	/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 
 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
-	lo->lo_blocksize = PAGE_CACHE_SIZE;
+	lo->lo_blocksize = PAGE_SIZE;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 	lo->lo_queue->queuedata = lo;
 
 	/* queue parameters */
-	CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+	CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
 	blk_queue_logical_block_size(lo->lo_queue,
-				     (unsigned short)PAGE_CACHE_SIZE);
+				     (unsigned short)PAGE_SIZE);
 	blk_queue_max_hw_sectors(lo->lo_queue,
-				 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+				 LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
 	blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
 
 	set_capacity(disks[lo->lo_number], size);
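
The CLASSERT above documents why the cast is safe: blk_queue_logical_block_size() takes an unsigned short here, so a page of 64 KB or more would truncate to zero. A hypothetical userspace check of the same constraint:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long page_sz = 4096;	/* assumed PAGE_SIZE */

	/* the block size must fit in unsigned short, i.e. stay below 1 << 16 */
	assert(page_sz < (1UL << (sizeof(unsigned short) * CHAR_BIT)));
	assert((unsigned short)page_sz == page_sz);	/* no truncation */
	return 0;
}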
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 45941a6600fe..27ab1261400e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number > totalram_pages / 2) {
 
 		CERROR("can't set file readahead more than %lu MB\n",
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+		       totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
 		return -ERANGE;
 	}
 
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
 	 */
 	if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
 		CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
-		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	struct super_block *sb = m->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct cl_client_cache *cache = &sbi->ll_cache;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 	int max_cached_mb;
 	int unused_mb;
 
@@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 	if (pages_number < 0 || pages_number > totalram_pages) {
 		CERROR("%s: can't set max cache more than %lu MB\n",
 		       ll_get_fsname(sb, NULL, 0),
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+		       totalram_pages >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 34614acf3f8e..edab6c5b7e50 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
 		 */
 		io->ci_lockreq = CILR_NEVER;
 
-		pos = vmpage->index << PAGE_CACHE_SHIFT;
+		pos = vmpage->index << PAGE_SHIFT;
 
 		/* Create a temp IO to serve write. */
-		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
 		if (result == 0) {
 			cio->cui_fd = LUSTRE_FPRIVATE(file);
 			cio->cui_iter = NULL;
@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 		}
 		if (rc != 1)
 			unlock_page(vmpage);
-		page_cache_release(vmpage);
+		put_page(vmpage);
 	} else {
 		which = RA_STAT_FAILED_GRAB_PAGE;
 		msg = "g_c_p_n failed";
@@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 * striped over, rather than having a constant value for all files here.
 */
 
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
 * by default, this should be adjusted corresponding with max_read_ahead_mb
 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
 * up quickly which will affect read performance significantly. See LU-2816
 */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {
@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 			end = rpc_boundary;
 
 		/* Truncate RA window to end of file */
-		end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+		end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
 
 		ras->ras_next_readahead = max(end, end + 1);
 		RAS_CDEBUG(ras);
@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 	if (reserved != 0)
 		ll_ra_count_put(ll_i2sbi(inode), reserved);
 
-	if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+	if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
 		ll_ra_stats_inc(mapping, RA_STAT_EOF);
 
 	/* if we didn't get to the end of the region we reserved from
@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 	if (ras->ras_requests == 2 && !ras->ras_request_index) {
 		__u64 kms_pages;
 
-		kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-			    PAGE_CACHE_SHIFT;
+		kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+			    PAGE_SHIFT;
 
 		CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
 		       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 	 * PageWriteback or clean the page.
 	 */
 	result = cl_sync_file_range(inode, offset,
-				    offset + PAGE_CACHE_SIZE - 1,
+				    offset + PAGE_SIZE - 1,
 				    CL_FSYNC_LOCAL, 1);
 	if (result > 0) {
 		/* actually we may have written more than one page.
@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	int ignore_layout = 0;
 
 	if (wbc->range_cyclic) {
-		start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+		start = mapping->writeback_index << PAGE_SHIFT;
 		end = OBD_OBJECT_EOF;
 	} else {
 		start = wbc->range_start;
@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
 		if (end == OBD_OBJECT_EOF)
 			end = i_size_read(inode);
-		mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+		mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
 	}
 	return result;
 }
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 7a5db67bc680..69aa15e8e3ef 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
 	 * below because they are run with page locked and all our io is
 	 * happening with locked page too
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE) {
+	if (offset == 0 && length == PAGE_SIZE) {
 		env = cl_env_get(&refcheck);
 		if (!IS_ERR(env)) {
 			inode = vmpage->mapping->host;
@@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
 		return -EFBIG;
 	}
 
-	*max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	*max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+	*max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	*max_pages -= user_addr >> PAGE_SHIFT;
 
 	*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
 	if (*pages) {
@@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
 	for (i = 0; i < npages; i++) {
 		if (do_dirty)
 			set_page_dirty_lock(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	kvfree(pages);
 }
@@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
 * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
 */
 #define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
-		       PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
+		       PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 			       loff_t file_offset)
 {
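
The comment bounds one direct IO by how many brw_page descriptors a single kmalloc can hold; a worked version of that arithmetic under assumed sizes (all three constants below are stand-ins):

#include <stdio.h>

int main(void)
{
	unsigned long kmalloc_max = 128UL << 10; /* assumed KMALLOC_MAX_SIZE */
	unsigned long brw_page_sz = 24;		/* assumed sizeof(struct brw_page) */
	unsigned long page_sz = 4096;		/* assumed PAGE_SIZE */
	unsigned long max_brw = 1UL << 20;	/* assumed DT_MAX_BRW_SIZE */
	unsigned long max_dio;

	/* pages describable in one kmalloc, rounded down to a BRW multiple */
	max_dio = (kmalloc_max / brw_page_sz * page_sz) & ~(max_brw - 1);
	printf("max DIO: %lu MB\n", max_dio >> 20);	/* ~21 MB, cf. "22MB" above */
	return 0;
}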
@@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 	CDEBUG(D_VFSTRACE,
 	       "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
 	       inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
-	       file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
-	       MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+	       file_offset, file_offset, count >> PAGE_SHIFT,
+	       MAX_DIO_SIZE >> PAGE_SHIFT);
 
 	/* Check that all user buffers are aligned as well */
 	if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 		 * page worth of page pointers = 4MB on i386.
 		 */
 		if (result == -ENOMEM &&
-		    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
-		    PAGE_CACHE_SIZE) {
+		    size > (PAGE_SIZE / sizeof(*pages)) *
+		    PAGE_SIZE) {
 			size = ((((size / 2) - 1) |
 				 ~CFS_PAGE_MASK) + 1) &
 				CFS_PAGE_MASK;
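
On ENOMEM the code above retries with about half the bytes, rounded up to a whole page; the rounding expression in isolation, assuming a 4 KB page (PG_MASK mirrors CFS_PAGE_MASK):

#include <assert.h>

#define PAGE_SZ 4096UL
#define PG_MASK (~(PAGE_SZ - 1))	/* mirrors CFS_PAGE_MASK */

/* half the transfer size, rounded up to a page multiple */
static unsigned long halve_to_page(unsigned long size)
{
	return ((((size / 2) - 1) | ~PG_MASK) + 1) & PG_MASK;
}

int main(void)
{
	assert(halve_to_page(24576) == 12288);	/* 6 pages -> 3 pages */
	assert(halve_to_page(20480) == 12288);	/* 5 pages -> 2.5, rounded up to 3 */
	return 0;
}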
@@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 		       loff_t pos, unsigned len, unsigned flags,
 		       struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int rc;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 	rc = ll_prepare_write(file, page, from, from + len);
 	if (rc) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return rc;
 }
@@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 		loff_t pos, unsigned len, unsigned copied,
 		struct page *page, void *fsdata)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int rc;
 
 	rc = ll_commit_write(file, page, from, from + copied);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return rc ?: copied;
 }
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index fb0c26ee7ff3..85a835976174 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
 		vio->cui_ra_window_set = 1;
 		bead->lrr_start = cl_index(obj, pos);
 		/*
-		 * XXX: explicit PAGE_CACHE_SIZE
+		 * XXX: explicit PAGE_SIZE
 		 */
-		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+		bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
 		ll_ra_read_in(file, bead);
 	}
 
@@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
 	 * We're completely overwriting an existing page, so _don't_
 	 * set it up to date until commit_write
 	 */
-	if (from == 0 && to == PAGE_CACHE_SIZE) {
+	if (from == 0 && to == PAGE_SIZE) {
 		CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
 		POISON_PAGE(page, 0x11);
 	} else
@@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
 		set_page_dirty(vmpage);
 		vvp_write_pending(cl2ccc(obj), cp);
 	} else if (result == -EDQUOT) {
-		pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+		pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
 		bool need_clip = true;
 
 		/*
@@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
 		 * being.
 		 */
 		if (last_index > pg->cp_index) {
-			to = PAGE_CACHE_SIZE;
+			to = PAGE_SIZE;
 			need_clip = false;
 		} else if (last_index == pg->cp_index) {
 			int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 850bae734075..33ca3eb34965 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
 	struct page *vmpage = cp->cpg_page;
 
 	LASSERT(vmpage);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static void vvp_page_fini(const struct lu_env *env,
@@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
 	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
-	offset = vmpage->index << PAGE_CACHE_SHIFT;
+	offset = vmpage->index << PAGE_SHIFT;
 
 	/*
 	 * XXX is it safe to call this with the page lock held?
 	 */
-	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
 	return 0;
 }
 
@@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
 	cpg->cpg_page = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 
 	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
 	if (page->cp_type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 0f776cf8a5aa..9abb7c2b9231 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
 *	|s|e|f|p|ent| 0 | ... | 0 |
 *	'----------------- -----'
 *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
 * larger than LU_PAGE_SIZE, a single host page may contain multiple
 * lu_dirpages. After reading the lu_dirpages from the MDS, the
 * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
  * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
  *   to the first entry of the next lu_dirpage.
  */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 {
 	int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 }
 #else
 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
 
 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 			struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 	struct lmv_obd *lmv = &obd->u.lmv;
 	__u64 offset = op_data->op_offset;
 	int rc;
-	int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+	int ncfspgs; /* pages read in PAGE_SIZE */
 	int nlupgs; /* pages read in LU_PAGE_SIZE */
 	struct lmv_tgt_desc *tgt;
 
@@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 	if (rc != 0)
 		return rc;
 
-	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
-		   >> PAGE_CACHE_SHIFT;
+	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+		   >> PAGE_SHIFT;
 	nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
 	LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
 	LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 55dd8ef9525b..b91d3ff18b02 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1002,10 +1002,10 @@ restart_bulk:
 
 	/* NB req now owns desc and will free it when it gets freed */
 	for (i = 0; i < op_data->op_npages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	mdc_readdir_pack(req, op_data->op_offset,
-			 PAGE_CACHE_SIZE * op_data->op_npages,
+			 PAGE_SIZE * op_data->op_npages,
 			 &op_data->op_fid1);
 
 	ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@ restart_bulk:
 	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
 		CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
 		       req->rq_bulk->bd_nob_transferred,
-		       PAGE_CACHE_SIZE * op_data->op_npages);
+		       PAGE_SIZE * op_data->op_npages);
 		ptlrpc_req_finished(req);
 		return -EPROTO;
 	}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index b7dc87248032..3924b095bfb0 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
 }
 
 enum {
-	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
 	CONFIG_READ_NRPAGES = 4
 };
 
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
 	LASSERT(cfg->cfg_instance);
 	LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
-	inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+	inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
 
-	pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
-	if (pos >= PAGE_CACHE_SIZE) {
+	pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+	if (pos >= PAGE_SIZE) {
 		kfree(inst);
 		return -E2BIG;
 	}
 
 	++pos;
 	buf = inst + pos;
-	bufsz = PAGE_CACHE_SIZE - pos;
+	bufsz = PAGE_SIZE - pos;
 
 	while (datalen > 0) {
 		int entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
 		/* Keep this swab for normal mixed endian handling. LU-1644 */
 		if (mne_swab)
 			lustre_swab_mgs_nidtbl_entry(entry);
-		if (entry->mne_length > PAGE_CACHE_SIZE) {
+		if (entry->mne_length > PAGE_SIZE) {
 			CERROR("MNE too large (%u)\n", entry->mne_length);
 			break;
 		}
@@ -1371,7 +1371,7 @@ again:
 	}
 	body->mcb_offset = cfg->cfg_last_idx + 1;
 	body->mcb_type = cld->cld_type;
-	body->mcb_bits = PAGE_CACHE_SHIFT;
+	body->mcb_bits = PAGE_SHIFT;
 	body->mcb_units = nrpages;
 
 	/* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@ again:
 	}
 
 	for (i = 0; i < nrpages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	ptlrpc_request_set_replen(req);
 	rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@ again:
 		goto out;
 	}
 
-	if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+	if (ealen > nrpages << PAGE_SHIFT) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -1439,7 +1439,7 @@ again:
 
 		ptr = kmap(pages[i]);
 		rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
-					     min_t(int, ealen, PAGE_CACHE_SIZE),
+					     min_t(int, ealen, PAGE_SIZE),
 					     mne_swab);
 		kunmap(pages[i]);
 		if (rc2 < 0) {
@@ -1448,7 +1448,7 @@ again:
 			break;
 		}
 
-		ealen -= PAGE_CACHE_SIZE;
+		ealen -= PAGE_SIZE;
 	}
 
 out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 231a2f26c693..394580016638 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
 	/*
 	 * XXX for now.
 	 */
-	return (loff_t)idx << PAGE_CACHE_SHIFT;
+	return (loff_t)idx << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 	/*
 	 * XXX for now.
 	 */
-	return offset >> PAGE_CACHE_SHIFT;
+	return offset >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-	return 1 << PAGE_CACHE_SHIFT;
+	return 1 << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
 
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1a938e1376f9..c2cf015962dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len); 461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
462 ret = -EINVAL; 462 ret = -EINVAL;
463 } 463 }
464 if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { 464 if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
465 CWARN("mask failed: u64val %llu >= %llu\n", u64val, 465 CWARN("mask failed: u64val %llu >= %llu\n", u64val,
466 (__u64)PAGE_CACHE_SIZE); 466 (__u64)PAGE_SIZE);
467 ret = -EINVAL; 467 ret = -EINVAL;
468 } 468 }
469 469
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
 	 * For clients with less memory, a larger fraction is needed
 	 * for other purposes (mostly for BGL).
 	 */
-	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+	if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
 		obd_max_dirty_pages = totalram_pages / 4;
 	else
 		obd_max_dirty_pages = totalram_pages / 2;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09b2b69..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
 #include "../../include/lustre/lustre_idl.h"
 
 #include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
 
 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 {
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 	if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
 		dst->i_blkbits = ffs(src->o_blksize) - 1;
 
-	if (dst->i_blkbits < PAGE_CACHE_SHIFT)
-		dst->i_blkbits = PAGE_CACHE_SHIFT;
+	if (dst->i_blkbits < PAGE_SHIFT)
+		dst->i_blkbits = PAGE_SHIFT;
 
 	/* allocation of space */
 	if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index fd333b9e968c..e6bf414a4444 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
 			 char *buf)
 {
 	return sprintf(buf, "%ul\n",
-		       obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+		       obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
 }
 
 static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
 	if (rc)
 		return rc;
 
-	val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+	val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
 
 	if (val > ((totalram_pages / 10) * 9)) {
 		/* Somebody wants to assign too much memory to dirty pages */
 		return -EINVAL;
 	}
 
-	if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+	if (val < 4 << (20 - PAGE_SHIFT)) {
 		/* Less than 4 Mb for dirty cache is also bad */
 		return -EINVAL;
 	}
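Note (a sketch, not part of the patch): the `20 - PAGE_SHIFT` expressions above convert between megabytes and pages. With the common 4 KiB page size PAGE_SHIFT is 12, so one megabyte is 1 << (20 - 12) = 256 pages. A minimal illustration, with hypothetical helper names:

#include <asm/page.h>	/* PAGE_SHIFT */

/* Hypothetical helpers, for illustration only. */
static inline unsigned long mb_to_pages(unsigned long mb)
{
	/* 1 MB = 1 << 20 bytes; one page = 1 << PAGE_SHIFT bytes */
	return mb << (20 - PAGE_SHIFT);
}

static inline unsigned long pages_to_mb(unsigned long pages)
{
	return pages >> (20 - PAGE_SHIFT);
}

So the 4 MB floor checked above works out to 4 << (20 - PAGE_SHIFT) = 1024 pages on a 4 KiB-page machine.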
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 65a4746c89ca..978568ada8e9 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -840,8 +840,8 @@ static int lu_htable_order(void)
 
 #if BITS_PER_LONG == 32
 	/* limit hashtable size for lowmem systems to low RAM */
-	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
-		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+	if (cache_size > 1 << (30 - PAGE_SHIFT))
+		cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
 #endif
 
 	/* clear off unreasonable cache setting. */
@@ -853,7 +853,7 @@ static int lu_htable_order(void)
 		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
 	}
 	cache_size = cache_size / 100 * lu_cache_percent *
-		(PAGE_CACHE_SIZE / 1024);
+		(PAGE_SIZE / 1024);
 
 	for (bits = 1; (1 << bits) < cache_size; ++bits) {
 		;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 64ffe243f870..1e83669c204d 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
 	struct page *vmpage = ep->ep_vmpage;
 
 	atomic_dec(&eco->eo_npages);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static int echo_page_prep(const struct lu_env *env,
@@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
 	struct echo_object *eco = cl2echo_obj(obj);
 
 	ep->ep_vmpage = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 	mutex_init(&ep->ep_lock);
 	cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
 	atomic_inc(&eco->eo_npages);
@@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
 	LASSERT(rc == 0);
 
 	rc = cl_echo_enqueue0(env, eco, offset,
-			      offset + npages * PAGE_CACHE_SIZE - 1,
+			      offset + npages * PAGE_SIZE - 1,
 			      rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
 			      CEF_NEVER);
 	if (rc < 0)
@@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
 	int delta;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		if (rw == OBD_BRW_WRITE) {
 			stripe_off = offset + delta;
 			stripe_id = id;
@@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
 	int rc2;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		stripe_off = offset + delta;
 		stripe_id = id;
 
@@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 		return -EINVAL;
 
 	/* XXX think again with misaligned I/O */
-	npages = count >> PAGE_CACHE_SHIFT;
+	npages = count >> PAGE_SHIFT;
 
 	if (rw == OBD_BRW_WRITE)
 		brw_flags = OBD_BRW_ASYNC;
@@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 
 	for (i = 0, pgp = pga, off = offset;
 	     i < npages;
-	     i++, pgp++, off += PAGE_CACHE_SIZE) {
+	     i++, pgp++, off += PAGE_SIZE) {
 
 		LASSERT(!pgp->pg); /* for cleanup */
 
@@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 			goto out;
 
 		pages[i] = pgp->pg;
-		pgp->count = PAGE_CACHE_SIZE;
+		pgp->count = PAGE_SIZE;
 		pgp->off = off;
 		pgp->flag = brw_flags;
 
@@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
 	if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
 		return -EINVAL;
 
-	npages = batch >> PAGE_CACHE_SHIFT;
-	tot_pages = count >> PAGE_CACHE_SHIFT;
+	npages = batch >> PAGE_SHIFT;
+	tot_pages = count >> PAGE_SHIFT;
 
 	lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
 	rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
 		if (tot_pages < npages)
 			npages = tot_pages;
 
-		for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+		for (i = 0; i < npages; i++, off += PAGE_SIZE) {
 			rnb[i].offset = off;
-			rnb[i].len = PAGE_CACHE_SIZE;
+			rnb[i].len = PAGE_SIZE;
 			rnb[i].flags = brw_flags;
 		}
 
1505 1505
@@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
 {
 	LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
 
-	LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+	LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
 
 	return echo_client_init();
 }
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 57c43c506ef2..a3358c39b2f1 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number <= 0 ||
-	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
 	    pages_number > totalram_pages / 4) /* 1/4 of RAM */
 		return -ERANGE;
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+	cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
 	osc_wake_cache_waiters(cli);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
 {
 	struct obd_device *dev = m->private;
 	struct client_obd *cli = &dev->u.cli;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 
 	seq_printf(m,
 		   "used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
 
 	/* if the max_pages is specified in bytes, convert to pages */
 	if (val >= ONE_MB_BRW_SIZE)
-		val >>= PAGE_CACHE_SHIFT;
+		val >>= PAGE_SHIFT;
 
-	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
 	/* max_pages_per_rpc must be chunk aligned */
 	val = (val + ~chunk_mask) & chunk_mask;
-	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
 		return -ERANGE;
 	}
 	client_obd_list_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 63363111380c..5f25bf83dcfc 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
 		return -ERANGE;
 
 	LASSERT(cur->oe_osclock == victim->oe_osclock);
-	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
 	chunk_start = cur->oe_start >> ppc_bits;
 	chunk_end = cur->oe_end >> ppc_bits;
 	if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
 	lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
 	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
 
-	LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
-	ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
+	ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	chunk_mask = ~((1 << ppc_bits) - 1);
 	chunksize = 1 << cli->cl_chunkbits;
 	chunk = index >> ppc_bits;
@@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
 
 	if (!sent) {
 		lost_grant = ext->oe_grants;
-	} else if (blocksize < PAGE_CACHE_SIZE &&
-		   last_count != PAGE_CACHE_SIZE) {
+	} else if (blocksize < PAGE_SIZE &&
+		   last_count != PAGE_SIZE) {
 		/* For short writes we shouldn't count parts of pages that
 		 * span a whole chunk on the OST side, or our accounting goes
 		 * wrong. Should match the code in filter_grant_check.
@@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
 		if (end)
 			count += blocksize - end;
 
-		lost_grant = PAGE_CACHE_SIZE - count;
+		lost_grant = PAGE_SIZE - count;
 	}
 	if (ext->oe_grants > 0)
 		osc_free_grant(cli, nr_pages, lost_grant);
@@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	struct osc_async_page *oap;
 	struct osc_async_page *tmp;
 	int pages_in_chunk = 0;
-	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	__u64 trunc_chunk = trunc_index >> ppc_bits;
 	int grants = 0;
 	int nr_pages = 0;
@@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
 	if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
 		last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
 		LASSERT(last->oap_count > 0);
-		LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+		LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
 		last->oap_async_flags |= ASYNC_COUNT_STABLE;
 	}
 
@@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
 	 */
 	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
 		if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
-			oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+			oap->oap_count = PAGE_SIZE - oap->oap_page_off;
 			oap->oap_async_flags |= ASYNC_COUNT_STABLE;
 		}
 	}
@@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
 	struct osc_object *obj = ext->oe_obj;
 	struct client_obd *cli = osc_cli(obj);
 	struct osc_extent *next;
-	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	pgoff_t chunk = index >> ppc_bits;
 	pgoff_t end_chunk;
 	pgoff_t end_index;
@@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
 		return 0;
 	else if (cl_offset(obj, page->cp_index + 1) > kms)
 		/* catch sub-page write at end of file */
-		return kms % PAGE_CACHE_SIZE;
+		return kms % PAGE_SIZE;
 	else
-		return PAGE_CACHE_SIZE;
+		return PAGE_SIZE;
 }
 
 static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
 	assert_spin_locked(&cli->cl_loi_list_lock.lock);
 	LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
 	atomic_inc(&obd_dirty_pages);
-	cli->cl_dirty += PAGE_CACHE_SIZE;
+	cli->cl_dirty += PAGE_SIZE;
 	pga->flag |= OBD_BRW_FROM_GRANT;
 	CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
-	       PAGE_CACHE_SIZE, pga, pga->pg);
+	       PAGE_SIZE, pga, pga->pg);
 	osc_update_next_shrink(cli);
 }
 
@@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
 
 	pga->flag &= ~OBD_BRW_FROM_GRANT;
 	atomic_dec(&obd_dirty_pages);
-	cli->cl_dirty -= PAGE_CACHE_SIZE;
+	cli->cl_dirty -= PAGE_SIZE;
 	if (pga->flag & OBD_BRW_NOCACHE) {
 		pga->flag &= ~OBD_BRW_NOCACHE;
 		atomic_dec(&obd_dirty_transit_pages);
-		cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+		cli->cl_dirty_transit -= PAGE_SIZE;
 	}
 }
 
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
  * used, we should return these grants to OST. There're two cases where grants
  * can be lost:
  * 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
  *    written. In this case OST may use less chunks to serve this partial
  *    write. OSTs don't actually know the page size on the client side. so
  *    clients have to calculate lost grant by the blocksize on the OST.
@@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	atomic_sub(nr_pages, &obd_dirty_pages);
-	cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+	cli->cl_dirty -= nr_pages << PAGE_SHIFT;
 	cli->cl_lost_grant += lost_grant;
 	if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
 		/* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
 	if (rc < 0)
 		return 0;
 
-	if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+	if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
 	    atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
 		osc_consume_write_grant(cli, &oap->oap_brw_page);
 		if (transient) {
-			cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+			cli->cl_dirty_transit += PAGE_SIZE;
 			atomic_inc(&obd_dirty_transit_pages);
 			oap->oap_brw_flags |= OBD_BRW_NOCACHE;
 		}
@@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
 	 * of queued writes and create a discontiguous rpc stream
 	 */
 	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
-	    cli->cl_dirty_max < PAGE_CACHE_SIZE ||
+	    cli->cl_dirty_max < PAGE_SIZE ||
 	    cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
 		rc = -EDQUOT;
 		goto out;
@@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
 
 		ocw->ocw_rc = -EDQUOT;
 		/* we can't dirty more */
-		if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+		if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
 		    (atomic_read(&obd_dirty_pages) + 1 >
 		     obd_max_dirty_pages)) {
 			CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index d720b1a1c18c..ce9ddd515f64 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 	int result;
 
 	opg->ops_from = 0;
-	opg->ops_to = PAGE_CACHE_SIZE;
+	opg->ops_to = PAGE_SIZE;
 
 	result = osc_prep_async_page(osc, opg, vmpage,
 				     cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and..
  */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
 /* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
 
 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
  * we should free slots aggressively. In this way, slots are freed in a steady
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 74805f1ae888..30526ebcad04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
 		oa->o_undirty = 0;
 	} else {
 		long max_in_flight = (cli->cl_max_pages_per_rpc <<
-				      PAGE_CACHE_SHIFT)*
+				      PAGE_SHIFT)*
 				     (cli->cl_max_rpcs_in_flight + 1);
 		oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
 	}
@@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
 static int osc_shrink_grant(struct client_obd *cli)
 {
 	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
-			     (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+			     (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	if (cli->cl_avail_grant <= target_bytes)
-		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
 	return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
 	 * We don't want to shrink below a single RPC, as that will negatively
 	 * impact block allocation and long-term performance.
 	 */
-	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
-		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 
 	if (target_bytes >= cli->cl_avail_grant) {
 		client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
 	 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
 	 * Keep comment here so that it can be found by searching.
 	 */
-	int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
 
 	if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
 	    client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
 	}
 
 	/* determine the appropriate chunk size used by osc_extent. */
-	cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+	cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
 	CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
 		LASSERT(pg->count > 0);
 		/* make sure there is no gap in the middle of page array */
 		LASSERTF(page_count == 1 ||
-			 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+			 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
 			  ergo(i > 0 && i < page_count - 1,
-			       poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+			       poff == 0 && pg->count == PAGE_SIZE) &&
 			  ergo(i == page_count - 1, poff == 0)),
 			 "i: %d/%d pg: %p off: %llu, count: %u\n",
 			 i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 					oap->oap_count;
 			else
 				LASSERT(oap->oap_page_off + oap->oap_count ==
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 		}
 	}
 
@@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 		tmp->oap_request = ptlrpc_request_addref(req);
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	starting_offset >>= PAGE_CACHE_SHIFT;
+	starting_offset >>= PAGE_SHIFT;
 	if (cmd == OBD_BRW_READ) {
 		cli->cl_r_in_flight++;
 		lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@ out:
 			CFS_PAGE_MASK;
 
 	if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
-	    fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+	    fm_key->fiemap.fm_start + PAGE_SIZE - 1)
 		policy.l_extent.end = OBD_OBJECT_EOF;
 	else
 		policy.l_extent.end = (fm_key->fiemap.fm_start +
 				       fm_key->fiemap.fm_length +
-				       PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+				       PAGE_SIZE - 1) & CFS_PAGE_MASK;
 
 	ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
 	mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 1b7673eec4d7..cf3ac8eee9ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 	LASSERT(page);
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
-	LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+	LASSERT(pageoffset + len <= PAGE_SIZE);
 
 	desc->bd_nob += len;
 
 	if (pin)
-		page_cache_get(page);
+		get_page(page);
 
 	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
 }
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
 
 	if (unpin) {
 		for (i = 0; i < desc->bd_iov_count; i++)
-			page_cache_release(desc->bd_iov[i].kiov_page);
+			put_page(desc->bd_iov[i].kiov_page);
 	}
 
 	kfree(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index b4eddf291269..cd94fed0ffdf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1092,7 +1092,7 @@ finish:
 
 	if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 		cli->cl_max_pages_per_rpc =
-			min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+			min(ocd->ocd_brw_size >> PAGE_SHIFT,
 			    cli->cl_max_pages_per_rpc);
 	else if (imp->imp_connect_op == MDS_CONNECT ||
 		 imp->imp_connect_op == MGS_CONNECT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cee04efb6fb5..c95a91ce26c9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
 	 * hose a kernel by allowing the request history to grow too
 	 * far.
 	 */
-	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (val > totalram_pages / (2 * bufpages))
 		return -ERANGE;
 
@@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
 	const char prefix[] = "connection=";
 	const int prefix_len = sizeof(prefix) - 1;
 
-	if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+	if (count > PAGE_SIZE - 1 || count <= prefix_len)
 		return -EINVAL;
 
 	kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 5f27d9c2e4ef..30d9a164e52d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
 	}
 
 	list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
-		LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+		LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
 			 "req %p bad\n", req);
 		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
 		if (!ptlrpc_no_resend(req))
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 72d5b9bf5b29..d3872b8c9a6e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -58,7 +58,7 @@
  * bulk encryption page pools *
  ****************************************/
 
-#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL (POINTERS_PER_PAGE)
 
 #define IDLE_IDX_MAX (100)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8cfce105c7ee..e21ca2bd6839 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1147,8 +1147,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 	ffs->sb = sb;
 	data->ffs_data = NULL;
 	sb->s_fs_info = ffs;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = FUNCTIONFS_MAGIC;
 	sb->s_op = &ffs_sb_operations;
 	sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5cdaf0150a4e..e64479f882a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
 		return -ENODEV;
 
 	/* superblock */
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = GADGETFS_MAGIC;
 	sb->s_op = &gadget_fs_operations;
 	sb->s_time_gran = 1;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e2..90901861bfc0 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
 	unsigned int max_sectors = 64;
 
 	if (us->fflags & US_FL_MAX_SECTORS_MIN)
-		max_sectors = PAGE_CACHE_SIZE >> 9;
+		max_sectors = PAGE_SIZE >> 9;
 	if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
 		blk_queue_max_hw_sectors(sdev->request_queue,
 					 max_sectors);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 71a923e53f93..3b1ca4411073 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -735,7 +735,7 @@ out:
 
 out_unmap:
 	for (i = 0; i < nr_pages; i++)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 
 	kfree(pages);
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a0806a..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 	int rc = 0;
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
 		clear_evtchn(evtchn);
 
 	if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
 		clear_evtchn(evtchn);
 }
 
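Note (a sketch, not part of the patch): both xen hunks above add the same sequence in place of the unconditional irq_move_irq() call. When an affinity change is pending, the event channel is masked before the ack so that an event arriving mid-migration cannot fire on the old CPU, and it is unmasked afterwards only if it was not already masked. Consolidated into a hypothetical helper, using the same helper names as events_base.c:

/* Sketch only: ack an event channel, migrating the irq while masked
 * if an affinity change is pending. */
static void ack_evtchn_maybe_move(struct irq_data *data, int evtchn)
{
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		int masked = test_and_set_mask(evtchn);	/* mask, remember prior state */

		clear_evtchn(evtchn);		/* ack while masked */
		irq_move_masked_irq(data);	/* migrate with no event deliverable */

		if (!masked)
			unmask_evtchn(evtchn);
	} else {
		clear_evtchn(evtchn);
	}
}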
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e9e04376c52c..ac9225e86bf3 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
 	 * If called with zero offset, we should release
 	 * the private state assocated with the page
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		v9fs_fscache_invalidate_page(page);
 }
 
@@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page)
 	struct bio_vec bvec;
 	int err, len;
 
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 
 	bvec.bv_page = page;
 	bvec.bv_offset = 0;
@@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 	int retval = 0;
 	struct page *page;
 	struct v9fs_inode *v9inode;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct inode *inode = mapping->host;
 
 
@@ -288,11 +288,11 @@ start:
 	if (PageUptodate(page))
 		goto out;
 
-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out;
 
 	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
-	page_cache_release(page);
+	put_page(page);
 	if (!retval)
 		goto start;
 out:
@@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 		/*
 		 * zero out the rest of the area
 		 */
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned from = pos & (PAGE_SIZE - 1);
 
 		zero_user(page, from + copied, len - copied);
 		flush_dcache_page(page);
@@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 	}
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index eadc894faea2..b84c291ba1eb 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		struct inode *inode = file_inode(file);
 		loff_t i_size;
 		unsigned long pg_start, pg_end;
-		pg_start = origin >> PAGE_CACHE_SHIFT;
-		pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
+		pg_start = origin >> PAGE_SHIFT;
+		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
 		if (inode->i_mapping && inode->i_mapping->nrpages)
 			invalidate_inode_pages2_range(inode->i_mapping,
 						      pg_start, pg_end);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf495cedec26..de3ed8629196 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
	sb->s_op = &v9fs_super_ops;
	sb->s_bdi = &v9ses->bdi;
	if (v9ses->cache)
-		sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
+		sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
 
	sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
	if (!v9ses->cache)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 22fc7c802d69..0cde550050e8 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
 
	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
		 page->index, to);
-	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(to > PAGE_SIZE);
	bsize = AFFS_SB(sb)->s_data_blksize;
-	tmp = page->index << PAGE_CACHE_SHIFT;
+	tmp = page->index << PAGE_SHIFT;
	bidx = tmp / bsize;
	boff = tmp % bsize;
 
@@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page)
	int err;
 
	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
-	to = PAGE_CACHE_SIZE;
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
-		to = inode->i_size & ~PAGE_CACHE_MASK;
-		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+	to = PAGE_SIZE;
+	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
+		to = inode->i_size & ~PAGE_MASK;
+		memset(page_address(page) + to, 0, PAGE_SIZE - to);
	}
 
	err = affs_do_readpage_ofs(page, to);
@@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
		return err;
	}
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
@@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
		return 0;
 
	/* XXX: inefficient but safe in the face of short writes */
-	err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE);
+	err = affs_do_readpage_ofs(page, PAGE_SIZE);
	if (err) {
		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
	}
	return err;
 }
@@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
	u32 tmp;
	int written;
 
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	from = pos & (PAGE_SIZE - 1);
	to = pos + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
@@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 
	bh = NULL;
	written = 0;
-	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	tmp = (page->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
@@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 
 done:
	affs_brelse(bh);
-	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	tmp = (page->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;
 
 err_first_bh:
	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
	return written;
 
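
The affs hunks above all lean on the same decomposition: a byte position in a file splits into a page index (pos >> PAGE_SHIFT) and an offset within that page (pos & (PAGE_SIZE - 1)), and the byte address is then re-derived to locate the on-disk block. A standalone sketch of that arithmetic, assuming 4 KiB pages and a hypothetical 512-byte AFFS data block size:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long pos = 70000;	/* hypothetical file position */
	unsigned long bsize = 512;	/* hypothetical data block size */
	unsigned long index = pos >> PAGE_SHIFT;	/* page index: 17 */
	unsigned long from = pos & (PAGE_SIZE - 1);	/* offset in page: 368 */
	unsigned long tmp = (index << PAGE_SHIFT) + from;	/* back to 70000 */

	printf("page %lu, offset %lu, block %lu, boff %lu\n",
	       index, from, tmp / bsize, tmp % bsize);
	return 0;
}

Since PAGE_CACHE_SHIFT and PAGE_SHIFT were the same value, none of these computed indices change under the rename.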
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e10e17788f06..5fda2bc53cd7 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -181,7 +181,7 @@ error:
 static inline void afs_dir_put_page(struct page *page)
 {
	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 999bc3caec92..6344aee4ac4b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page)
		_debug("cache said ENOBUFS");
	default:
	go_on:
-		offset = page->index << PAGE_CACHE_SHIFT;
+		offset = page->index << PAGE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
 
		/* read the contents of the file from the server into the
@@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
	BUG_ON(!PageLocked(page));
 
	/* we clean up only if the entire page is being invalidated */
-	if (offset == 0 && length == PAGE_CACHE_SIZE) {
+	if (offset == 0 && length == PAGE_SIZE) {
 #ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index ccd0b212e82a..81dd075356b9 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 
	kunmap(page);
 out_free:
-	page_cache_release(page);
+	put_page(page);
 out:
	_leave(" = %d", ret);
	return ret;
@@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
		buf = kmap_atomic(page);
		memcpy(devname, buf, size);
		kunmap_atomic(buf);
-		page_cache_release(page);
+		put_page(page);
		page = NULL;
	}
 
@@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
	return mnt;
 
 error:
-	page_cache_release(page);
+	put_page(page);
 error_no_page:
	free_page((unsigned long) options);
 error_no_options:
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 81afefe7d8a6..fbdb022b75a2 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb,
	_enter("");
 
	/* fill in the superblock */
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = AFS_FS_MAGIC;
	sb->s_op = &afs_super_ops;
	sb->s_bdi = &as->volume->bdi;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f70667..65de439bdc4f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
	_enter(",,%llu", (unsigned long long)pos);
 
	i_size = i_size_read(&vnode->vfs_inode);
-	if (pos + PAGE_CACHE_SIZE > i_size)
+	if (pos + PAGE_SIZE > i_size)
		len = i_size - pos;
	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 
	ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
	if (ret < 0) {
@@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = file->private_data;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;
 
	_enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
	*pagep = page;
	/* page won't leak in error case: it eventually gets cleaned off LRU */
 
-	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
-		ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
+	if (!PageUptodate(page) && len != PAGE_SIZE) {
+		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
		if (ret < 0) {
			kfree(candidate);
			_leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
	if (PageDirty(page))
		_debug("dirtied");
	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
	return copied;
 }
@@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
		if (page->index > end) {
			*_next = index;
-			page_cache_release(page);
+			put_page(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}
@@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
		if (page->mapping != mapping) {
			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
			continue;
		}
 
@@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
@@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping,
					    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
-		end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
+		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
-		start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		start = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}
 
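
afs_writepages converts the writeback byte range into an inclusive range of page indices before walking pages, and because range_end is an inclusive byte offset, shifting it right yields the index of the page containing the last byte. A sketch of that arithmetic under the same assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */

int main(void)
{
	long long range_start = 0, range_end = 8191;	/* inclusive bytes */
	unsigned long start = range_start >> PAGE_SHIFT;	/* page 0 */
	unsigned long end = range_end >> PAGE_SHIFT;	/* page 1, inclusive */

	printf("pages %lu..%lu\n", start, end);
	return 0;
}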
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d914c67a9d0..81381cc0dd17 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm)
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
-				page_cache_release(page);
+				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index b1adb92e69de..083ea2bc60ab 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
				void *kaddr = kmap(page);
				res = dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
-				page_cache_release(page);
+				put_page(page);
			} else {
				res = dump_skip(cprm, PAGE_SIZE);
			}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3172c4e2f502..20a2c02b77c4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
	return ret;
 }
@@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size)
	inode_lock(bdev->bd_inode);
	i_size_write(bdev->bd_inode, size);
	inode_unlock(bdev->bd_inode);
-	while (bsize < PAGE_CACHE_SIZE) {
+	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
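
The loop in bd_set_size() above searches for the largest power-of-two block size, up to the page size, that still divides the device size: it keeps doubling bsize while the corresponding bit of size is clear. A minimal standalone sketch (starting from 512 bytes, which is an assumption here; the real function starts from the device's logical block size):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed: 4 KiB pages */

/* Largest power-of-two block size (<= PAGE_SIZE) dividing `size`,
 * mirroring the loop in bd_set_size() above. */
static unsigned long pick_bsize(unsigned long long size)
{
	unsigned long bsize = 512;

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	printf("%lu\n", pick_bsize(1 << 20));	/* -> 4096 */
	printf("%lu\n", pick_bsize(3 * 512));	/* -> 512 */
	return 0;
}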
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e34a71b3e225..516e19d1d202 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
			BUG_ON(NULL == l);
 
			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
-			if (ret < (int)PAGE_CACHE_SIZE) {
+			if (ret < (int)PAGE_SIZE) {
				printk(KERN_INFO
				       "btrfsic: read @logical %llu failed!\n",
				       tmp_next_block_ctx.start);
@@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data(
	size_t offset_in_page;
	char *kaddr;
	char *dst = (char *)dstv;
-	size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
 
	WARN_ON(offset + len > block_ctx->len);
-	offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1);
+	offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
 
	while (len > 0) {
-		cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
-		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
+		cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
+		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
		kaddr = block_ctx->datav[i];
		memcpy(dst, kaddr + offset_in_page, cur);
 
@@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
 
		BUG_ON(!block_ctx->datav);
		BUG_ON(!block_ctx->pagev);
-		num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
-			    PAGE_CACHE_SHIFT;
+		num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+			    PAGE_SHIFT;
		while (num_pages > 0) {
			num_pages--;
			if (block_ctx->datav[num_pages]) {
@@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state,
	BUG_ON(block_ctx->datav);
	BUG_ON(block_ctx->pagev);
	BUG_ON(block_ctx->mem_to_free);
-	if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
+	if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
		printk(KERN_INFO
		       "btrfsic: read_block() with unaligned bytenr %llu\n",
		       block_ctx->dev_bytenr);
		return -1;
	}
 
-	num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
-		    PAGE_CACHE_SHIFT;
+	num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+		    PAGE_SHIFT;
	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
					  sizeof(*block_ctx->pagev)) *
					 num_pages, GFP_NOFS);
@@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
		for (j = i; j < num_pages; j++) {
			ret = bio_add_page(bio, block_ctx->pagev[j],
-					   PAGE_CACHE_SIZE, 0);
-			if (PAGE_CACHE_SIZE != ret)
+					   PAGE_SIZE, 0);
+			if (PAGE_SIZE != ret)
				break;
		}
		if (j == i) {
@@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
			return -1;
		}
		bio_put(bio);
-		dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+		dev_bytenr += (j - i) * PAGE_SIZE;
		i = j;
	}
	for (i = 0; i < num_pages; i++) {
@@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
	u32 crc = ~(u32)0;
	unsigned int i;
 
-	if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+	if (num_pages * PAGE_SIZE < state->metablock_size)
		return 1; /* not metadata */
-	num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+	num_pages = state->metablock_size >> PAGE_SHIFT;
	h = (struct btrfs_header *)datav[0];
 
	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 
	for (i = 0; i < num_pages; i++) {
		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
-		size_t sublen = i ? PAGE_CACHE_SIZE :
-				    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
+		size_t sublen = i ? PAGE_SIZE :
+				    (PAGE_SIZE - BTRFS_CSUM_SIZE);
 
		crc = btrfs_crc32c(crc, data, sublen);
	}
@@ -1826,14 +1826,14 @@ again:
		if (block->is_superblock) {
			bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
						    mapped_datav[0]);
-			if (num_pages * PAGE_CACHE_SIZE <
+			if (num_pages * PAGE_SIZE <
			    BTRFS_SUPER_INFO_SIZE) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
				return;
			}
			is_metadata = 1;
-			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
			processed_len = BTRFS_SUPER_INFO_SIZE;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -1844,7 +1844,7 @@ again:
	}
	if (is_metadata) {
		if (!block->is_superblock) {
-			if (num_pages * PAGE_CACHE_SIZE <
+			if (num_pages * PAGE_SIZE <
			    state->metablock_size) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
@@ -1880,7 +1880,7 @@ again:
			}
			block->logical_bytenr = bytenr;
		} else {
-			if (num_pages * PAGE_CACHE_SIZE <
+			if (num_pages * PAGE_SIZE <
			    state->datablock_size) {
				printk(KERN_INFO
				       "btrfsic: cannot work with too short bios!\n");
@@ -2013,7 +2013,7 @@ again:
			block->logical_bytenr = bytenr;
			block->is_metadata = 1;
			if (block->is_superblock) {
-				BUG_ON(PAGE_CACHE_SIZE !=
+				BUG_ON(PAGE_SIZE !=
				       BTRFS_SUPER_INFO_SIZE);
				ret = btrfsic_process_written_superblock(
						state,
@@ -2172,8 +2172,8 @@ again:
 continue_loop:
	BUG_ON(!processed_len);
	dev_bytenr += processed_len;
-	mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
-	num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+	mapped_datav += processed_len >> PAGE_SHIFT;
+	num_pages -= processed_len >> PAGE_SHIFT;
	goto again;
 }
 
@@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
			goto leave;
		cur_bytenr = dev_bytenr;
		for (i = 0; i < bio->bi_vcnt; i++) {
-			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
			if (!mapped_datav[i]) {
				while (i > 0) {
@@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root,
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
 
-	if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+	if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
		printk(KERN_INFO
-		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
-		       root->nodesize, PAGE_CACHE_SIZE);
+		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
+		       root->nodesize, PAGE_SIZE);
		return -1;
	}
-	if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+	if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
		printk(KERN_INFO
-		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
-		       root->sectorsize, PAGE_CACHE_SIZE);
+		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
+		       root->sectorsize, PAGE_SIZE);
		return -1;
	}
	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
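
check-integrity repeatedly converts a byte length to a page count with the round-up idiom (len + PAGE_SIZE - 1) >> PAGE_SHIFT, which is the same computation as the DIV_ROUND_UP(len, PAGE_SIZE) used elsewhere in the file. A quick sketch of the equivalence, again assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long lens[] = { 1, 4096, 4097, 16384 };

	for (int i = 0; i < 4; i++) {
		unsigned long long len = lens[i];
		printf("%llu bytes -> %llu == %llu pages\n", len,
		       (len + PAGE_SIZE - 1) >> PAGE_SHIFT,
		       DIV_ROUND_UP(len, PAGE_SIZE));
	}
	return 0;
}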
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3346cd8f9910..ff61a41ac90b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode,
		csum = ~(u32)0;
 
		kaddr = kmap_atomic(page);
-		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
+		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);
 
@@ -190,7 +190,7 @@ csum_failed:
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
-		page_cache_release(page);
+		put_page(page);
	}
 
	/* do io completion on the original bio */
@@ -224,8 +224,8 @@ out:
 static noinline void end_compressed_writeback(struct inode *inode,
					       const struct compressed_bio *cb)
 {
-	unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
+	unsigned long index = cb->start >> PAGE_SHIFT;
+	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
@@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
@@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio)
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
-		page_cache_release(page);
+		put_page(page);
	}
 
	/* finally free the cb struct */
@@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
+	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
@@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
-							   PAGE_CACHE_SIZE,
+							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;
 
		page->mapping = NULL;
-		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
-		    PAGE_CACHE_SIZE) {
+		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+		    PAGE_SIZE) {
			bio_get(bio);
 
			/*
@@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
			BUG_ON(!bio);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
-			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
-		if (bytes_left < PAGE_CACHE_SIZE) {
+		if (bytes_left < PAGE_SIZE) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
-		bytes_left -= PAGE_CACHE_SIZE;
-		first_byte += PAGE_CACHE_SIZE;
+		bytes_left -= PAGE_SIZE;
+		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);
@@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
	int misses = 0;
 
	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
-	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
+	last_offset = (page_offset(page) + PAGE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;
 
	if (isize == 0)
		return 0;
 
-	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
	while (last_offset < compressed_end) {
-		pg_index = last_offset >> PAGE_CACHE_SHIFT;
+		pg_index = last_offset >> PAGE_SHIFT;
 
		if (pg_index > end_index)
			break;
@@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
			break;
 
		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
-			page_cache_release(page);
+			put_page(page);
			goto next;
		}
 
-		end = last_offset + PAGE_CACHE_SIZE - 1;
+		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
@@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode,
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
		read_unlock(&em_tree->lock);
 
		if (!em || last_offset < em->start ||
-		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
+		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
			break;
		}
		free_extent_map(em);
 
		if (page->index == end_index) {
			char *userpage;
-			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
+			size_t zero_offset = isize & (PAGE_SIZE - 1);
 
			if (zero_offset) {
				int zeros;
-				zeros = PAGE_CACHE_SIZE - zero_offset;
+				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
@@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
		}
 
		ret = bio_add_page(cb->orig_bio, page,
-				   PAGE_CACHE_SIZE, 0);
+				   PAGE_SIZE, 0);
 
-		if (ret == PAGE_CACHE_SIZE) {
+		if (ret == PAGE_SIZE) {
			nr_pages++;
-			page_cache_release(page);
+			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
			break;
		}
 next:
-		last_offset += PAGE_CACHE_SIZE;
+		last_offset += PAGE_SIZE;
	}
	return 0;
 }
@@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
-	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
@@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
-				   PAGE_CACHE_SIZE);
+				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;
@@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;
 
-	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
+	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
@@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	add_ra_bio_pages(inode, em_start + em_len, cb);
 
	/* include any pages we added in add_ra-bio_pages */
-	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	cb->len = uncompressed_len;
 
	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
-		page->index = em_start >> PAGE_CACHE_SHIFT;
+		page->index = em_start >> PAGE_SHIFT;
 
		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(READ, page, 0,
-							PAGE_CACHE_SIZE,
+							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;
 
		page->mapping = NULL;
-		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
-		    PAGE_CACHE_SIZE) {
+		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+		    PAGE_SIZE) {
			bio_get(comp_bio);
 
			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;
 
-			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
-		cur_disk_byte += PAGE_CACHE_SIZE;
+		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);
 
@@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
-		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
-			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(PAGE_SIZE - *pg_offset,
+			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
		current_buf_start += bytes;
 
		/* check if we need to pick another page */
-		if (*pg_offset == PAGE_CACHE_SIZE) {
+		if (*pg_offset == PAGE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;
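
btrfs_decompress_buf2page's copy loop, visible in the last two hunks, always copies the minimum of the space left in the destination page, the space left in the working buffer, and the bytes still pending; when the page offset reaches PAGE_SIZE it advances to the next page. A self-contained sketch of that pattern with plain memory buffers (names are illustrative, not the btrfs API):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL		/* assumed: 4 KiB pages */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Copy `len` bytes from buf into an array of page-sized chunks. */
static void copy_to_pages(char pages[][PAGE_SIZE], const char *buf,
			  unsigned long len, unsigned long pg_offset)
{
	unsigned long pg_index = 0;

	while (len > 0) {
		/* min(space left in current page, bytes left to copy) */
		unsigned long bytes = MIN(PAGE_SIZE - pg_offset, len);

		memcpy(pages[pg_index] + pg_offset, buf, bytes);
		buf += bytes;
		len -= bytes;
		pg_offset += bytes;
		if (pg_offset == PAGE_SIZE) {	/* pick another page */
			pg_index++;
			pg_offset = 0;
		}
	}
}

int main(void)
{
	static char pages[3][PAGE_SIZE];
	static char buf[2 * PAGE_SIZE + 100];

	memset(buf, 'x', sizeof(buf));
	copy_to_pages(pages, buf, sizeof(buf), 0);
	printf("last page starts with '%c'\n", pages[2][0]);	/* 'x' */
	return 0;
}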
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d01f89d130e0..4e47849d7427 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1062,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
-		page_cache_release(page);
+		put_page(page);
	}
 }
 
@@ -1764,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
	if (err)
		return err;
 
-	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@ -2542,7 +2542,7 @@ int open_ctree(struct super_block *sb,
		err = ret;
		goto fail_bdi;
	}
-	fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+	fs_info->dirty_metadata_batch = PAGE_SIZE *
					(1 + ilog2(nr_cpu_ids));
 
	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
@@ -2787,7 +2787,7 @@ int open_ctree(struct super_block *sb,
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
-	if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
+	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
@@ -2837,7 +2837,7 @@ int open_ctree(struct super_block *sb,
 
	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
-				    SZ_4M / PAGE_CACHE_SIZE);
+				    SZ_4M / PAGE_SIZE);
 
	tree_root->nodesize = nodesize;
	tree_root->sectorsize = sectorsize;
@@ -4076,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
		ret = -EINVAL;
	}
	/* Only PAGE SIZE is supported yet */
-	if (sectorsize != PAGE_CACHE_SIZE) {
+	if (sectorsize != PAGE_SIZE) {
		printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
-				sectorsize, PAGE_CACHE_SIZE);
+				sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}
	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
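
setup_bdi() above sizes the default readahead window in pages. With the era's VM_MAX_READAHEAD of 128 (in KiB) and an assumed 4 KiB page size, that works out to 32 pages; both constants here are restated assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed: 4 KiB pages */
#define VM_MAX_READAHEAD 128	/* KiB; the value in this kernel era */

int main(void)
{
	/* default readahead window, as computed in setup_bdi() above */
	printf("%lu pages\n", VM_MAX_READAHEAD * 1024 / PAGE_SIZE);	/* 32 */
	return 0;
}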
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 53e12977bfd0..ce114ba9780a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3452,7 +3452,7 @@ again:
		num_pages = 1;
 
	num_pages *= 16;
-	num_pages *= PAGE_CACHE_SIZE;
+	num_pages *= PAGE_SIZE;
 
	ret = btrfs_check_data_free_space(inode, 0, num_pages);
	if (ret)
@@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
-		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
+		nr_pages = max_reclaim >> PAGE_SHIFT;
		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c8597d98..d247fc0eea19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1363 1363
1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) 1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1365{ 1365{
1366 unsigned long index = start >> PAGE_CACHE_SHIFT; 1366 unsigned long index = start >> PAGE_SHIFT;
1367 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1367 unsigned long end_index = end >> PAGE_SHIFT;
1368 struct page *page; 1368 struct page *page;
1369 1369
1370 while (index <= end_index) { 1370 while (index <= end_index) {
1371 page = find_get_page(inode->i_mapping, index); 1371 page = find_get_page(inode->i_mapping, index);
1372 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1372 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1373 clear_page_dirty_for_io(page); 1373 clear_page_dirty_for_io(page);
1374 page_cache_release(page); 1374 put_page(page);
1375 index++; 1375 index++;
1376 } 1376 }
1377} 1377}
1378 1378
1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) 1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1380{ 1380{
1381 unsigned long index = start >> PAGE_CACHE_SHIFT; 1381 unsigned long index = start >> PAGE_SHIFT;
1382 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1382 unsigned long end_index = end >> PAGE_SHIFT;
1383 struct page *page; 1383 struct page *page;
1384 1384
1385 while (index <= end_index) { 1385 while (index <= end_index) {
@@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1387 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1387 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1388 __set_page_dirty_nobuffers(page); 1388 __set_page_dirty_nobuffers(page);
1389 account_page_redirty(page); 1389 account_page_redirty(page);
1390 page_cache_release(page); 1390 put_page(page);
1391 index++; 1391 index++;
1392 } 1392 }
1393} 1393}
@@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1397 */ 1397 */
1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) 1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1399{ 1399{
1400 unsigned long index = start >> PAGE_CACHE_SHIFT; 1400 unsigned long index = start >> PAGE_SHIFT;
1401 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1401 unsigned long end_index = end >> PAGE_SHIFT;
1402 struct page *page; 1402 struct page *page;
1403 1403
1404 while (index <= end_index) { 1404 while (index <= end_index) {
1405 page = find_get_page(tree->mapping, index); 1405 page = find_get_page(tree->mapping, index);
1406 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1406 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1407 set_page_writeback(page); 1407 set_page_writeback(page);
1408 page_cache_release(page); 1408 put_page(page);
1409 index++; 1409 index++;
1410 } 1410 }
1411} 1411}
@@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
1556{ 1556{
1557 int ret; 1557 int ret;
1558 struct page *pages[16]; 1558 struct page *pages[16];
1559 unsigned long index = start >> PAGE_CACHE_SHIFT; 1559 unsigned long index = start >> PAGE_SHIFT;
1560 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1560 unsigned long end_index = end >> PAGE_SHIFT;
1561 unsigned long nr_pages = end_index - index + 1; 1561 unsigned long nr_pages = end_index - index + 1;
1562 int i; 1562 int i;
1563 1563
@@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
1571 for (i = 0; i < ret; i++) { 1571 for (i = 0; i < ret; i++) {
1572 if (pages[i] != locked_page) 1572 if (pages[i] != locked_page)
1573 unlock_page(pages[i]); 1573 unlock_page(pages[i]);
1574 page_cache_release(pages[i]); 1574 put_page(pages[i]);
1575 } 1575 }
1576 nr_pages -= ret; 1576 nr_pages -= ret;
1577 index += ret; 1577 index += ret;
@@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode,
1584 u64 delalloc_start, 1584 u64 delalloc_start,
1585 u64 delalloc_end) 1585 u64 delalloc_end)
1586{ 1586{
1587 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; 1587 unsigned long index = delalloc_start >> PAGE_SHIFT;
1588 unsigned long start_index = index; 1588 unsigned long start_index = index;
1589 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; 1589 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1590 unsigned long pages_locked = 0; 1590 unsigned long pages_locked = 0;
1591 struct page *pages[16]; 1591 struct page *pages[16];
1592 unsigned long nrpages; 1592 unsigned long nrpages;
@@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode,
1619 pages[i]->mapping != inode->i_mapping) { 1619 pages[i]->mapping != inode->i_mapping) {
1620 ret = -EAGAIN; 1620 ret = -EAGAIN;
1621 unlock_page(pages[i]); 1621 unlock_page(pages[i]);
1622 page_cache_release(pages[i]); 1622 put_page(pages[i]);
1623 goto done; 1623 goto done;
1624 } 1624 }
1625 } 1625 }
1626 page_cache_release(pages[i]); 1626 put_page(pages[i]);
1627 pages_locked++; 1627 pages_locked++;
1628 } 1628 }
1629 nrpages -= ret; 1629 nrpages -= ret;
@@ -1636,7 +1636,7 @@ done:
1636 __unlock_for_delalloc(inode, locked_page, 1636 __unlock_for_delalloc(inode, locked_page,
1637 delalloc_start, 1637 delalloc_start,
1638 ((u64)(start_index + pages_locked - 1)) << 1638 ((u64)(start_index + pages_locked - 1)) <<
1639 PAGE_CACHE_SHIFT); 1639 PAGE_SHIFT);
1640 } 1640 }
1641 return ret; 1641 return ret;
1642} 1642}
@@ -1696,7 +1696,7 @@ again:
1696 free_extent_state(cached_state); 1696 free_extent_state(cached_state);
1697 cached_state = NULL; 1697 cached_state = NULL;
1698 if (!loops) { 1698 if (!loops) {
1699 max_bytes = PAGE_CACHE_SIZE; 1699 max_bytes = PAGE_SIZE;
1700 loops = 1; 1700 loops = 1;
1701 goto again; 1701 goto again;
1702 } else { 1702 } else {
@@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1735 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 1735 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1736 int ret; 1736 int ret;
1737 struct page *pages[16]; 1737 struct page *pages[16];
1738 unsigned long index = start >> PAGE_CACHE_SHIFT; 1738 unsigned long index = start >> PAGE_SHIFT;
1739 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1739 unsigned long end_index = end >> PAGE_SHIFT;
1740 unsigned long nr_pages = end_index - index + 1; 1740 unsigned long nr_pages = end_index - index + 1;
1741 int i; 1741 int i;
1742 1742
@@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1757 SetPagePrivate2(pages[i]); 1757 SetPagePrivate2(pages[i]);
1758 1758
1759 if (pages[i] == locked_page) { 1759 if (pages[i] == locked_page) {
1760 page_cache_release(pages[i]); 1760 put_page(pages[i]);
1761 continue; 1761 continue;
1762 } 1762 }
1763 if (page_ops & PAGE_CLEAR_DIRTY) 1763 if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1770 end_page_writeback(pages[i]); 1770 end_page_writeback(pages[i]);
1771 if (page_ops & PAGE_UNLOCK) 1771 if (page_ops & PAGE_UNLOCK)
1772 unlock_page(pages[i]); 1772 unlock_page(pages[i]);
1773 page_cache_release(pages[i]); 1773 put_page(pages[i]);
1774 } 1774 }
1775 nr_pages -= ret; 1775 nr_pages -= ret;
1776 index += ret; 1776 index += ret;
@@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) 1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1962{ 1962{
1963 u64 start = page_offset(page); 1963 u64 start = page_offset(page);
1964 u64 end = start + PAGE_CACHE_SIZE - 1; 1964 u64 end = start + PAGE_SIZE - 1;
1965 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1965 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1966 SetPageUptodate(page); 1966 SetPageUptodate(page);
1967} 1967}
@@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2071 struct page *p = eb->pages[i]; 2071 struct page *p = eb->pages[i];
2072 2072
2073 ret = repair_io_failure(root->fs_info->btree_inode, start, 2073 ret = repair_io_failure(root->fs_info->btree_inode, start,
2074 PAGE_CACHE_SIZE, start, p, 2074 PAGE_SIZE, start, p,
2075 start - page_offset(p), mirror_num); 2075 start - page_offset(p), mirror_num);
2076 if (ret) 2076 if (ret)
2077 break; 2077 break;
2078 start += PAGE_CACHE_SIZE; 2078 start += PAGE_SIZE;
2079 } 2079 }
2080 2080
2081 return ret; 2081 return ret;
@@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio)
2466 * advance bv_offset and adjust bv_len to compensate. 2466 * advance bv_offset and adjust bv_len to compensate.
2467 * Print a warning for nonzero offsets, and an error 2467 * Print a warning for nonzero offsets, and an error
2468 * if they don't add up to a full page. */ 2468 * if they don't add up to a full page. */
2469 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { 2469 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2470 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) 2470 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2471 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2471 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2472 "partial page write in btrfs with offset %u and length %u", 2472 "partial page write in btrfs with offset %u and length %u",
2473 bvec->bv_offset, bvec->bv_len); 2473 bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio)
2541 * advance bv_offset and adjust bv_len to compensate. 2541 * advance bv_offset and adjust bv_len to compensate.
2542 * Print a warning for nonzero offsets, and an error 2542 * Print a warning for nonzero offsets, and an error
2543 * if they don't add up to a full page. */ 2543 * if they don't add up to a full page. */
2544 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { 2544 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2545 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) 2545 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2546 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2546 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2547 "partial page read in btrfs with offset %u and length %u", 2547 "partial page read in btrfs with offset %u and length %u",
2548 bvec->bv_offset, bvec->bv_len); 2548 bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio)
2598readpage_ok: 2598readpage_ok:
2599 if (likely(uptodate)) { 2599 if (likely(uptodate)) {
2600 loff_t i_size = i_size_read(inode); 2600 loff_t i_size = i_size_read(inode);
2601 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2601 pgoff_t end_index = i_size >> PAGE_SHIFT;
2602 unsigned off; 2602 unsigned off;
2603 2603
2604 /* Zero out the end if this page straddles i_size */ 2604 /* Zero out the end if this page straddles i_size */
2605 off = i_size & (PAGE_CACHE_SIZE-1); 2605 off = i_size & (PAGE_SIZE-1);
2606 if (page->index == end_index && off) 2606 if (page->index == end_index && off)
2607 zero_user_segment(page, off, PAGE_CACHE_SIZE); 2607 zero_user_segment(page, off, PAGE_SIZE);
2608 SetPageUptodate(page); 2608 SetPageUptodate(page);
2609 } else { 2609 } else {
2610 ClearPageUptodate(page); 2610 ClearPageUptodate(page);
@@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2768 struct bio *bio; 2768 struct bio *bio;
2769 int contig = 0; 2769 int contig = 0;
2770 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; 2770 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2771 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); 2771 size_t page_size = min_t(size_t, size, PAGE_SIZE);
2772 2772
2773 if (bio_ret && *bio_ret) { 2773 if (bio_ret && *bio_ret) {
2774 bio = *bio_ret; 2774 bio = *bio_ret;
@@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
2821{ 2821{
2822 if (!PagePrivate(page)) { 2822 if (!PagePrivate(page)) {
2823 SetPagePrivate(page); 2823 SetPagePrivate(page);
2824 page_cache_get(page); 2824 get_page(page);
2825 set_page_private(page, (unsigned long)eb); 2825 set_page_private(page, (unsigned long)eb);
2826 } else { 2826 } else {
2827 WARN_ON(page->private != (unsigned long)eb); 2827 WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page)
2832{ 2832{
2833 if (!PagePrivate(page)) { 2833 if (!PagePrivate(page)) {
2834 SetPagePrivate(page); 2834 SetPagePrivate(page);
2835 page_cache_get(page); 2835 get_page(page);
2836 set_page_private(page, EXTENT_PAGE_PRIVATE); 2836 set_page_private(page, EXTENT_PAGE_PRIVATE);
2837 } 2837 }
2838} 2838}
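
Both helpers above take an extra page reference when they attach private data; page_cache_get()/page_cache_release() were straight aliases for get_page()/put_page(), so the rename changes no refcounting. A toy model of the pairing, with a plain counter standing in for the struct page refcount (all names here are illustrative, not kernel API):

    #include <assert.h>
    #include <stdio.h>

    struct fake_page {
        int refcount;
        unsigned long private;
        int has_private;
    };

    static void get_page(struct fake_page *p) { p->refcount++; }
    static void put_page(struct fake_page *p) { assert(--p->refcount >= 0); }

    static void attach_private(struct fake_page *p, unsigned long data)
    {
        if (!p->has_private) {
            p->has_private = 1;
            get_page(p);        /* reference owned by the private data */
            p->private = data;
        }
    }

    static void detach_private(struct fake_page *p)
    {
        if (p->has_private) {
            p->has_private = 0;
            p->private = 0;
            put_page(p);        /* drop the reference taken at attach time */
        }
    }

    int main(void)
    {
        struct fake_page page = { .refcount = 1 };  /* one ref for the allocator */
        attach_private(&page, 0xdeadbeef);
        detach_private(&page);
        printf("refcount back to %d\n", page.refcount);
        return 0;
    }
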
@@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree,
2880{ 2880{
2881 struct inode *inode = page->mapping->host; 2881 struct inode *inode = page->mapping->host;
2882 u64 start = page_offset(page); 2882 u64 start = page_offset(page);
2883 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2883 u64 page_end = start + PAGE_SIZE - 1;
2884 u64 end; 2884 u64 end;
2885 u64 cur = start; 2885 u64 cur = start;
2886 u64 extent_offset; 2886 u64 extent_offset;
@@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree,
2909 } 2909 }
2910 } 2910 }
2911 2911
2912 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { 2912 if (page->index == last_byte >> PAGE_SHIFT) {
2913 char *userpage; 2913 char *userpage;
2914 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); 2914 size_t zero_offset = last_byte & (PAGE_SIZE - 1);
2915 2915
2916 if (zero_offset) { 2916 if (zero_offset) {
2917 iosize = PAGE_CACHE_SIZE - zero_offset; 2917 iosize = PAGE_SIZE - zero_offset;
2918 userpage = kmap_atomic(page); 2918 userpage = kmap_atomic(page);
2919 memset(userpage + zero_offset, 0, iosize); 2919 memset(userpage + zero_offset, 0, iosize);
2920 flush_dcache_page(page); 2920 flush_dcache_page(page);
@@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree,
2922 } 2922 }
2923 } 2923 }
2924 while (cur <= end) { 2924 while (cur <= end) {
2925 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; 2925 unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
2926 bool force_bio_submit = false; 2926 bool force_bio_submit = false;
2927 2927
2928 if (cur >= last_byte) { 2928 if (cur >= last_byte) {
2929 char *userpage; 2929 char *userpage;
2930 struct extent_state *cached = NULL; 2930 struct extent_state *cached = NULL;
2931 2931
2932 iosize = PAGE_CACHE_SIZE - pg_offset; 2932 iosize = PAGE_SIZE - pg_offset;
2933 userpage = kmap_atomic(page); 2933 userpage = kmap_atomic(page);
2934 memset(userpage + pg_offset, 0, iosize); 2934 memset(userpage + pg_offset, 0, iosize);
2935 flush_dcache_page(page); 2935 flush_dcache_page(page);
@@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3112 for (index = 0; index < nr_pages; index++) { 3112 for (index = 0; index < nr_pages; index++) {
3113 __do_readpage(tree, pages[index], get_extent, em_cached, bio, 3113 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3114 mirror_num, bio_flags, rw, prev_em_start); 3114 mirror_num, bio_flags, rw, prev_em_start);
3115 page_cache_release(pages[index]); 3115 put_page(pages[index]);
3116 } 3116 }
3117} 3117}
3118 3118
@@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree,
3134 page_start = page_offset(pages[index]); 3134 page_start = page_offset(pages[index]);
3135 if (!end) { 3135 if (!end) {
3136 start = page_start; 3136 start = page_start;
3137 end = start + PAGE_CACHE_SIZE - 1; 3137 end = start + PAGE_SIZE - 1;
3138 first_index = index; 3138 first_index = index;
3139 } else if (end + 1 == page_start) { 3139 } else if (end + 1 == page_start) {
3140 end += PAGE_CACHE_SIZE; 3140 end += PAGE_SIZE;
3141 } else { 3141 } else {
3142 __do_contiguous_readpages(tree, &pages[first_index], 3142 __do_contiguous_readpages(tree, &pages[first_index],
3143 index - first_index, start, 3143 index - first_index, start,
@@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
3145 bio, mirror_num, bio_flags, 3145 bio, mirror_num, bio_flags,
3146 rw, prev_em_start); 3146 rw, prev_em_start);
3147 start = page_start; 3147 start = page_start;
3148 end = start + PAGE_CACHE_SIZE - 1; 3148 end = start + PAGE_SIZE - 1;
3149 first_index = index; 3149 first_index = index;
3150 } 3150 }
3151 } 3151 }
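
__extent_readpages batches pages whose byte ranges are contiguous and flushes the batch at each gap. The grouping reduces to extending end by one page while end + 1 equals the next page's start offset; a small standalone model with made-up offsets (PAGE_SIZE assumed 4096):

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    int main(void)
    {
        /* byte offsets of incoming pages; the fourth is discontiguous */
        unsigned long long offs[] = { 0, 4096, 8192, 20480, 24576 };
        unsigned long long start = 0, end = 0;
        int n = sizeof(offs) / sizeof(offs[0]);

        for (int i = 0; i < n; i++) {
            if (!end) {
                start = offs[i];
                end = start + PAGE_SIZE - 1;
            } else if (end + 1 == offs[i]) {
                end += PAGE_SIZE;            /* still contiguous, grow the batch */
            } else {
                printf("submit [%llu, %llu]\n", start, end);
                start = offs[i];
                end = start + PAGE_SIZE - 1;
            }
        }
        if (end)
            printf("submit [%llu, %llu]\n", start, end);
        return 0;
    }
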
@@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
3167 struct inode *inode = page->mapping->host; 3167 struct inode *inode = page->mapping->host;
3168 struct btrfs_ordered_extent *ordered; 3168 struct btrfs_ordered_extent *ordered;
3169 u64 start = page_offset(page); 3169 u64 start = page_offset(page);
3170 u64 end = start + PAGE_CACHE_SIZE - 1; 3170 u64 end = start + PAGE_SIZE - 1;
3171 int ret; 3171 int ret;
3172 3172
3173 while (1) { 3173 while (1) {
3174 lock_extent(tree, start, end); 3174 lock_extent(tree, start, end);
3175 ordered = btrfs_lookup_ordered_range(inode, start, 3175 ordered = btrfs_lookup_ordered_range(inode, start,
3176 PAGE_CACHE_SIZE); 3176 PAGE_SIZE);
3177 if (!ordered) 3177 if (!ordered)
3178 break; 3178 break;
3179 unlock_extent(tree, start, end); 3179 unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
3227 unsigned long *nr_written) 3227 unsigned long *nr_written)
3228{ 3228{
3229 struct extent_io_tree *tree = epd->tree; 3229 struct extent_io_tree *tree = epd->tree;
3230 u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1; 3230 u64 page_end = delalloc_start + PAGE_SIZE - 1;
3231 u64 nr_delalloc; 3231 u64 nr_delalloc;
3232 u64 delalloc_to_write = 0; 3232 u64 delalloc_to_write = 0;
3233 u64 delalloc_end = 0; 3233 u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
3264 goto done; 3264 goto done;
3265 } 3265 }
3266 /* 3266 /*
3267 * delalloc_end is already one less than the total 3267 * delalloc_end is already one less than the total length, so
3268 * length, so we don't subtract one from 3268 * we don't subtract one from PAGE_SIZE
3269 * PAGE_CACHE_SIZE
3270 */ 3269 */
3271 delalloc_to_write += (delalloc_end - delalloc_start + 3270 delalloc_to_write += (delalloc_end - delalloc_start +
3272 PAGE_CACHE_SIZE) >> 3271 PAGE_SIZE) >> PAGE_SHIFT;
3273 PAGE_CACHE_SHIFT;
3274 delalloc_start = delalloc_end + 1; 3272 delalloc_start = delalloc_end + 1;
3275 } 3273 }
3276 if (wbc->nr_to_write < delalloc_to_write) { 3274 if (wbc->nr_to_write < delalloc_to_write) {
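
Because delalloc_end is an inclusive offset (one less than start plus length), adding a full PAGE_SIZE before shifting already rounds the page count up, which is what the reworded comment is pointing out. The consolidated expression, checked in isolation:

    #include <stdio.h>

    #define PAGE_SIZE  4096ULL
    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long long delalloc_start = 8192;
        unsigned long long delalloc_end = 8192 + 5000 - 1;  /* inclusive last byte */
        unsigned long long pages =
            (delalloc_end - delalloc_start + PAGE_SIZE) >> PAGE_SHIFT;

        printf("%llu bytes span %llu pages\n",
               delalloc_end - delalloc_start + 1, pages);   /* 5000 bytes -> 2 pages */
        return 0;
    }
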
@@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3319{ 3317{
3320 struct extent_io_tree *tree = epd->tree; 3318 struct extent_io_tree *tree = epd->tree;
3321 u64 start = page_offset(page); 3319 u64 start = page_offset(page);
3322 u64 page_end = start + PAGE_CACHE_SIZE - 1; 3320 u64 page_end = start + PAGE_SIZE - 1;
3323 u64 end; 3321 u64 end;
3324 u64 cur = start; 3322 u64 cur = start;
3325 u64 extent_offset; 3323 u64 extent_offset;
@@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3434 if (ret) { 3432 if (ret) {
3435 SetPageError(page); 3433 SetPageError(page);
3436 } else { 3434 } else {
3437 unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1; 3435 unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
3438 3436
3439 set_range_writeback(tree, cur, cur + iosize - 1); 3437 set_range_writeback(tree, cur, cur + iosize - 1);
3440 if (!PageWriteback(page)) { 3438 if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3477 struct inode *inode = page->mapping->host; 3475 struct inode *inode = page->mapping->host;
3478 struct extent_page_data *epd = data; 3476 struct extent_page_data *epd = data;
3479 u64 start = page_offset(page); 3477 u64 start = page_offset(page);
3480 u64 page_end = start + PAGE_CACHE_SIZE - 1; 3478 u64 page_end = start + PAGE_SIZE - 1;
3481 int ret; 3479 int ret;
3482 int nr = 0; 3480 int nr = 0;
3483 size_t pg_offset = 0; 3481 size_t pg_offset = 0;
3484 loff_t i_size = i_size_read(inode); 3482 loff_t i_size = i_size_read(inode);
3485 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; 3483 unsigned long end_index = i_size >> PAGE_SHIFT;
3486 int write_flags; 3484 int write_flags;
3487 unsigned long nr_written = 0; 3485 unsigned long nr_written = 0;
3488 3486
@@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3497 3495
3498 ClearPageError(page); 3496 ClearPageError(page);
3499 3497
3500 pg_offset = i_size & (PAGE_CACHE_SIZE - 1); 3498 pg_offset = i_size & (PAGE_SIZE - 1);
3501 if (page->index > end_index || 3499 if (page->index > end_index ||
3502 (page->index == end_index && !pg_offset)) { 3500 (page->index == end_index && !pg_offset)) {
3503 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 3501 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3504 unlock_page(page); 3502 unlock_page(page);
3505 return 0; 3503 return 0;
3506 } 3504 }
@@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3510 3508
3511 userpage = kmap_atomic(page); 3509 userpage = kmap_atomic(page);
3512 memset(userpage + pg_offset, 0, 3510 memset(userpage + pg_offset, 0,
3513 PAGE_CACHE_SIZE - pg_offset); 3511 PAGE_SIZE - pg_offset);
3514 kunmap_atomic(userpage); 3512 kunmap_atomic(userpage);
3515 flush_dcache_page(page); 3513 flush_dcache_page(page);
3516 } 3514 }
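
__extent_writepage sorts pages into three cases by index relative to i_size: wholly past EOF (invalidate and skip), straddling i_size (zero the tail, then write), and everything earlier (write whole). The classification on its own, with a hypothetical i_size and the usual 4096/12 constants:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    static const char *classify(unsigned long index, unsigned long long i_size)
    {
        unsigned long end_index = i_size >> PAGE_SHIFT;
        unsigned long pg_offset = i_size & (PAGE_SIZE - 1);

        if (index > end_index || (index == end_index && !pg_offset))
            return "invalidate";          /* wholly past EOF */
        if (index == end_index)
            return "zero tail, write";    /* straddles i_size */
        return "write whole page";
    }

    int main(void)
    {
        unsigned long long i_size = 10000;  /* ends 1808 bytes into page 2 */

        for (unsigned long idx = 0; idx <= 3; idx++)
            printf("page %lu: %s\n", idx, classify(idx, i_size));
        return 0;
    }
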
@@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3748 clear_page_dirty_for_io(p); 3746 clear_page_dirty_for_io(p);
3749 set_page_writeback(p); 3747 set_page_writeback(p);
3750 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, 3748 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
3751 PAGE_CACHE_SIZE, 0, bdev, &epd->bio, 3749 PAGE_SIZE, 0, bdev, &epd->bio,
3752 -1, end_bio_extent_buffer_writepage, 3750 -1, end_bio_extent_buffer_writepage,
3753 0, epd->bio_flags, bio_flags, false); 3751 0, epd->bio_flags, bio_flags, false);
3754 epd->bio_flags = bio_flags; 3752 epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3760 ret = -EIO; 3758 ret = -EIO;
3761 break; 3759 break;
3762 } 3760 }
3763 offset += PAGE_CACHE_SIZE; 3761 offset += PAGE_SIZE;
3764 update_nr_written(p, wbc, 1); 3762 update_nr_written(p, wbc, 1);
3765 unlock_page(p); 3763 unlock_page(p);
3766 } 3764 }
@@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping,
3804 index = mapping->writeback_index; /* Start from prev offset */ 3802 index = mapping->writeback_index; /* Start from prev offset */
3805 end = -1; 3803 end = -1;
3806 } else { 3804 } else {
3807 index = wbc->range_start >> PAGE_CACHE_SHIFT; 3805 index = wbc->range_start >> PAGE_SHIFT;
3808 end = wbc->range_end >> PAGE_CACHE_SHIFT; 3806 end = wbc->range_end >> PAGE_SHIFT;
3809 scanned = 1; 3807 scanned = 1;
3810 } 3808 }
3811 if (wbc->sync_mode == WB_SYNC_ALL) 3809 if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
3948 index = mapping->writeback_index; /* Start from prev offset */ 3946 index = mapping->writeback_index; /* Start from prev offset */
3949 end = -1; 3947 end = -1;
3950 } else { 3948 } else {
3951 index = wbc->range_start >> PAGE_CACHE_SHIFT; 3949 index = wbc->range_start >> PAGE_SHIFT;
3952 end = wbc->range_end >> PAGE_CACHE_SHIFT; 3950 end = wbc->range_end >> PAGE_SHIFT;
3953 scanned = 1; 3951 scanned = 1;
3954 } 3952 }
3955 if (wbc->sync_mode == WB_SYNC_ALL) 3953 if (wbc->sync_mode == WB_SYNC_ALL)
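
Both writeback loops convert the wbc byte range to page indices by shifting; range_end is inclusive, so shifting it directly yields the last page index to visit. For example (PAGE_SHIFT assumed 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long long range_start = 5000, range_end = 20000;
        unsigned long index = range_start >> PAGE_SHIFT; /* 1: page holding byte 5000 */
        unsigned long end = range_end >> PAGE_SHIFT;     /* 4: page holding byte 20000 */

        printf("write back pages %lu..%lu\n", index, end);
        return 0;
    }
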
@@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4083 int ret = 0; 4081 int ret = 0;
4084 struct address_space *mapping = inode->i_mapping; 4082 struct address_space *mapping = inode->i_mapping;
4085 struct page *page; 4083 struct page *page;
4086 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> 4084 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4087 PAGE_CACHE_SHIFT; 4085 PAGE_SHIFT;
4088 4086
4089 struct extent_page_data epd = { 4087 struct extent_page_data epd = {
4090 .bio = NULL, 4088 .bio = NULL,
@@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4102 }; 4100 };
4103 4101
4104 while (start <= end) { 4102 while (start <= end) {
4105 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 4103 page = find_get_page(mapping, start >> PAGE_SHIFT);
4106 if (clear_page_dirty_for_io(page)) 4104 if (clear_page_dirty_for_io(page))
4107 ret = __extent_writepage(page, &wbc_writepages, &epd); 4105 ret = __extent_writepage(page, &wbc_writepages, &epd);
4108 else { 4106 else {
4109 if (tree->ops && tree->ops->writepage_end_io_hook) 4107 if (tree->ops && tree->ops->writepage_end_io_hook)
4110 tree->ops->writepage_end_io_hook(page, start, 4108 tree->ops->writepage_end_io_hook(page, start,
4111 start + PAGE_CACHE_SIZE - 1, 4109 start + PAGE_SIZE - 1,
4112 NULL, 1); 4110 NULL, 1);
4113 unlock_page(page); 4111 unlock_page(page);
4114 } 4112 }
4115 page_cache_release(page); 4113 put_page(page);
4116 start += PAGE_CACHE_SIZE; 4114 start += PAGE_SIZE;
4117 } 4115 }
4118 4116
4119 flush_epd_write_bio(&epd); 4117 flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree,
4163 list_del(&page->lru); 4161 list_del(&page->lru);
4164 if (add_to_page_cache_lru(page, mapping, 4162 if (add_to_page_cache_lru(page, mapping,
4165 page->index, GFP_NOFS)) { 4163 page->index, GFP_NOFS)) {
4166 page_cache_release(page); 4164 put_page(page);
4167 continue; 4165 continue;
4168 } 4166 }
4169 4167
@@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
4197{ 4195{
4198 struct extent_state *cached_state = NULL; 4196 struct extent_state *cached_state = NULL;
4199 u64 start = page_offset(page); 4197 u64 start = page_offset(page);
4200 u64 end = start + PAGE_CACHE_SIZE - 1; 4198 u64 end = start + PAGE_SIZE - 1;
4201 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 4199 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4202 4200
4203 start += ALIGN(offset, blocksize); 4201 start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map,
4223 struct page *page, gfp_t mask) 4221 struct page *page, gfp_t mask)
4224{ 4222{
4225 u64 start = page_offset(page); 4223 u64 start = page_offset(page);
4226 u64 end = start + PAGE_CACHE_SIZE - 1; 4224 u64 end = start + PAGE_SIZE - 1;
4227 int ret = 1; 4225 int ret = 1;
4228 4226
4229 if (test_range_bit(tree, start, end, 4227 if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
4262{ 4260{
4263 struct extent_map *em; 4261 struct extent_map *em;
4264 u64 start = page_offset(page); 4262 u64 start = page_offset(page);
4265 u64 end = start + PAGE_CACHE_SIZE - 1; 4263 u64 end = start + PAGE_SIZE - 1;
4266 4264
4267 if (gfpflags_allow_blocking(mask) && 4265 if (gfpflags_allow_blocking(mask) &&
4268 page->mapping->host->i_size > SZ_16M) { 4266 page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4587 ClearPagePrivate(page); 4585 ClearPagePrivate(page);
4588 set_page_private(page, 0); 4586 set_page_private(page, 0);
4589 /* One for the page private */ 4587 /* One for the page private */
4590 page_cache_release(page); 4588 put_page(page);
4591 } 4589 }
4592 4590
4593 if (mapped) 4591 if (mapped)
4594 spin_unlock(&page->mapping->private_lock); 4592 spin_unlock(&page->mapping->private_lock);
4595 4593
4596 /* One for when we alloced the page */ 4594 /* One for when we alloced the page */
4597 page_cache_release(page); 4595 put_page(page);
4598 } while (index != 0); 4596 } while (index != 0);
4599} 4597}
4600 4598
@@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4779 4777
4780 rcu_read_lock(); 4778 rcu_read_lock();
4781 eb = radix_tree_lookup(&fs_info->buffer_radix, 4779 eb = radix_tree_lookup(&fs_info->buffer_radix,
4782 start >> PAGE_CACHE_SHIFT); 4780 start >> PAGE_SHIFT);
4783 if (eb && atomic_inc_not_zero(&eb->refs)) { 4781 if (eb && atomic_inc_not_zero(&eb->refs)) {
4784 rcu_read_unlock(); 4782 rcu_read_unlock();
4785 /* 4783 /*
@@ -4829,7 +4827,7 @@ again:
4829 goto free_eb; 4827 goto free_eb;
4830 spin_lock(&fs_info->buffer_lock); 4828 spin_lock(&fs_info->buffer_lock);
4831 ret = radix_tree_insert(&fs_info->buffer_radix, 4829 ret = radix_tree_insert(&fs_info->buffer_radix,
4832 start >> PAGE_CACHE_SHIFT, eb); 4830 start >> PAGE_SHIFT, eb);
4833 spin_unlock(&fs_info->buffer_lock); 4831 spin_unlock(&fs_info->buffer_lock);
4834 radix_tree_preload_end(); 4832 radix_tree_preload_end();
4835 if (ret == -EEXIST) { 4833 if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4862 unsigned long len = fs_info->tree_root->nodesize; 4860 unsigned long len = fs_info->tree_root->nodesize;
4863 unsigned long num_pages = num_extent_pages(start, len); 4861 unsigned long num_pages = num_extent_pages(start, len);
4864 unsigned long i; 4862 unsigned long i;
4865 unsigned long index = start >> PAGE_CACHE_SHIFT; 4863 unsigned long index = start >> PAGE_SHIFT;
4866 struct extent_buffer *eb; 4864 struct extent_buffer *eb;
4867 struct extent_buffer *exists = NULL; 4865 struct extent_buffer *exists = NULL;
4868 struct page *p; 4866 struct page *p;
@@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4896 if (atomic_inc_not_zero(&exists->refs)) { 4894 if (atomic_inc_not_zero(&exists->refs)) {
4897 spin_unlock(&mapping->private_lock); 4895 spin_unlock(&mapping->private_lock);
4898 unlock_page(p); 4896 unlock_page(p);
4899 page_cache_release(p); 4897 put_page(p);
4900 mark_extent_buffer_accessed(exists, p); 4898 mark_extent_buffer_accessed(exists, p);
4901 goto free_eb; 4899 goto free_eb;
4902 } 4900 }
@@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4908 */ 4906 */
4909 ClearPagePrivate(p); 4907 ClearPagePrivate(p);
4910 WARN_ON(PageDirty(p)); 4908 WARN_ON(PageDirty(p));
4911 page_cache_release(p); 4909 put_page(p);
4912 } 4910 }
4913 attach_extent_buffer_page(eb, p); 4911 attach_extent_buffer_page(eb, p);
4914 spin_unlock(&mapping->private_lock); 4912 spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@ again:
4931 4929
4932 spin_lock(&fs_info->buffer_lock); 4930 spin_lock(&fs_info->buffer_lock);
4933 ret = radix_tree_insert(&fs_info->buffer_radix, 4931 ret = radix_tree_insert(&fs_info->buffer_radix,
4934 start >> PAGE_CACHE_SHIFT, eb); 4932 start >> PAGE_SHIFT, eb);
4935 spin_unlock(&fs_info->buffer_lock); 4933 spin_unlock(&fs_info->buffer_lock);
4936 radix_tree_preload_end(); 4934 radix_tree_preload_end();
4937 if (ret == -EEXIST) { 4935 if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
4994 4992
4995 spin_lock(&fs_info->buffer_lock); 4993 spin_lock(&fs_info->buffer_lock);
4996 radix_tree_delete(&fs_info->buffer_radix, 4994 radix_tree_delete(&fs_info->buffer_radix,
4997 eb->start >> PAGE_CACHE_SHIFT); 4995 eb->start >> PAGE_SHIFT);
4998 spin_unlock(&fs_info->buffer_lock); 4996 spin_unlock(&fs_info->buffer_lock);
4999 } else { 4997 } else {
5000 spin_unlock(&eb->refs_lock); 4998 spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
5168 5166
5169 if (start) { 5167 if (start) {
5170 WARN_ON(start < eb->start); 5168 WARN_ON(start < eb->start);
5171 start_i = (start >> PAGE_CACHE_SHIFT) - 5169 start_i = (start >> PAGE_SHIFT) -
5172 (eb->start >> PAGE_CACHE_SHIFT); 5170 (eb->start >> PAGE_SHIFT);
5173 } else { 5171 } else {
5174 start_i = 0; 5172 start_i = 0;
5175 } 5173 }
@@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5252 struct page *page; 5250 struct page *page;
5253 char *kaddr; 5251 char *kaddr;
5254 char *dst = (char *)dstv; 5252 char *dst = (char *)dstv;
5255 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5253 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5256 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5254 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5257 5255
5258 WARN_ON(start > eb->len); 5256 WARN_ON(start > eb->len);
5259 WARN_ON(start + len > eb->start + eb->len); 5257 WARN_ON(start + len > eb->start + eb->len);
5260 5258
5261 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5259 offset = (start_offset + start) & (PAGE_SIZE - 1);
5262 5260
5263 while (len > 0) { 5261 while (len > 0) {
5264 page = eb->pages[i]; 5262 page = eb->pages[i];
5265 5263
5266 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5264 cur = min(len, (PAGE_SIZE - offset));
5267 kaddr = page_address(page); 5265 kaddr = page_address(page);
5268 memcpy(dst, kaddr + offset, cur); 5266 memcpy(dst, kaddr + offset, cur);
5269 5267
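
read_extent_buffer shows the canonical pattern for walking a logical range across an extent buffer's backing pages: start_offset compensates for a tree block that does not begin page-aligned, and each pass copies at most to the end of the current page. The same walk against plain arrays, as a sketch (fixed sizes, not kernel code):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12
    #define NPAGES     4

    int main(void)
    {
        static unsigned char pages[NPAGES][PAGE_SIZE]; /* stand-ins for eb->pages[] */
        unsigned char dst[6000];
        unsigned long long eb_start = 1024;            /* block not page-aligned */
        unsigned long start = 3000, len = sizeof(dst);

        for (int i = 0; i < NPAGES; i++)
            memset(pages[i], 'a' + i, PAGE_SIZE);

        size_t start_offset = eb_start & (PAGE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_SHIFT;
        size_t offset = (start_offset + start) & (PAGE_SIZE - 1);
        unsigned char *out = dst;

        while (len > 0) {
            size_t cur = len < PAGE_SIZE - offset ? len : PAGE_SIZE - offset;
            memcpy(out, pages[i] + offset, cur);
            out += cur;
            len -= cur;
            offset = 0;     /* subsequent pages are read from their start */
            i++;
        }
        printf("first byte %c, last byte %c\n", dst[0], dst[sizeof(dst) - 1]);
        return 0;
    }
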
@@ -5283,19 +5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5283 struct page *page; 5281 struct page *page;
5284 char *kaddr; 5282 char *kaddr;
5285 char __user *dst = (char __user *)dstv; 5283 char __user *dst = (char __user *)dstv;
5286 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5284 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5287 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5285 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5288 int ret = 0; 5286 int ret = 0;
5289 5287
5290 WARN_ON(start > eb->len); 5288 WARN_ON(start > eb->len);
5291 WARN_ON(start + len > eb->start + eb->len); 5289 WARN_ON(start + len > eb->start + eb->len);
5292 5290
5293 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5291 offset = (start_offset + start) & (PAGE_SIZE - 1);
5294 5292
5295 while (len > 0) { 5293 while (len > 0) {
5296 page = eb->pages[i]; 5294 page = eb->pages[i];
5297 5295
5298 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5296 cur = min(len, (PAGE_SIZE - offset));
5299 kaddr = page_address(page); 5297 kaddr = page_address(page);
5300 if (copy_to_user(dst, kaddr + offset, cur)) { 5298 if (copy_to_user(dst, kaddr + offset, cur)) {
5301 ret = -EFAULT; 5299 ret = -EFAULT;
@@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5316 unsigned long *map_start, 5314 unsigned long *map_start,
5317 unsigned long *map_len) 5315 unsigned long *map_len)
5318{ 5316{
5319 size_t offset = start & (PAGE_CACHE_SIZE - 1); 5317 size_t offset = start & (PAGE_SIZE - 1);
5320 char *kaddr; 5318 char *kaddr;
5321 struct page *p; 5319 struct page *p;
5322 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5320 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5323 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5321 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5324 unsigned long end_i = (start_offset + start + min_len - 1) >> 5322 unsigned long end_i = (start_offset + start + min_len - 1) >>
5325 PAGE_CACHE_SHIFT; 5323 PAGE_SHIFT;
5326 5324
5327 if (i != end_i) 5325 if (i != end_i)
5328 return -EINVAL; 5326 return -EINVAL;
@@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5332 *map_start = 0; 5330 *map_start = 0;
5333 } else { 5331 } else {
5334 offset = 0; 5332 offset = 0;
5335 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; 5333 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5336 } 5334 }
5337 5335
5338 if (start + min_len > eb->len) { 5336 if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5345 p = eb->pages[i]; 5343 p = eb->pages[i];
5346 kaddr = page_address(p); 5344 kaddr = page_address(p);
5347 *map = kaddr + offset; 5345 *map = kaddr + offset;
5348 *map_len = PAGE_CACHE_SIZE - offset; 5346 *map_len = PAGE_SIZE - offset;
5349 return 0; 5347 return 0;
5350} 5348}
5351 5349
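
map_private_extent_buffer can only return a direct pointer when the requested range stays inside a single page, hence the i != end_i rejection. The condition in isolation, with illustrative values:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    static int mappable(unsigned long long eb_start, unsigned long start,
                        unsigned long min_len)
    {
        size_t start_offset = eb_start & (PAGE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_SHIFT;
        unsigned long end_i = (start_offset + start + min_len - 1) >> PAGE_SHIFT;

        return i == end_i;   /* the range must not cross a page boundary */
    }

    int main(void)
    {
        printf("%d\n", mappable(0, 100, 200));     /* 1: fits in page 0 */
        printf("%d\n", mappable(0, 4000, 200));    /* 0: crosses into page 1 */
        printf("%d\n", mappable(1024, 2900, 100)); /* 1: offset shifts it, still page 0 */
        return 0;
    }
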
@@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5358 struct page *page; 5356 struct page *page;
5359 char *kaddr; 5357 char *kaddr;
5360 char *ptr = (char *)ptrv; 5358 char *ptr = (char *)ptrv;
5361 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5359 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5362 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5360 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5363 int ret = 0; 5361 int ret = 0;
5364 5362
5365 WARN_ON(start > eb->len); 5363 WARN_ON(start > eb->len);
5366 WARN_ON(start + len > eb->start + eb->len); 5364 WARN_ON(start + len > eb->start + eb->len);
5367 5365
5368 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5366 offset = (start_offset + start) & (PAGE_SIZE - 1);
5369 5367
5370 while (len > 0) { 5368 while (len > 0) {
5371 page = eb->pages[i]; 5369 page = eb->pages[i];
5372 5370
5373 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5371 cur = min(len, (PAGE_SIZE - offset));
5374 5372
5375 kaddr = page_address(page); 5373 kaddr = page_address(page);
5376 ret = memcmp(ptr, kaddr + offset, cur); 5374 ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5393 struct page *page; 5391 struct page *page;
5394 char *kaddr; 5392 char *kaddr;
5395 char *src = (char *)srcv; 5393 char *src = (char *)srcv;
5396 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5394 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5397 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5395 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5398 5396
5399 WARN_ON(start > eb->len); 5397 WARN_ON(start > eb->len);
5400 WARN_ON(start + len > eb->start + eb->len); 5398 WARN_ON(start + len > eb->start + eb->len);
5401 5399
5402 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5400 offset = (start_offset + start) & (PAGE_SIZE - 1);
5403 5401
5404 while (len > 0) { 5402 while (len > 0) {
5405 page = eb->pages[i]; 5403 page = eb->pages[i];
5406 WARN_ON(!PageUptodate(page)); 5404 WARN_ON(!PageUptodate(page));
5407 5405
5408 cur = min(len, PAGE_CACHE_SIZE - offset); 5406 cur = min(len, PAGE_SIZE - offset);
5409 kaddr = page_address(page); 5407 kaddr = page_address(page);
5410 memcpy(kaddr + offset, src, cur); 5408 memcpy(kaddr + offset, src, cur);
5411 5409
@@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
5423 size_t offset; 5421 size_t offset;
5424 struct page *page; 5422 struct page *page;
5425 char *kaddr; 5423 char *kaddr;
5426 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5424 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5427 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5425 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5428 5426
5429 WARN_ON(start > eb->len); 5427 WARN_ON(start > eb->len);
5430 WARN_ON(start + len > eb->start + eb->len); 5428 WARN_ON(start + len > eb->start + eb->len);
5431 5429
5432 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5430 offset = (start_offset + start) & (PAGE_SIZE - 1);
5433 5431
5434 while (len > 0) { 5432 while (len > 0) {
5435 page = eb->pages[i]; 5433 page = eb->pages[i];
5436 WARN_ON(!PageUptodate(page)); 5434 WARN_ON(!PageUptodate(page));
5437 5435
5438 cur = min(len, PAGE_CACHE_SIZE - offset); 5436 cur = min(len, PAGE_SIZE - offset);
5439 kaddr = page_address(page); 5437 kaddr = page_address(page);
5440 memset(kaddr + offset, c, cur); 5438 memset(kaddr + offset, c, cur);
5441 5439
@@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5454 size_t offset; 5452 size_t offset;
5455 struct page *page; 5453 struct page *page;
5456 char *kaddr; 5454 char *kaddr;
5457 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5455 size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5458 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 5456 unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5459 5457
5460 WARN_ON(src->len != dst_len); 5458 WARN_ON(src->len != dst_len);
5461 5459
5462 offset = (start_offset + dst_offset) & 5460 offset = (start_offset + dst_offset) &
5463 (PAGE_CACHE_SIZE - 1); 5461 (PAGE_SIZE - 1);
5464 5462
5465 while (len > 0) { 5463 while (len > 0) {
5466 page = dst->pages[i]; 5464 page = dst->pages[i];
5467 WARN_ON(!PageUptodate(page)); 5465 WARN_ON(!PageUptodate(page));
5468 5466
5469 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); 5467 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5470 5468
5471 kaddr = page_address(page); 5469 kaddr = page_address(page);
5472 read_extent_buffer(src, kaddr + offset, src_offset, cur); 5470 read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
5508 unsigned long *page_index, 5506 unsigned long *page_index,
5509 size_t *page_offset) 5507 size_t *page_offset)
5510{ 5508{
5511 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5509 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5512 size_t byte_offset = BIT_BYTE(nr); 5510 size_t byte_offset = BIT_BYTE(nr);
5513 size_t offset; 5511 size_t offset;
5514 5512
@@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
5519 */ 5517 */
5520 offset = start_offset + start + byte_offset; 5518 offset = start_offset + start + byte_offset;
5521 5519
5522 *page_index = offset >> PAGE_CACHE_SHIFT; 5520 *page_index = offset >> PAGE_SHIFT;
5523 *page_offset = offset & (PAGE_CACHE_SIZE - 1); 5521 *page_offset = offset & (PAGE_SIZE - 1);
5524} 5522}
5525 5523
5526/** 5524/**
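
eb_bitmap_offset turns a bit number into a backing-page index plus a byte offset within that page, again correcting for blocks that start mid-page. The arithmetic, with BIT_BYTE(nr) expanded to nr / 8 and made-up inputs:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12
    #define BIT_BYTE(nr) ((nr) / 8)

    int main(void)
    {
        unsigned long long eb_start = 1024;  /* block begins 1 KiB into its page */
        unsigned long start = 512;           /* bitmap begins 512 bytes into block */
        unsigned long nr = 40000;            /* bit we want */

        size_t start_offset = eb_start & (PAGE_SIZE - 1);
        size_t offset = start_offset + start + BIT_BYTE(nr);

        printf("bit %lu lives in page %zu at byte %zu\n",
               nr, offset >> PAGE_SHIFT, offset & (PAGE_SIZE - 1));
        return 0;
    }
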
@@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5572 len -= bits_to_set; 5570 len -= bits_to_set;
5573 bits_to_set = BITS_PER_BYTE; 5571 bits_to_set = BITS_PER_BYTE;
5574 mask_to_set = ~0U; 5572 mask_to_set = ~0U;
5575 if (++offset >= PAGE_CACHE_SIZE && len > 0) { 5573 if (++offset >= PAGE_SIZE && len > 0) {
5576 offset = 0; 5574 offset = 0;
5577 page = eb->pages[++i]; 5575 page = eb->pages[++i];
5578 WARN_ON(!PageUptodate(page)); 5576 WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5614 len -= bits_to_clear; 5612 len -= bits_to_clear;
5615 bits_to_clear = BITS_PER_BYTE; 5613 bits_to_clear = BITS_PER_BYTE;
5616 mask_to_clear = ~0U; 5614 mask_to_clear = ~0U;
5617 if (++offset >= PAGE_CACHE_SIZE && len > 0) { 5615 if (++offset >= PAGE_SIZE && len > 0) {
5618 offset = 0; 5616 offset = 0;
5619 page = eb->pages[++i]; 5617 page = eb->pages[++i];
5620 WARN_ON(!PageUptodate(page)); 5618 WARN_ON(!PageUptodate(page));
@@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5661 size_t cur; 5659 size_t cur;
5662 size_t dst_off_in_page; 5660 size_t dst_off_in_page;
5663 size_t src_off_in_page; 5661 size_t src_off_in_page;
5664 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5662 size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5665 unsigned long dst_i; 5663 unsigned long dst_i;
5666 unsigned long src_i; 5664 unsigned long src_i;
5667 5665
@@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5680 5678
5681 while (len > 0) { 5679 while (len > 0) {
5682 dst_off_in_page = (start_offset + dst_offset) & 5680 dst_off_in_page = (start_offset + dst_offset) &
5683 (PAGE_CACHE_SIZE - 1); 5681 (PAGE_SIZE - 1);
5684 src_off_in_page = (start_offset + src_offset) & 5682 src_off_in_page = (start_offset + src_offset) &
5685 (PAGE_CACHE_SIZE - 1); 5683 (PAGE_SIZE - 1);
5686 5684
5687 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 5685 dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5688 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT; 5686 src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5689 5687
5690 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - 5688 cur = min(len, (unsigned long)(PAGE_SIZE -
5691 src_off_in_page)); 5689 src_off_in_page));
5692 cur = min_t(unsigned long, cur, 5690 cur = min_t(unsigned long, cur,
5693 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page)); 5691 (unsigned long)(PAGE_SIZE - dst_off_in_page));
5694 5692
5695 copy_pages(dst->pages[dst_i], dst->pages[src_i], 5693 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5696 dst_off_in_page, src_off_in_page, cur); 5694 dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5709 size_t src_off_in_page; 5707 size_t src_off_in_page;
5710 unsigned long dst_end = dst_offset + len - 1; 5708 unsigned long dst_end = dst_offset + len - 1;
5711 unsigned long src_end = src_offset + len - 1; 5709 unsigned long src_end = src_offset + len - 1;
5712 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5710 size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5713 unsigned long dst_i; 5711 unsigned long dst_i;
5714 unsigned long src_i; 5712 unsigned long src_i;
5715 5713
@@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5728 return; 5726 return;
5729 } 5727 }
5730 while (len > 0) { 5728 while (len > 0) {
5731 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; 5729 dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5732 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; 5730 src_i = (start_offset + src_end) >> PAGE_SHIFT;
5733 5731
5734 dst_off_in_page = (start_offset + dst_end) & 5732 dst_off_in_page = (start_offset + dst_end) &
5735 (PAGE_CACHE_SIZE - 1); 5733 (PAGE_SIZE - 1);
5736 src_off_in_page = (start_offset + src_end) & 5734 src_off_in_page = (start_offset + src_end) &
5737 (PAGE_CACHE_SIZE - 1); 5735 (PAGE_SIZE - 1);
5738 5736
5739 cur = min_t(unsigned long, len, src_off_in_page + 1); 5737 cur = min_t(unsigned long, len, src_off_in_page + 1);
5740 cur = min(cur, dst_off_in_page + 1); 5738 cur = min(cur, dst_off_in_page + 1);
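
The memmove path walks overlapping ranges from the last byte backwards, clamping each step to whichever in-page offset runs out first. One iteration's step-size choice, modeled standalone with hypothetical offsets:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long dst_off_in_page = 100;   /* position of dst_end in its page */
        unsigned long src_off_in_page = 3000;  /* position of src_end in its page */
        unsigned long len = 50000;

        unsigned long cur = len;
        if (cur > src_off_in_page + 1)
            cur = src_off_in_page + 1;         /* can't read before the source page */
        if (cur > dst_off_in_page + 1)
            cur = dst_off_in_page + 1;         /* can't write before the dest page */

        printf("copy %lu bytes this iteration\n", cur);  /* 101 */
        return 0;
    }
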
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 5dbf92e68fbd..b5e0ade90e88 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -120,7 +120,7 @@ struct extent_state {
120}; 120};
121 121
122#define INLINE_EXTENT_BUFFER_PAGES 16 122#define INLINE_EXTENT_BUFFER_PAGES 16
123#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE) 123#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
124struct extent_buffer { 124struct extent_buffer {
125 u64 start; 125 u64 start;
126 unsigned long len; 126 unsigned long len;
@@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
365 365
366static inline unsigned long num_extent_pages(u64 start, u64 len) 366static inline unsigned long num_extent_pages(u64 start, u64 len)
367{ 367{
368 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 368 return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
369 (start >> PAGE_CACHE_SHIFT); 369 (start >> PAGE_SHIFT);
370} 370}
371 371
372static inline void extent_buffer_get(struct extent_buffer *eb) 372static inline void extent_buffer_get(struct extent_buffer *eb)
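
num_extent_pages rounds the buffer's end up to a page boundary and its start down, so a block that straddles a boundary counts as occupying both pages even when len is smaller than PAGE_SIZE. Checked standalone:

    #include <stdio.h>

    #define PAGE_SIZE  4096ULL
    #define PAGE_SHIFT 12

    static unsigned long num_extent_pages(unsigned long long start,
                                          unsigned long long len)
    {
        return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
               (start >> PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%lu\n", num_extent_pages(0, 16384));   /* 4: aligned 16 KiB block */
        printf("%lu\n", num_extent_pages(2048, 4096)); /* 2: 4 KiB straddling a boundary */
        return 0;
    }
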
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b5baf5bdc8e1..7a7d6e253cfc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -32,7 +32,7 @@
32 size) - 1)) 32 size) - 1))
33 33
34#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ 34#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
35 PAGE_CACHE_SIZE)) 35 PAGE_SIZE))
36 36
37#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ 37#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
38 sizeof(struct btrfs_ordered_sum)) / \ 38 sizeof(struct btrfs_ordered_sum)) / \
@@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
203 csum = (u8 *)dst; 203 csum = (u8 *)dst;
204 } 204 }
205 205
206 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) 206 if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
207 path->reada = READA_FORWARD; 207 path->reada = READA_FORWARD;
208 208
209 WARN_ON(bio->bi_vcnt <= 0); 209 WARN_ON(bio->bi_vcnt <= 0);
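
The READA_FORWARD trigger above compares the bio payload against eight pages; csum-tree readahead only pays off for reads larger than that. As a bare predicate (threshold value taken directly from the hunk, 4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static int want_readahead(unsigned long bi_size)
    {
        return bi_size > PAGE_SIZE * 8;   /* more than 32 KiB in the bio */
    }

    int main(void)
    {
        printf("%d %d\n", want_readahead(16 * 1024), want_readahead(64 * 1024));
        return 0;
    }
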
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15a09cb156ce..cf31a60c6284 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
414 size_t copied = 0; 414 size_t copied = 0;
415 size_t total_copied = 0; 415 size_t total_copied = 0;
416 int pg = 0; 416 int pg = 0;
417 int offset = pos & (PAGE_CACHE_SIZE - 1); 417 int offset = pos & (PAGE_SIZE - 1);
418 418
419 while (write_bytes > 0) { 419 while (write_bytes > 0) {
420 size_t count = min_t(size_t, 420 size_t count = min_t(size_t,
421 PAGE_CACHE_SIZE - offset, write_bytes); 421 PAGE_SIZE - offset, write_bytes);
422 struct page *page = prepared_pages[pg]; 422 struct page *page = prepared_pages[pg];
423 /* 423 /*
424 * Copy data from userspace to the current page 424 * Copy data from userspace to the current page
@@ -448,7 +448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
448 if (unlikely(copied == 0)) 448 if (unlikely(copied == 0))
449 break; 449 break;
450 450
451 if (copied < PAGE_CACHE_SIZE - offset) { 451 if (copied < PAGE_SIZE - offset) {
452 offset += copied; 452 offset += copied;
453 } else { 453 } else {
454 pg++; 454 pg++;
@@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
473 */ 473 */
474 ClearPageChecked(pages[i]); 474 ClearPageChecked(pages[i]);
475 unlock_page(pages[i]); 475 unlock_page(pages[i]);
476 page_cache_release(pages[i]); 476 put_page(pages[i]);
477 } 477 }
478} 478}
479 479
@@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode,
1297{ 1297{
1298 int ret = 0; 1298 int ret = 0;
1299 1299
1300 if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && 1300 if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1301 !PageUptodate(page)) { 1301 !PageUptodate(page)) {
1302 ret = btrfs_readpage(NULL, page); 1302 ret = btrfs_readpage(NULL, page);
1303 if (ret) 1303 if (ret)
@@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1323 size_t write_bytes, bool force_uptodate) 1323 size_t write_bytes, bool force_uptodate)
1324{ 1324{
1325 int i; 1325 int i;
1326 unsigned long index = pos >> PAGE_CACHE_SHIFT; 1326 unsigned long index = pos >> PAGE_SHIFT;
1327 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 1327 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1328 int err = 0; 1328 int err = 0;
1329 int faili; 1329 int faili;
@@ -1345,7 +1345,7 @@ again:
1345 err = prepare_uptodate_page(inode, pages[i], 1345 err = prepare_uptodate_page(inode, pages[i],
1346 pos + write_bytes, false); 1346 pos + write_bytes, false);
1347 if (err) { 1347 if (err) {
1348 page_cache_release(pages[i]); 1348 put_page(pages[i]);
1349 if (err == -EAGAIN) { 1349 if (err == -EAGAIN) {
1350 err = 0; 1350 err = 0;
1351 goto again; 1351 goto again;
@@ -1360,7 +1360,7 @@ again:
1360fail: 1360fail:
1361 while (faili >= 0) { 1361 while (faili >= 0) {
1362 unlock_page(pages[faili]); 1362 unlock_page(pages[faili]);
1363 page_cache_release(pages[faili]); 1363 put_page(pages[faili]);
1364 faili--; 1364 faili--;
1365 } 1365 }
1366 return err; 1366 return err;
@@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1408 cached_state, GFP_NOFS); 1408 cached_state, GFP_NOFS);
1409 for (i = 0; i < num_pages; i++) { 1409 for (i = 0; i < num_pages; i++) {
1410 unlock_page(pages[i]); 1410 unlock_page(pages[i]);
1411 page_cache_release(pages[i]); 1411 put_page(pages[i]);
1412 } 1412 }
1413 btrfs_start_ordered_extent(inode, ordered, 1); 1413 btrfs_start_ordered_extent(inode, ordered, 1);
1414 btrfs_put_ordered_extent(ordered); 1414 btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1497 bool force_page_uptodate = false; 1497 bool force_page_uptodate = false;
1498 bool need_unlock; 1498 bool need_unlock;
1499 1499
1500 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE), 1500 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1501 PAGE_CACHE_SIZE / (sizeof(struct page *))); 1501 PAGE_SIZE / (sizeof(struct page *)));
1502 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); 1502 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1503 nrptrs = max(nrptrs, 8); 1503 nrptrs = max(nrptrs, 8);
1504 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL); 1504 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1506 return -ENOMEM; 1506 return -ENOMEM;
1507 1507
1508 while (iov_iter_count(i) > 0) { 1508 while (iov_iter_count(i) > 0) {
1509 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1509 size_t offset = pos & (PAGE_SIZE - 1);
1510 size_t sector_offset; 1510 size_t sector_offset;
1511 size_t write_bytes = min(iov_iter_count(i), 1511 size_t write_bytes = min(iov_iter_count(i),
1512 nrptrs * (size_t)PAGE_CACHE_SIZE - 1512 nrptrs * (size_t)PAGE_SIZE -
1513 offset); 1513 offset);
1514 size_t num_pages = DIV_ROUND_UP(write_bytes + offset, 1514 size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1515 PAGE_CACHE_SIZE); 1515 PAGE_SIZE);
1516 size_t reserve_bytes; 1516 size_t reserve_bytes;
1517 size_t dirty_pages; 1517 size_t dirty_pages;
1518 size_t copied; 1518 size_t copied;
@@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1547 * write_bytes, so scale down. 1547 * write_bytes, so scale down.
1548 */ 1548 */
1549 num_pages = DIV_ROUND_UP(write_bytes + offset, 1549 num_pages = DIV_ROUND_UP(write_bytes + offset,
1550 PAGE_CACHE_SIZE); 1550 PAGE_SIZE);
1551 reserve_bytes = round_up(write_bytes + sector_offset, 1551 reserve_bytes = round_up(write_bytes + sector_offset,
1552 root->sectorsize); 1552 root->sectorsize);
1553 goto reserve_metadata; 1553 goto reserve_metadata;
@@ -1609,7 +1609,7 @@ again:
1609 } else { 1609 } else {
1610 force_page_uptodate = false; 1610 force_page_uptodate = false;
1611 dirty_pages = DIV_ROUND_UP(copied + offset, 1611 dirty_pages = DIV_ROUND_UP(copied + offset,
1612 PAGE_CACHE_SIZE); 1612 PAGE_SIZE);
1613 } 1613 }
1614 1614
1615 /* 1615 /*
@@ -1641,7 +1641,7 @@ again:
1641 u64 __pos; 1641 u64 __pos;
1642 1642
1643 __pos = round_down(pos, root->sectorsize) + 1643 __pos = round_down(pos, root->sectorsize) +
1644 (dirty_pages << PAGE_CACHE_SHIFT); 1644 (dirty_pages << PAGE_SHIFT);
1645 btrfs_delalloc_release_space(inode, __pos, 1645 btrfs_delalloc_release_space(inode, __pos,
1646 release_bytes); 1646 release_bytes);
1647 } 1647 }
@@ -1682,7 +1682,7 @@ again:
1682 cond_resched(); 1682 cond_resched();
1683 1683
1684 balance_dirty_pages_ratelimited(inode->i_mapping); 1684 balance_dirty_pages_ratelimited(inode->i_mapping);
1685 if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1) 1685 if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
1686 btrfs_btree_balance_dirty(root); 1686 btrfs_btree_balance_dirty(root);
1687 1687
1688 pos += copied; 1688 pos += copied;
@@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1738 goto out; 1738 goto out;
1739 written += written_buffered; 1739 written += written_buffered;
1740 iocb->ki_pos = pos + written_buffered; 1740 iocb->ki_pos = pos + written_buffered;
1741 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, 1741 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1742 endbyte >> PAGE_CACHE_SHIFT); 1742 endbyte >> PAGE_SHIFT);
1743out: 1743out:
1744 return written ? written : err; 1744 return written ? written : err;
1745} 1745}
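
__btrfs_buffered_write sizes its page-pointer array so one iteration never needs more pointers than a single page can hold, then counts how many pages an unaligned write actually touches. Both computations in one sketch (struct page is left opaque since only its pointer size matters; values are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct page;    /* opaque; we only take sizeof a pointer to it */

    int main(void)
    {
        size_t count = 1 << 20;                 /* 1 MiB pending in the iov_iter */
        unsigned long long pos = 5000;          /* unaligned write position */
        size_t offset = pos & (PAGE_SIZE - 1);  /* 904 bytes into the first page */

        size_t nrptrs = DIV_ROUND_UP(count, PAGE_SIZE);
        size_t cap = PAGE_SIZE / sizeof(struct page *);  /* pointers per page */
        if (nrptrs > cap)
            nrptrs = cap;

        size_t write_bytes = count < nrptrs * PAGE_SIZE - offset
                           ? count : nrptrs * PAGE_SIZE - offset;
        size_t num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);

        printf("nrptrs=%zu write_bytes=%zu num_pages=%zu\n",
               nrptrs, write_bytes, num_pages);
        return 0;
    }
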
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8f835bfa1bdd..5e6062c26129 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
29#include "inode-map.h" 29#include "inode-map.h"
30#include "volumes.h" 30#include "volumes.h"
31 31
32#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 32#define BITS_PER_BITMAP (PAGE_SIZE * 8)
33#define MAX_CACHE_BYTES_PER_GIG SZ_32K 33#define MAX_CACHE_BYTES_PER_GIG SZ_32K
34 34
35struct btrfs_trim_range { 35struct btrfs_trim_range {
@@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode)
295 return -ENOMEM; 295 return -ENOMEM;
296 296
297 file_ra_state_init(ra, inode->i_mapping); 297 file_ra_state_init(ra, inode->i_mapping);
298 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 298 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
299 299
300 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); 300 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
301 301
@@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
310 int num_pages; 310 int num_pages;
311 int check_crcs = 0; 311 int check_crcs = 0;
312 312
313 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); 313 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
314 314
315 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) 315 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
316 check_crcs = 1; 316 check_crcs = 1;
317 317
318 /* Make sure we can fit our crcs into the first page */ 318 /* Make sure we can fit our crcs into the first page */
319 if (write && check_crcs && 319 if (write && check_crcs &&
320 (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) 320 (num_pages * sizeof(u32)) >= PAGE_SIZE)
321 return -ENOSPC; 321 return -ENOSPC;
322 322
323 memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); 323 memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
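
io_ctl_init refuses to write a cache whose per-page u32 crc array would fill the first page on its own, since the crcs must fit there ahead of everything else. Numerically, with 4 KiB pages and two sample cache sizes:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long long sizes[] = { 2ULL << 20, 6ULL << 20 };

        for (int i = 0; i < 2; i++) {
            unsigned long num_pages = DIV_ROUND_UP(sizes[i], PAGE_SIZE);
            unsigned long crc_bytes = num_pages * sizeof(unsigned int);

            printf("%llu MiB cache: %lu pages, crc array %lu bytes -> %s\n",
                   sizes[i] >> 20, num_pages, crc_bytes,
                   crc_bytes >= PAGE_SIZE ? "-ENOSPC" : "fits in page 0");
        }
        return 0;
    }
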
@@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
354 io_ctl->page = io_ctl->pages[io_ctl->index++]; 354 io_ctl->page = io_ctl->pages[io_ctl->index++];
355 io_ctl->cur = page_address(io_ctl->page); 355 io_ctl->cur = page_address(io_ctl->page);
356 io_ctl->orig = io_ctl->cur; 356 io_ctl->orig = io_ctl->cur;
357 io_ctl->size = PAGE_CACHE_SIZE; 357 io_ctl->size = PAGE_SIZE;
358 if (clear) 358 if (clear)
359 memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); 359 memset(io_ctl->cur, 0, PAGE_SIZE);
360} 360}
361 361
362static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) 362static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
@@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
369 if (io_ctl->pages[i]) { 369 if (io_ctl->pages[i]) {
370 ClearPageChecked(io_ctl->pages[i]); 370 ClearPageChecked(io_ctl->pages[i]);
371 unlock_page(io_ctl->pages[i]); 371 unlock_page(io_ctl->pages[i]);
372 page_cache_release(io_ctl->pages[i]); 372 put_page(io_ctl->pages[i]);
373 } 373 }
374 } 374 }
375} 375}
@@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
475 offset = sizeof(u32) * io_ctl->num_pages; 475 offset = sizeof(u32) * io_ctl->num_pages;
476 476
477 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 477 crc = btrfs_csum_data(io_ctl->orig + offset, crc,
478 PAGE_CACHE_SIZE - offset); 478 PAGE_SIZE - offset);
479 btrfs_csum_final(crc, (char *)&crc); 479 btrfs_csum_final(crc, (char *)&crc);
480 io_ctl_unmap_page(io_ctl); 480 io_ctl_unmap_page(io_ctl);
481 tmp = page_address(io_ctl->pages[0]); 481 tmp = page_address(io_ctl->pages[0]);
@@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
503 503
504 io_ctl_map_page(io_ctl, 0); 504 io_ctl_map_page(io_ctl, 0);
505 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 505 crc = btrfs_csum_data(io_ctl->orig + offset, crc,
506 PAGE_CACHE_SIZE - offset); 506 PAGE_SIZE - offset);
507 btrfs_csum_final(crc, (char *)&crc); 507 btrfs_csum_final(crc, (char *)&crc);
508 if (val != crc) { 508 if (val != crc) {
509 btrfs_err_rl(io_ctl->root->fs_info, 509 btrfs_err_rl(io_ctl->root->fs_info,
@@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
561 io_ctl_map_page(io_ctl, 0); 561 io_ctl_map_page(io_ctl, 0);
562 } 562 }
563 563
564 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); 564 memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
565 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 565 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
566 if (io_ctl->index < io_ctl->num_pages) 566 if (io_ctl->index < io_ctl->num_pages)
567 io_ctl_map_page(io_ctl, 0); 567 io_ctl_map_page(io_ctl, 0);
@@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
621 if (ret) 621 if (ret)
622 return ret; 622 return ret;
623 623
624 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); 624 memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
625 io_ctl_unmap_page(io_ctl); 625 io_ctl_unmap_page(io_ctl);
626 626
627 return 0; 627 return 0;
@@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
775 } else { 775 } else {
776 ASSERT(num_bitmaps); 776 ASSERT(num_bitmaps);
777 num_bitmaps--; 777 num_bitmaps--;
778 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 778 e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
779 if (!e->bitmap) { 779 if (!e->bitmap) {
780 kmem_cache_free( 780 kmem_cache_free(
781 btrfs_free_space_cachep, e); 781 btrfs_free_space_cachep, e);
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1661 * we add more bitmaps. 1661 * we add more bitmaps.
1662 */ 1662 */
1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; 1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
1664 1664
1665 if (bitmap_bytes >= max_bytes) { 1665 if (bitmap_bytes >= max_bytes) {
1666 ctl->extents_thresh = 0; 1666 ctl->extents_thresh = 0;
@@ -2111,7 +2111,7 @@ new_bitmap:
2111 } 2111 }
2112 2112
2113 /* allocate the bitmap */ 2113 /* allocate the bitmap */
2114 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 2114 info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
2115 spin_lock(&ctl->tree_lock); 2115 spin_lock(&ctl->tree_lock);
2116 if (!info->bitmap) { 2116 if (!info->bitmap) {
2117 ret = -ENOMEM; 2117 ret = -ENOMEM;
@@ -3580,7 +3580,7 @@ again:
3580 } 3580 }
3581 3581
3582 if (!map) { 3582 if (!map) {
3583 map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 3583 map = kzalloc(PAGE_SIZE, GFP_NOFS);
3584 if (!map) { 3584 if (!map) {
3585 kmem_cache_free(btrfs_free_space_cachep, info); 3585 kmem_cache_free(btrfs_free_space_cachep, info);
3586 return -ENOMEM; 3586 return -ENOMEM;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 1f0ec19b23f6..70107f7c9307 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
283} 283}
284 284
285#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space)) 285#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
286#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) 286#define INODES_PER_BITMAP (PAGE_SIZE * 8)
287 287
288/* 288/*
289 * The goal is to keep the memory used by the free_ino tree won't 289 * The goal is to keep the memory used by the free_ino tree won't
@@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
317 } 317 }
318 318
319 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * 319 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
320 PAGE_CACHE_SIZE / sizeof(*info); 320 PAGE_SIZE / sizeof(*info);
321} 321}
322 322
323/* 323/*
@@ -481,12 +481,12 @@ again:
481 481
482 spin_lock(&ctl->tree_lock); 482 spin_lock(&ctl->tree_lock);
483 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; 483 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
484 prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); 484 prealloc = ALIGN(prealloc, PAGE_SIZE);
485 prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; 485 prealloc += ctl->total_bitmaps * PAGE_SIZE;
486 spin_unlock(&ctl->tree_lock); 486 spin_unlock(&ctl->tree_lock);
487 487
488 /* Just to make sure we have enough space */ 488 /* Just to make sure we have enough space */
489 prealloc += 8 * PAGE_CACHE_SIZE; 489 prealloc += 8 * PAGE_SIZE;
490 490
491 ret = btrfs_delalloc_reserve_space(inode, 0, prealloc); 491 ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
492 if (ret) 492 if (ret)
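
In inode-map.c one page-sized bitmap covers PAGE_SIZE * 8 inode numbers, and the prealloc estimate rounds the extent entries up to a whole page, adds a page per bitmap, and tacks on slack. Roughly (the 72-byte entry size below is illustrative, not the real sizeof(struct btrfs_free_space)):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    struct free_space_stub { char pad[72]; };  /* illustrative entry size */

    int main(void)
    {
        unsigned long free_extents = 100, total_bitmaps = 3;
        unsigned long prealloc = sizeof(struct free_space_stub) * free_extents;

        prealloc = ALIGN_UP(prealloc, PAGE_SIZE);
        prealloc += total_bitmaps * PAGE_SIZE;
        prealloc += 8 * PAGE_SIZE;          /* "just to make sure" slack */

        printf("inodes per bitmap: %lu\n", PAGE_SIZE * 8);
        printf("reserve %lu bytes\n", prealloc);
        return 0;
    }
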
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a5688ffdfe..2aaba58b4856 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
194 while (compressed_size > 0) { 194 while (compressed_size > 0) {
195 cpage = compressed_pages[i]; 195 cpage = compressed_pages[i];
196 cur_size = min_t(unsigned long, compressed_size, 196 cur_size = min_t(unsigned long, compressed_size,
197 PAGE_CACHE_SIZE); 197 PAGE_SIZE);
198 198
199 kaddr = kmap_atomic(cpage); 199 kaddr = kmap_atomic(cpage);
200 write_extent_buffer(leaf, kaddr, ptr, cur_size); 200 write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
208 compress_type); 208 compress_type);
209 } else { 209 } else {
210 page = find_get_page(inode->i_mapping, 210 page = find_get_page(inode->i_mapping,
211 start >> PAGE_CACHE_SHIFT); 211 start >> PAGE_SHIFT);
212 btrfs_set_file_extent_compression(leaf, ei, 0); 212 btrfs_set_file_extent_compression(leaf, ei, 0);
213 kaddr = kmap_atomic(page); 213 kaddr = kmap_atomic(page);
214 offset = start & (PAGE_CACHE_SIZE - 1); 214 offset = start & (PAGE_SIZE - 1);
215 write_extent_buffer(leaf, kaddr + offset, ptr, size); 215 write_extent_buffer(leaf, kaddr + offset, ptr, size);
216 kunmap_atomic(kaddr); 216 kunmap_atomic(kaddr);
217 page_cache_release(page); 217 put_page(page);
218 } 218 }
219 btrfs_mark_buffer_dirty(leaf); 219 btrfs_mark_buffer_dirty(leaf);
220 btrfs_release_path(path); 220 btrfs_release_path(path);
@@ -322,7 +322,7 @@ out:
322 * And at reserve time, it's always aligned to page size, so 322 * And at reserve time, it's always aligned to page size, so
323 * just free one page here. 323 * just free one page here.
324 */ 324 */
325 btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); 325 btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
326 btrfs_free_path(path); 326 btrfs_free_path(path);
327 btrfs_end_transaction(trans, root); 327 btrfs_end_transaction(trans, root);
328 return ret; 328 return ret;
@@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode,
435 actual_end = min_t(u64, isize, end + 1); 435 actual_end = min_t(u64, isize, end + 1);
436again: 436again:
437 will_compress = 0; 437 will_compress = 0;
438 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 438 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
439 nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE); 439 nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
440 440
441 /* 441 /*
442 * we don't want to send crud past the end of i_size through 442 * we don't want to send crud past the end of i_size through
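
compress_file_range derives its page count from inclusive offsets and then clamps the window to 128 KiB per pass. The same computation standalone:

    #include <stdio.h>

    #define PAGE_SIZE  4096ULL
    #define PAGE_SHIFT 12
    #define SZ_128K    (128 * 1024ULL)

    int main(void)
    {
        unsigned long long start = 0, end = 300 * 1024 - 1;  /* inclusive range */
        unsigned long nr_pages =
            (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        unsigned long cap = SZ_128K / PAGE_SIZE;

        if (nr_pages > cap)
            nr_pages = cap;         /* compress at most 128K per pass */
        printf("nr_pages=%lu (cap %lu)\n", nr_pages, cap);
        return 0;
    }
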
@@ -514,7 +514,7 @@ again:
514 514
515 if (!ret) { 515 if (!ret) {
516 unsigned long offset = total_compressed & 516 unsigned long offset = total_compressed &
517 (PAGE_CACHE_SIZE - 1); 517 (PAGE_SIZE - 1);
518 struct page *page = pages[nr_pages_ret - 1]; 518 struct page *page = pages[nr_pages_ret - 1];
519 char *kaddr; 519 char *kaddr;
520 520
@@ -524,7 +524,7 @@ again:
524 if (offset) { 524 if (offset) {
525 kaddr = kmap_atomic(page); 525 kaddr = kmap_atomic(page);
526 memset(kaddr + offset, 0, 526 memset(kaddr + offset, 0,
527 PAGE_CACHE_SIZE - offset); 527 PAGE_SIZE - offset);
528 kunmap_atomic(kaddr); 528 kunmap_atomic(kaddr);
529 } 529 }
530 will_compress = 1; 530 will_compress = 1;
@@ -580,7 +580,7 @@ cont:
580 * one last check to make sure the compression is really a 580 * one last check to make sure the compression is really a
581 * win, compare the page count read with the blocks on disk 581 * win, compare the page count read with the blocks on disk
582 */ 582 */
583 total_in = ALIGN(total_in, PAGE_CACHE_SIZE); 583 total_in = ALIGN(total_in, PAGE_SIZE);
584 if (total_compressed >= total_in) { 584 if (total_compressed >= total_in) {
585 will_compress = 0; 585 will_compress = 0;
586 } else { 586 } else {
@@ -594,7 +594,7 @@ cont:
594 */ 594 */
595 for (i = 0; i < nr_pages_ret; i++) { 595 for (i = 0; i < nr_pages_ret; i++) {
596 WARN_ON(pages[i]->mapping); 596 WARN_ON(pages[i]->mapping);
597 page_cache_release(pages[i]); 597 put_page(pages[i]);
598 } 598 }
599 kfree(pages); 599 kfree(pages);
600 pages = NULL; 600 pages = NULL;
@@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed:
650free_pages_out: 650free_pages_out:
651 for (i = 0; i < nr_pages_ret; i++) { 651 for (i = 0; i < nr_pages_ret; i++) {
652 WARN_ON(pages[i]->mapping); 652 WARN_ON(pages[i]->mapping);
653 page_cache_release(pages[i]); 653 put_page(pages[i]);
654 } 654 }
655 kfree(pages); 655 kfree(pages);
656} 656}
@@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
664 664
665 for (i = 0; i < async_extent->nr_pages; i++) { 665 for (i = 0; i < async_extent->nr_pages; i++) {
666 WARN_ON(async_extent->pages[i]->mapping); 666 WARN_ON(async_extent->pages[i]->mapping);
667 page_cache_release(async_extent->pages[i]); 667 put_page(async_extent->pages[i]);
668 } 668 }
669 kfree(async_extent->pages); 669 kfree(async_extent->pages);
670 async_extent->nr_pages = 0; 670 async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode,
966 PAGE_END_WRITEBACK); 966 PAGE_END_WRITEBACK);
967 967
968 *nr_written = *nr_written + 968 *nr_written = *nr_written +
969 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 969 (end - start + PAGE_SIZE) / PAGE_SIZE;
970 *page_started = 1; 970 *page_started = 1;
971 goto out; 971 goto out;
972 } else if (ret < 0) { 972 } else if (ret < 0) {
@@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
1106 async_cow = container_of(work, struct async_cow, work); 1106 async_cow = container_of(work, struct async_cow, work);
1107 1107
1108 root = async_cow->root; 1108 root = async_cow->root;
1109 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1109 nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1110 PAGE_CACHE_SHIFT; 1110 PAGE_SHIFT;
1111 1111
1112 /* 1112 /*
1113 * atomic_sub_return implies a barrier for waitqueue_active 1113 * atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1164 async_cow_start, async_cow_submit, 1164 async_cow_start, async_cow_submit,
1165 async_cow_free); 1165 async_cow_free);
1166 1166
1167 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> 1167 nr_pages = (cur_end - start + PAGE_SIZE) >>
1168 PAGE_CACHE_SHIFT; 1168 PAGE_SHIFT;
1169 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); 1169 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1170 1170
1171 btrfs_queue_work(root->fs_info->delalloc_workers, 1171 btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1960int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 1960int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1961 struct extent_state **cached_state) 1961 struct extent_state **cached_state)
1962{ 1962{
1963 WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); 1963 WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1964 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1964 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1965 cached_state, GFP_NOFS); 1965 cached_state, GFP_NOFS);
1966} 1966}
@@ -1993,7 +1993,7 @@ again:
1993 1993
1994 inode = page->mapping->host; 1994 inode = page->mapping->host;
1995 page_start = page_offset(page); 1995 page_start = page_offset(page);
1996 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1996 page_end = page_offset(page) + PAGE_SIZE - 1;
1997 1997
1998 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 1998 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
1999 &cached_state); 1999 &cached_state);
@@ -2003,7 +2003,7 @@ again:
2003 goto out; 2003 goto out;
2004 2004
2005 ordered = btrfs_lookup_ordered_range(inode, page_start, 2005 ordered = btrfs_lookup_ordered_range(inode, page_start,
2006 PAGE_CACHE_SIZE); 2006 PAGE_SIZE);
2007 if (ordered) { 2007 if (ordered) {
2008 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 2008 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2009 page_end, &cached_state, GFP_NOFS); 2009 page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@ again:
2014 } 2014 }
2015 2015
2016 ret = btrfs_delalloc_reserve_space(inode, page_start, 2016 ret = btrfs_delalloc_reserve_space(inode, page_start,
2017 PAGE_CACHE_SIZE); 2017 PAGE_SIZE);
2018 if (ret) { 2018 if (ret) {
2019 mapping_set_error(page->mapping, ret); 2019 mapping_set_error(page->mapping, ret);
2020 end_extent_writepage(page, ret, page_start, page_end); 2020 end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@ out:
2030 &cached_state, GFP_NOFS); 2030 &cached_state, GFP_NOFS);
2031out_page: 2031out_page:
2032 unlock_page(page); 2032 unlock_page(page);
2033 page_cache_release(page); 2033 put_page(page);
2034 kfree(fixup); 2034 kfree(fixup);
2035} 2035}
2036 2036
@@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2063 return -EAGAIN; 2063 return -EAGAIN;
2064 2064
2065 SetPageChecked(page); 2065 SetPageChecked(page);
2066 page_cache_get(page); 2066 get_page(page);
2067 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2067 btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2068 btrfs_writepage_fixup_worker, NULL, NULL); 2068 btrfs_writepage_fixup_worker, NULL, NULL);
2069 fixup->page = page; 2069 fixup->page = page;
@@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode,
4247 4247
4248 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { 4248 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4249 loff_t offset = new_size; 4249 loff_t offset = new_size;
4250 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); 4250 loff_t page_end = ALIGN(offset, PAGE_SIZE);
4251 4251
4252 /* 4252 /*
4253 * Zero out the remaining of the last page of our inline extent, 4253 * Zero out the remaining of the last page of our inline extent,
@@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4633 struct extent_state *cached_state = NULL; 4633 struct extent_state *cached_state = NULL;
4634 char *kaddr; 4634 char *kaddr;
4635 u32 blocksize = root->sectorsize; 4635 u32 blocksize = root->sectorsize;
4636 pgoff_t index = from >> PAGE_CACHE_SHIFT; 4636 pgoff_t index = from >> PAGE_SHIFT;
4637 unsigned offset = from & (blocksize - 1); 4637 unsigned offset = from & (blocksize - 1);
4638 struct page *page; 4638 struct page *page;
4639 gfp_t mask = btrfs_alloc_write_mask(mapping); 4639 gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@ again:
4668 lock_page(page); 4668 lock_page(page);
4669 if (page->mapping != mapping) { 4669 if (page->mapping != mapping) {
4670 unlock_page(page); 4670 unlock_page(page);
4671 page_cache_release(page); 4671 put_page(page);
4672 goto again; 4672 goto again;
4673 } 4673 }
4674 if (!PageUptodate(page)) { 4674 if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@ again:
4686 unlock_extent_cached(io_tree, block_start, block_end, 4686 unlock_extent_cached(io_tree, block_start, block_end,
4687 &cached_state, GFP_NOFS); 4687 &cached_state, GFP_NOFS);
4688 unlock_page(page); 4688 unlock_page(page);
4689 page_cache_release(page); 4689 put_page(page);
4690 btrfs_start_ordered_extent(inode, ordered, 1); 4690 btrfs_start_ordered_extent(inode, ordered, 1);
4691 btrfs_put_ordered_extent(ordered); 4691 btrfs_put_ordered_extent(ordered);
4692 goto again; 4692 goto again;
@@ -4728,7 +4728,7 @@ out_unlock:
4728 btrfs_delalloc_release_space(inode, block_start, 4728 btrfs_delalloc_release_space(inode, block_start,
4729 blocksize); 4729 blocksize);
4730 unlock_page(page); 4730 unlock_page(page);
4731 page_cache_release(page); 4731 put_page(page);
4732out: 4732out:
4733 return ret; 4733 return ret;
4734} 4734}
@@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
6717 6717
6718 read_extent_buffer(leaf, tmp, ptr, inline_size); 6718 read_extent_buffer(leaf, tmp, ptr, inline_size);
6719 6719
6720 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 6720 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6721 ret = btrfs_decompress(compress_type, tmp, page, 6721 ret = btrfs_decompress(compress_type, tmp, page,
6722 extent_offset, inline_size, max_size); 6722 extent_offset, inline_size, max_size);
6723 kfree(tmp); 6723 kfree(tmp);
@@ -6879,8 +6879,8 @@ next:
6879 6879
6880 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6880 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6881 extent_offset = page_offset(page) + pg_offset - extent_start; 6881 extent_offset = page_offset(page) + pg_offset - extent_start;
6882 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 6882 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6883 size - extent_offset); 6883 size - extent_offset);
6884 em->start = extent_start + extent_offset; 6884 em->start = extent_start + extent_offset;
6885 em->len = ALIGN(copy_size, root->sectorsize); 6885 em->len = ALIGN(copy_size, root->sectorsize);
6886 em->orig_block_len = em->len; 6886 em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@ next:
6899 map = kmap(page); 6899 map = kmap(page);
6900 read_extent_buffer(leaf, map + pg_offset, ptr, 6900 read_extent_buffer(leaf, map + pg_offset, ptr,
6901 copy_size); 6901 copy_size);
6902 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 6902 if (pg_offset + copy_size < PAGE_SIZE) {
6903 memset(map + pg_offset + copy_size, 0, 6903 memset(map + pg_offset + copy_size, 0,
6904 PAGE_CACHE_SIZE - pg_offset - 6904 PAGE_SIZE - pg_offset -
6905 copy_size); 6905 copy_size);
6906 } 6906 }
6907 kunmap(page); 6907 kunmap(page);
@@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7336 int start_idx; 7336 int start_idx;
7337 int end_idx; 7337 int end_idx;
7338 7338
7339 start_idx = start >> PAGE_CACHE_SHIFT; 7339 start_idx = start >> PAGE_SHIFT;
7340 7340
7341 /* 7341 /*
7342 * end is the last byte in the last page. end == start is legal 7342 * end is the last byte in the last page. end == start is legal
7343 */ 7343 */
7344 end_idx = end >> PAGE_CACHE_SHIFT; 7344 end_idx = end >> PAGE_SHIFT;
7345 7345
7346 rcu_read_lock(); 7346 rcu_read_lock();
7347 7347
@@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7382 * include/linux/pagemap.h for details. 7382 * include/linux/pagemap.h for details.
7383 */ 7383 */
7384 if (unlikely(page != *pagep)) { 7384 if (unlikely(page != *pagep)) {
7385 page_cache_release(page); 7385 put_page(page);
7386 page = NULL; 7386 page = NULL;
7387 } 7387 }
7388 } 7388 }
@@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7390 if (page) { 7390 if (page) {
7391 if (page->index <= end_idx) 7391 if (page->index <= end_idx)
7392 found = true; 7392 found = true;
7393 page_cache_release(page); 7393 put_page(page);
7394 } 7394 }
7395 7395
7396 rcu_read_unlock(); 7396 rcu_read_unlock();
@@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8719 if (ret == 1) { 8719 if (ret == 1) {
8720 ClearPagePrivate(page); 8720 ClearPagePrivate(page);
8721 set_page_private(page, 0); 8721 set_page_private(page, 0);
8722 page_cache_release(page); 8722 put_page(page);
8723 } 8723 }
8724 return ret; 8724 return ret;
8725} 8725}
@@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8739 struct btrfs_ordered_extent *ordered; 8739 struct btrfs_ordered_extent *ordered;
8740 struct extent_state *cached_state = NULL; 8740 struct extent_state *cached_state = NULL;
8741 u64 page_start = page_offset(page); 8741 u64 page_start = page_offset(page);
8742 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 8742 u64 page_end = page_start + PAGE_SIZE - 1;
8743 u64 start; 8743 u64 start;
8744 u64 end; 8744 u64 end;
8745 int inode_evicting = inode->i_state & I_FREEING; 8745 int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@ again:
8822 * 2) Not written to disk 8822 * 2) Not written to disk
8823 * This means the reserved space should be freed here. 8823 * This means the reserved space should be freed here.
8824 */ 8824 */
8825 btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); 8825 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8826 if (!inode_evicting) { 8826 if (!inode_evicting) {
8827 clear_extent_bit(tree, page_start, page_end, 8827 clear_extent_bit(tree, page_start, page_end,
8828 EXTENT_LOCKED | EXTENT_DIRTY | 8828 EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@ again:
8837 if (PagePrivate(page)) { 8837 if (PagePrivate(page)) {
8838 ClearPagePrivate(page); 8838 ClearPagePrivate(page);
8839 set_page_private(page, 0); 8839 set_page_private(page, 0);
8840 page_cache_release(page); 8840 put_page(page);
8841 } 8841 }
8842} 8842}
8843 8843
@@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8874 u64 page_end; 8874 u64 page_end;
8875 u64 end; 8875 u64 end;
8876 8876
8877 reserved_space = PAGE_CACHE_SIZE; 8877 reserved_space = PAGE_SIZE;
8878 8878
8879 sb_start_pagefault(inode->i_sb); 8879 sb_start_pagefault(inode->i_sb);
8880 page_start = page_offset(page); 8880 page_start = page_offset(page);
8881 page_end = page_start + PAGE_CACHE_SIZE - 1; 8881 page_end = page_start + PAGE_SIZE - 1;
8882 end = page_end; 8882 end = page_end;
8883 8883
8884 /* 8884 /*
@@ -8934,15 +8934,15 @@ again:
8934 goto again; 8934 goto again;
8935 } 8935 }
8936 8936
8937 if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) { 8937 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8938 reserved_space = round_up(size - page_start, root->sectorsize); 8938 reserved_space = round_up(size - page_start, root->sectorsize);
8939 if (reserved_space < PAGE_CACHE_SIZE) { 8939 if (reserved_space < PAGE_SIZE) {
8940 end = page_start + reserved_space - 1; 8940 end = page_start + reserved_space - 1;
8941 spin_lock(&BTRFS_I(inode)->lock); 8941 spin_lock(&BTRFS_I(inode)->lock);
8942 BTRFS_I(inode)->outstanding_extents++; 8942 BTRFS_I(inode)->outstanding_extents++;
8943 spin_unlock(&BTRFS_I(inode)->lock); 8943 spin_unlock(&BTRFS_I(inode)->lock);
8944 btrfs_delalloc_release_space(inode, page_start, 8944 btrfs_delalloc_release_space(inode, page_start,
8945 PAGE_CACHE_SIZE - reserved_space); 8945 PAGE_SIZE - reserved_space);
8946 } 8946 }
8947 } 8947 }
8948 8948
@@ -8969,14 +8969,14 @@ again:
8969 ret = 0; 8969 ret = 0;
8970 8970
8971 /* page is wholly or partially inside EOF */ 8971 /* page is wholly or partially inside EOF */
8972 if (page_start + PAGE_CACHE_SIZE > size) 8972 if (page_start + PAGE_SIZE > size)
8973 zero_start = size & ~PAGE_CACHE_MASK; 8973 zero_start = size & ~PAGE_MASK;
8974 else 8974 else
8975 zero_start = PAGE_CACHE_SIZE; 8975 zero_start = PAGE_SIZE;
8976 8976
8977 if (zero_start != PAGE_CACHE_SIZE) { 8977 if (zero_start != PAGE_SIZE) {
8978 kaddr = kmap(page); 8978 kaddr = kmap(page);
8979 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 8979 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
8980 flush_dcache_page(page); 8980 flush_dcache_page(page);
8981 kunmap(page); 8981 kunmap(page);
8982 } 8982 }
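The hunks above complete the fs/btrfs/inode.c conversion, and the pattern repeats in every file below: PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT, PAGE_CACHE_MASK and PAGE_CACHE_ALIGN become PAGE_SIZE, PAGE_SHIFT, PAGE_MASK and PAGE_ALIGN, while page_cache_get() and page_cache_release() become get_page() and put_page(). Before their removal, the old names in include/linux/pagemap.h were one-to-one aliases, roughly as follows (quoted from memory of the pre-4.6 header, not from this patch):

/* legacy aliases removed by this series; behavior is identical */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

The aliases were meant to allow a page cache granularity larger than the CPU page, but that never materialized, so dropping the indirection changes nothing at runtime.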
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 053e677839fe..94a0c8a3e871 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -898,7 +898,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
 	u64 end;
 
 	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
 	read_unlock(&em_tree->lock);
 
 	if (em) {
@@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em;
-	u64 len = PAGE_CACHE_SIZE;
+	u64 len = PAGE_SIZE;
 
 	/*
 	 * hopefully we have this extent in the tree already, try without
@@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode,
 	struct extent_io_tree *tree;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
-	file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+	file_end = (isize - 1) >> PAGE_SHIFT;
 	if (!isize || start_index > file_end)
 		return 0;
 
 	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
 
 	ret = btrfs_delalloc_reserve_space(inode,
-			start_index << PAGE_CACHE_SHIFT,
-			page_cnt << PAGE_CACHE_SHIFT);
+			start_index << PAGE_SHIFT,
+			page_cnt << PAGE_SHIFT);
 	if (ret)
 		return ret;
 	i_done = 0;
@@ -1148,7 +1148,7 @@ again:
 			break;
 
 		page_start = page_offset(page);
-		page_end = page_start + PAGE_CACHE_SIZE - 1;
+		page_end = page_start + PAGE_SIZE - 1;
 		while (1) {
 			lock_extent_bits(tree, page_start, page_end,
 					 &cached_state);
@@ -1169,7 +1169,7 @@ again:
 			 */
 			if (page->mapping != inode->i_mapping) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				goto again;
 			}
 		}
@@ -1179,7 +1179,7 @@ again:
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				ret = -EIO;
 				break;
 			}
@@ -1187,7 +1187,7 @@ again:
 
 			if (page->mapping != inode->i_mapping) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				goto again;
 			}
 
@@ -1208,7 +1208,7 @@ again:
 		wait_on_page_writeback(pages[i]);
 
 	page_start = page_offset(pages[0]);
-	page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
+	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree,
 			 page_start, page_end - 1, &cached_state);
@@ -1222,8 +1222,8 @@ again:
 		BTRFS_I(inode)->outstanding_extents++;
 		spin_unlock(&BTRFS_I(inode)->lock);
 		btrfs_delalloc_release_space(inode,
-				start_index << PAGE_CACHE_SHIFT,
-				(page_cnt - i_done) << PAGE_CACHE_SHIFT);
+				start_index << PAGE_SHIFT,
+				(page_cnt - i_done) << PAGE_SHIFT);
 	}
 
 
@@ -1240,17 +1240,17 @@ again:
 		set_page_extent_mapped(pages[i]);
 		set_page_dirty(pages[i]);
 		unlock_page(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	return i_done;
 out:
 	for (i = 0; i < i_done; i++) {
 		unlock_page(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	btrfs_delalloc_release_space(inode,
-			start_index << PAGE_CACHE_SHIFT,
-			page_cnt << PAGE_CACHE_SHIFT);
+			start_index << PAGE_SHIFT,
+			page_cnt << PAGE_SHIFT);
 	return ret;
 
 }
@@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 	int defrag_count = 0;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	u32 extent_thresh = range->extent_thresh;
-	unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
+	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
 	unsigned long cluster = max_cluster;
 	u64 new_align = ~((u64)SZ_128K - 1);
 	struct page **pages = NULL;
@@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 	/* find the last page to defrag */
 	if (range->start + range->len > range->start) {
 		last_index = min_t(u64, isize - 1,
-			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
+			 range->start + range->len - 1) >> PAGE_SHIFT;
 	} else {
-		last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		last_index = (isize - 1) >> PAGE_SHIFT;
 	}
 
 	if (newer_than) {
@@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 			 * we always align our defrag to help keep
 			 * the extents in the file evenly spaced
 			 */
-			i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+			i = (newer_off & new_align) >> PAGE_SHIFT;
 		} else
 			goto out_ra;
 	} else {
-		i = range->start >> PAGE_CACHE_SHIFT;
+		i = range->start >> PAGE_SHIFT;
 	}
 	if (!max_to_defrag)
 		max_to_defrag = last_index - i + 1;
@@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 		inode->i_mapping->writeback_index = i;
 
 	while (i <= last_index && defrag_count < max_to_defrag &&
-	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
+	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
 		/*
 		 * make sure we stop running if someone unmounts
 		 * the FS
@@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 			break;
 		}
 
-		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
 					 extent_thresh, &last_len, &skip,
 					 &defrag_end, range->flags &
 					 BTRFS_DEFRAG_RANGE_COMPRESS)) {
@@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 			 * the should_defrag function tells us how much to skip
 			 * bump our counter by the suggested amount
 			 */
-			next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
+			next = DIV_ROUND_UP(skip, PAGE_SIZE);
 			i = max(i + 1, next);
 			continue;
 		}
 
 		if (!newer_than) {
-			cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
-				   PAGE_CACHE_SHIFT) - i;
+			cluster = (PAGE_ALIGN(defrag_end) >>
+				   PAGE_SHIFT) - i;
 			cluster = min(cluster, max_cluster);
 		} else {
 			cluster = max_cluster;
@@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 			i += ret;
 
 			newer_off = max(newer_off + 1,
-					(u64)i << PAGE_CACHE_SHIFT);
+					(u64)i << PAGE_SHIFT);
 
 			ret = find_new_extents(root, inode, newer_than,
 					       &newer_off, SZ_64K);
 			if (!ret) {
 				range->start = newer_off;
-				i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+				i = (newer_off & new_align) >> PAGE_SHIFT;
 			} else {
 				break;
 			}
 		} else {
 			if (ret > 0) {
 				i += ret;
-				last_len += ret << PAGE_CACHE_SHIFT;
+				last_len += ret << PAGE_SHIFT;
 			} else {
 				i++;
 				last_len = 0;
@@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
 	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
 		readonly = true;
 	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
-		if (vol_args->size > PAGE_CACHE_SIZE) {
+		if (vol_args->size > PAGE_SIZE) {
 			ret = -EINVAL;
 			goto free_args;
 		}
@@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
 		lock_page(page);
 		if (!PageUptodate(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return ERR_PTR(-EIO);
 		}
 		if (page->mapping != inode->i_mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return ERR_PTR(-EAGAIN);
 		}
 	}
@@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
 			       int num_pages, u64 off)
 {
 	int i;
-	pgoff_t index = off >> PAGE_CACHE_SHIFT;
+	pgoff_t index = off >> PAGE_SHIFT;
 
 	for (i = 0; i < num_pages; i++) {
 again:
@@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
 		pg = cmp->src_pages[i];
 		if (pg) {
 			unlock_page(pg);
-			page_cache_release(pg);
+			put_page(pg);
 		}
 		pg = cmp->dst_pages[i];
 		if (pg) {
 			unlock_page(pg);
-			page_cache_release(pg);
+			put_page(pg);
 		}
 	}
 	kfree(cmp->src_pages);
@@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
 				  u64 len, struct cmp_pages *cmp)
 {
 	int ret;
-	int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+	int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
 	struct page **src_pgarr, **dst_pgarr;
 
 	/*
@@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
 	int ret = 0;
 	int i;
 	struct page *src_page, *dst_page;
-	unsigned int cmp_len = PAGE_CACHE_SIZE;
+	unsigned int cmp_len = PAGE_SIZE;
 	void *addr, *dst_addr;
 
 	i = 0;
 	while (len) {
-		if (len < PAGE_CACHE_SIZE)
+		if (len < PAGE_SIZE)
 			cmp_len = len;
 
 		BUG_ON(i >= cmp->num_pages);
@@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
 	if (olen > BTRFS_MAX_DEDUPE_LEN)
 		olen = BTRFS_MAX_DEDUPE_LEN;
 
-	if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) {
+	if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
 		/*
 		 * Btrfs does not support blocksize < page_size. As a
 		 * result, btrfs_cmp_data() won't correctly handle
@@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
 	 * data immediately and not the previous data.
 	 */
 	truncate_inode_pages_range(&inode->i_data,
-				   round_down(destoff, PAGE_CACHE_SIZE),
-				   round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
+				   round_down(destoff, PAGE_SIZE),
+				   round_up(destoff + len, PAGE_SIZE) - 1);
 out_unlock:
 	if (!same_inode)
 		btrfs_double_inode_unlock(src, inode);
@@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 	/* we generally have at most 6 or so space infos, one for each raid
 	 * level. So, a whole page should be more than enough for everyone
 	 */
-	if (alloc_size > PAGE_CACHE_SIZE)
+	if (alloc_size > PAGE_SIZE)
 		return -ENOMEM;
 
 	space_args.total_spaces = 0;
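A recurring pattern in the defrag hunks above is converting between byte offsets and page indexes with shifts: offset >> PAGE_SHIFT yields the index, index << PAGE_SHIFT yields the byte offset, and an inclusive byte range [start, end] spans (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1 pages. A minimal userspace sketch of that arithmetic (the 4 KiB page and the byte values are illustrative assumptions):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long start = 5000, end = 13000;   /* arbitrary byte range */
	unsigned long first = start >> PAGE_SHIFT; /* page index 1 */
	unsigned long last  = end >> PAGE_SHIFT;   /* page index 3 */
	unsigned long nr    = last - first + 1;    /* 3 pages cover the range */

	printf("pages %lu..%lu (%lu total)\n", first, last, nr);
	return 0;
}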
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a2f051347731..1adfbe7be6b8 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void)
55 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
56 56
57 workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); 57 workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); 58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); 59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
60 if (!workspace->mem || !workspace->buf || !workspace->cbuf) 60 if (!workspace->mem || !workspace->buf || !workspace->cbuf)
61 goto fail; 61 goto fail;
62 62
@@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws,
116 *total_out = 0; 116 *total_out = 0;
117 *total_in = 0; 117 *total_in = 0;
118 118
119 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 119 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
120 data_in = kmap(in_page); 120 data_in = kmap(in_page);
121 121
122 /* 122 /*
@@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws,
133 tot_out = LZO_LEN; 133 tot_out = LZO_LEN;
134 pages[0] = out_page; 134 pages[0] = out_page;
135 nr_pages = 1; 135 nr_pages = 1;
136 pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; 136 pg_bytes_left = PAGE_SIZE - LZO_LEN;
137 137
138 /* compress at most one page of data each time */ 138 /* compress at most one page of data each time */
139 in_len = min(len, PAGE_CACHE_SIZE); 139 in_len = min(len, PAGE_SIZE);
140 while (tot_in < len) { 140 while (tot_in < len) {
141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, 141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
142 &out_len, workspace->mem); 142 &out_len, workspace->mem);
@@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws,
201 cpage_out = kmap(out_page); 201 cpage_out = kmap(out_page);
202 pages[nr_pages++] = out_page; 202 pages[nr_pages++] = out_page;
203 203
204 pg_bytes_left = PAGE_CACHE_SIZE; 204 pg_bytes_left = PAGE_SIZE;
205 out_offset = 0; 205 out_offset = 0;
206 } 206 }
207 } 207 }
@@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws,
221 221
222 bytes_left = len - tot_in; 222 bytes_left = len - tot_in;
223 kunmap(in_page); 223 kunmap(in_page);
224 page_cache_release(in_page); 224 put_page(in_page);
225 225
226 start += PAGE_CACHE_SIZE; 226 start += PAGE_SIZE;
227 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 227 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
228 data_in = kmap(in_page); 228 data_in = kmap(in_page);
229 in_len = min(bytes_left, PAGE_CACHE_SIZE); 229 in_len = min(bytes_left, PAGE_SIZE);
230 } 230 }
231 231
232 if (tot_out > tot_in) 232 if (tot_out > tot_in)
@@ -248,7 +248,7 @@ out:
248 248
249 if (in_page) { 249 if (in_page) {
250 kunmap(in_page); 250 kunmap(in_page);
251 page_cache_release(in_page); 251 put_page(in_page);
252 } 252 }
253 253
254 return ret; 254 return ret;
@@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
266 char *data_in; 266 char *data_in;
267 unsigned long page_in_index = 0; 267 unsigned long page_in_index = 0;
268 unsigned long page_out_index = 0; 268 unsigned long page_out_index = 0;
269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); 269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
270 unsigned long buf_start; 270 unsigned long buf_start;
271 unsigned long buf_offset = 0; 271 unsigned long buf_offset = 0;
272 unsigned long bytes; 272 unsigned long bytes;
@@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
289 tot_in = LZO_LEN; 289 tot_in = LZO_LEN;
290 in_offset = LZO_LEN; 290 in_offset = LZO_LEN;
291 tot_len = min_t(size_t, srclen, tot_len); 291 tot_len = min_t(size_t, srclen, tot_len);
292 in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; 292 in_page_bytes_left = PAGE_SIZE - LZO_LEN;
293 293
294 tot_out = 0; 294 tot_out = 0;
295 pg_offset = 0; 295 pg_offset = 0;
@@ -345,12 +345,12 @@ cont:
345 345
346 data_in = kmap(pages_in[++page_in_index]); 346 data_in = kmap(pages_in[++page_in_index]);
347 347
348 in_page_bytes_left = PAGE_CACHE_SIZE; 348 in_page_bytes_left = PAGE_SIZE;
349 in_offset = 0; 349 in_offset = 0;
350 } 350 }
351 } 351 }
352 352
353 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); 353 out_len = lzo1x_worst_compress(PAGE_SIZE);
354 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, 354 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
355 &out_len); 355 &out_len);
356 if (need_unmap) 356 if (need_unmap)
@@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
399 in_len = read_compress_length(data_in); 399 in_len = read_compress_length(data_in);
400 data_in += LZO_LEN; 400 data_in += LZO_LEN;
401 401
402 out_len = PAGE_CACHE_SIZE; 402 out_len = PAGE_SIZE;
403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); 403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
404 if (ret != LZO_E_OK) { 404 if (ret != LZO_E_OK) {
405 printk(KERN_WARNING "BTRFS: decompress failed!\n"); 405 printk(KERN_WARNING "BTRFS: decompress failed!\n");
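The workspace buffers above are sized for the worst case in which LZO expands its input rather than shrinking it. If memory serves, lzo1x_worst_compress() in include/linux/lzo.h is the bound x + x/16 + 64 + 3; a runnable check of what that means for one 4 KiB page (the 4096 value is an assumption about the page size, not part of this patch):

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* mirrors the kernel's lzo1x_worst_compress() bound */
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)

int main(void)
{
	/* 4096 + 256 + 64 + 3 = 4419 bytes for one incompressible page */
	printf("%lu\n", lzo1x_worst_compress(PAGE_SIZE));
	return 0;
}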
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 55161369fab1..0b7792e02dd5 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
270 s = kmap(rbio->bio_pages[i]); 270 s = kmap(rbio->bio_pages[i]);
271 d = kmap(rbio->stripe_pages[i]); 271 d = kmap(rbio->stripe_pages[i]);
272 272
273 memcpy(d, s, PAGE_CACHE_SIZE); 273 memcpy(d, s, PAGE_SIZE);
274 274
275 kunmap(rbio->bio_pages[i]); 275 kunmap(rbio->bio_pages[i]);
276 kunmap(rbio->stripe_pages[i]); 276 kunmap(rbio->stripe_pages[i]);
@@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
962 */ 962 */
963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) 963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
964{ 964{
965 return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes; 965 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
966} 966}
967 967
968/* 968/*
@@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1078 u64 disk_start; 1078 u64 disk_start;
1079 1079
1080 stripe = &rbio->bbio->stripes[stripe_nr]; 1080 stripe = &rbio->bbio->stripes[stripe_nr];
1081 disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); 1081 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1082 1082
1083 /* if the device is missing, just fail this stripe */ 1083 /* if the device is missing, just fail this stripe */
1084 if (!stripe->dev->bdev) 1084 if (!stripe->dev->bdev)
@@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1096 if (last_end == disk_start && stripe->dev->bdev && 1096 if (last_end == disk_start && stripe->dev->bdev &&
1097 !last->bi_error && 1097 !last->bi_error &&
1098 last->bi_bdev == stripe->dev->bdev) { 1098 last->bi_bdev == stripe->dev->bdev) {
1099 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); 1099 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1100 if (ret == PAGE_CACHE_SIZE) 1100 if (ret == PAGE_SIZE)
1101 return 0; 1101 return 0;
1102 } 1102 }
1103 } 1103 }
@@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1111 bio->bi_bdev = stripe->dev->bdev; 1111 bio->bi_bdev = stripe->dev->bdev;
1112 bio->bi_iter.bi_sector = disk_start >> 9; 1112 bio->bi_iter.bi_sector = disk_start >> 9;
1113 1113
1114 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 1114 bio_add_page(bio, page, PAGE_SIZE, 0);
1115 bio_list_add(bio_list, bio); 1115 bio_list_add(bio_list, bio);
1116 return 0; 1116 return 0;
1117} 1117}
@@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1154 bio_list_for_each(bio, &rbio->bio_list) { 1154 bio_list_for_each(bio, &rbio->bio_list) {
1155 start = (u64)bio->bi_iter.bi_sector << 9; 1155 start = (u64)bio->bi_iter.bi_sector << 9;
1156 stripe_offset = start - rbio->bbio->raid_map[0]; 1156 stripe_offset = start - rbio->bbio->raid_map[0];
1157 page_index = stripe_offset >> PAGE_CACHE_SHIFT; 1157 page_index = stripe_offset >> PAGE_SHIFT;
1158 1158
1159 for (i = 0; i < bio->bi_vcnt; i++) { 1159 for (i = 0; i < bio->bi_vcnt; i++) {
1160 p = bio->bi_io_vec[i].bv_page; 1160 p = bio->bi_io_vec[i].bv_page;
@@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1253 } else { 1253 } else {
1254 /* raid5 */ 1254 /* raid5 */
1255 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 1255 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1256 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 1256 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1257 } 1257 }
1258 1258
1259 1259
@@ -1914,7 +1914,7 @@ pstripe:
1914 /* Copy parity block into failed block to start with */ 1914 /* Copy parity block into failed block to start with */
1915 memcpy(pointers[faila], 1915 memcpy(pointers[faila],
1916 pointers[rbio->nr_data], 1916 pointers[rbio->nr_data],
1917 PAGE_CACHE_SIZE); 1917 PAGE_SIZE);
1918 1918
1919 /* rearrange the pointer array */ 1919 /* rearrange the pointer array */
1920 p = pointers[faila]; 1920 p = pointers[faila];
@@ -1923,7 +1923,7 @@ pstripe:
1923 pointers[rbio->nr_data - 1] = p; 1923 pointers[rbio->nr_data - 1] = p;
1924 1924
1925 /* xor in the rest */ 1925 /* xor in the rest */
1926 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); 1926 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1927 } 1927 }
1928 /* if we're doing this rebuild as part of an rmw, go through 1928 /* if we're doing this rebuild as part of an rmw, go through
1929 * and set all of our private rbio pages in the 1929 * and set all of our private rbio pages in the
@@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + 2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2251 rbio->stripe_len * rbio->nr_data); 2251 rbio->stripe_len * rbio->nr_data);
2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); 2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2253 index = stripe_offset >> PAGE_CACHE_SHIFT; 2253 index = stripe_offset >> PAGE_SHIFT;
2254 rbio->bio_pages[index] = page; 2254 rbio->bio_pages[index] = page;
2255} 2255}
2256 2256
@@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2365 } else { 2365 } else {
2366 /* raid5 */ 2366 /* raid5 */
2367 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 2367 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2368 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 2368 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2369 } 2369 }
2370 2370
2371 /* Check scrubbing pairty and repair it */ 2371 /* Check scrubbing pairty and repair it */
2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); 2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2373 parity = kmap(p); 2373 parity = kmap(p);
2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) 2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2375 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); 2375 memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2376 else 2376 else
2377 /* Parity is right, needn't writeback */ 2377 /* Parity is right, needn't writeback */
2378 bitmap_clear(rbio->dbitmap, pagenr, 1); 2378 bitmap_clear(rbio->dbitmap, pagenr, 1);
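The finish_parity_scrub() hunk recomputes RAID5 parity with run_xor() over a full page and rewrites the on-disk copy only when memcmp() shows a mismatch. A toy userspace model of that check-and-repair step, using 16-byte buffers in place of PAGE_SIZE (everything here is illustrative, not btrfs code):

#include <stdio.h>
#include <string.h>

#define STRIPE 16

int main(void)
{
	unsigned char d0[STRIPE] = "0123456789abcdef";
	unsigned char d1[STRIPE] = "fedcba9876543210";
	unsigned char parity[STRIPE], on_disk[STRIPE];

	/* parity is the byte-wise XOR of all data stripes */
	for (int i = 0; i < STRIPE; i++)
		parity[i] = d0[i] ^ d1[i];
	memcpy(on_disk, parity, STRIPE);
	on_disk[3] ^= 0xff;                      /* simulate corruption */

	if (memcmp(on_disk, parity, STRIPE))
		memcpy(on_disk, parity, STRIPE); /* repair, as the scrub does */
	printf("repaired: %d\n", !memcmp(on_disk, parity, STRIPE));
	return 0;
}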
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index b892914968c1..298631eaee78 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
226 /* find extent */ 226 /* find extent */
227 spin_lock(&fs_info->reada_lock); 227 spin_lock(&fs_info->reada_lock);
228 re = radix_tree_lookup(&fs_info->reada_tree, 228 re = radix_tree_lookup(&fs_info->reada_tree,
229 start >> PAGE_CACHE_SHIFT); 229 start >> PAGE_SHIFT);
230 if (re) 230 if (re)
231 re->refcnt++; 231 re->refcnt++;
232 spin_unlock(&fs_info->reada_lock); 232 spin_unlock(&fs_info->reada_lock);
@@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
257 zone = NULL; 257 zone = NULL;
258 spin_lock(&fs_info->reada_lock); 258 spin_lock(&fs_info->reada_lock);
259 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 259 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
260 logical >> PAGE_CACHE_SHIFT, 1); 260 logical >> PAGE_SHIFT, 1);
261 if (ret == 1 && logical >= zone->start && logical <= zone->end) { 261 if (ret == 1 && logical >= zone->start && logical <= zone->end) {
262 kref_get(&zone->refcnt); 262 kref_get(&zone->refcnt);
263 spin_unlock(&fs_info->reada_lock); 263 spin_unlock(&fs_info->reada_lock);
@@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
294 294
295 spin_lock(&fs_info->reada_lock); 295 spin_lock(&fs_info->reada_lock);
296 ret = radix_tree_insert(&dev->reada_zones, 296 ret = radix_tree_insert(&dev->reada_zones,
297 (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), 297 (unsigned long)(zone->end >> PAGE_SHIFT),
298 zone); 298 zone);
299 299
300 if (ret == -EEXIST) { 300 if (ret == -EEXIST) {
301 kfree(zone); 301 kfree(zone);
302 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 302 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
303 logical >> PAGE_CACHE_SHIFT, 1); 303 logical >> PAGE_SHIFT, 1);
304 if (ret == 1 && logical >= zone->start && logical <= zone->end) 304 if (ret == 1 && logical >= zone->start && logical <= zone->end)
305 kref_get(&zone->refcnt); 305 kref_get(&zone->refcnt);
306 else 306 else
@@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
326 u64 length; 326 u64 length;
327 int real_stripes; 327 int real_stripes;
328 int nzones = 0; 328 int nzones = 0;
329 unsigned long index = logical >> PAGE_CACHE_SHIFT; 329 unsigned long index = logical >> PAGE_SHIFT;
330 int dev_replace_is_ongoing; 330 int dev_replace_is_ongoing;
331 int have_zone = 0; 331 int have_zone = 0;
332 332
@@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
495 struct reada_extent *re) 495 struct reada_extent *re)
496{ 496{
497 int i; 497 int i;
498 unsigned long index = re->logical >> PAGE_CACHE_SHIFT; 498 unsigned long index = re->logical >> PAGE_SHIFT;
499 499
500 spin_lock(&fs_info->reada_lock); 500 spin_lock(&fs_info->reada_lock);
501 if (--re->refcnt) { 501 if (--re->refcnt) {
@@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref)
538 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt); 538 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
539 539
540 radix_tree_delete(&zone->device->reada_zones, 540 radix_tree_delete(&zone->device->reada_zones,
541 zone->end >> PAGE_CACHE_SHIFT); 541 zone->end >> PAGE_SHIFT);
542 542
543 kfree(zone); 543 kfree(zone);
544} 544}
@@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
587static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock) 587static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
588{ 588{
589 int i; 589 int i;
590 unsigned long index = zone->end >> PAGE_CACHE_SHIFT; 590 unsigned long index = zone->end >> PAGE_SHIFT;
591 591
592 for (i = 0; i < zone->ndevs; ++i) { 592 for (i = 0; i < zone->ndevs; ++i) {
593 struct reada_zone *peer; 593 struct reada_zone *peer;
@@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
622 (void **)&zone, index, 1); 622 (void **)&zone, index, 1);
623 if (ret == 0) 623 if (ret == 0)
624 break; 624 break;
625 index = (zone->end >> PAGE_CACHE_SHIFT) + 1; 625 index = (zone->end >> PAGE_SHIFT) + 1;
626 if (zone->locked) { 626 if (zone->locked) {
627 if (zone->elems > top_locked_elems) { 627 if (zone->elems > top_locked_elems) {
628 top_locked_elems = zone->elems; 628 top_locked_elems = zone->elems;
@@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
673 * plugging to speed things up 673 * plugging to speed things up
674 */ 674 */
675 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 675 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
676 dev->reada_next >> PAGE_CACHE_SHIFT, 1); 676 dev->reada_next >> PAGE_SHIFT, 1);
677 if (ret == 0 || re->logical > dev->reada_curr_zone->end) { 677 if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
678 ret = reada_pick_zone(dev); 678 ret = reada_pick_zone(dev);
679 if (!ret) { 679 if (!ret) {
@@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
682 } 682 }
683 re = NULL; 683 re = NULL;
684 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 684 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
685 dev->reada_next >> PAGE_CACHE_SHIFT, 1); 685 dev->reada_next >> PAGE_SHIFT, 1);
686 } 686 }
687 if (ret == 0) { 687 if (ret == 0) {
688 spin_unlock(&fs_info->reada_lock); 688 spin_unlock(&fs_info->reada_lock);
@@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
838 printk(KERN_CONT " curr off %llu", 838 printk(KERN_CONT " curr off %llu",
839 device->reada_next - zone->start); 839 device->reada_next - zone->start);
840 printk(KERN_CONT "\n"); 840 printk(KERN_CONT "\n");
841 index = (zone->end >> PAGE_CACHE_SHIFT) + 1; 841 index = (zone->end >> PAGE_SHIFT) + 1;
842 } 842 }
843 cnt = 0; 843 cnt = 0;
844 index = 0; 844 index = 0;
@@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
864 } 864 }
865 } 865 }
866 printk(KERN_CONT "\n"); 866 printk(KERN_CONT "\n");
867 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 867 index = (re->logical >> PAGE_SHIFT) + 1;
868 if (++cnt > 15) 868 if (++cnt > 15)
869 break; 869 break;
870 } 870 }
@@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
880 if (ret == 0) 880 if (ret == 0)
881 break; 881 break;
882 if (!re->scheduled) { 882 if (!re->scheduled) {
883 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 883 index = (re->logical >> PAGE_SHIFT) + 1;
884 continue; 884 continue;
885 } 885 }
886 printk(KERN_DEBUG 886 printk(KERN_DEBUG
@@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
897 } 897 }
898 } 898 }
899 printk(KERN_CONT "\n"); 899 printk(KERN_CONT "\n");
900 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 900 index = (re->logical >> PAGE_SHIFT) + 1;
901 } 901 }
902 spin_unlock(&fs_info->reada_lock); 902 spin_unlock(&fs_info->reada_lock);
903} 903}
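All of the readahead lookups above key their radix trees by page number rather than byte offset, so a zone inserted under its end-page key can be found by a gang lookup starting from any logical address inside it. A small sketch of the keying (4 KiB page assumed; key_for() is an illustrative helper, not a kernel function):

#include <stdio.h>

#define PAGE_SHIFT 12

/* illustrative helper: byte address -> radix tree key */
static unsigned long key_for(unsigned long long logical)
{
	return (unsigned long)(logical >> PAGE_SHIFT);
}

int main(void)
{
	unsigned long long zone_start = 1ULL << 20;        /* 1 MiB */
	unsigned long long zone_end = (2ULL << 20) - 1;

	/* every address in the zone maps to a key <= the end-page key,
	 * which is why lookups from key_for(logical) find the zone */
	printf("start key %lu, probe key %lu, end key %lu\n",
	       key_for(zone_start), key_for(zone_start + 4096),
	       key_for(zone_end));
	return 0;
}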
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2bd0011450df..3c93968b539d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3129,10 +3129,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
3129 if (ret) 3129 if (ret)
3130 goto out; 3130 goto out;
3131 3131
3132 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 3132 index = (cluster->start - offset) >> PAGE_SHIFT;
3133 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 3133 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3134 while (index <= last_index) { 3134 while (index <= last_index) {
3135 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 3135 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
3136 if (ret) 3136 if (ret)
3137 goto out; 3137 goto out;
3138 3138
@@ -3145,7 +3145,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3145 mask); 3145 mask);
3146 if (!page) { 3146 if (!page) {
3147 btrfs_delalloc_release_metadata(inode, 3147 btrfs_delalloc_release_metadata(inode,
3148 PAGE_CACHE_SIZE); 3148 PAGE_SIZE);
3149 ret = -ENOMEM; 3149 ret = -ENOMEM;
3150 goto out; 3150 goto out;
3151 } 3151 }
@@ -3162,16 +3162,16 @@ static int relocate_file_extent_cluster(struct inode *inode,
3162 lock_page(page); 3162 lock_page(page);
3163 if (!PageUptodate(page)) { 3163 if (!PageUptodate(page)) {
3164 unlock_page(page); 3164 unlock_page(page);
3165 page_cache_release(page); 3165 put_page(page);
3166 btrfs_delalloc_release_metadata(inode, 3166 btrfs_delalloc_release_metadata(inode,
3167 PAGE_CACHE_SIZE); 3167 PAGE_SIZE);
3168 ret = -EIO; 3168 ret = -EIO;
3169 goto out; 3169 goto out;
3170 } 3170 }
3171 } 3171 }
3172 3172
3173 page_start = page_offset(page); 3173 page_start = page_offset(page);
3174 page_end = page_start + PAGE_CACHE_SIZE - 1; 3174 page_end = page_start + PAGE_SIZE - 1;
3175 3175
3176 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3176 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3177 3177
@@ -3191,7 +3191,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3191 unlock_extent(&BTRFS_I(inode)->io_tree, 3191 unlock_extent(&BTRFS_I(inode)->io_tree,
3192 page_start, page_end); 3192 page_start, page_end);
3193 unlock_page(page); 3193 unlock_page(page);
3194 page_cache_release(page); 3194 put_page(page);
3195 3195
3196 index++; 3196 index++;
3197 balance_dirty_pages_ratelimited(inode->i_mapping); 3197 balance_dirty_pages_ratelimited(inode->i_mapping);
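relocate_file_extent_cluster() reserves one page of delalloc metadata per loop iteration and must hand it back on every failure path, which is why each error branch above pairs the reserve with a release of exactly PAGE_SIZE. A compilable sketch of that pairing with stubbed helpers (reserve_metadata() and friends are stand-ins, not btrfs APIs):

#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* stand-ins; the real code calls btrfs_delalloc_*_metadata() */
static int reserve_metadata(int bytes) { printf("reserve %d\n", bytes); return 0; }
static void release_metadata(int bytes) { printf("release %d\n", bytes); }
static int fill_and_dirty_page(unsigned long index) { return index == 3 ? -EIO : 0; }

static int process_one_page(unsigned long index)
{
	int ret = reserve_metadata(PAGE_SIZE);
	if (ret)
		return ret;
	ret = fill_and_dirty_page(index);
	if (ret)
		release_metadata(PAGE_SIZE);	/* undo the reserve on failure */
	return ret;
}

int main(void)
{
	for (unsigned long i = 0; i < 5 && !process_one_page(i); i++)
		;
	return 0;
}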
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 39dbdcbf4d13..4678f03e878e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -703,7 +703,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
703 if (IS_ERR(inode)) 703 if (IS_ERR(inode))
704 return PTR_ERR(inode); 704 return PTR_ERR(inode);
705 705
706 index = offset >> PAGE_CACHE_SHIFT; 706 index = offset >> PAGE_SHIFT;
707 707
708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
709 if (!page) { 709 if (!page) {
@@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1636 if (spage->io_error) { 1636 if (spage->io_error) {
1637 void *mapped_buffer = kmap_atomic(spage->page); 1637 void *mapped_buffer = kmap_atomic(spage->page);
1638 1638
1639 memset(mapped_buffer, 0, PAGE_CACHE_SIZE); 1639 memset(mapped_buffer, 0, PAGE_SIZE);
1640 flush_dcache_page(spage->page); 1640 flush_dcache_page(spage->page);
1641 kunmap_atomic(mapped_buffer); 1641 kunmap_atomic(mapped_buffer);
1642 } 1642 }
@@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4294 goto out; 4294 goto out;
4295 } 4295 }
4296 4296
4297 while (len >= PAGE_CACHE_SIZE) { 4297 while (len >= PAGE_SIZE) {
4298 index = offset >> PAGE_CACHE_SHIFT; 4298 index = offset >> PAGE_SHIFT;
4299again: 4299again:
4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4301 if (!page) { 4301 if (!page) {
@@ -4326,7 +4326,7 @@ again:
4326 */ 4326 */
4327 if (page->mapping != inode->i_mapping) { 4327 if (page->mapping != inode->i_mapping) {
4328 unlock_page(page); 4328 unlock_page(page);
4329 page_cache_release(page); 4329 put_page(page);
4330 goto again; 4330 goto again;
4331 } 4331 }
4332 if (!PageUptodate(page)) { 4332 if (!PageUptodate(page)) {
@@ -4348,15 +4348,15 @@ again:
4348 ret = err; 4348 ret = err;
4349next_page: 4349next_page:
4350 unlock_page(page); 4350 unlock_page(page);
4351 page_cache_release(page); 4351 put_page(page);
4352 4352
4353 if (ret) 4353 if (ret)
4354 break; 4354 break;
4355 4355
4356 offset += PAGE_CACHE_SIZE; 4356 offset += PAGE_SIZE;
4357 physical_for_dev_replace += PAGE_CACHE_SIZE; 4357 physical_for_dev_replace += PAGE_SIZE;
4358 nocow_ctx_logical += PAGE_CACHE_SIZE; 4358 nocow_ctx_logical += PAGE_SIZE;
4359 len -= PAGE_CACHE_SIZE; 4359 len -= PAGE_SIZE;
4360 } 4360 }
4361 ret = COPY_COMPLETE; 4361 ret = COPY_COMPLETE;
4362out: 4362out:
@@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
4390 bio->bi_iter.bi_size = 0; 4390 bio->bi_iter.bi_size = 0;
4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; 4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4392 bio->bi_bdev = dev->bdev; 4392 bio->bi_bdev = dev->bdev;
4393 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 4393 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4394 if (ret != PAGE_CACHE_SIZE) { 4394 if (ret != PAGE_SIZE) {
4395leave_with_eio: 4395leave_with_eio:
4396 bio_put(bio); 4396 bio_put(bio);
4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
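copy_nocow_pages_for_inode() only ever copies whole pages: the loop runs while at least PAGE_SIZE bytes remain, and the file offset, the dev-replace physical address, and the logical cursor all advance in lock step. A userspace model of those loop bounds (4 KiB page and a 10000-byte length are assumptions for the demonstration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long len = 10000, offset = 0, physical = 1 << 20, copied = 0;

	while (len >= PAGE_SIZE) {
		offset += PAGE_SIZE;    /* file offset cursor */
		physical += PAGE_SIZE;  /* dev-replace target cursor */
		len -= PAGE_SIZE;
		copied++;
	}
	/* 10000 bytes -> 2 full pages copied, 1808 bytes of tail untouched */
	printf("pages=%lu tail=%lu\n", copied, len);
	return 0;
}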
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 19b7bf4284ee..8d358c547c59 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4449 struct page *page; 4449 struct page *page;
4450 char *addr; 4450 char *addr;
4451 struct btrfs_key key; 4451 struct btrfs_key key;
4452 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 4452 pgoff_t index = offset >> PAGE_SHIFT;
4453 pgoff_t last_index; 4453 pgoff_t last_index;
4454 unsigned pg_offset = offset & ~PAGE_CACHE_MASK; 4454 unsigned pg_offset = offset & ~PAGE_MASK;
4455 ssize_t ret = 0; 4455 ssize_t ret = 0;
4456 4456
4457 key.objectid = sctx->cur_ino; 4457 key.objectid = sctx->cur_ino;
@@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4471 if (len == 0) 4471 if (len == 0)
4472 goto out; 4472 goto out;
4473 4473
4474 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; 4474 last_index = (offset + len - 1) >> PAGE_SHIFT;
4475 4475
4476 /* initial readahead */ 4476 /* initial readahead */
4477 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); 4477 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4481 4481
4482 while (index <= last_index) { 4482 while (index <= last_index) {
4483 unsigned cur_len = min_t(unsigned, len, 4483 unsigned cur_len = min_t(unsigned, len,
4484 PAGE_CACHE_SIZE - pg_offset); 4484 PAGE_SIZE - pg_offset);
4485 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); 4485 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
4486 if (!page) { 4486 if (!page) {
4487 ret = -ENOMEM; 4487 ret = -ENOMEM;
@@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4493 lock_page(page); 4493 lock_page(page);
4494 if (!PageUptodate(page)) { 4494 if (!PageUptodate(page)) {
4495 unlock_page(page); 4495 unlock_page(page);
4496 page_cache_release(page); 4496 put_page(page);
4497 ret = -EIO; 4497 ret = -EIO;
4498 break; 4498 break;
4499 } 4499 }
@@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
 		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
 		kunmap(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 		pg_offset = 0;
 		len -= cur_len;
@@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx,
 		type = btrfs_file_extent_type(leaf, ei);
 		if (type == BTRFS_FILE_EXTENT_INLINE) {
 			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
-			ext_len = PAGE_CACHE_ALIGN(ext_len);
+			ext_len = PAGE_ALIGN(ext_len);
 		} else {
 			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
 		}
@@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
 		 * but there may be items after this page. Make
 		 * sure to send the whole thing
 		 */
-		len = PAGE_CACHE_ALIGN(len);
+		len = PAGE_ALIGN(len);
 	} else {
 		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
 	}
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b0721..e05619f241be 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
 								\
 	if (token && token->kaddr && token->offset <= offset &&	\
 	    token->eb == eb &&						\
-	   (token->offset + PAGE_CACHE_SIZE >= offset + size)) {	\
+	   (token->offset + PAGE_SIZE >= offset + size)) {		\
 		kaddr = token->kaddr;					\
 		p = kaddr + part_offset - token->offset;		\
 		res = get_unaligned_le##bits(p + off);			\
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
 								\
 	if (token && token->kaddr && token->offset <= offset &&	\
 	    token->eb == eb &&						\
-	   (token->offset + PAGE_CACHE_SIZE >= offset + size)) {	\
+	   (token->offset + PAGE_SIZE >= offset + size)) {		\
 		kaddr = token->kaddr;					\
 		p = kaddr + part_offset - token->offset;		\
 		put_unaligned_le##bits(val, p + off);			\
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 669b58201e36..70948b13bc81 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
 {
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 	int count = 0;
@@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
 			count++;
 			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 			if (flags & PROCESS_RELEASE)
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -93,7 +93,7 @@ static int test_find_delalloc(void)
 	 * everything to make sure our pages don't get evicted and screw up our
 	 * test.
 	 */
-	for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
+	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
 		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
 		if (!page) {
 			test_msg("Failed to allocate test page\n");
@@ -104,7 +104,7 @@ static int test_find_delalloc(void)
 		if (index) {
 			unlock_page(page);
 		} else {
-			page_cache_get(page);
+			get_page(page);
 			locked_page = page;
 		}
 	}
@@ -129,7 +129,7 @@ static int test_find_delalloc(void)
 	}
 	unlock_extent(&tmp, start, end);
 	unlock_page(locked_page);
-	page_cache_release(locked_page);
+	put_page(locked_page);
 
 	/*
 	 * Test this scenario
@@ -139,7 +139,7 @@ static int test_find_delalloc(void)
 	 */
 	test_start = SZ_64M;
 	locked_page = find_lock_page(inode->i_mapping,
-				     test_start >> PAGE_CACHE_SHIFT);
+				     test_start >> PAGE_SHIFT);
 	if (!locked_page) {
 		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
@@ -165,7 +165,7 @@ static int test_find_delalloc(void)
 	}
 	unlock_extent(&tmp, start, end);
 	/* locked_page was unlocked above */
-	page_cache_release(locked_page);
+	put_page(locked_page);
 
 	/*
 	 * Test this scenario
@@ -174,7 +174,7 @@ static int test_find_delalloc(void)
 	 */
 	test_start = max_bytes + 4096;
 	locked_page = find_lock_page(inode->i_mapping, test_start >>
-				     PAGE_CACHE_SHIFT);
+				     PAGE_SHIFT);
 	if (!locked_page) {
 		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
@@ -225,13 +225,13 @@ static int test_find_delalloc(void)
 	 * range we want to find.
 	 */
 	page = find_get_page(inode->i_mapping,
-			     (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
+			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
 	if (!page) {
 		test_msg("Couldn't find our page\n");
 		goto out_bits;
 	}
 	ClearPageDirty(page);
-	page_cache_release(page);
+	put_page(page);
 
 	/* We unlocked it in the previous test */
 	lock_page(locked_page);
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
 	end = 0;
 	/*
 	 * Currently if we fail to find dirty pages in the delalloc range we
-	 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
+	 * will adjust max_bytes down to PAGE_SIZE and then re-search. If
 	 * this changes at any point in the future we will need to fix this
 	 * test's expected behavior.
 	 */
@@ -249,9 +249,9 @@ static int test_find_delalloc(void)
 		test_msg("Didn't find our range\n");
 		goto out_bits;
 	}
-	if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) {
+	if (start != test_start && end != test_start + PAGE_SIZE - 1) {
 		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
-			 test_start, test_start + PAGE_CACHE_SIZE - 1, start,
+			 test_start, test_start + PAGE_SIZE - 1, start,
 			 end);
 		goto out_bits;
 	}
@@ -265,7 +265,7 @@ out_bits:
 	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
 out:
 	if (locked_page)
-		page_cache_release(locked_page);
+		put_page(locked_page);
 	process_page_range(inode, 0, total_dirty - 1,
 			   PROCESS_UNLOCK | PROCESS_RELEASE);
 	iput(inode);
@@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 		return -EINVAL;
 	}
 
-	bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+	bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
 		   sizeof(long) * BITS_PER_BYTE);
-	extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+	extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
 				 sizeof(long) * BITS_PER_BYTE);
 	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
 		test_msg("Setting straddling pages failed\n");
@@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 
 	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
 	bitmap_clear(bitmap,
-		     (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+		     (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
 		     sizeof(long) * BITS_PER_BYTE);
 	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-	extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+	extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
 				   sizeof(long) * BITS_PER_BYTE);
 	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
 		test_msg("Clearing straddling pages failed\n");
@@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 
 static int test_eb_bitmaps(void)
 {
-	unsigned long len = PAGE_CACHE_SIZE * 4;
+	unsigned long len = PAGE_SIZE * 4;
 	unsigned long *bitmap;
 	struct extent_buffer *eb;
 	int ret;
@@ -379,7 +379,7 @@ static int test_eb_bitmaps(void)
 
 	/* Do it over again with an extent buffer which isn't page-aligned. */
 	free_extent_buffer(eb);
-	eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
+	eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
 	if (!eb) {
 		test_msg("Couldn't allocate test extent buffer\n");
 		kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c9ad97b1e690..514247515312 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP		(PAGE_SIZE * 8)
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e2b54d546b7c..bd0f45fb38c4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 	}
 
 	/* make sure our super fits in the device */
-	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
 		goto error_bdev_put;
 
 	/* make sure our super fits in the page */
-	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+	if (sizeof(*disk_super) > PAGE_SIZE)
 		goto error_bdev_put;
 
 	/* make sure our super doesn't straddle pages on disk */
-	index = bytenr >> PAGE_CACHE_SHIFT;
-	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+	index = bytenr >> PAGE_SHIFT;
+	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
 		goto error_bdev_put;
 
 	/* pull in the page with our super */
@@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 	p = kmap(page);
 
 	/* align our pointer to the offset of the super block */
-	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+	disk_super = p + (bytenr & ~PAGE_MASK);
 
 	if (btrfs_super_bytenr(disk_super) != bytenr ||
 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
@@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 
 error_unmap:
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 
 error_bdev_put:
 	blkdev_put(bdev, flags);
@@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	 * but sb spans only this function. Add an explicit SetPageUptodate call
 	 * to silence the warning eg. on PowerPC 64.
 	 */
-	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
+	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
 		SetPageUptodate(sb->pages[0]);
 
 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 82990b8f872b..88d274e8ecf2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void)
 	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 			zlib_inflate_workspacesize());
 	workspace->strm.workspace = vmalloc(workspacesize);
-	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+	workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
 	if (!workspace->strm.workspace || !workspace->buf)
 		goto fail;
 
@@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws,
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
 	data_in = kmap(in_page);
 
 	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws,
 
 	workspace->strm.next_in = data_in;
 	workspace->strm.next_out = cpage_out;
-	workspace->strm.avail_out = PAGE_CACHE_SIZE;
-	workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE);
+	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_in = min(len, PAGE_SIZE);
 
 	while (workspace->strm.total_in < len) {
 		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws,
 			cpage_out = kmap(out_page);
 			pages[nr_pages] = out_page;
 			nr_pages++;
-			workspace->strm.avail_out = PAGE_CACHE_SIZE;
+			workspace->strm.avail_out = PAGE_SIZE;
 			workspace->strm.next_out = cpage_out;
 		}
 		/* we're all done */
@@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws,
 
 			bytes_left = len - workspace->strm.total_in;
 			kunmap(in_page);
-			page_cache_release(in_page);
+			put_page(in_page);
 
-			start += PAGE_CACHE_SIZE;
+			start += PAGE_SIZE;
 			in_page = find_get_page(mapping,
-						start >> PAGE_CACHE_SHIFT);
+						start >> PAGE_SHIFT);
 			data_in = kmap(in_page);
 			workspace->strm.avail_in = min(bytes_left,
-						       PAGE_CACHE_SIZE);
+						       PAGE_SIZE);
 			workspace->strm.next_in = data_in;
 		}
 	}
@@ -205,7 +205,7 @@ out:
 
 	if (in_page) {
 		kunmap(in_page);
-		page_cache_release(in_page);
+		put_page(in_page);
 	}
 	return ret;
 }
@@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 	size_t total_out = 0;
 	unsigned long page_in_index = 0;
 	unsigned long page_out_index = 0;
-	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 	unsigned long buf_start;
 	unsigned long pg_offset;
 
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->strm.next_in = data_in;
-	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
+	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
 	workspace->strm.total_in = 0;
 
 	workspace->strm.total_out = 0;
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_CACHE_SIZE;
+	workspace->strm.avail_out = PAGE_SIZE;
 	pg_offset = 0;
 
 	/* If it's deflate, and it's got no preset dictionary, then
@@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 		}
 
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_CACHE_SIZE;
+		workspace->strm.avail_out = PAGE_SIZE;
 
 		if (workspace->strm.avail_in == 0) {
 			unsigned long tmp;
@@ -288,7 +288,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 			workspace->strm.next_in = data_in;
 			tmp = srclen - workspace->strm.total_in;
 			workspace->strm.avail_in = min(tmp,
-						       PAGE_CACHE_SIZE);
+						       PAGE_SIZE);
 		}
 	}
 	if (ret != Z_STREAM_END)
@@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	workspace->strm.total_in = 0;
 
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_CACHE_SIZE;
+	workspace->strm.avail_out = PAGE_SIZE;
 	workspace->strm.total_out = 0;
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		else
 			buf_offset = 0;
 
-		bytes = min(PAGE_CACHE_SIZE - pg_offset,
-			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(PAGE_SIZE - pg_offset,
+			    PAGE_SIZE - buf_offset);
 		bytes = min(bytes, bytes_left);
 
 		kaddr = kmap_atomic(dest_page);
@@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		bytes_left -= bytes;
 next:
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_CACHE_SIZE;
+		workspace->strm.avail_out = PAGE_SIZE;
 	}
 
 	if (ret != Z_STREAM_END && bytes_left != 0)
diff --git a/fs/buffer.c b/fs/buffer.c
index 33be29675358..af0d9a82a8ed 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page)
 {
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static void buffer_io_error(struct buffer_head *bh, char *msg)
@@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
 	struct page *page;
 	int all_mapped = 1;
 
-	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
 	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
 	if (!page)
 		goto out;
@@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
 	}
 out_unlock:
 	spin_unlock(&bd_mapping->private_lock);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return ret;
 }
@@ -1040,7 +1040,7 @@ done:
 	ret = (block < end_block) ? 1 : -ENXIO;
 failed:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return ret;
 }
 
@@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
 	/*
 	 * Check for overflow
 	 */
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	head = page_buffers(page);
 	bh = head;
@@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	blocksize = bh->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 	last_block = (i_size_read(inode) - 1) >> bbits;
 
 	/*
@@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end;
@@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
 
 	BUG_ON(!PageLocked(page));
-	BUG_ON(from > PAGE_CACHE_SIZE);
-	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > PAGE_SIZE);
+	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
 	head = create_page_buffers(page, inode, 0);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
 	for(bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start=block_end, bh = bh->b_this_page) {
@@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 		unsigned flags, struct page **pagep, get_block_t *get_block)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int status;
 
@@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 	status = __block_write_begin(page, pos, len, get_block);
 	if (unlikely(status)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	unsigned start;
 
-	start = pos & (PAGE_CACHE_SIZE - 1);
+	start = pos & (PAGE_SIZE - 1);
 
 	if (unlikely(copied < len)) {
 		/*
@@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
 
 	head = page_buffers(page);
 	blocksize = head->b_size;
-	to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
+	to = min_t(unsigned, PAGE_SIZE - from, count);
 	to = from + to;
-	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+	if (from < blocksize && to > PAGE_SIZE - blocksize)
 		return 0;
 
 	bh = head;
@@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
 	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
 	bh = head;
 	nr = 0;
@@ -2295,16 +2295,16 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 	unsigned zerofrom, offset, len;
 	int err = 0;
 
-	index = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ~PAGE_CACHE_MASK;
+	index = pos >> PAGE_SHIFT;
+	offset = pos & ~PAGE_MASK;
 
-	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
-		zerofrom = curpos & ~PAGE_CACHE_MASK;
+	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
+		zerofrom = curpos & ~PAGE_MASK;
 		if (zerofrom & (blocksize-1)) {
 			*bytes |= (blocksize-1);
 			(*bytes)++;
 		}
-		len = PAGE_CACHE_SIZE - zerofrom;
+		len = PAGE_SIZE - zerofrom;
 
 		err = pagecache_write_begin(file, mapping, curpos, len,
 					    AOP_FLAG_UNINTERRUPTIBLE,
@@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 
 	/* page covers the boundary, find the boundary offset */
 	if (index == curidx) {
-		zerofrom = curpos & ~PAGE_CACHE_MASK;
+		zerofrom = curpos & ~PAGE_MASK;
 		/* if we will expand the thing last block will be filled */
 		if (offset <= zerofrom) {
 			goto out;
@@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 	if (err)
 		return err;
 
-	zerofrom = *bytes & ~PAGE_CACHE_MASK;
+	zerofrom = *bytes & ~PAGE_MASK;
 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
 		*bytes |= (blocksize-1);
 		(*bytes)++;
@@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	}
 
 	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
-		end = size & ~PAGE_CACHE_MASK;
+	if (((page->index + 1) << PAGE_SHIFT) > size)
+		end = size & ~PAGE_MASK;
 	else
-		end = PAGE_CACHE_SIZE;
+		end = PAGE_SIZE;
 
 	ret = __block_write_begin(page, 0, end, get_block);
 	if (!ret)
@@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping,
 	int ret = 0;
 	int is_mapped_to_disk = 1;
 
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	index = pos >> PAGE_SHIFT;
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
@@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping,
 		goto out_release;
 	}
 
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 
 	/*
 	 * We loop across all blocks in the page, whether or not they are
@@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping,
 	 * page is fully mapped-to-disk.
 	 */
 	for (block_start = 0, block_in_page = 0, bh = head;
-		block_start < PAGE_CACHE_SIZE;
+		block_start < PAGE_SIZE;
 		block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
 		int create;
 
@@ -2623,7 +2623,7 @@ failed:
 
 out_release:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	*pagep = NULL;
 
 	return ret;
@@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	while (head) {
 		bh = head;
@@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	const pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 	int ret;
 
@@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 		goto out;
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index >= end_index+1 || !offset) {
 		/*
 		 * The page may have dirty, unmapped buffers. For example,
@@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
@@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage);
 int nobh_truncate_page(struct address_space *mapping,
 			loff_t from, get_block_t *get_block)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize;
 	sector_t iblock;
 	unsigned length, pos;
@@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping,
 		return 0;
 
 	length = blocksize - length;
-	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping,
 	if (page_has_buffers(page)) {
 has_buffers:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return block_truncate_page(mapping, from, get_block);
 	}
 
@@ -2772,7 +2772,7 @@ has_buffers:
 	if (!PageUptodate(page)) {
 		err = mapping->a_ops->readpage(NULL, page);
 		if (err) {
-			page_cache_release(page);
+			put_page(page);
 			goto out;
 		}
 		lock_page(page);
@@ -2789,7 +2789,7 @@ has_buffers:
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return err;
 }
@@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page);
 int block_truncate_page(struct address_space *mapping,
 			loff_t from, get_block_t *get_block)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize;
 	sector_t iblock;
 	unsigned length, pos;
@@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping,
 		return 0;
 
 	length = blocksize - length;
-	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping,
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return err;
 }
@@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	const pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 
 	/* Is the page fully inside i_size? */
@@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 					       end_buffer_async_write);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index >= end_index+1 || !offset) {
 		/*
 		 * The page may have dirty, unmapped buffers. For example,
 		 * they may have been added in ext3_writepage(). Make them
 		 * freeable here, so the page does not leak.
 		 */
-		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		do_invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		return 0; /* don't care */
 	}
@@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	return __block_write_full_page(inode, page, get_block, wbc,
 				       end_buffer_async_write);
 }
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0..afbdc418966d 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 		error = -EIO;
 	}
 
-	page_cache_release(monitor->back_page);
+	put_page(monitor->back_page);
 
 	fscache_end_io(op, monitor->netfs_page, error);
-	page_cache_release(monitor->netfs_page);
+	put_page(monitor->netfs_page);
 	fscache_retrieval_complete(op, 1);
 	fscache_put_retrieval(op);
 	kfree(monitor);
@@ -288,8 +288,8 @@ monitor_backing_page:
 	_debug("- monitor add");
 
 	/* install the monitor */
-	page_cache_get(monitor->netfs_page);
-	page_cache_get(backpage);
+	get_page(monitor->netfs_page);
+	get_page(backpage);
 	monitor->back_page = backpage;
 	monitor->monitor.private = backpage;
 	add_page_wait_queue(backpage, &monitor->monitor);
@@ -310,7 +310,7 @@ backing_page_already_present:
 	_debug("- present");
 
 	if (newpage) {
-		page_cache_release(newpage);
+		put_page(newpage);
 		newpage = NULL;
 	}
 
@@ -342,7 +342,7 @@ success:
 
 out:
 	if (backpage)
-		page_cache_release(backpage);
+		put_page(backpage);
 	if (monitor) {
 		fscache_put_retrieval(monitor->op);
 		kfree(monitor);
@@ -363,7 +363,7 @@ io_error:
 	goto out;
 
 nomem_page:
-	page_cache_release(newpage);
+	put_page(newpage);
 nomem_monitor:
 	fscache_put_retrieval(monitor->op);
 	kfree(monitor);
@@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
-				page_cache_release(netpage);
+				put_page(netpage);
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		}
 
 		/* install a monitor */
-		page_cache_get(netpage);
+		get_page(netpage);
 		monitor->netfs_page = netpage;
 
-		page_cache_get(backpage);
+		get_page(backpage);
 		monitor->back_page = backpage;
 		monitor->monitor.private = backpage;
 		add_page_wait_queue(backpage, &monitor->monitor);
@@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 			unlock_page(backpage);
 		}
 
-		page_cache_release(backpage);
+		put_page(backpage);
 		backpage = NULL;
 
-		page_cache_release(netpage);
+		put_page(netpage);
 		netpage = NULL;
 		continue;
 
@@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
-				page_cache_release(netpage);
+				put_page(netpage);
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 
 		copy_highpage(netpage, backpage);
 
-		page_cache_release(backpage);
+		put_page(backpage);
 		backpage = NULL;
 
 		fscache_mark_page_cached(op, netpage);
 
 		/* the netpage is unlocked and marked up to date here */
 		fscache_end_io(op, netpage, 0);
-		page_cache_release(netpage);
+		put_page(netpage);
 		netpage = NULL;
 		fscache_retrieval_complete(op, 1);
 		continue;
@@ -632,11 +632,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 out:
 	/* tidy up */
 	if (newpage)
-		page_cache_release(newpage);
+		put_page(newpage);
 	if (netpage)
-		page_cache_release(netpage);
+		put_page(netpage);
 	if (backpage)
-		page_cache_release(backpage);
+		put_page(backpage);
 	if (monitor) {
 		fscache_put_retrieval(op);
 		kfree(monitor);
@@ -644,7 +644,7 @@ out:
 
 	list_for_each_entry_safe(netpage, _n, list, lru) {
 		list_del(&netpage->lru);
-		page_cache_release(netpage);
+		put_page(netpage);
 		fscache_retrieval_complete(op, 1);
 	}
 
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2a0db2..4801571f51cb 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
 	inode = page->mapping->host;
 	ci = ceph_inode(inode);
 
-	if (offset != 0 || length != PAGE_CACHE_SIZE) {
+	if (offset != 0 || length != PAGE_SIZE) {
 		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
 		     inode, page, page->index, offset, length);
 		return;
@@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		&ceph_inode_to_client(inode)->client->osdc;
 	int err = 0;
 	u64 off = page_offset(page);
-	u64 len = PAGE_CACHE_SIZE;
+	u64 len = PAGE_SIZE;
 
 	if (off >= i_size_read(inode)) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		 */
 		if (off == 0)
 			return -EINVAL;
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 		ceph_fscache_readpage_cancel(inode, page);
 		goto out;
 	}
-	if (err < PAGE_CACHE_SIZE)
+	if (err < PAGE_SIZE)
 		/* zero fill remainder of page */
-		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		zero_user_segment(page, err, PAGE_SIZE);
 	else
 		flush_dcache_page(page);
 
@@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 
 		if (rc < 0 && rc != -ENOENT)
 			goto unlock;
-		if (bytes < (int)PAGE_CACHE_SIZE) {
+		if (bytes < (int)PAGE_SIZE) {
 			/* zero (remainder of) page */
 			int s = bytes < 0 ? 0 : bytes;
-			zero_user_segment(page, s, PAGE_CACHE_SIZE);
+			zero_user_segment(page, s, PAGE_SIZE);
 		}
 		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
 		     page->index);
@@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 		ceph_readpage_to_fscache(inode, page);
 unlock:
 		unlock_page(page);
-		page_cache_release(page);
-		bytes -= PAGE_CACHE_SIZE;
+		put_page(page);
+		bytes -= PAGE_SIZE;
 	}
 	kfree(osd_data->pages);
 }
@@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		if (max && nr_pages == max)
 			break;
 	}
-	len = nr_pages << PAGE_CACHE_SHIFT;
+	len = nr_pages << PAGE_SHIFT;
 	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
 	     off, len);
 	vino = ceph_vino(inode);
@@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
 					  GFP_KERNEL)) {
 			ceph_fscache_uncache_page(inode, page);
-			page_cache_release(page);
+			put_page(page);
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);
 			nr_pages = i;
@@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 	if (rc == 0)
 		goto out;
 
-	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
-		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
+	if (fsc->mount_options->rsize >= PAGE_SIZE)
+		max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
 			>> PAGE_SHIFT;
 
 	dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	long writeback_stat;
 	u64 truncate_size;
 	u32 truncate_seq;
-	int err = 0, len = PAGE_CACHE_SIZE;
+	int err = 0, len = PAGE_SIZE;
 
 	dout("writepage %p idx %lu\n", page, page->index);
 
@@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping,
 	}
 	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
 		wsize = fsc->mount_options->wsize;
-	if (wsize < PAGE_CACHE_SIZE)
-		wsize = PAGE_CACHE_SIZE;
-	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+	if (wsize < PAGE_SIZE)
+		wsize = PAGE_SIZE;
+	max_pages_ever = wsize >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 
@@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 		end = -1;
 		dout(" cyclic, start at %lu\n", start);
 	} else {
-		start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		start = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		should_loop = 0;
@@ -887,7 +887,7 @@ get_more_pages:
 
 			num_ops = 1 + do_sync;
 			strip_unit_end = page->index +
-				((len - 1) >> PAGE_CACHE_SHIFT);
+				((len - 1) >> PAGE_SHIFT);
 
 			BUG_ON(pages);
 			max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@ get_more_pages:
 
 			len = 0;
 		} else if (page->index !=
-			   (offset + len) >> PAGE_CACHE_SHIFT) {
+			   (offset + len) >> PAGE_SHIFT) {
 			if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
 					CEPH_OSD_MAX_OPS)) {
 				redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@ get_more_pages:
 
 			pages[locked_pages] = page;
 			locked_pages++;
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		/* did we get anything? */
@@ -981,7 +981,7 @@ new_request:
 			BUG_ON(IS_ERR(req));
 		}
 		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
-			     PAGE_CACHE_SIZE - offset);
+			     PAGE_SIZE - offset);
 
 		req->r_callback = writepages_finish;
 		req->r_inode = inode;
@@ -1011,7 +1011,7 @@ new_request:
 		}
 
 		set_page_writeback(pages[i]);
-		len += PAGE_CACHE_SIZE;
+		len += PAGE_SIZE;
 	}
 
 	if (snap_size != -1) {
@@ -1020,7 +1020,7 @@ new_request:
 		/* writepages_finish() clears writeback pages
 		 * according to the data length, so make sure
 		 * data length covers all locked pages */
-		u64 min_len = len + 1 - PAGE_CACHE_SIZE;
+		u64 min_len = len + 1 - PAGE_SIZE;
 		len = min(len, (u64)i_size_read(inode) - offset);
 		len = max(len, min_len);
 	}
@@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file,
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	loff_t page_off = pos & PAGE_CACHE_MASK;
-	int pos_in_page = pos & ~PAGE_CACHE_MASK;
+	loff_t page_off = pos & PAGE_MASK;
+	int pos_in_page = pos & ~PAGE_MASK;
 	int end_in_page = pos_in_page + len;
 	loff_t i_size;
 	int r;
@@ -1191,7 +1191,7 @@ retry_locked:
1191 } 1191 }
1192 1192
1193 /* full page? */ 1193 /* full page? */
1194 if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) 1194 if (pos_in_page == 0 && len == PAGE_SIZE)
1195 return 0; 1195 return 0;
1196 1196
1197 /* past end of file? */ 1197 /* past end of file? */
@@ -1199,12 +1199,12 @@ retry_locked:
1199 1199
1200 if (page_off >= i_size || 1200 if (page_off >= i_size ||
1201 (pos_in_page == 0 && (pos+len) >= i_size && 1201 (pos_in_page == 0 && (pos+len) >= i_size &&
1202 end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { 1202 end_in_page - pos_in_page != PAGE_SIZE)) {
1203 dout(" zeroing %p 0 - %d and %d - %d\n", 1203 dout(" zeroing %p 0 - %d and %d - %d\n",
1204 page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); 1204 page, pos_in_page, end_in_page, (int)PAGE_SIZE);
1205 zero_user_segments(page, 1205 zero_user_segments(page,
1206 0, pos_in_page, 1206 0, pos_in_page,
1207 end_in_page, PAGE_CACHE_SIZE); 1207 end_in_page, PAGE_SIZE);
1208 return 0; 1208 return 0;
1209 } 1209 }
1210 1210
@@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
1228{ 1228{
1229 struct inode *inode = file_inode(file); 1229 struct inode *inode = file_inode(file);
1230 struct page *page; 1230 struct page *page;
1231 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1231 pgoff_t index = pos >> PAGE_SHIFT;
1232 int r; 1232 int r;
1233 1233
1234 do { 1234 do {
@@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
1242 1242
1243 r = ceph_update_writeable_page(file, pos, len, page); 1243 r = ceph_update_writeable_page(file, pos, len, page);
1244 if (r < 0) 1244 if (r < 0)
1245 page_cache_release(page); 1245 put_page(page);
1246 else 1246 else
1247 *pagep = page; 1247 *pagep = page;
1248 } while (r == -EAGAIN); 1248 } while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
1259 struct page *page, void *fsdata) 1259 struct page *page, void *fsdata)
1260{ 1260{
1261 struct inode *inode = file_inode(file); 1261 struct inode *inode = file_inode(file);
1262 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1262 unsigned from = pos & (PAGE_SIZE - 1);
1263 int check_cap = 0; 1263 int check_cap = 0;
1264 1264
1265 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1265 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
1279 set_page_dirty(page); 1279 set_page_dirty(page);
1280 1280
1281 unlock_page(page); 1281 unlock_page(page);
1282 page_cache_release(page); 1282 put_page(page);
1283 1283
1284 if (check_cap) 1284 if (check_cap)
1285 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1285 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1322 struct ceph_inode_info *ci = ceph_inode(inode); 1322 struct ceph_inode_info *ci = ceph_inode(inode);
1323 struct ceph_file_info *fi = vma->vm_file->private_data; 1323 struct ceph_file_info *fi = vma->vm_file->private_data;
1324 struct page *pinned_page = NULL; 1324 struct page *pinned_page = NULL;
1325 loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT; 1325 loff_t off = vmf->pgoff << PAGE_SHIFT;
1326 int want, got, ret; 1326 int want, got, ret;
1327 1327
1328 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", 1328 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
1329 inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE); 1329 inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
1330 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1330 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1331 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1331 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1332 else 1332 else
@@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1343 } 1343 }
1344 } 1344 }
1345 dout("filemap_fault %p %llu~%zd got cap refs on %s\n", 1345 dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
1346 inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got)); 1346 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
1347 1347
1348 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1348 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
1349 ci->i_inline_version == CEPH_INLINE_NONE) 1349 ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1352 ret = -EAGAIN; 1352 ret = -EAGAIN;
1353 1353
1354 dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", 1354 dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
1355 inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret); 1355 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
1356 if (pinned_page) 1356 if (pinned_page)
1357 page_cache_release(pinned_page); 1357 put_page(pinned_page);
1358 ceph_put_cap_refs(ci, got); 1358 ceph_put_cap_refs(ci, got);
1359 1359
1360 if (ret != -EAGAIN) 1360 if (ret != -EAGAIN)
1361 return ret; 1361 return ret;
1362 1362
1363 /* read inline data */ 1363 /* read inline data */
1364 if (off >= PAGE_CACHE_SIZE) { 1364 if (off >= PAGE_SIZE) {
1365 /* does not support inline data > PAGE_SIZE */ 1365 /* does not support inline data > PAGE_SIZE */
1366 ret = VM_FAULT_SIGBUS; 1366 ret = VM_FAULT_SIGBUS;
1367 } else { 1367 } else {
@@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1378 CEPH_STAT_CAP_INLINE_DATA, true); 1378 CEPH_STAT_CAP_INLINE_DATA, true);
1379 if (ret1 < 0 || off >= i_size_read(inode)) { 1379 if (ret1 < 0 || off >= i_size_read(inode)) {
1380 unlock_page(page); 1380 unlock_page(page);
1381 page_cache_release(page); 1381 put_page(page);
1382 ret = VM_FAULT_SIGBUS; 1382 ret = VM_FAULT_SIGBUS;
1383 goto out; 1383 goto out;
1384 } 1384 }
1385 if (ret1 < PAGE_CACHE_SIZE) 1385 if (ret1 < PAGE_SIZE)
1386 zero_user_segment(page, ret1, PAGE_CACHE_SIZE); 1386 zero_user_segment(page, ret1, PAGE_SIZE);
1387 else 1387 else
1388 flush_dcache_page(page); 1388 flush_dcache_page(page);
1389 SetPageUptodate(page); 1389 SetPageUptodate(page);
@@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1392 } 1392 }
1393out: 1393out:
1394 dout("filemap_fault %p %llu~%zd read inline data ret %d\n", 1394 dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
1395 inode, off, (size_t)PAGE_CACHE_SIZE, ret); 1395 inode, off, (size_t)PAGE_SIZE, ret);
1396 return ret; 1396 return ret;
1397} 1397}
1398 1398
@@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1430 } 1430 }
1431 } 1431 }
1432 1432
1433 if (off + PAGE_CACHE_SIZE <= size) 1433 if (off + PAGE_SIZE <= size)
1434 len = PAGE_CACHE_SIZE; 1434 len = PAGE_SIZE;
1435 else 1435 else
1436 len = size & ~PAGE_CACHE_MASK; 1436 len = size & ~PAGE_MASK;
1437 1437
1438 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1438 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
1439 inode, ceph_vinop(inode), off, len, size); 1439 inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1519 return; 1519 return;
1520 if (PageUptodate(page)) { 1520 if (PageUptodate(page)) {
1521 unlock_page(page); 1521 unlock_page(page);
1522 page_cache_release(page); 1522 put_page(page);
1523 return; 1523 return;
1524 } 1524 }
1525 } 1525 }
@@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1534 } 1534 }
1535 1535
1536 if (page != locked_page) { 1536 if (page != locked_page) {
1537 if (len < PAGE_CACHE_SIZE) 1537 if (len < PAGE_SIZE)
1538 zero_user_segment(page, len, PAGE_CACHE_SIZE); 1538 zero_user_segment(page, len, PAGE_SIZE);
1539 else 1539 else
1540 flush_dcache_page(page); 1540 flush_dcache_page(page);
1541 1541
1542 SetPageUptodate(page); 1542 SetPageUptodate(page);
1543 unlock_page(page); 1543 unlock_page(page);
1544 page_cache_release(page); 1544 put_page(page);
1545 } 1545 }
1546} 1546}
1547 1547
@@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
1578 from_pagecache = true; 1578 from_pagecache = true;
1579 lock_page(page); 1579 lock_page(page);
1580 } else { 1580 } else {
1581 page_cache_release(page); 1581 put_page(page);
1582 page = NULL; 1582 page = NULL;
1583 } 1583 }
1584 } 1584 }
@@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
1586 1586
1587 if (page) { 1587 if (page) {
1588 len = i_size_read(inode); 1588 len = i_size_read(inode);
1589 if (len > PAGE_CACHE_SIZE) 1589 if (len > PAGE_SIZE)
1590 len = PAGE_CACHE_SIZE; 1590 len = PAGE_SIZE;
1591 } else { 1591 } else {
1592 page = __page_cache_alloc(GFP_NOFS); 1592 page = __page_cache_alloc(GFP_NOFS);
1593 if (!page) { 1593 if (!page) {
@@ -1670,7 +1670,7 @@ out:
1670 if (page && page != locked_page) { 1670 if (page && page != locked_page) {
1671 if (from_pagecache) { 1671 if (from_pagecache) {
1672 unlock_page(page); 1672 unlock_page(page);
1673 page_cache_release(page); 1673 put_page(page);
1674 } else 1674 } else
1675 __free_pages(page, 0); 1675 __free_pages(page, 0);
1676 } 1676 }
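
Note on the fs/ceph/addr.c hunks above: the substitutions are mechanical. PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK were plain aliases of PAGE_SIZE, PAGE_SHIFT and PAGE_MASK in <linux/pagemap.h>, and page_cache_release() an alias of put_page(), so behavior is unchanged. A minimal userspace sketch of the index/offset arithmetic these macros carry, assuming 4 KiB pages (the kernel derives the real values per architecture):

    #include <stdio.h>

    #define PAGE_SHIFT 12                 /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long pos = 1234567;  /* arbitrary file offset */

            unsigned long index = pos >> PAGE_SHIFT;        /* page index, cf. ceph_write_begin() */
            unsigned long in_page = pos & (PAGE_SIZE - 1);  /* offset within that page */
            unsigned long page_off = pos & PAGE_MASK;       /* start of the containing page */

            printf("index=%lu in_page=%lu page_off=%lu\n",
                   index, in_page, page_off);
            return 0;
    }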
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index de17bb232ff8..cfaeef18cbca 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2510,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2510 *pinned_page = page; 2510 *pinned_page = page;
2511 break; 2511 break;
2512 } 2512 }
2513 page_cache_release(page); 2513 put_page(page);
2514 } 2514 }
2515 /* 2515 /*
2516 * drop cap refs first because getattr while 2516 * drop cap refs first because getattr while
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fadc243dfb28..4fb2bbc2a272 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -129,7 +129,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
129 struct inode *dir = d_inode(parent); 129 struct inode *dir = d_inode(parent);
130 struct dentry *dentry, *last = NULL; 130 struct dentry *dentry, *last = NULL;
131 struct ceph_dentry_info *di; 131 struct ceph_dentry_info *di;
132 unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *); 132 unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
133 int err = 0; 133 int err = 0;
134 loff_t ptr_pos = 0; 134 loff_t ptr_pos = 0;
135 struct ceph_readdir_cache_control cache_ctl = {}; 135 struct ceph_readdir_cache_control cache_ctl = {};
@@ -154,7 +154,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
154 } 154 }
155 155
156 err = -EAGAIN; 156 err = -EAGAIN;
157 pgoff = ptr_pos >> PAGE_CACHE_SHIFT; 157 pgoff = ptr_pos >> PAGE_SHIFT;
158 if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) { 158 if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
159 ceph_readdir_cache_release(&cache_ctl); 159 ceph_readdir_cache_release(&cache_ctl);
160 cache_ctl.page = find_lock_page(&dir->i_data, pgoff); 160 cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
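
The __dcache_readdir() hunk keeps the readdir-cache layout intact: each page cache page of the directory holds an array of dentry pointers, so nsize entries fit per page and a linear position maps to a (page, slot) pair by division. A sketch of that mapping, with sizeof(void *) standing in for sizeof(struct dentry *) and a 4 KiB page assumed; the fill_readdir_cache() hunk in fs/ceph/inode.c further down reuses the same formula:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL              /* assumed page size */

    int main(void)
    {
            unsigned long nsize = PAGE_SIZE / sizeof(void *); /* pointers per page */
            unsigned long index = 1000;                       /* hypothetical cache index */

            printf("nsize=%lu pgoff=%lu idx=%lu\n",
                   nsize, index / nsize, index % nsize);
            return 0;
    }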
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ef38f01c1795..a79f9269831e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -466,7 +466,7 @@ more:
466 ret += zlen; 466 ret += zlen;
467 } 467 }
468 468
469 didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; 469 didpages = (page_align + ret) >> PAGE_SHIFT;
470 pos += ret; 470 pos += ret;
471 read = pos - off; 471 read = pos - off;
472 left -= ret; 472 left -= ret;
@@ -806,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
806 806
807 if (write) { 807 if (write) {
808 ret = invalidate_inode_pages2_range(inode->i_mapping, 808 ret = invalidate_inode_pages2_range(inode->i_mapping,
809 pos >> PAGE_CACHE_SHIFT, 809 pos >> PAGE_SHIFT,
810 (pos + count) >> PAGE_CACHE_SHIFT); 810 (pos + count) >> PAGE_SHIFT);
811 if (ret < 0) 811 if (ret < 0)
812 dout("invalidate_inode_pages2_range returned %d\n", ret); 812 dout("invalidate_inode_pages2_range returned %d\n", ret);
813 813
@@ -872,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
872 * may block. 872 * may block.
873 */ 873 */
874 truncate_inode_pages_range(inode->i_mapping, pos, 874 truncate_inode_pages_range(inode->i_mapping, pos,
875 (pos+len) | (PAGE_CACHE_SIZE - 1)); 875 (pos+len) | (PAGE_SIZE - 1));
876 876
877 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 877 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
878 } 878 }
@@ -1006,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1006 return ret; 1006 return ret;
1007 1007
1008 ret = invalidate_inode_pages2_range(inode->i_mapping, 1008 ret = invalidate_inode_pages2_range(inode->i_mapping,
1009 pos >> PAGE_CACHE_SHIFT, 1009 pos >> PAGE_SHIFT,
1010 (pos + count) >> PAGE_CACHE_SHIFT); 1010 (pos + count) >> PAGE_SHIFT);
1011 if (ret < 0) 1011 if (ret < 0)
1012 dout("invalidate_inode_pages2_range returned %d\n", ret); 1012 dout("invalidate_inode_pages2_range returned %d\n", ret);
1013 1013
@@ -1036,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1036 * write from beginning of first page, 1036 * write from beginning of first page,
1037 * regardless of io alignment 1037 * regardless of io alignment
1038 */ 1038 */
1039 num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1039 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1040 1040
1041 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1041 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1042 if (IS_ERR(pages)) { 1042 if (IS_ERR(pages)) {
@@ -1159,7 +1159,7 @@ again:
1159 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1159 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1160 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1160 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1161 if (pinned_page) { 1161 if (pinned_page) {
1162 page_cache_release(pinned_page); 1162 put_page(pinned_page);
1163 pinned_page = NULL; 1163 pinned_page = NULL;
1164 } 1164 }
1165 ceph_put_cap_refs(ci, got); 1165 ceph_put_cap_refs(ci, got);
@@ -1188,10 +1188,10 @@ again:
1188 if (retry_op == READ_INLINE) { 1188 if (retry_op == READ_INLINE) {
1189 BUG_ON(ret > 0 || read > 0); 1189 BUG_ON(ret > 0 || read > 0);
1190 if (iocb->ki_pos < i_size && 1190 if (iocb->ki_pos < i_size &&
1191 iocb->ki_pos < PAGE_CACHE_SIZE) { 1191 iocb->ki_pos < PAGE_SIZE) {
1192 loff_t end = min_t(loff_t, i_size, 1192 loff_t end = min_t(loff_t, i_size,
1193 iocb->ki_pos + len); 1193 iocb->ki_pos + len);
1194 end = min_t(loff_t, end, PAGE_CACHE_SIZE); 1194 end = min_t(loff_t, end, PAGE_SIZE);
1195 if (statret < end) 1195 if (statret < end)
1196 zero_user_segment(page, statret, end); 1196 zero_user_segment(page, statret, end);
1197 ret = copy_page_to_iter(page, 1197 ret = copy_page_to_iter(page,
@@ -1463,21 +1463,21 @@ static inline void ceph_zero_partial_page(
1463 struct inode *inode, loff_t offset, unsigned size) 1463 struct inode *inode, loff_t offset, unsigned size)
1464{ 1464{
1465 struct page *page; 1465 struct page *page;
1466 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 1466 pgoff_t index = offset >> PAGE_SHIFT;
1467 1467
1468 page = find_lock_page(inode->i_mapping, index); 1468 page = find_lock_page(inode->i_mapping, index);
1469 if (page) { 1469 if (page) {
1470 wait_on_page_writeback(page); 1470 wait_on_page_writeback(page);
1471 zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size); 1471 zero_user(page, offset & (PAGE_SIZE - 1), size);
1472 unlock_page(page); 1472 unlock_page(page);
1473 page_cache_release(page); 1473 put_page(page);
1474 } 1474 }
1475} 1475}
1476 1476
1477static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1477static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1478 loff_t length) 1478 loff_t length)
1479{ 1479{
1480 loff_t nearly = round_up(offset, PAGE_CACHE_SIZE); 1480 loff_t nearly = round_up(offset, PAGE_SIZE);
1481 if (offset < nearly) { 1481 if (offset < nearly) {
1482 loff_t size = nearly - offset; 1482 loff_t size = nearly - offset;
1483 if (length < size) 1483 if (length < size)
@@ -1486,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1486 offset += size; 1486 offset += size;
1487 length -= size; 1487 length -= size;
1488 } 1488 }
1489 if (length >= PAGE_CACHE_SIZE) { 1489 if (length >= PAGE_SIZE) {
1490 loff_t size = round_down(length, PAGE_CACHE_SIZE); 1490 loff_t size = round_down(length, PAGE_SIZE);
1491 truncate_pagecache_range(inode, offset, offset + size - 1); 1491 truncate_pagecache_range(inode, offset, offset + size - 1);
1492 offset += size; 1492 offset += size;
1493 length -= size; 1493 length -= size;
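
Two idioms recur in the fs/ceph/file.c hunks: rounding a byte count up to whole pages, as in num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT, and clamping a zeroing range to page boundaries with round_up()/round_down() in ceph_zero_pagecache_range(). A sketch of both under the usual 4 KiB assumption (round_up/round_down reimplemented here for power-of-two sizes):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1) /* y must be a power of two */
    #define round_down(x, y) ((x) & ~((y) - 1))

    int main(void)
    {
            unsigned long len = 10000, offset = 5000;  /* hypothetical write */

            unsigned long num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; /* 3 */
            unsigned long nearly = round_up(offset, PAGE_SIZE);            /* 8192 */
            unsigned long whole = round_down(len, PAGE_SIZE);              /* 8192 */

            printf("num_pages=%lu nearly=%lu whole=%lu\n",
                   num_pages, nearly, whole);
            return 0;
    }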
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ed58b168904a..edfade037738 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1338,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1338{ 1338{
1339 if (ctl->page) { 1339 if (ctl->page) {
1340 kunmap(ctl->page); 1340 kunmap(ctl->page);
1341 page_cache_release(ctl->page); 1341 put_page(ctl->page);
1342 ctl->page = NULL; 1342 ctl->page = NULL;
1343 } 1343 }
1344} 1344}
@@ -1348,7 +1348,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1348 struct ceph_mds_request *req) 1348 struct ceph_mds_request *req)
1349{ 1349{
1350 struct ceph_inode_info *ci = ceph_inode(dir); 1350 struct ceph_inode_info *ci = ceph_inode(dir);
1351 unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*); 1351 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1352 unsigned idx = ctl->index % nsize; 1352 unsigned idx = ctl->index % nsize;
1353 pgoff_t pgoff = ctl->index / nsize; 1353 pgoff_t pgoff = ctl->index / nsize;
1354 1354
@@ -1367,7 +1367,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1367 unlock_page(ctl->page); 1367 unlock_page(ctl->page);
1368 ctl->dentries = kmap(ctl->page); 1368 ctl->dentries = kmap(ctl->page);
1369 if (idx == 0) 1369 if (idx == 0)
1370 memset(ctl->dentries, 0, PAGE_CACHE_SIZE); 1370 memset(ctl->dentries, 0, PAGE_SIZE);
1371 } 1371 }
1372 1372
1373 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && 1373 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 44852c3ae531..541ead4d8965 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1610,7 +1610,7 @@ again:
1610 while (!list_empty(&tmp_list)) { 1610 while (!list_empty(&tmp_list)) {
1611 if (!msg) { 1611 if (!msg) {
1612 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, 1612 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1613 PAGE_CACHE_SIZE, GFP_NOFS, false); 1613 PAGE_SIZE, GFP_NOFS, false);
1614 if (!msg) 1614 if (!msg)
1615 goto out_err; 1615 goto out_err;
1616 head = msg->front.iov_base; 1616 head = msg->front.iov_base;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 37712ccffcc6..ee69a537dba5 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed {
97/* 97/*
98 * cap releases are batched and sent to the MDS en masse. 98 * cap releases are batched and sent to the MDS en masse.
99 */ 99 */
100#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \ 100#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - \
101 sizeof(struct ceph_mds_cap_release)) / \ 101 sizeof(struct ceph_mds_cap_release)) / \
102 sizeof(struct ceph_mds_cap_item)) 102 sizeof(struct ceph_mds_cap_item))
103 103
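
CEPH_CAPS_PER_RELEASE pairs with the ceph_msg_new(..., PAGE_SIZE, ...) call in the mds_client.c hunk above: one page-sized message front holds a ceph_mds_cap_release header followed by as many ceph_mds_cap_item entries as fit. A worked example with assumed struct sizes (the real sizes come from the ceph wire-protocol headers):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            size_t head = 16;  /* assumed sizeof(struct ceph_mds_cap_release) */
            size_t item = 24;  /* assumed sizeof(struct ceph_mds_cap_item)    */

            printf("cap items per message: %zu\n",
                   (PAGE_SIZE - head) / item);  /* 170 with these sizes */
            return 0;
    }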
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c973043deb0e..f12d5e2955c2 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
560 560
561 /* set up mempools */ 561 /* set up mempools */
562 err = -ENOMEM; 562 err = -ENOMEM;
563 page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; 563 page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
564 size = sizeof (struct page *) * (page_count ? page_count : 1); 564 size = sizeof (struct page *) * (page_count ? page_count : 1);
565 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); 565 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
566 if (!fsc->wb_pagevec_pool) 566 if (!fsc->wb_pagevec_pool)
@@ -912,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb,
912 int err; 912 int err;
913 913
914 /* set ra_pages based on rasize mount option? */ 914 /* set ra_pages based on rasize mount option? */
915 if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE) 915 if (fsc->mount_options->rasize >= PAGE_SIZE)
916 fsc->backing_dev_info.ra_pages = 916 fsc->backing_dev_info.ra_pages =
917 (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1) 917 (fsc->mount_options->rasize + PAGE_SIZE - 1)
918 >> PAGE_SHIFT; 918 >> PAGE_SHIFT;
919 else 919 else
920 fsc->backing_dev_info.ra_pages = 920 fsc->backing_dev_info.ra_pages =
921 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; 921 VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
922 922
923 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", 923 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
924 atomic_long_inc_return(&bdi_seq)); 924 atomic_long_inc_return(&bdi_seq));
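
Worth noting in the ceph_register_bdi() hunk: the pre-image already shifted by PAGE_SHIFT on line 918 while adding PAGE_CACHE_SIZE - 1 on line 917, i.e. the two macro families were freely mixed, which is the redundancy this conversion removes. The computation itself rounds the rasize mount option up to whole readahead pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long rasize = 8292;  /* hypothetical rasize= option, bytes */

            printf("ra_pages=%lu\n",
                   (rasize + PAGE_SIZE - 1) >> PAGE_SHIFT);  /* 3, rounded up */
            return 0;
    }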
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 1d86fc620e5c..89201564c346 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
962 cifs_dbg(FYI, "about to flush pages\n"); 962 cifs_dbg(FYI, "about to flush pages\n");
963 /* should we flush first and last page first */ 963 /* should we flush first and last page first */
964 truncate_inode_pages_range(&target_inode->i_data, destoff, 964 truncate_inode_pages_range(&target_inode->i_data, destoff,
965 PAGE_CACHE_ALIGN(destoff + len)-1); 965 PAGE_ALIGN(destoff + len)-1);
966 966
967 if (target_tcon->ses->server->ops->duplicate_extents) 967 if (target_tcon->ses->server->ops->duplicate_extents)
968 rc = target_tcon->ses->server->ops->duplicate_extents(xid, 968 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
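
PAGE_ALIGN(x) rounds x up to the next page boundary (PAGE_CACHE_ALIGN was the same computation spelled with the old macros), so the truncate range above covers every page the clone destination touches; the trailing -1 makes the end offset inclusive. Sketch, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long destoff = 100, len = 5000;  /* hypothetical clone args */

            printf("truncate end=%lu\n",
                   PAGE_ALIGN(destoff + len) - 1);    /* 8191: end of page 1 */
            return 0;
    }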
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f05bae..f2cc0b3d1af7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
714 * 714 *
715 * Note that this might make for "interesting" allocation problems during 715 * Note that this might make for "interesting" allocation problems during
716 * writeback however as we have to allocate an array of pointers for the 716 * writeback however as we have to allocate an array of pointers for the
717 * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. 717 * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
718 * 718 *
719 * For reads, there is a similar problem as we need to allocate an array 719 * For reads, there is a similar problem as we need to allocate an array
720 * of kvecs to handle the receive, though that should only need to be done 720 * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
733 733
734/* 734/*
735 * The default wsize is 1M. find_get_pages seems to return a maximum of 256 735 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
736 * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill 736 * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
737 * a single wsize request with a single call. 737 * a single wsize request with a single call.
738 */ 738 */
739#define CIFS_DEFAULT_IOSIZE (1024 * 1024) 739#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
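
The renamed comments in cifsglob.h still add up: a 16 MiB write spans 16 MiB / 4 KiB = 4096 pages, and 4096 page pointers at 8 bytes each occupy 32 KiB, hence the "~32kb page array with PAGE_SIZE == 4096" figure. As a check:

    #include <stdio.h>

    int main(void)
    {
            unsigned long wsize = 16UL << 20;       /* 16 MiB write */
            unsigned long pages = wsize / 4096;     /* 4096 pages   */

            printf("page array: %lu bytes\n",
                   pages * sizeof(void *));         /* 32768 on a 64-bit build */
            return 0;
    }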
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 76fcb50295a3..a894bf809ff7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1929 1929
1930 wsize = server->ops->wp_retry_size(inode); 1930 wsize = server->ops->wp_retry_size(inode);
1931 if (wsize < rest_len) { 1931 if (wsize < rest_len) {
1932 nr_pages = wsize / PAGE_CACHE_SIZE; 1932 nr_pages = wsize / PAGE_SIZE;
1933 if (!nr_pages) { 1933 if (!nr_pages) {
1934 rc = -ENOTSUPP; 1934 rc = -ENOTSUPP;
1935 break; 1935 break;
1936 } 1936 }
1937 cur_len = nr_pages * PAGE_CACHE_SIZE; 1937 cur_len = nr_pages * PAGE_SIZE;
1938 tailsz = PAGE_CACHE_SIZE; 1938 tailsz = PAGE_SIZE;
1939 } else { 1939 } else {
1940 nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE); 1940 nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
1941 cur_len = rest_len; 1941 cur_len = rest_len;
1942 tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE; 1942 tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
1943 } 1943 }
1944 1944
1945 wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); 1945 wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
@@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1957 wdata2->sync_mode = wdata->sync_mode; 1957 wdata2->sync_mode = wdata->sync_mode;
1958 wdata2->nr_pages = nr_pages; 1958 wdata2->nr_pages = nr_pages;
1959 wdata2->offset = page_offset(wdata2->pages[0]); 1959 wdata2->offset = page_offset(wdata2->pages[0]);
1960 wdata2->pagesz = PAGE_CACHE_SIZE; 1960 wdata2->pagesz = PAGE_SIZE;
1961 wdata2->tailsz = tailsz; 1961 wdata2->tailsz = tailsz;
1962 wdata2->bytes = cur_len; 1962 wdata2->bytes = cur_len;
1963 1963
@@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1975 if (rc != 0 && rc != -EAGAIN) { 1975 if (rc != 0 && rc != -EAGAIN) {
1976 SetPageError(wdata2->pages[j]); 1976 SetPageError(wdata2->pages[j]);
1977 end_page_writeback(wdata2->pages[j]); 1977 end_page_writeback(wdata2->pages[j]);
1978 page_cache_release(wdata2->pages[j]); 1978 put_page(wdata2->pages[j]);
1979 } 1979 }
1980 } 1980 }
1981 1981
@@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work)
2018 else if (wdata->result < 0) 2018 else if (wdata->result < 0)
2019 SetPageError(page); 2019 SetPageError(page);
2020 end_page_writeback(page); 2020 end_page_writeback(page);
2021 page_cache_release(page); 2021 put_page(page);
2022 } 2022 }
2023 if (wdata->result != -EAGAIN) 2023 if (wdata->result != -EAGAIN)
2024 mapping_set_error(inode->i_mapping, wdata->result); 2024 mapping_set_error(inode->i_mapping, wdata->result);
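
cifs_writev_requeue() splits the failed write into chunks the server's current retry size can carry: when wsize is the limit it sends whole pages with a full-page tail, otherwise the tail is whatever the last page actually holds. The split reduces to this arithmetic (values hypothetical):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long rest_len = 10000, wsize = 32768;
            unsigned long nr_pages, cur_len, tailsz;

            if (wsize < rest_len) {
                    nr_pages = wsize / PAGE_SIZE;   /* whole pages only */
                    cur_len  = nr_pages * PAGE_SIZE;
                    tailsz   = PAGE_SIZE;
            } else {
                    nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
                    cur_len  = rest_len;
                    tailsz   = rest_len - (nr_pages - 1) * PAGE_SIZE;
            }
            printf("nr_pages=%lu cur_len=%lu tailsz=%lu\n",
                   nr_pages, cur_len, tailsz);      /* 3 10000 1808 */
            return 0;
    }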
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a763cd3d9e7c..6f62ac821a84 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3630,7 +3630,7 @@ try_mount_again:
3630 cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info); 3630 cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
3631 3631
3632 /* tune readahead according to rsize */ 3632 /* tune readahead according to rsize */
3633 cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; 3633 cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;
3634 3634
3635remote_path_check: 3635remote_path_check:
3636#ifdef CONFIG_CIFS_DFS_UPCALL 3636#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ff882aeaccc6..c03d0744648b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1833,7 +1833,7 @@ refind_writable:
1833static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) 1833static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1834{ 1834{
1835 struct address_space *mapping = page->mapping; 1835 struct address_space *mapping = page->mapping;
1836 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 1836 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1837 char *write_data; 1837 char *write_data;
1838 int rc = -EFAULT; 1838 int rc = -EFAULT;
1839 int bytes_written = 0; 1839 int bytes_written = 0;
@@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1849 write_data = kmap(page); 1849 write_data = kmap(page);
1850 write_data += from; 1850 write_data += from;
1851 1851
1852 if ((to > PAGE_CACHE_SIZE) || (from > to)) { 1852 if ((to > PAGE_SIZE) || (from > to)) {
1853 kunmap(page); 1853 kunmap(page);
1854 return -EIO; 1854 return -EIO;
1855 } 1855 }
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1902 * find_get_pages_tag seems to return a max of 256 on each 1902 * find_get_pages_tag seems to return a max of 256 on each
1903 * iteration, so we must call it several times in order to 1903 * iteration, so we must call it several times in order to
1904 * fill the array or the wsize is effectively limited to 1904 * fill the array or the wsize is effectively limited to
1905 * 256 * PAGE_CACHE_SIZE. 1905 * 256 * PAGE_SIZE.
1906 */ 1906 */
1907 *found_pages = 0; 1907 *found_pages = 0;
1908 pages = wdata->pages; 1908 pages = wdata->pages;
@@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1991 1991
1992 /* put any pages we aren't going to use */ 1992 /* put any pages we aren't going to use */
1993 for (i = nr_pages; i < found_pages; i++) { 1993 for (i = nr_pages; i < found_pages; i++) {
1994 page_cache_release(wdata->pages[i]); 1994 put_page(wdata->pages[i]);
1995 wdata->pages[i] = NULL; 1995 wdata->pages[i] = NULL;
1996 } 1996 }
1997 1997
@@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2009 wdata->sync_mode = wbc->sync_mode; 2009 wdata->sync_mode = wbc->sync_mode;
2010 wdata->nr_pages = nr_pages; 2010 wdata->nr_pages = nr_pages;
2011 wdata->offset = page_offset(wdata->pages[0]); 2011 wdata->offset = page_offset(wdata->pages[0]);
2012 wdata->pagesz = PAGE_CACHE_SIZE; 2012 wdata->pagesz = PAGE_SIZE;
2013 wdata->tailsz = min(i_size_read(mapping->host) - 2013 wdata->tailsz = min(i_size_read(mapping->host) -
2014 page_offset(wdata->pages[nr_pages - 1]), 2014 page_offset(wdata->pages[nr_pages - 1]),
2015 (loff_t)PAGE_CACHE_SIZE); 2015 (loff_t)PAGE_SIZE);
2016 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; 2016 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2017 2017
2018 if (wdata->cfile != NULL) 2018 if (wdata->cfile != NULL)
2019 cifsFileInfo_put(wdata->cfile); 2019 cifsFileInfo_put(wdata->cfile);
@@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping,
2047 * If wsize is smaller than the page cache size, default to writing 2047 * If wsize is smaller than the page cache size, default to writing
2048 * one page at a time via cifs_writepage 2048 * one page at a time via cifs_writepage
2049 */ 2049 */
2050 if (cifs_sb->wsize < PAGE_CACHE_SIZE) 2050 if (cifs_sb->wsize < PAGE_SIZE)
2051 return generic_writepages(mapping, wbc); 2051 return generic_writepages(mapping, wbc);
2052 2052
2053 if (wbc->range_cyclic) { 2053 if (wbc->range_cyclic) {
2054 index = mapping->writeback_index; /* Start from prev offset */ 2054 index = mapping->writeback_index; /* Start from prev offset */
2055 end = -1; 2055 end = -1;
2056 } else { 2056 } else {
2057 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2057 index = wbc->range_start >> PAGE_SHIFT;
2058 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2058 end = wbc->range_end >> PAGE_SHIFT;
2059 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2059 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2060 range_whole = true; 2060 range_whole = true;
2061 scanned = true; 2061 scanned = true;
@@ -2071,7 +2071,7 @@ retry:
2071 if (rc) 2071 if (rc)
2072 break; 2072 break;
2073 2073
2074 tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; 2074 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2075 2075
2076 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index, 2076 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2077 &found_pages); 2077 &found_pages);
@@ -2111,7 +2111,7 @@ retry:
2111 else 2111 else
2112 SetPageError(wdata->pages[i]); 2112 SetPageError(wdata->pages[i]);
2113 end_page_writeback(wdata->pages[i]); 2113 end_page_writeback(wdata->pages[i]);
2114 page_cache_release(wdata->pages[i]); 2114 put_page(wdata->pages[i]);
2115 } 2115 }
2116 if (rc != -EAGAIN) 2116 if (rc != -EAGAIN)
2117 mapping_set_error(mapping, rc); 2117 mapping_set_error(mapping, rc);
@@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2154 2154
2155 xid = get_xid(); 2155 xid = get_xid();
2156/* BB add check for wbc flags */ 2156/* BB add check for wbc flags */
2157 page_cache_get(page); 2157 get_page(page);
2158 if (!PageUptodate(page)) 2158 if (!PageUptodate(page))
2159 cifs_dbg(FYI, "ppw - page not up to date\n"); 2159 cifs_dbg(FYI, "ppw - page not up to date\n");
2160 2160
@@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2170 */ 2170 */
2171 set_page_writeback(page); 2171 set_page_writeback(page);
2172retry_write: 2172retry_write:
2173 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); 2173 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2174 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL) 2174 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2175 goto retry_write; 2175 goto retry_write;
2176 else if (rc == -EAGAIN) 2176 else if (rc == -EAGAIN)
@@ -2180,7 +2180,7 @@ retry_write:
2180 else 2180 else
2181 SetPageUptodate(page); 2181 SetPageUptodate(page);
2182 end_page_writeback(page); 2182 end_page_writeback(page);
2183 page_cache_release(page); 2183 put_page(page);
2184 free_xid(xid); 2184 free_xid(xid);
2185 return rc; 2185 return rc;
2186} 2186}
@@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
2214 if (copied == len) 2214 if (copied == len)
2215 SetPageUptodate(page); 2215 SetPageUptodate(page);
2216 ClearPageChecked(page); 2216 ClearPageChecked(page);
2217 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) 2217 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2218 SetPageUptodate(page); 2218 SetPageUptodate(page);
2219 2219
2220 if (!PageUptodate(page)) { 2220 if (!PageUptodate(page)) {
2221 char *page_data; 2221 char *page_data;
2222 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 2222 unsigned offset = pos & (PAGE_SIZE - 1);
2223 unsigned int xid; 2223 unsigned int xid;
2224 2224
2225 xid = get_xid(); 2225 xid = get_xid();
@@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
2248 } 2248 }
2249 2249
2250 unlock_page(page); 2250 unlock_page(page);
2251 page_cache_release(page); 2251 put_page(page);
2252 2252
2253 return rc; 2253 return rc;
2254} 2254}
@@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work)
3286 (rdata->result == -EAGAIN && got_bytes)) 3286 (rdata->result == -EAGAIN && got_bytes))
3287 cifs_readpage_to_fscache(rdata->mapping->host, page); 3287 cifs_readpage_to_fscache(rdata->mapping->host, page);
3288 3288
3289 got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes); 3289 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3290 3290
3291 page_cache_release(page); 3291 put_page(page);
3292 rdata->pages[i] = NULL; 3292 rdata->pages[i] = NULL;
3293 } 3293 }
3294 kref_put(&rdata->refcount, cifs_readdata_release); 3294 kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3307 3307
3308 /* determine the eof that the server (probably) has */ 3308 /* determine the eof that the server (probably) has */
3309 eof = CIFS_I(rdata->mapping->host)->server_eof; 3309 eof = CIFS_I(rdata->mapping->host)->server_eof;
3310 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; 3310 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
3311 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); 3311 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3312 3312
3313 rdata->got_bytes = 0; 3313 rdata->got_bytes = 0;
3314 rdata->tailsz = PAGE_CACHE_SIZE; 3314 rdata->tailsz = PAGE_SIZE;
3315 for (i = 0; i < nr_pages; i++) { 3315 for (i = 0; i < nr_pages; i++) {
3316 struct page *page = rdata->pages[i]; 3316 struct page *page = rdata->pages[i];
3317 3317
3318 if (len >= PAGE_CACHE_SIZE) { 3318 if (len >= PAGE_SIZE) {
3319 /* enough data to fill the page */ 3319 /* enough data to fill the page */
3320 iov.iov_base = kmap(page); 3320 iov.iov_base = kmap(page);
3321 iov.iov_len = PAGE_CACHE_SIZE; 3321 iov.iov_len = PAGE_SIZE;
3322 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", 3322 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3323 i, page->index, iov.iov_base, iov.iov_len); 3323 i, page->index, iov.iov_base, iov.iov_len);
3324 len -= PAGE_CACHE_SIZE; 3324 len -= PAGE_SIZE;
3325 } else if (len > 0) { 3325 } else if (len > 0) {
3326 /* enough for partial page, fill and zero the rest */ 3326 /* enough for partial page, fill and zero the rest */
3327 iov.iov_base = kmap(page); 3327 iov.iov_base = kmap(page);
@@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3329 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", 3329 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3330 i, page->index, iov.iov_base, iov.iov_len); 3330 i, page->index, iov.iov_base, iov.iov_len);
3331 memset(iov.iov_base + len, 3331 memset(iov.iov_base + len,
3332 '\0', PAGE_CACHE_SIZE - len); 3332 '\0', PAGE_SIZE - len);
3333 rdata->tailsz = len; 3333 rdata->tailsz = len;
3334 len = 0; 3334 len = 0;
3335 } else if (page->index > eof_index) { 3335 } else if (page->index > eof_index) {
@@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3341 * to prevent the VFS from repeatedly attempting to 3341 * to prevent the VFS from repeatedly attempting to
3342 * fill them until the writes are flushed. 3342 * fill them until the writes are flushed.
3343 */ 3343 */
3344 zero_user(page, 0, PAGE_CACHE_SIZE); 3344 zero_user(page, 0, PAGE_SIZE);
3345 lru_cache_add_file(page); 3345 lru_cache_add_file(page);
3346 flush_dcache_page(page); 3346 flush_dcache_page(page);
3347 SetPageUptodate(page); 3347 SetPageUptodate(page);
3348 unlock_page(page); 3348 unlock_page(page);
3349 page_cache_release(page); 3349 put_page(page);
3350 rdata->pages[i] = NULL; 3350 rdata->pages[i] = NULL;
3351 rdata->nr_pages--; 3351 rdata->nr_pages--;
3352 continue; 3352 continue;
@@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3354 /* no need to hold page hostage */ 3354 /* no need to hold page hostage */
3355 lru_cache_add_file(page); 3355 lru_cache_add_file(page);
3356 unlock_page(page); 3356 unlock_page(page);
3357 page_cache_release(page); 3357 put_page(page);
3358 rdata->pages[i] = NULL; 3358 rdata->pages[i] = NULL;
3359 rdata->nr_pages--; 3359 rdata->nr_pages--;
3360 continue; 3360 continue;
@@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3402 } 3402 }
3403 3403
3404 /* move first page to the tmplist */ 3404 /* move first page to the tmplist */
3405 *offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 3405 *offset = (loff_t)page->index << PAGE_SHIFT;
3406 *bytes = PAGE_CACHE_SIZE; 3406 *bytes = PAGE_SIZE;
3407 *nr_pages = 1; 3407 *nr_pages = 1;
3408 list_move_tail(&page->lru, tmplist); 3408 list_move_tail(&page->lru, tmplist);
3409 3409
@@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3415 break; 3415 break;
3416 3416
3417 /* would this page push the read over the rsize? */ 3417 /* would this page push the read over the rsize? */
3418 if (*bytes + PAGE_CACHE_SIZE > rsize) 3418 if (*bytes + PAGE_SIZE > rsize)
3419 break; 3419 break;
3420 3420
3421 __SetPageLocked(page); 3421 __SetPageLocked(page);
@@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3424 break; 3424 break;
3425 } 3425 }
3426 list_move_tail(&page->lru, tmplist); 3426 list_move_tail(&page->lru, tmplist);
3427 (*bytes) += PAGE_CACHE_SIZE; 3427 (*bytes) += PAGE_SIZE;
3428 expected_index++; 3428 expected_index++;
3429 (*nr_pages)++; 3429 (*nr_pages)++;
3430 } 3430 }
@@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3493 * reach this point however since we set ra_pages to 0 when the 3493 * reach this point however since we set ra_pages to 0 when the
3494 * rsize is smaller than a cache page. 3494 * rsize is smaller than a cache page.
3495 */ 3495 */
3496 if (unlikely(rsize < PAGE_CACHE_SIZE)) { 3496 if (unlikely(rsize < PAGE_SIZE)) {
3497 add_credits_and_wake_if(server, credits, 0); 3497 add_credits_and_wake_if(server, credits, 0);
3498 return 0; 3498 return 0;
3499 } 3499 }
@@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3512 list_del(&page->lru); 3512 list_del(&page->lru);
3513 lru_cache_add_file(page); 3513 lru_cache_add_file(page);
3514 unlock_page(page); 3514 unlock_page(page);
3515 page_cache_release(page); 3515 put_page(page);
3516 } 3516 }
3517 rc = -ENOMEM; 3517 rc = -ENOMEM;
3518 add_credits_and_wake_if(server, credits, 0); 3518 add_credits_and_wake_if(server, credits, 0);
@@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3524 rdata->offset = offset; 3524 rdata->offset = offset;
3525 rdata->bytes = bytes; 3525 rdata->bytes = bytes;
3526 rdata->pid = pid; 3526 rdata->pid = pid;
3527 rdata->pagesz = PAGE_CACHE_SIZE; 3527 rdata->pagesz = PAGE_SIZE;
3528 rdata->read_into_pages = cifs_readpages_read_into_pages; 3528 rdata->read_into_pages = cifs_readpages_read_into_pages;
3529 rdata->credits = credits; 3529 rdata->credits = credits;
3530 3530
@@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3542 page = rdata->pages[i]; 3542 page = rdata->pages[i];
3543 lru_cache_add_file(page); 3543 lru_cache_add_file(page);
3544 unlock_page(page); 3544 unlock_page(page);
3545 page_cache_release(page); 3545 put_page(page);
3546 } 3546 }
3547 /* Fallback to the readpage in error/reconnect cases */ 3547 /* Fallback to the readpage in error/reconnect cases */
3548 kref_put(&rdata->refcount, cifs_readdata_release); 3548 kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
3577 read_data = kmap(page); 3577 read_data = kmap(page);
3578 /* for reads over a certain size could initiate async read ahead */ 3578 /* for reads over a certain size could initiate async read ahead */
3579 3579
3580 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset); 3580 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
3581 3581
3582 if (rc < 0) 3582 if (rc < 0)
3583 goto io_error; 3583 goto io_error;
@@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
3587 file_inode(file)->i_atime = 3587 file_inode(file)->i_atime =
3588 current_fs_time(file_inode(file)->i_sb); 3588 current_fs_time(file_inode(file)->i_sb);
3589 3589
3590 if (PAGE_CACHE_SIZE > rc) 3590 if (PAGE_SIZE > rc)
3591 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc); 3591 memset(read_data + rc, 0, PAGE_SIZE - rc);
3592 3592
3593 flush_dcache_page(page); 3593 flush_dcache_page(page);
3594 SetPageUptodate(page); 3594 SetPageUptodate(page);
@@ -3608,7 +3608,7 @@ read_complete:
3608 3608
3609static int cifs_readpage(struct file *file, struct page *page) 3609static int cifs_readpage(struct file *file, struct page *page)
3610{ 3610{
3611 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 3611 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
3612 int rc = -EACCES; 3612 int rc = -EACCES;
3613 unsigned int xid; 3613 unsigned int xid;
3614 3614
@@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
3679 struct page **pagep, void **fsdata) 3679 struct page **pagep, void **fsdata)
3680{ 3680{
3681 int oncethru = 0; 3681 int oncethru = 0;
3682 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 3682 pgoff_t index = pos >> PAGE_SHIFT;
3683 loff_t offset = pos & (PAGE_CACHE_SIZE - 1); 3683 loff_t offset = pos & (PAGE_SIZE - 1);
3684 loff_t page_start = pos & PAGE_MASK; 3684 loff_t page_start = pos & PAGE_MASK;
3685 loff_t i_size; 3685 loff_t i_size;
3686 struct page *page; 3686 struct page *page;
@@ -3703,7 +3703,7 @@ start:
3703 * the server. If the write is short, we'll end up doing a sync write 3703 * the server. If the write is short, we'll end up doing a sync write
3704 * instead. 3704 * instead.
3705 */ 3705 */
3706 if (len == PAGE_CACHE_SIZE) 3706 if (len == PAGE_SIZE)
3707 goto out; 3707 goto out;
3708 3708
3709 /* 3709 /*
@@ -3718,7 +3718,7 @@ start:
3718 (offset == 0 && (pos + len) >= i_size)) { 3718 (offset == 0 && (pos + len) >= i_size)) {
3719 zero_user_segments(page, 0, offset, 3719 zero_user_segments(page, 0, offset,
3720 offset + len, 3720 offset + len,
3721 PAGE_CACHE_SIZE); 3721 PAGE_SIZE);
3722 /* 3722 /*
3723 * PageChecked means that the parts of the page 3723 * PageChecked means that the parts of the page
3724 * to which we're not writing are considered up 3724 * to which we're not writing are considered up
@@ -3737,7 +3737,7 @@ start:
3737 * do a sync write instead since PG_uptodate isn't set. 3737 * do a sync write instead since PG_uptodate isn't set.
3738 */ 3738 */
3739 cifs_readpage_worker(file, page, &page_start); 3739 cifs_readpage_worker(file, page, &page_start);
3740 page_cache_release(page); 3740 put_page(page);
3741 oncethru = 1; 3741 oncethru = 1;
3742 goto start; 3742 goto start;
3743 } else { 3743 } else {
@@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset,
3764{ 3764{
3765 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); 3765 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3766 3766
3767 if (offset == 0 && length == PAGE_CACHE_SIZE) 3767 if (offset == 0 && length == PAGE_SIZE)
3768 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); 3768 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3769} 3769}
3770 3770
@@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page)
3772{ 3772{
3773 int rc = 0; 3773 int rc = 0;
3774 loff_t range_start = page_offset(page); 3774 loff_t range_start = page_offset(page);
3775 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 3775 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
3776 struct writeback_control wbc = { 3776 struct writeback_control wbc = {
3777 .sync_mode = WB_SYNC_ALL, 3777 .sync_mode = WB_SYNC_ALL,
3778 .nr_to_write = 0, 3778 .nr_to_write = 0,
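
Of the fs/cifs/file.c hunks, cifs_readpages_read_into_pages() shows the pattern most clearly: each page either takes a full PAGE_SIZE of data, takes a partial fill with the remainder zeroed, or lies past the server's EOF and is dropped. The tail handling mirrors this sketch:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            char page[PAGE_SIZE];
            unsigned long len = 1500;  /* hypothetical bytes left in the read */

            if (len >= PAGE_SIZE) {
                    len -= PAGE_SIZE;  /* whole page consumed, keep going */
            } else if (len > 0) {
                    memset(page + len, 0, PAGE_SIZE - len);  /* zero the tail */
                    len = 0;
            }                          /* else: past EOF, page gets dropped */

            printf("remaining=%lu\n", len);
            return 0;
    }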
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index aeb26dbfa1bf..5f9ad5c42180 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode)
59 59
60 /* check if server can support readpages */ 60 /* check if server can support readpages */
61 if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf < 61 if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
62 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) 62 PAGE_SIZE + MAX_CIFS_HDR_SIZE)
63 inode->i_data.a_ops = &cifs_addr_ops_smallbuf; 63 inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
64 else 64 else
65 inode->i_data.a_ops = &cifs_addr_ops; 65 inode->i_data.a_ops = &cifs_addr_ops;
@@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
2019 2019
2020static int cifs_truncate_page(struct address_space *mapping, loff_t from) 2020static int cifs_truncate_page(struct address_space *mapping, loff_t from)
2021{ 2021{
2022 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2022 pgoff_t index = from >> PAGE_SHIFT;
2023 unsigned offset = from & (PAGE_CACHE_SIZE - 1); 2023 unsigned offset = from & (PAGE_SIZE - 1);
2024 struct page *page; 2024 struct page *page;
2025 int rc = 0; 2025 int rc = 0;
2026 2026
@@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
2028 if (!page) 2028 if (!page)
2029 return -ENOMEM; 2029 return -ENOMEM;
2030 2030
2031 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2031 zero_user_segment(page, offset, PAGE_SIZE);
2032 unlock_page(page); 2032 unlock_page(page);
2033 page_cache_release(page); 2033 put_page(page);
2034 return rc; 2034 return rc;
2035} 2035}
2036 2036
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index a8f3b589a2df..cfd91320e869 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
71 struct inode *inode; 71 struct inode *inode;
72 struct dentry *root; 72 struct dentry *root;
73 73
74 sb->s_blocksize = PAGE_CACHE_SIZE; 74 sb->s_blocksize = PAGE_SIZE;
75 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 75 sb->s_blocksize_bits = PAGE_SHIFT;
76 sb->s_magic = CONFIGFS_MAGIC; 76 sb->s_magic = CONFIGFS_MAGIC;
77 sb->s_op = &configfs_ops; 77 sb->s_op = &configfs_ops;
78 sb->s_time_gran = 1; 78 sb->s_time_gran = 1;
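
The configfs hunk pairs s_blocksize with s_blocksize_bits, and the VFS expects s_blocksize == 1 << s_blocksize_bits; using PAGE_SIZE together with PAGE_SHIFT keeps that invariant by construction:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long s_blocksize = PAGE_SIZE;
            unsigned int s_blocksize_bits = PAGE_SHIFT;

            assert(s_blocksize == 1UL << s_blocksize_bits);  /* VFS invariant */
            printf("%lu = 1 << %u\n", s_blocksize, s_blocksize_bits);
            return 0;
    }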
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2d7646..9d4e7ea311f4 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@ Block Size
86 86
87(Block size in cramfs refers to the size of input data that is 87(Block size in cramfs refers to the size of input data that is
88compressed at a time. It's intended to be somewhere around 88compressed at a time. It's intended to be somewhere around
89PAGE_CACHE_SIZE for cramfs_readpage's convenience.) 89PAGE_SIZE for cramfs_readpage's convenience.)
90 90
91The superblock ought to indicate the block size that the fs was 91The superblock ought to indicate the block size that the fs was
92written for, since comments in <linux/pagemap.h> indicate that 92written for, since comments in <linux/pagemap.h> indicate that
93PAGE_CACHE_SIZE may grow in future (if I interpret the comment 93PAGE_SIZE may grow in future (if I interpret the comment
94correctly). 94correctly).
95 95
96Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that 96Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
97for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in 97for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
98turn is defined as PAGE_SIZE (which can be as large as 32KB on arm). 98turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
99This discrepancy is a bug, though it's not clear which should be 99This discrepancy is a bug, though it's not clear which should be
100changed. 100changed.
101 101
102One option is to change mkcramfs to take its PAGE_CACHE_SIZE from 102One option is to change mkcramfs to take its PAGE_SIZE from
103<asm/page.h>. Personally I don't like this option, but it does 103<asm/page.h>. Personally I don't like this option, but it does
104require the least amount of change: just change `#define 104require the least amount of change: just change `#define
105PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage 105PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
106is that the generated cramfs cannot always be shared between different 106is that the generated cramfs cannot always be shared between different
107kernels, not even necessarily kernels of the same architecture if 107kernels, not even necessarily kernels of the same architecture if
108PAGE_CACHE_SIZE is subject to change between kernel versions 108PAGE_SIZE is subject to change between kernel versions
109(currently possible with arm and ia64). 109(currently possible with arm and ia64).
110 110
111The remaining options try to make cramfs more sharable. 111The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size. The options are:
126 1. Always 4096 bytes. 126 1. Always 4096 bytes.
127 127
128 2. Writer chooses blocksize; kernel adapts but rejects blocksize > 128 2. Writer chooses blocksize; kernel adapts but rejects blocksize >
129 PAGE_CACHE_SIZE. 129 PAGE_SIZE.
130 130
131 3. Writer chooses blocksize; kernel adapts even to blocksize > 131 3. Writer chooses blocksize; kernel adapts even to blocksize >
132 PAGE_CACHE_SIZE. 132 PAGE_SIZE.
133 133
134It's easy enough to change the kernel to use a smaller value than 134It's easy enough to change the kernel to use a smaller value than
135PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks. 135PAGE_SIZE: just make cramfs_readpage read multiple blocks.
136 136
137The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE 137The cost of option 1 is that kernels with a larger PAGE_SIZE
138value don't get as good compression as they can. 138value don't get as good compression as they can.
139 139
140The cost of option 2 relative to option 1 is that the code uses 140The cost of option 2 relative to option 1 is that the code uses
141variables instead of #define'd constants. The gain is that people 141variables instead of #define'd constants. The gain is that people
142with kernels having larger PAGE_CACHE_SIZE can make use of that if 142with kernels having larger PAGE_SIZE can make use of that if
143they don't mind their cramfs being inaccessible to kernels with 143they don't mind their cramfs being inaccessible to kernels with
144smaller PAGE_CACHE_SIZE values. 144smaller PAGE_SIZE values.
145 145
146Option 3 is easy to implement if we don't mind being CPU-inefficient: 146Option 3 is easy to implement if we don't mind being CPU-inefficient:
147e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which 147e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
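
A side effect of the mechanical rename shows in this README hunk: the old text distinguished PAGE_CACHE_SIZE from PAGE_SIZE ("which in turn is defined as PAGE_SIZE"), and after substitution that sentence reads as self-referential. The underlying point stands, though: an image's block size and the reading kernel's page size are independent, and option 2's "kernel adapts" means reading several fs blocks per page when the block size is smaller. The ratio involved:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 32768;  /* hypothetical 32 KiB pages (arm) */
            unsigned long blksize = 4096;     /* image written with 4 KiB blocks */

            printf("blocks per readpage: %lu\n",
                   page_size / blksize);      /* 8 decompressions per page */
            return 0;
    }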
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b862bc219cd7..3a32ddf98095 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
137 * page cache and dentry tree anyway.. 137 * page cache and dentry tree anyway..
138 * 138 *
139 * This also acts as a way to guarantee contiguous areas of up to 139 * This also acts as a way to guarantee contiguous areas of up to
140 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to 140 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
141 * worry about end-of-buffer issues even when decompressing a full 141 * worry about end-of-buffer issues even when decompressing a full
142 * page cache. 142 * page cache.
143 */ 143 */
@@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
152 */ 152 */
153#define BLKS_PER_BUF_SHIFT (2) 153#define BLKS_PER_BUF_SHIFT (2)
154#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT) 154#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
155#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE) 155#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
156 156
157static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE]; 157static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
158static unsigned buffer_blocknr[READ_BUFFERS]; 158static unsigned buffer_blocknr[READ_BUFFERS];
@@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
173 173
174 if (!len) 174 if (!len)
175 return NULL; 175 return NULL;
176 blocknr = offset >> PAGE_CACHE_SHIFT; 176 blocknr = offset >> PAGE_SHIFT;
177 offset &= PAGE_CACHE_SIZE - 1; 177 offset &= PAGE_SIZE - 1;
178 178
179 /* Check if an existing buffer already has the data.. */ 179 /* Check if an existing buffer already has the data.. */
180 for (i = 0; i < READ_BUFFERS; i++) { 180 for (i = 0; i < READ_BUFFERS; i++) {
@@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
184 continue; 184 continue;
185 if (blocknr < buffer_blocknr[i]) 185 if (blocknr < buffer_blocknr[i])
186 continue; 186 continue;
187 blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT; 187 blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
188 blk_offset += offset; 188 blk_offset += offset;
189 if (blk_offset + len > BUFFER_SIZE) 189 if (blk_offset + len > BUFFER_SIZE)
190 continue; 190 continue;
191 return read_buffers[i] + blk_offset; 191 return read_buffers[i] + blk_offset;
192 } 192 }
193 193
194 devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT; 194 devsize = mapping->host->i_size >> PAGE_SHIFT;
195 195
196 /* Ok, read in BLKS_PER_BUF pages completely first. */ 196 /* Ok, read in BLKS_PER_BUF pages completely first. */
197 for (i = 0; i < BLKS_PER_BUF; i++) { 197 for (i = 0; i < BLKS_PER_BUF; i++) {
@@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 			wait_on_page_locked(page);
 			if (!PageUptodate(page)) {
 				/* asynchronous error */
-				page_cache_release(page);
+				put_page(page);
 				pages[i] = NULL;
 			}
 		}
@@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 		struct page *page = pages[i];
 
 		if (page) {
-			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
+			memcpy(data, kmap(page), PAGE_SIZE);
 			kunmap(page);
-			page_cache_release(page);
+			put_page(page);
 		} else
-			memset(data, 0, PAGE_CACHE_SIZE);
-		data += PAGE_CACHE_SIZE;
+			memset(data, 0, PAGE_SIZE);
+		data += PAGE_SIZE;
 	}
 	return read_buffers[buffer] + offset;
 }
@@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
 	buf->f_type = CRAMFS_MAGIC;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_blocks = CRAMFS_SB(sb)->blocks;
 	buf->f_bfree = 0;
 	buf->f_bavail = 0;
@@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
 	int bytes_filled;
 	void *pgdata;
 
-	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	bytes_filled = 0;
 	pgdata = kmap(page);
 
@@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page)
 
 		if (compr_len == 0)
 			; /* hole */
-		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
+		else if (unlikely(compr_len > (PAGE_SIZE << 1))) {
 			pr_err("bad compressed blocksize %u\n",
 				compr_len);
 			goto err;
 		} else {
 			mutex_lock(&read_mutex);
 			bytes_filled = cramfs_uncompress_block(pgdata,
-				 PAGE_CACHE_SIZE,
+				 PAGE_SIZE,
 				 cramfs_read(sb, start_offset, compr_len),
 				 compr_len);
 			mutex_unlock(&read_mutex);
@@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
 		}
 	}
 
-	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
+	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
 	flush_dcache_page(page);
 	kunmap(page);
 	SetPageUptodate(page);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 06cd1a22240b..7f5804537d30 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -175,10 +175,10 @@ static int do_page_crypto(struct inode *inode,
 	       FS_XTS_TWEAK_SIZE - sizeof(index));
 
 	sg_init_table(&dst, 1);
-	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
 	sg_init_table(&src, 1);
-	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+	sg_set_page(&src, src_page, PAGE_SIZE, 0);
+	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
 				   xts_tweak);
 	if (rw == FS_DECRYPT)
 		res = crypto_skcipher_decrypt(req);
@@ -287,7 +287,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
 	ctx = fscrypt_get_ctx(inode);
 	if (IS_ERR(ctx))
diff --git a/fs/dax.c b/fs/dax.c
index 90322eb7498c..75ba46d82a76 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (vmf->pgoff >= size) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode,
 }
 
 #define NO_SECTOR -1
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
+#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 
 static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
 		sector_t sector, bool pmd_entry, bool dirty)
@@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 		return 0;
 
-	start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
-	end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
+	start_index = wbc->range_start >> PAGE_SHIFT;
+	end_index = wbc->range_end >> PAGE_SHIFT;
 	pmd_index = DAX_PMD_INDEX(start_index);
 
 	rcu_read_lock();
@@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	page = find_get_page(mapping, vmf->pgoff);
 	if (page) {
 		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-			page_cache_release(page);
+			put_page(page);
 			return VM_FAULT_RETRY;
 		}
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	if (page) {
 		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-							PAGE_CACHE_SIZE, 0);
+							PAGE_SIZE, 0);
 		delete_from_page_cache(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
  unlock_page:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	goto out;
 }
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
  * you are truncating a file, the helper function dax_truncate_page() may be
  * more convenient.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks. Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 							get_block_t get_block)
 {
 	struct buffer_head bh;
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	int err;
 
 	/* Block boundary? Nothing to do */
 	if (!length)
 		return 0;
-	BUG_ON((offset + length) > PAGE_CACHE_SIZE);
+	BUG_ON((offset + length) > PAGE_SIZE);
 
 	memset(&bh, 0, sizeof(bh));
 	bh.b_bdev = inode->i_sb->s_bdev;
-	bh.b_size = PAGE_CACHE_SIZE;
+	bh.b_size = PAGE_SIZE;
 	err = get_block(inode, index, &bh, 0);
 	if (err < 0)
 		return err;
@@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 		struct block_device *bdev = bh.b_bdev;
 		struct blk_dax_ctl dax = {
 			.sector = to_sector(&bh, inode),
-			.size = PAGE_CACHE_SIZE,
+			.size = PAGE_SIZE,
 		};
 
 		if (dax_map_atomic(bdev, &dax) < 0)
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  * Similar to block_truncate_page(), this function can be called by a
  * filesystem when it is truncating a DAX file to handle the partial page.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks. Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  */
 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
 {
-	unsigned length = PAGE_CACHE_ALIGN(from) - from;
+	unsigned length = PAGE_ALIGN(from) - from;
 	return dax_zero_page_range(inode, from, length, get_block);
 }
 EXPORT_SYMBOL_GPL(dax_truncate_page);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 476f1ecbd1f0..472037732daf 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 		 */
 		if (dio->page_errors == 0)
 			dio->page_errors = ret;
-		page_cache_get(page);
+		get_page(page);
 		dio->pages[0] = page;
 		sdio->head = 0;
 		sdio->tail = 1;
@@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
 {
 	while (sdio->head < sdio->tail)
-		page_cache_release(dio->pages[sdio->head++]);
+		put_page(dio->pages[sdio->head++]);
 }
 
 /*
@@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 			if (dio->rw == READ && !PageCompound(page) &&
 					dio->should_dirty)
 				set_page_dirty_lock(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 		err = bio->bi_error;
 		bio_put(bio);
@@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio)
 	 */
 	if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
 		sdio->pages_in_io--;
-	page_cache_get(sdio->cur_page);
+	get_page(sdio->cur_page);
 	sdio->final_block_in_bio = sdio->cur_page_block +
 		(sdio->cur_page_len >> sdio->blkbits);
 	ret = 0;
@@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
 	 */
 	if (sdio->cur_page) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
-		page_cache_release(sdio->cur_page);
+		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 		if (ret)
 			return ret;
 	}
 
-	page_cache_get(page);		/* It is in dio */
+	get_page(page);		/* It is in dio */
 	sdio->cur_page = page;
 	sdio->cur_page_offset = offset;
 	sdio->cur_page_len = len;
@@ -830,7 +830,7 @@ out:
 	if (sdio->boundary) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
 		dio_bio_submit(dio, sdio);
-		page_cache_release(sdio->cur_page);
+		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 	}
 	return ret;
@@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
 
 			ret = get_more_blocks(dio, sdio, map_bh);
 			if (ret) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 			if (!buffer_mapped(map_bh))
@@ -988,7 +988,7 @@ do_holes:
 
 			/* AKPM: eargh, -ENOTBLK is a hack */
 			if (dio->rw & WRITE) {
-				page_cache_release(page);
+				put_page(page);
 				return -ENOTBLK;
 			}
 
@@ -1001,7 +1001,7 @@ do_holes:
 				if (sdio->block_in_file >=
 						i_size_aligned >> blkbits) {
 					/* We hit eof */
-					page_cache_release(page);
+					put_page(page);
 					goto out;
 				}
 				zero_user(page, from, 1 << blkbits);
@@ -1041,7 +1041,7 @@ do_holes:
 						  sdio->next_block_for_io,
 						  map_bh);
 			if (ret) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 			sdio->next_block_for_io += this_chunk_blocks;
@@ -1057,7 +1057,7 @@ next_block:
 		}
 
 		/* Drop the ref which was taken in get_user_pages() */
-		page_cache_release(page);
+		put_page(page);
 	}
 out:
 	return ret;
@@ -1281,7 +1281,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
 		if (retval == 0)
 			retval = ret2;
-		page_cache_release(sdio.cur_page);
+		put_page(sdio.cur_page);
 		sdio.cur_page = NULL;
 	}
 	if (sdio.bio)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 00640e70ed7a..1ab012a27d9f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con)
 		con->rx_page = alloc_page(GFP_ATOMIC);
 		if (con->rx_page == NULL)
 			goto out_resched;
-		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+		cbuf_init(&con->cb, PAGE_SIZE);
 	}
 
 	/*
@@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con)
 	 * buffer and the start of the currently used section (cb.base)
 	 */
 	if (cbuf_data(&con->cb) >= con->cb.base) {
-		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
 		iov[1].iov_len = con->cb.base;
 		iov[1].iov_base = page_address(con->rx_page);
 		nvec = 2;
@@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con)
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 	if (ret == -EBADMSG) {
 		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
 			  page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	spin_lock(&con->writequeue_lock);
 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
 	if ((&e->list == &con->writequeue) ||
-	    (PAGE_CACHE_SIZE - e->end < len)) {
+	    (PAGE_SIZE - e->end < len)) {
 		e = NULL;
 	} else {
 		offset = e->end;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 64026e53722a..d09cb4cdd09f 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
 		pg = virt_to_page(addr);
 		offset = offset_in_page(addr);
 		sg_set_page(&sg[i], pg, 0, offset);
-		remainder_of_page = PAGE_CACHE_SIZE - offset;
+		remainder_of_page = PAGE_SIZE - offset;
 		if (size >= remainder_of_page) {
 			sg[i].length = remainder_of_page;
 			addr += remainder_of_page;
@@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
 					    struct page *page)
 {
 	return ecryptfs_lower_header_size(crypt_stat) +
-	       ((loff_t)page->index << PAGE_CACHE_SHIFT);
+	       ((loff_t)page->index << PAGE_SHIFT);
 }
 
 /**
@@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
 	size_t extent_size = crypt_stat->extent_size;
 	int rc;
 
-	extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size));
+	extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
 	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
 				(extent_base + extent_offset));
 	if (rc) {
@@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page)
 	}
 
 	for (extent_offset = 0;
-	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+	     extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
 	     extent_offset++) {
 		rc = crypt_extent(crypt_stat, enc_extent_page, page,
 				  extent_offset, ENCRYPT);
@@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page)
 	lower_offset = lower_offset_for_page(crypt_stat, page);
 	enc_extent_virt = kmap(enc_extent_page);
 	rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
-				  PAGE_CACHE_SIZE);
+				  PAGE_SIZE);
 	kunmap(enc_extent_page);
 	if (rc < 0) {
 		ecryptfs_printk(KERN_ERR,
@@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page)
 
 	lower_offset = lower_offset_for_page(crypt_stat, page);
 	page_virt = kmap(page);
-	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
+	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
 				 ecryptfs_inode);
 	kunmap(page);
 	if (rc < 0) {
@@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page)
 	}
 
 	for (extent_offset = 0;
-	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+	     extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
 	     extent_offset++) {
 		rc = crypt_extent(crypt_stat, page, page,
 				  extent_offset, DECRYPT);
@@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
 		crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
 	else {
-		if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
+		if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
 			crypt_stat->metadata_size =
 				ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
 		else
-			crypt_stat->metadata_size = PAGE_CACHE_SIZE;
+			crypt_stat->metadata_size = PAGE_SIZE;
 	}
 }
 
@@ -1442,7 +1442,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
 					ECRYPTFS_VALIDATE_HEADER_SIZE);
 	if (rc) {
 		/* metadata is not in the file header, so try xattrs */
-		memset(page_virt, 0, PAGE_CACHE_SIZE);
+		memset(page_virt, 0, PAGE_SIZE);
 		rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
 		if (rc) {
 			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
@@ -1475,7 +1475,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
 	}
 out:
 	if (page_virt) {
-		memset(page_virt, 0, PAGE_CACHE_SIZE);
+		memset(page_virt, 0, PAGE_SIZE);
 		kmem_cache_free(ecryptfs_header_cache, page_virt);
 	}
 	return rc;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 121114e9a464..224b49e71aa4 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
 	} else { /* ia->ia_size < i_size_read(inode) */
 		/* We're chopping off all the pages down to the page
 		 * in which ia->ia_size is located. Fill in the end of
-		 * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
-		 * PAGE_CACHE_SIZE with zeros. */
-		size_t num_zeros = (PAGE_CACHE_SIZE
-				    - (ia->ia_size & ~PAGE_CACHE_MASK));
+		 * that page from (ia->ia_size & ~PAGE_MASK) to
+		 * PAGE_SIZE with zeros. */
+		size_t num_zeros = (PAGE_SIZE
+				    - (ia->ia_size & ~PAGE_MASK));
 
 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 			truncate_setsize(inode, ia->ia_size);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 9893d1538122..3cf1546dca82 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1798,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
 	 * added the our &auth_tok_list */
 	next_packet_is_auth_tok_packet = 1;
 	while (next_packet_is_auth_tok_packet) {
-		size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i);
+		size_t max_packet_size = ((PAGE_SIZE - 8) - i);
 
 		switch (src[i]) {
 		case ECRYPTFS_TAG_3_PACKET_TYPE:
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8b0b4a73116d..1698132d0e57 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -695,12 +695,12 @@ static struct ecryptfs_cache_info {
 	{
 		.cache = &ecryptfs_header_cache,
 		.name = "ecryptfs_headers",
-		.size = PAGE_CACHE_SIZE,
+		.size = PAGE_SIZE,
 	},
 	{
 		.cache = &ecryptfs_xattr_cache,
 		.name = "ecryptfs_xattr_cache",
-		.size = PAGE_CACHE_SIZE,
+		.size = PAGE_SIZE,
 	},
 	{
 		.cache = &ecryptfs_key_record_cache,
@@ -818,7 +818,7 @@ static int __init ecryptfs_init(void)
 {
 	int rc;
 
-	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
+	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
 		rc = -EINVAL;
 		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
 				"larger than the host's page size, and so "
@@ -826,7 +826,7 @@ static int __init ecryptfs_init(void)
826 "default eCryptfs extent size is [%u] bytes; " 826 "default eCryptfs extent size is [%u] bytes; "
827 "the page size is [%lu] bytes.\n", 827 "the page size is [%lu] bytes.\n",
828 ECRYPTFS_DEFAULT_EXTENT_SIZE, 828 ECRYPTFS_DEFAULT_EXTENT_SIZE,
829 (unsigned long)PAGE_CACHE_SIZE); 829 (unsigned long)PAGE_SIZE);
830 goto out; 830 goto out;
831 } 831 }
832 rc = ecryptfs_init_kmem_caches(); 832 rc = ecryptfs_init_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 1f5865263b3e..e6b1d80952b9 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 					 struct ecryptfs_crypt_stat *crypt_stat)
 {
 	loff_t extent_num_in_page = 0;
-	loff_t num_extents_per_page = (PAGE_CACHE_SIZE
+	loff_t num_extents_per_page = (PAGE_SIZE
 				       / crypt_stat->extent_size);
 	int rc = 0;
 
@@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 			char *page_virt;
 
 			page_virt = kmap_atomic(page);
-			memset(page_virt, 0, PAGE_CACHE_SIZE);
+			memset(page_virt, 0, PAGE_SIZE);
 			/* TODO: Support more than one header extent */
 			if (view_extent_num == 0) {
 				size_t written;
@@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
 				- crypt_stat->metadata_size);
 
 			rc = ecryptfs_read_lower_page_segment(
-				page, (lower_offset >> PAGE_CACHE_SHIFT),
-				(lower_offset & ~PAGE_CACHE_MASK),
+				page, (lower_offset >> PAGE_SHIFT),
+				(lower_offset & ~PAGE_MASK),
 				crypt_stat->extent_size, page->mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
@@ -198,7 +198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 
 	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
-						      PAGE_CACHE_SIZE,
+						      PAGE_SIZE,
 						      page->mapping->host);
 	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
 		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
@@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
 
 	} else {
 		rc = ecryptfs_read_lower_page_segment(
-			page, page->index, 0, PAGE_CACHE_SIZE,
+			page, page->index, 0, PAGE_SIZE,
 			page->mapping->host);
 		if (rc) {
 			printk(KERN_ERR "Error reading page; rc = "
@@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
 	struct inode *inode = page->mapping->host;
 	int end_byte_in_page;
 
-	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
+	if ((i_size_read(inode) / PAGE_SIZE) != page->index)
 		goto out;
-	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
+	end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
 	if (to > end_byte_in_page)
 		end_byte_in_page = to;
-	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
+	zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
 out:
 	return 0;
 }
@@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	loff_t prev_page_end_size;
 	int rc = 0;
@@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file,
 		return -ENOMEM;
 	*pagep = page;
 
-	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
+	prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
 	if (!PageUptodate(page)) {
 		struct ecryptfs_crypt_stat *crypt_stat =
 			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
 
 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 			rc = ecryptfs_read_lower_page_segment(
-				page, index, 0, PAGE_CACHE_SIZE, mapping->host);
+				page, index, 0, PAGE_SIZE, mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
 				       "lower page segment; rc = [%d]\n",
@@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file,
 				SetPageUptodate(page);
 			} else {
 				rc = ecryptfs_read_lower_page_segment(
-					page, index, 0, PAGE_CACHE_SIZE,
+					page, index, 0, PAGE_SIZE,
 					mapping->host);
 				if (rc) {
 					printk(KERN_ERR "%s: Error reading "
@@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file,
 		} else {
 			if (prev_page_end_size
 			    >= i_size_read(page->mapping->host)) {
-				zero_user(page, 0, PAGE_CACHE_SIZE);
+				zero_user(page, 0, PAGE_SIZE);
 				SetPageUptodate(page);
-			} else if (len < PAGE_CACHE_SIZE) {
+			} else if (len < PAGE_SIZE) {
 				rc = ecryptfs_decrypt_page(page);
 				if (rc) {
 					printk(KERN_ERR "%s: Error decrypting "
@@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file,
 	 * of page? Zero it out. */
 	if ((i_size_read(mapping->host) == prev_page_end_size)
 	    && (pos != 0))
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 out:
 	if (unlikely(rc)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		*pagep = NULL;
 	}
 	return rc;
@@ -437,7 +437,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
 	}
 	inode_lock(lower_inode);
 	size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
-					   xattr_virt, PAGE_CACHE_SIZE);
+					   xattr_virt, PAGE_SIZE);
 	if (size < 0)
 		size = 8;
 	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
@@ -479,8 +479,8 @@ static int ecryptfs_write_end(struct file *file,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = pos >> PAGE_SHIFT;
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + copied;
 	struct inode *ecryptfs_inode = mapping->host;
 	struct ecryptfs_crypt_stat *crypt_stat =
@@ -500,7 +500,7 @@ static int ecryptfs_write_end(struct file *file,
 		goto out;
 	}
 	if (!PageUptodate(page)) {
-		if (copied < PAGE_CACHE_SIZE) {
+		if (copied < PAGE_SIZE) {
 			rc = 0;
 			goto out;
 		}
@@ -533,7 +533,7 @@ static int ecryptfs_write_end(struct file *file,
 		rc = copied;
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return rc;
 }
 
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 09fe622274e4..158a3a39f82d 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
 	loff_t offset;
 	int rc;
 
-	offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+	offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
 		  + offset_in_page);
 	virt = kmap(page_for_lower);
 	rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
@@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
 	else
 		pos = offset;
 	while (pos < (offset + size)) {
-		pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
-		size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
-		size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+		pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
+		size_t start_offset_in_page = (pos & ~PAGE_MASK);
+		size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
 		loff_t total_remaining_bytes = ((offset + size) - pos);
 
 		if (fatal_signal_pending(current)) {
@@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
 			 * Fill in zero values to the end of the page */
 			memset(((char *)ecryptfs_page_virt
 				+ start_offset_in_page), 0,
-			       PAGE_CACHE_SIZE - start_offset_in_page);
+			       PAGE_SIZE - start_offset_in_page);
 		}
 
 		/* pos >= offset, we are now writing the data request */
@@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
 							ecryptfs_page,
 							start_offset_in_page,
 							data_offset);
-		page_cache_release(ecryptfs_page);
+		put_page(ecryptfs_page);
 		if (rc) {
 			printk(KERN_ERR "%s: Error encrypting "
 			       "page; rc = [%d]\n", __func__, rc);
@@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
 	loff_t offset;
 	int rc;
 
-	offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
+	offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
 	virt = kmap(page_for_ecryptfs);
 	rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
 	if (rc > 0)
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index dd029d13ea61..553c5d2db4a4 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 	efivarfs_sb = sb;
 
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = EFIVARFS_MAGIC;
 	sb->s_op = &efivarfs_ops;
 	sb->s_d_op = &efivarfs_d_ops;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index e5bb2abf77f9..547b93cbea63 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode)
 static inline void exofs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	loff_t last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page)
 	unsigned chunk_size = exofs_chunk_size(dir);
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	struct exofs_dir_entry *p;
 	char *error;
 
 	/* if the page is the last one in the directory */
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -138,7 +138,7 @@ bad_entry:
 	EXOFS_ERR(
 		"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
 		"offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
-		dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		_LLU(le64_to_cpu(p->inode_no)),
 		rec_len, p->name_len);
 	goto fail;
@@ -147,7 +147,7 @@ Eend:
147 EXOFS_ERR("ERROR [exofs_check_page]: " 147 EXOFS_ERR("ERROR [exofs_check_page]: "
148 "entry in directory(0x%lx) spans the page boundary" 148 "entry in directory(0x%lx) spans the page boundary"
149 "offset=%lu, inode=0x%llx\n", 149 "offset=%lu, inode=0x%llx\n",
150 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, 150 dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
151 _LLU(le64_to_cpu(p->inode_no))); 151 _LLU(le64_to_cpu(p->inode_no)));
152fail: 152fail:
153 SetPageChecked(page); 153 SetPageChecked(page);
@@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 {
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
 	int need_revalidate = (file->f_version != inode->i_version);
@@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 		if (IS_ERR(page)) {
 			EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
 				  inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return PTR_ERR(page);
 		}
 		kaddr = page_address(page);
@@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
 			if (offset) {
 				offset = exofs_validate_entry(kaddr, offset,
 							      chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + exofs_last_byte(dir, n);
 		de = (struct exofs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				name_len = 0;
@@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
 	kunmap_atomic(kaddr);
 	err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 9eaf595aeaf8..49e1bd00b4ec 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol)
 
 	if (!pcol->ios) {
 		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
-					     pcol->pg_first << PAGE_CACHE_SHIFT,
+					     pcol->pg_first << PAGE_SHIFT,
 					     pcol->length, &pcol->ios);
 
 		if (ret)
@@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page)
 	struct inode *inode = pcol->inode;
 	struct exofs_i_info *oi = exofs_i(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	size_t len;
 	int ret;
 
@@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page)
 	pcol->that_locked_page = page;
 
 	if (page->index < end_index)
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	else if (page->index == end_index)
-		len = i_size & ~PAGE_CACHE_MASK;
+		len = i_size & ~PAGE_MASK;
 	else
 		len = 0;
 
@@ -442,8 +442,8 @@ try_again:
 			goto fail;
 	}
 
-	if (len != PAGE_CACHE_SIZE)
-		zero_user(page, len, PAGE_CACHE_SIZE - len);
+	if (len != PAGE_SIZE)
+		zero_user(page, len, PAGE_SIZE - len);
 
 	EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
 		inode->i_ino, page->index, len);
@@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page)
 
 	if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
 		EXOFS_DBGMSG2("index=0x%lx\n", page->index);
-		page_cache_release(page);
+		put_page(page);
 		return;
 	}
 	EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
@@ -633,7 +633,7 @@ static int write_exec(struct page_collect *pcol)
 
 	BUG_ON(pcol->ios);
 	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
-				 pcol->pg_first << PAGE_CACHE_SHIFT,
+				 pcol->pg_first << PAGE_SHIFT,
 				 pcol->length, &pcol->ios);
 	if (unlikely(ret))
 		goto err;
@@ -696,7 +696,7 @@ static int writepage_strip(struct page *page,
 	struct inode *inode = pcol->inode;
 	struct exofs_i_info *oi = exofs_i(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	size_t len;
 	int ret;
 
@@ -708,9 +708,9 @@ static int writepage_strip(struct page *page,
 
 	if (page->index < end_index)
 		/* in this case, the page is within the limits of the file */
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	else {
-		len = i_size & ~PAGE_CACHE_MASK;
+		len = i_size & ~PAGE_MASK;
 
 		if (page->index > end_index || !len) {
 			/* in this case, the page is outside the limits
@@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping,
 	long start, end, expected_pages;
 	int ret;
 
-	start = wbc->range_start >> PAGE_CACHE_SHIFT;
+	start = wbc->range_start >> PAGE_SHIFT;
 	end = (wbc->range_end == LLONG_MAX) ?
 		start + mapping->nrpages :
-		wbc->range_end >> PAGE_CACHE_SHIFT;
+		wbc->range_end >> PAGE_SHIFT;
 
 	if (start || end)
 		expected_pages = end - start + 1;
@@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
 	}
 
 	/* read modify write */
-	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
 		loff_t i_size = i_size_read(mapping->host);
-		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+		pgoff_t end_index = i_size >> PAGE_SHIFT;
 		size_t rlen;
 
 		if (page->index < end_index)
-			rlen = PAGE_CACHE_SIZE;
+			rlen = PAGE_SIZE;
 		else if (page->index == end_index)
-			rlen = i_size & ~PAGE_CACHE_MASK;
+			rlen = i_size & ~PAGE_MASK;
 		else
 			rlen = 0;
 
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index c20d77df2679..622a686bb08b 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 0c6638b40f21..7ff6fcfa685d 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
@@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode)
 static inline void ext2_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet)
 	char *kaddr = page_address(page);
 	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	ext2_dirent *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -176,7 +176,7 @@ bad_entry:
 	if (!quiet)
 		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
 			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+			dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode),
 			rec_len, p->name_len);
 	goto fail;
@@ -186,7 +186,7 @@ Eend:
 		ext2_error(sb, "ext2_check_page",
 			"entry in directory #%lu spans the page boundary"
 			"offset=%lu, inode=%lu",
-			dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+			dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode));
 	}
 fail:
@@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
 	unsigned char *types = NULL;
@@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 			ext2_error(sb, __func__,
 				   "bad page in #%lu",
 				   inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return PTR_ERR(page);
 		}
 		kaddr = page_address(page);
 		if (unlikely(need_revalidate)) {
 			if (offset) {
 				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
406 if (++n >= npages) 406 if (++n >= npages)
407 n = 0; 407 n = 0;
408 /* next page is past the blocks we've got */ 408 /* next page is past the blocks we've got */
409 if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { 409 if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
410 ext2_error(dir->i_sb, __func__, 410 ext2_error(dir->i_sb, __func__,
411 "dir %lu size %lld exceeds block count %llu", 411 "dir %lu size %lld exceeds block count %llu",
412 dir->i_ino, dir->i_size, 412 dir->i_ino, dir->i_size,
@@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
511 kaddr = page_address(page); 511 kaddr = page_address(page);
512 dir_end = kaddr + ext2_last_byte(dir, n); 512 dir_end = kaddr + ext2_last_byte(dir, n);
513 de = (ext2_dirent *)kaddr; 513 de = (ext2_dirent *)kaddr;
514 kaddr += PAGE_CACHE_SIZE - reclen; 514 kaddr += PAGE_SIZE - reclen;
515 while ((char *)de <= kaddr) { 515 while ((char *)de <= kaddr) {
516 if ((char *)de == dir_end) { 516 if ((char *)de == dir_end) {
517 /* We hit i_size */ 517 /* We hit i_size */
@@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
655 kunmap_atomic(kaddr); 655 kunmap_atomic(kaddr);
656 err = ext2_commit_chunk(page, 0, chunk_size); 656 err = ext2_commit_chunk(page, 0, chunk_size);
657fail: 657fail:
658 page_cache_release(page); 658 put_page(page);
659 return err; 659 return err;
660} 660}
661 661
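/*
 * Context for the hunks above and below: before this series,
 * include/linux/pagemap.h defined PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE and
 * PAGE_CACHE_MASK as straight aliases of PAGE_SHIFT, PAGE_SIZE and
 * PAGE_MASK, and page_cache_release() as an alias of put_page(), so every
 * substitution in this merge is mechanical and changes no behaviour.
 * Below is a minimal userspace sketch of the index/offset split these
 * macros drive; PAGE_SHIFT is hard-coded to 4 KiB purely for illustration,
 * whereas in the kernel it comes from the architecture headers.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long pos = 5000;           /* byte offset in a file */
	unsigned long n = pos >> PAGE_SHIFT;     /* page index: 1 */
	unsigned long offset = pos & ~PAGE_MASK; /* offset in that page: 904 */

	printf("pos %llu -> page %lu, offset %lu\n", pos, n, offset);
	return 0;
}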
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 7a2be8f7f3c3..d34843925b23 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 			ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
 		else {
 			kunmap(dir_page);
-			page_cache_release(dir_page);
+			put_page(dir_page);
 		}
 		inode_dec_link_count(old_dir);
 	}
@@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
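/*
 * The kunmap()/put_page() pairs above simply undo ext2_get_page()'s
 * kmap() and page reference in reverse order. A toy userspace model of
 * that get/put discipline follows; struct page and both helpers here are
 * illustrative stand-ins, not the kernel's refcounting.
 */
#include <stdio.h>

struct page { int _refcount; };

static void get_page(struct page *p)
{
	p->_refcount++;
}

static void put_page(struct page *p)
{
	if (--p->_refcount == 0)
		printf("last reference dropped, page can be freed\n");
}

int main(void)
{
	struct page pg = { ._refcount = 1 }; /* reference held by the lookup */

	get_page(&pg); /* extra reference, old spelling: page_cache_get()     */
	put_page(&pg); /* release it,      old spelling: page_cache_release() */
	put_page(&pg); /* drop the lookup's reference too                     */
	return 0;
}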
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index edc053a81914..2580ef3346ca 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -283,10 +283,10 @@ static int ext4_page_crypto(struct inode *inode,
 	       EXT4_XTS_TWEAK_SIZE - sizeof(index));
 
 	sg_init_table(&dst, 1);
-	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
 	sg_init_table(&src, 1);
-	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+	sg_set_page(&src, src_page, PAGE_SIZE, 0);
+	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
 				   xts_tweak);
 	if (rw == EXT4_DECRYPT)
 		res = crypto_skcipher_decrypt(req);
@@ -396,7 +396,7 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 		    (unsigned long) inode->i_ino, lblk, len);
 #endif
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
 	ctx = ext4_get_crypto_ctx(inode);
 	if (IS_ERR(ctx))
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 50ba27cbed03..561d7308b393 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -155,13 +155,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 			err = ext4_map_blocks(NULL, inode, &map, 0);
 			if (err > 0) {
 				pgoff_t index = map.m_pblk >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits);
+					(PAGE_SHIFT - inode->i_blkbits);
 				if (!ra_has_index(&file->f_ra, index))
 					page_cache_sync_readahead(
 						sb->s_bdev->bd_inode->i_mapping,
 						&file->f_ra, file,
 						index, 1);
-				file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+				file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
 				bh = ext4_bread(NULL, inode, map.m_lblk, 0);
 				if (IS_ERR(bh)) {
 					err = PTR_ERR(bh);
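/*
 * The readahead hunk above converts a physical block number into a page
 * index by shifting away the blocks-per-page factor. A worked userspace
 * example, assuming 4 KiB pages over a 1 KiB filesystem block size:
 */
#include <stdio.h>

int main(void)
{
	unsigned page_shift = 12;      /* 4 KiB pages           */
	unsigned blkbits = 10;         /* 1 KiB blocks          */
	unsigned long long pblk = 100; /* physical block number */
	unsigned long index = pblk >> (page_shift - blkbits);

	/* four 1 KiB blocks fit in one page, so block 100 lives in page 25 */
	printf("block %llu -> page index %lu\n", pblk, index);
	return 0;
}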
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c04743519865..7ccba1aa142d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1961,7 +1961,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT4_MAX_REC_LEN || len == 0)
 		return blocksize;
 	return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1974,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
 {
 	if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
 		BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len < 65536)
 		return cpu_to_le16(len);
 	if (len == blocksize) {
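/*
 * The #if above exists because a 16-bit rec_len cannot represent record
 * lengths once the block size (bounded by the page size) reaches 64 KiB.
 * Since rec_len is always a multiple of 4, the two low bits are free to
 * hold bits 16-17 of the real length. A self-contained sketch of that
 * round trip; the in-kernel encoder additionally BUGs on invalid input
 * and special-cases len == blocksize, both omitted here:
 */
#include <stdio.h>

static unsigned rec_len_to_disk(unsigned len)
{
	if (len < 65536)
		return len;
	return (len & 65532) | ((len >> 16) & 3);
}

static unsigned rec_len_from_disk(unsigned dlen)
{
	return (dlen & 65532) | ((dlen & 3) << 16);
}

int main(void)
{
	unsigned len = 131060; /* an 18-bit, 4-aligned record length */
	unsigned disk = rec_len_to_disk(len);

	printf("len %u -> on-disk %u -> decoded %u\n",
	       len, disk, rec_len_from_disk(disk));
	return 0;
}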
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6659e216385e..0caece398eb8 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -428,8 +428,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 	lastoff = startoff;
 	endoff = (loff_t)end_blk << blkbits;
 
-	index = startoff >> PAGE_CACHE_SHIFT;
-	end = endoff >> PAGE_CACHE_SHIFT;
+	index = startoff >> PAGE_SHIFT;
+	end = endoff >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 	do {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7cbdd3752ba5..7bc6c855cc18 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page)
 	ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
-	zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	zero_user_segment(page, len, PAGE_SIZE);
 	SetPageUptodate(page);
 	brelse(iloc.bh);
 
@@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
 	if (!page->index)
 		ret = ext4_read_inline_page(inode, page);
 	else if (!PageUptodate(page)) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	}
 
@@ -595,7 +595,7 @@ retry:
 
 	if (ret) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		ext4_orphan_add(handle, inode);
 		up_write(&EXT4_I(inode)->xattr_sem);
@@ -621,7 +621,7 @@ retry:
 out:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (sem_held)
 		up_write(&EXT4_I(inode)->xattr_sem);
@@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 	if (!ext4_has_inline_data(inode)) {
 		ret = 0;
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		goto out_up_read;
 	}
 
@@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
 		if (ret) {
 			up_read(&EXT4_I(inode)->xattr_sem);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			ext4_truncate_failed_write(inode);
 			return ret;
 		}
@@ -829,7 +829,7 @@ out:
 	up_read(&EXT4_I(inode)->xattr_sem);
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return ret;
 }
@@ -919,7 +919,7 @@ retry_journal:
 out_release_page:
 	up_read(&EXT4_I(inode)->xattr_sem);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out_journal:
 	ext4_journal_stop(handle);
 out:
@@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 		i_size_changed = 1;
 	}
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	/*
 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
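/*
 * Several hunks above keep zero_user_segment(page, len, PAGE_SIZE): when
 * inline data shorter than a page is copied in, the remainder of the page
 * must be zeroed before SetPageUptodate(). A userspace analogue with a
 * plain buffer standing in for the page:
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

int main(void)
{
	static char page[PAGE_SIZE];
	unsigned len = 60; /* bytes of inline data actually present */

	memset(page, 'x', len);                 /* stand-in for the data copy   */
	memset(page + len, 0, PAGE_SIZE - len); /* what zero_user_segment does  */

	printf("page[%u]='%c', page[%u]=%d\n",
	       len - 1, page[len - 1], len, page[len]);
	return 0;
}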
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dab84a2530ff..4f7043ba4447 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1057,7 +1057,7 @@ int do_journal_get_write_access(handle_t *handle,
 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 				  get_block_t *get_block)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end;
@@ -1069,15 +1069,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	bool decrypt = false;
 
 	BUG_ON(!PageLocked(page));
-	BUG_ON(from > PAGE_CACHE_SIZE);
-	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > PAGE_SIZE);
+	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
 	head = page_buffers(page);
 	bbits = ilog2(blocksize);
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
 	for (bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1159,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
 	 * we allocate blocks but write fails for some reason
 	 */
 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	index = pos >> PAGE_SHIFT;
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1188,7 @@ retry_grab:
 retry_journal:
 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
 	if (IS_ERR(handle)) {
-		page_cache_release(page);
+		put_page(page);
 		return PTR_ERR(handle);
 	}
 
@@ -1196,7 +1196,7 @@ retry_journal:
 	if (page->mapping != mapping) {
 		/* The page got truncated from under us */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		ext4_journal_stop(handle);
 		goto retry_grab;
 	}
@@ -1252,7 +1252,7 @@ retry_journal:
 		if (ret == -ENOSPC &&
 		    ext4_should_retry_alloc(inode->i_sb, &retries))
 			goto retry_journal;
-		page_cache_release(page);
+		put_page(page);
 		return ret;
 	}
 	*pagep = page;
@@ -1295,7 +1295,7 @@ static int ext4_write_end(struct file *file,
 		ret = ext4_jbd2_file_inode(handle, inode);
 		if (ret) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto errout;
 		}
 	}
@@ -1315,7 +1315,7 @@ static int ext4_write_end(struct file *file,
 	 */
 	i_size_changed = ext4_update_inode_size(inode, pos + copied);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1399,7 @@ static int ext4_journalled_write_end(struct file *file,
 	int size_changed = 0;
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1423,7 @@ static int ext4_journalled_write_end(struct file *file,
 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1537,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 	int num_clusters;
 	ext4_fsblk_t lblk;
 
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	head = page_buffers(page);
 	bh = head;
@@ -1553,7 +1553,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 			clear_buffer_delay(bh);
 		} else if (contiguous_blks) {
 			lblk = page->index <<
-				(PAGE_CACHE_SHIFT - inode->i_blkbits);
+				(PAGE_SHIFT - inode->i_blkbits);
 			lblk += (curr_off >> inode->i_blkbits) -
 				contiguous_blks;
 			ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1563,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 	} while ((bh = bh->b_this_page) != head);
 
 	if (contiguous_blks) {
-		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
 		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
 		ext4_es_remove_extent(inode, lblk, contiguous_blks);
 	}
@@ -1572,7 +1572,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 	 * need to release the reserved space for that cluster. */
 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
 	while (num_clusters > 0) {
-		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
+		lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
 			((num_clusters - 1) << sbi->s_cluster_bits);
 		if (sbi->s_cluster_ratio == 1 ||
 		    !ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1619,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 	end = mpd->next_page - 1;
 	if (invalidate) {
 		ext4_lblk_t start, last;
-		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		start = index << (PAGE_SHIFT - inode->i_blkbits);
+		last = end << (PAGE_SHIFT - inode->i_blkbits);
 		ext4_es_remove_extent(inode, start, last - start + 1);
 	}
 
@@ -1636,7 +1636,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 			BUG_ON(!PageLocked(page));
 			BUG_ON(PageWriteback(page));
 			if (invalidate) {
-				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+				block_invalidatepage(page, 0, PAGE_SIZE);
 				ClearPageUptodate(page);
 			}
 			unlock_page(page);
@@ -2007,10 +2007,10 @@ static int ext4_writepage(struct page *page,
 
 	trace_ext4_writepage(page);
 	size = i_size_read(inode);
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 
 	page_bufs = page_buffers(page);
 	/*
@@ -2034,7 +2034,7 @@ static int ext4_writepage(struct page *page,
 				       ext4_bh_delay_or_unwritten)) {
 		redirty_page_for_writepage(wbc, page);
 		if ((current->flags & PF_MEMALLOC) ||
-		    (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
+		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
 			/*
 			 * For memory cleaning there's no point in writing only
 			 * some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2076,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
 	int err;
 
 	BUG_ON(page->index != mpd->first_page);
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	clear_page_dirty_for_io(page);
 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
 	if (!err)
@@ -2213,7 +2213,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 	int nr_pages, i;
 	struct inode *inode = mpd->inode;
 	struct buffer_head *head, *bh;
-	int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
 	pgoff_t start, end;
 	ext4_lblk_t lblk;
 	sector_t pblock;
@@ -2274,7 +2274,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 			 * supports blocksize < pagesize as we will try to
 			 * convert potentially unmapped parts of inode.
 			 */
-			mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+			mpd->io_submit.io_end->size += PAGE_SIZE;
 			/* Page fully mapped - let IO run! */
 			err = mpage_submit_page(mpd, page);
 			if (err < 0) {
@@ -2426,7 +2426,7 @@ update_disksize:
 	 * Update on-disk size after IO is submitted. Races with
 	 * truncate are avoided by checking i_size under i_data_sem.
 	 */
-	disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
 	if (disksize > EXT4_I(inode)->i_disksize) {
 		int err2;
 		loff_t i_size;
@@ -2562,7 +2562,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			mpd->next_page = page->index + 1;
 			/* Add all dirty buffers to mpd */
 			lblk = ((ext4_lblk_t)page->index) <<
-				(PAGE_CACHE_SHIFT - blkbits);
+				(PAGE_SHIFT - blkbits);
 			head = page_buffers(page);
 			err = mpage_process_page_bufs(mpd, head, head, lblk);
 			if (err <= 0)
@@ -2647,7 +2647,7 @@ static int ext4_writepages(struct address_space *mapping,
 		 * We may need to convert up to one extent per block in
 		 * the page and we may dirty the inode.
 		 */
-		rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+		rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
 	}
 
 	/*
@@ -2678,8 +2678,8 @@ static int ext4_writepages(struct address_space *mapping,
 		mpd.first_page = writeback_index;
 		mpd.last_page = -1;
 	} else {
-		mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
-		mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
+		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
+		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
 	}
 
 	mpd.inode = inode;
@@ -2838,7 +2838,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	handle_t *handle;
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 
 	if (ext4_nonda_switch(inode->i_sb)) {
 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2881,7 @@ retry_journal:
 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
 				ext4_da_write_credits(inode, pos, len));
 	if (IS_ERR(handle)) {
-		page_cache_release(page);
+		put_page(page);
 		return PTR_ERR(handle);
 	}
 
@@ -2889,7 +2889,7 @@ retry_journal:
 	if (page->mapping != mapping) {
 		/* The page got truncated from under us */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		ext4_journal_stop(handle);
 		goto retry_grab;
 	}
@@ -2917,7 +2917,7 @@ retry_journal:
 		    ext4_should_retry_alloc(inode->i_sb, &retries))
 			goto retry_journal;
 
-		page_cache_release(page);
+		put_page(page);
 		return ret;
 	}
 
@@ -2965,7 +2965,7 @@ static int ext4_da_write_end(struct file *file,
 					      len, copied, page, fsdata);
 
 	trace_ext4_da_write_end(inode, pos, len, copied);
-	start = pos & (PAGE_CACHE_SIZE - 1);
+	start = pos & (PAGE_SIZE - 1);
 	end = start + copied - 1;
 
 	/*
@@ -3187,7 +3187,7 @@ static int __ext4_journalled_invalidatepage(struct page *page,
 	/*
 	 * If it's a full truncate we just forget about the pending dirtying
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		ClearPageChecked(page);
 
 	return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3556,8 +3556,8 @@ void ext4_set_aops(struct inode *inode)
 static int __ext4_block_zero_page_range(handle_t *handle,
 		struct address_space *mapping, loff_t from, loff_t length)
 {
-	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	ext4_fsblk_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize, pos;
 	ext4_lblk_t iblock;
 	struct inode *inode = mapping->host;
@@ -3565,14 +3565,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 	struct page *page;
 	int err = 0;
 
-	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
 				   mapping_gfp_constraint(mapping, ~__GFP_FS));
 	if (!page)
 		return -ENOMEM;
 
 	blocksize = inode->i_sb->s_blocksize;
 
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3614,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		    ext4_encrypted_inode(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!ext4_has_encryption_key(inode));
-			BUG_ON(blocksize != PAGE_CACHE_SIZE);
+			BUG_ON(blocksize != PAGE_SIZE);
 			WARN_ON_ONCE(ext4_decrypt(page));
 		}
 	}
@@ -3638,7 +3638,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -3653,7 +3653,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
 		struct address_space *mapping, loff_t from, loff_t length)
 {
 	struct inode *inode = mapping->host;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize = inode->i_sb->s_blocksize;
 	unsigned max = blocksize - (offset & (blocksize - 1));
 
@@ -3678,7 +3678,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
 static int ext4_block_truncate_page(handle_t *handle,
 		struct address_space *mapping, loff_t from)
 {
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned length;
 	unsigned blocksize;
 	struct inode *inode = mapping->host;
@@ -3816,7 +3816,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 	 */
 	if (offset + length > inode->i_size) {
 		length = inode->i_size +
-		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
 		   offset;
 	}
 
@@ -4891,23 +4891,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
 	tid_t commit_tid = 0;
 	int ret;
 
-	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+	offset = inode->i_size & (PAGE_SIZE - 1);
 	/*
 	 * All buffers in the last page remain valid? Then there's nothing to
-	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
 	 * blocksize case
 	 */
-	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+	if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
 		return;
 	while (1) {
 		page = find_lock_page(inode->i_mapping,
-				      inode->i_size >> PAGE_CACHE_SHIFT);
+				      inode->i_size >> PAGE_SHIFT);
 		if (!page)
 			return;
 		ret = __ext4_journalled_invalidatepage(page, offset,
-						PAGE_CACHE_SIZE - offset);
+						PAGE_SIZE - offset);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (ret != -EBUSY)
 			return;
 		commit_tid = 0;
@@ -5546,10 +5546,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto out;
 	}
 
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	/*
 	 * Return if we have all the buffers mapped. This avoids the need to do
 	 * journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5580,7 @@ retry_alloc:
 	ret = block_page_mkwrite(vma, vmf, get_block);
 	if (!ret && ext4_should_journal_data(inode)) {
 		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
-			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
+			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
 			unlock_page(page);
 			ret = VM_FAULT_SIGBUS;
 			ext4_journal_stop(handle);
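/*
 * The ext4_punch_hole() hunk above rounds a hole that extends past
 * i_size up to the next page boundary, so the entire tail page is
 * covered. A worked example of that clamp, assuming 4 KiB pages:
 */
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long i_size = 10000;  /* file ends inside page 2 */
	unsigned long long offset = 8192;   /* hole start              */
	unsigned long long length = 100000; /* requested hole length   */

	if (offset + length > i_size)
		length = i_size + PAGE_SIZE - (i_size & (PAGE_SIZE - 1)) -
			 offset;

	/* page 2 spans bytes 8192..12287, so the clamped length is 4096 */
	printf("clamped hole length = %llu\n", length);
	return 0;
}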
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 50e05df28f66..eeeade76012e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
 *
 *
 * one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2
 *
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
+ * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
@@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 	sb = inode->i_sb;
 	ngroups = ext4_get_groups_count(sb);
 	blocksize = 1 << inode->i_blkbits;
-	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
+	blocks_per_page = PAGE_SIZE / blocksize;
 
 	groups_per_page = blocks_per_page >> 1;
 	if (groups_per_page == 0)
@@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 	e4b->bd_buddy_page = NULL;
 	e4b->bd_bitmap_page = NULL;
 
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
 	/*
 	 * the buddy cache inode stores the block bitmap
 	 * and buddy information in consecutive blocks.
@@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
 {
 	if (e4b->bd_bitmap_page) {
 		unlock_page(e4b->bd_bitmap_page);
-		page_cache_release(e4b->bd_bitmap_page);
+		put_page(e4b->bd_bitmap_page);
 	}
 	if (e4b->bd_buddy_page) {
 		unlock_page(e4b->bd_buddy_page);
-		page_cache_release(e4b->bd_buddy_page);
+		put_page(e4b->bd_buddy_page);
 	}
 }
 
@@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 	might_sleep();
 	mb_debug(1, "load group %u\n", group);
 
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
 	grp = ext4_get_group_info(sb, group);
 
 	e4b->bd_blkbits = sb->s_blocksize_bits;
@@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 			 * is yet to initialize the same. So
 			 * wait for it to initialize.
 			 */
-			page_cache_release(page);
+			put_page(page);
 			page = find_or_create_page(inode->i_mapping, pnum, gfp);
 			if (page) {
 				BUG_ON(page->mapping != inode->i_mapping);
@@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
 	if (page == NULL || !PageUptodate(page)) {
 		if (page)
-			page_cache_release(page);
+			put_page(page);
 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
 		if (page) {
 			BUG_ON(page->mapping != inode->i_mapping);
@@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 
 err:
 	if (page)
-		page_cache_release(page);
+		put_page(page);
 	if (e4b->bd_bitmap_page)
-		page_cache_release(e4b->bd_bitmap_page);
+		put_page(e4b->bd_bitmap_page);
 	if (e4b->bd_buddy_page)
-		page_cache_release(e4b->bd_buddy_page);
+		put_page(e4b->bd_buddy_page);
 	e4b->bd_buddy = NULL;
 	e4b->bd_bitmap = NULL;
 	return ret;
@@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
 {
 	if (e4b->bd_bitmap_page)
-		page_cache_release(e4b->bd_bitmap_page);
+		put_page(e4b->bd_bitmap_page);
 	if (e4b->bd_buddy_page)
-		page_cache_release(e4b->bd_buddy_page);
+		put_page(e4b->bd_buddy_page);
 }
 
 
@@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb,
 		/* No more items in the per group rb tree
 		 * balance refcounts from ext4_mb_free_metadata()
 		 */
-		page_cache_release(e4b.bd_buddy_page);
-		page_cache_release(e4b.bd_bitmap_page);
+		put_page(e4b.bd_buddy_page);
+		put_page(e4b.bd_bitmap_page);
 	}
 	ext4_unlock_group(sb, entry->efd_group);
 	kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
 	}
 	if (ac->ac_bitmap_page)
-		page_cache_release(ac->ac_bitmap_page);
+		put_page(ac->ac_bitmap_page);
 	if (ac->ac_buddy_page)
-		page_cache_release(ac->ac_buddy_page);
+		put_page(ac->ac_buddy_page);
 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
 		mutex_unlock(&ac->ac_lg->lg_mutex);
 	ext4_mb_collect_stats(ac);
@@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
 		 * otherwise we'll refresh it from
 		 * on-disk bitmap and lose not-yet-available
 		 * blocks */
-		page_cache_get(e4b->bd_buddy_page);
-		page_cache_get(e4b->bd_bitmap_page);
+		get_page(e4b->bd_buddy_page);
+		get_page(e4b->bd_bitmap_page);
 	}
 	while (*n) {
 		parent = *n;
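/*
 * The comments above describe mballoc's buddy-cache geometry: each block
 * group needs one bitmap block plus one buddy block, so a page holding
 * PAGE_SIZE / blocksize blocks describes half that many groups. A sketch
 * of the arithmetic, including the blocksize == PAGE_SIZE corner the
 * code above handles:
 */
#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, blocksize = 1024;
	unsigned blocks_per_page = page_size / blocksize; /* 4 */
	unsigned groups_per_page = blocks_per_page >> 1;  /* 2 */

	if (groups_per_page == 0)
		groups_per_page = 1; /* blocksize == PAGE_SIZE case */

	printf("%u blocks per page -> %u groups per page\n",
	       blocks_per_page, groups_per_page);
	return 0;
}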
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4098acc701c3..675b67e5d5c2 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
 	page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
 	if (!page[1]) {
 		unlock_page(page[0]);
-		page_cache_release(page[0]);
+		put_page(page[0]);
 		return -ENOMEM;
 	}
 	/*
@@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
 		create_empty_buffers(page, blocksize, 0);
 
 	head = page_buffers(page);
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
 	for (bh = head, block_start = 0; bh != head || !block_start;
 	     block++, block_start = block_end, bh = bh->b_this_page) {
 		block_end = block_start + blocksize;
@@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	int i, err2, jblocks, retries = 0;
 	int replaced_count = 0;
 	int from = data_offset_in_page << orig_inode->i_blkbits;
-	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
 	struct super_block *sb = orig_inode->i_sb;
 	struct buffer_head *bh = NULL;
 
@@ -404,9 +404,9 @@ data_copy:
 
 unlock_pages:
 	unlock_page(pagep[0]);
-	page_cache_release(pagep[0]);
+	put_page(pagep[0]);
 	unlock_page(pagep[1]);
-	page_cache_release(pagep[1]);
+	put_page(pagep[1]);
 stop_journal:
 	ext4_journal_stop(handle);
 	if (*err == -ENOSPC &&
@@ -554,7 +554,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
 	struct inode *orig_inode = file_inode(o_filp);
 	struct inode *donor_inode = file_inode(d_filp);
 	struct ext4_ext_path *path = NULL;
-	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
 	ext4_lblk_t o_end, o_start = orig_blk;
 	ext4_lblk_t d_start = donor_blk;
 	int ret;
@@ -648,9 +648,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
 		if (o_end - o_start < cur_len)
 			cur_len = o_end - o_start;
 
-		orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
+		orig_page_index = o_start >> (PAGE_SHIFT -
 					       orig_inode->i_blkbits);
-		donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
+		donor_page_index = d_start >> (PAGE_SHIFT -
 					       donor_inode->i_blkbits);
 		offset_in_page = o_start % blocks_per_page;
 		if (cur_len > blocks_per_page- offset_in_page)
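/*
 * move_extent.c locates a logical block both by page (shifting away the
 * blocks-per-page factor, as above) and by its slot inside that page
 * (the modulo). A worked example, again assuming 4 KiB pages over
 * 1 KiB blocks:
 */
#include <stdio.h>

int main(void)
{
	unsigned page_shift = 12, blkbits = 10;
	unsigned blocks_per_page = 1u << (page_shift - blkbits); /* 4 */
	unsigned long long o_start = 10; /* logical block number */
	unsigned long long page_index = o_start >> (page_shift - blkbits);
	unsigned offset_in_page = (unsigned)(o_start % blocks_per_page);

	/* block 10 is the third block (slot 2) of page 2 */
	printf("block %llu -> page %llu, slot %u\n",
	       o_start, page_index, offset_in_page);
	return 0;
}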
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d77d15f4b674..93ad0acf704c 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -432,8 +432,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 	/*
 	 * In the first loop we prepare and mark buffers to submit. We have to
 	 * mark all buffers in the page before submitting so that
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 5dc5e95063de..f24e7299e1c8 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */
 
@@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
 	struct inode *inode = mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
@@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		if (page_has_buffers(page))
 			goto confused;
 
-		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 		last_block = block_in_file + nr_pages * blocks_per_page;
 		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 		if (last_block > last_block_in_file)
@@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			set_error_page:
 				SetPageError(page);
 				zero_user_segment(page, 0,
-						  PAGE_CACHE_SIZE);
+						  PAGE_SIZE);
 				unlock_page(page);
 				goto next_page;
 			}
@@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		}
 		if (first_hole != blocks_per_page) {
 			zero_user_segment(page, first_hole << blkbits,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 			if (first_hole == 0) {
 				SetPageUptodate(page);
 				unlock_page(page);
@@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			unlock_page(page);
 	next_page:
 		if (pages)
-			page_cache_release(page);
+			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
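/*
 * ext4_mpage_readpages() above clamps its read window with a round-up
 * division: the last block containing file data is
 * (i_size + blocksize - 1) >> blkbits. A worked example with 1 KiB
 * blocks:
 */
#include <stdio.h>

int main(void)
{
	unsigned blkbits = 10;
	unsigned blocksize = 1u << blkbits; /* 1 KiB */
	unsigned long long i_size = 5300;   /* file size in bytes */
	unsigned long long last_block_in_file =
		(i_size + blocksize - 1) >> blkbits;

	/* bytes 0..5299 occupy blocks 0..5, i.e. 6 blocks in total */
	printf("blocks containing data: %llu\n", last_block_in_file);
	return 0;
}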
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 539297515896..0bb74aacb8c0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1784,7 +1784,7 @@ static int parse_options(char *options, struct super_block *sb,
 		int blocksize =
 			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
-		if (blocksize < PAGE_CACHE_SIZE) {
+		if (blocksize < PAGE_SIZE) {
 			ext4_msg(sb, KERN_ERR, "can't mount with "
 				 "dioread_nolock if block size != PAGE_SIZE");
 			return 0;
@@ -3808,7 +3808,7 @@ no_journal:
 	}
 
 	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
-	    (blocksize != PAGE_CACHE_SIZE)) {
+	    (blocksize != PAGE_SIZE)) {
 		ext4_msg(sb, KERN_ERR,
 			 "Unsupported blocksize for fs encryption");
 		goto failed_mount_wq;
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 6f7ee30a89ce..75ed5c2f0c16 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
 	if (res <= plen)
 		paddr[res] = '\0';
 	if (cpage)
-		page_cache_release(cpage);
+		put_page(cpage);
 	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
 errout:
 	if (cpage)
-		page_cache_release(cpage);
+		put_page(cpage);
 	kfree(paddr);
 	return ERR_PTR(res);
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e5c762b37239..53fec0872e60 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	/* Allocate a new bio */
 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
 
-	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
 	}
@@ -265,8 +265,8 @@ alloc_new:
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 
-	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
-							PAGE_CACHE_SIZE) {
+	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+							PAGE_SIZE) {
 		__submit_merged_bio(io);
 		goto alloc_new;
 	}
@@ -406,7 +406,7 @@ got_it:
 	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
 	 */
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		unlock_page(page);
 		return page;
@@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode,
 		goto got_it;
 
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	} else {
 		f2fs_put_page(page, 1);
@@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode,
 	}
 got_it:
 	if (new_i_size && i_size_read(inode) <
-				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
-		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
+				((loff_t)(index + 1) << PAGE_SHIFT)) {
+		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
 		/* Only the directory inode sets new_i_size */
 		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
 	}
@@ -570,9 +570,9 @@ alloc:
 	/* update i_size */
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 							dn->ofs_in_node;
-	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
 		i_size_write(dn->inode,
-				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
+				((loff_t)(fofs + 1) << PAGE_SHIFT));
 	return 0;
 }
 
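/*
 * The f2fs hunk above bumps i_size after allocating block fofs: the new
 * size is the byte just past that block, (fofs + 1) << PAGE_SHIFT. A
 * worked example; f2fs uses 4 KiB blocks, which this sketch assumes
 * equal to the page size:
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long i_size = 5000; /* current file size  */
	unsigned long long fofs = 3;      /* block just written */
	unsigned long long end = (fofs + 1) << PAGE_SHIFT; /* 16384 */

	if (i_size < end)
		i_size = end;

	printf("i_size after writing block %llu: %llu\n", fofs, i_size);
	return 0;
}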
@@ -971,7 +971,7 @@ got_it:
 				goto confused;
 			}
 		} else {
-			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+			zero_user_segment(page, 0, PAGE_SIZE);
 			SetPageUptodate(page);
 			unlock_page(page);
 			goto next_page;
@@ -1021,7 +1021,7 @@ submit_and_realloc:
 		goto next_page;
 set_error_page:
 		SetPageError(page);
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		goto next_page;
 confused:
@@ -1032,7 +1032,7 @@ confused:
 		unlock_page(page);
 next_page:
 		if (pages)
-			page_cache_release(page);
+			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
@@ -1136,7 +1136,7 @@ static int f2fs_write_data_page(struct page *page,
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = ((unsigned long long) i_size)
-							>> PAGE_CACHE_SHIFT;
+							>> PAGE_SHIFT;
 	unsigned offset = 0;
 	bool need_balance_fs = false;
 	int err = 0;
@@ -1157,11 +1157,11 @@ static int f2fs_write_data_page(struct page *page,
 	 * If the offset is out-of-range of file size,
 	 * this page does not have to be written to disk.
 	 */
-	offset = i_size & (PAGE_CACHE_SIZE - 1);
+	offset = i_size & (PAGE_SIZE - 1);
 	if ((page->index >= end_index + 1) || !offset)
 		goto out;
 
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 write:
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
@@ -1267,8 +1267,8 @@ next:
 		cycled = 0;
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
@@ -1448,11 +1448,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 	 * the block addresses when there is no need to fill the page.
 	 */
 	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
-					len == PAGE_CACHE_SIZE)
+					len == PAGE_SIZE)
 		return 0;
 
 	if (f2fs_has_inline_data(inode) ||
-			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+			(pos & PAGE_MASK) >= i_size_read(inode)) {
 		f2fs_lock_op(sbi);
 		locked = true;
 	}
@@ -1513,7 +1513,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *page = NULL;
-	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
+	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
 	bool need_balance = false;
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
@@ -1561,22 +1561,22 @@ repeat:
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
1563 1563
1564 if (len == PAGE_CACHE_SIZE) 1564 if (len == PAGE_SIZE)
1565 goto out_update; 1565 goto out_update;
1566 if (PageUptodate(page)) 1566 if (PageUptodate(page))
1567 goto out_clear; 1567 goto out_clear;
1568 1568
1569 if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { 1569 if ((pos & PAGE_MASK) >= i_size_read(inode)) {
1570 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 1570 unsigned start = pos & (PAGE_SIZE - 1);
1571 unsigned end = start + len; 1571 unsigned end = start + len;
1572 1572
1573 /* Reading beyond i_size is simple: memset to zero */ 1573 /* Reading beyond i_size is simple: memset to zero */
1574 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); 1574 zero_user_segments(page, 0, start, end, PAGE_SIZE);
1575 goto out_update; 1575 goto out_update;
1576 } 1576 }
1577 1577
1578 if (blkaddr == NEW_ADDR) { 1578 if (blkaddr == NEW_ADDR) {
1579 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 1579 zero_user_segment(page, 0, PAGE_SIZE);
1580 } else { 1580 } else {
1581 struct f2fs_io_info fio = { 1581 struct f2fs_io_info fio = {
1582 .sbi = sbi, 1582 .sbi = sbi,
@@ -1688,7 +1688,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
1688 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1688 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1689 1689
1690 if (inode->i_ino >= F2FS_ROOT_INO(sbi) && 1690 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
1691 (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) 1691 (offset % PAGE_SIZE || length != PAGE_SIZE))
1692 return; 1692 return;
1693 1693
1694 if (PageDirty(page)) { 1694 if (PageDirty(page)) {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 4fb6ef88a34f..f4a61a5ff79f 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 
 	/* build curseg */
 	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
-	si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
 
 	/* build dirty segmap */
 	si->base_mem += sizeof(struct dirty_seglist_info);
@@ -201,9 +201,9 @@ get_cache:
 
 	si->page_mem = 0;
 	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 80641ad82745..af819571bce7 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
 
 static unsigned long dir_blocks(struct inode *inode)
 {
-	return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
-							>> PAGE_CACHE_SHIFT;
+	return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+							>> PAGE_SHIFT;
 }
 
 static unsigned int dir_buckets(unsigned int level, int dir_level)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bbe2cd1265d0..7a4558d17f36 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b41c3579ea9e..443e07705c2a 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 		goto mapped;
 
 	/* page is wholly or partially inside EOF */
-	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
 						i_size_read(inode)) {
 		unsigned offset;
-		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
-		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+		offset = i_size_read(inode) & ~PAGE_MASK;
+		zero_user_segment(page, offset, PAGE_SIZE);
 	}
 	set_page_dirty(page);
 	SetPageUptodate(page);
@@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 			goto found;
 	}
 
-	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 
 	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
-	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
 		if (err && err != -ENOENT) {
@@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 		/* find data/hole in dnode block */
 		for (; dn.ofs_in_node < end_offset;
 				dn.ofs_in_node++, pgofs++,
-				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 			block_t blkaddr;
 			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 
@@ -508,8 +508,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
 static int truncate_partial_data_page(struct inode *inode, u64 from,
 								bool cache_only)
 {
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE - 1);
+	pgoff_t index = from >> PAGE_SHIFT;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 
@@ -529,7 +529,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 		return 0;
 truncate_out:
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
+	zero_user(page, offset, PAGE_SIZE - offset);
 	if (!cache_only || !f2fs_encrypted_inode(inode) ||
 					!S_ISREG(inode->i_mode))
 		set_page_dirty(page);
@@ -799,11 +799,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	if (ret)
 		return ret;
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	if (pg_start == pg_end) {
 		ret = fill_zero(inode, pg_start, off_start,
@@ -813,7 +813,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	} else {
 		if (off_start) {
 			ret = fill_zero(inode, pg_start++, off_start,
-						PAGE_CACHE_SIZE - off_start);
+						PAGE_SIZE - off_start);
 			if (ret)
 				return ret;
 		}
@@ -830,8 +830,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 			f2fs_balance_fs(sbi, true);
 
-			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
-			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+			blk_start = (loff_t)pg_start << PAGE_SHIFT;
+			blk_end = (loff_t)pg_end << PAGE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
 
@@ -954,8 +954,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	if (ret)
 		return ret;
 
-	pg_start = offset >> PAGE_CACHE_SHIFT;
-	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = offset >> PAGE_SHIFT;
+	pg_end = (offset + len) >> PAGE_SHIFT;
 
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1006,11 +1006,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 
 	truncate_pagecache_range(inode, offset, offset + len - 1);
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	if (pg_start == pg_end) {
 		ret = fill_zero(inode, pg_start, off_start,
@@ -1024,12 +1024,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 	} else {
 		if (off_start) {
 			ret = fill_zero(inode, pg_start++, off_start,
-						PAGE_CACHE_SIZE - off_start);
+						PAGE_SIZE - off_start);
 			if (ret)
 				return ret;
 
 			new_size = max_t(loff_t, new_size,
-					(loff_t)pg_start << PAGE_CACHE_SHIFT);
+					(loff_t)pg_start << PAGE_SHIFT);
 		}
 
 		for (index = pg_start; index < pg_end; index++) {
@@ -1060,7 +1060,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			f2fs_unlock_op(sbi);
 
 			new_size = max_t(loff_t, new_size,
-				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+				(loff_t)(index + 1) << PAGE_SHIFT);
 		}
 
 		if (off_end) {
@@ -1117,8 +1117,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
 	truncate_pagecache(inode, offset);
 
-	pg_start = offset >> PAGE_CACHE_SHIFT;
-	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = offset >> PAGE_SHIFT;
+	pg_end = (offset + len) >> PAGE_SHIFT;
 	delta = pg_end - pg_start;
 	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
@@ -1158,11 +1158,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
 	f2fs_balance_fs(sbi, true);
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	f2fs_lock_op(sbi);
 
@@ -1180,12 +1180,12 @@ noalloc:
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
-			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
+			new_size = (loff_t)(index + 1) << PAGE_SHIFT;
 		else if (index == pg_end)
-			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+			new_size = ((loff_t)index << PAGE_SHIFT) +
 							off_end;
 		else
-			new_size += PAGE_CACHE_SIZE;
+			new_size += PAGE_SIZE;
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -1652,8 +1652,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 	if (need_inplace_update(inode))
 		return -EINVAL;
 
-	pg_start = range->start >> PAGE_CACHE_SHIFT;
-	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
+	pg_start = range->start >> PAGE_SHIFT;
+	pg_end = (range->start + range->len) >> PAGE_SHIFT;
 
 	f2fs_balance_fs(sbi, true);
 
@@ -1770,7 +1770,7 @@ clear_out:
 out:
 	inode_unlock(inode);
 	if (!err)
-		range->len = (u64)total << PAGE_CACHE_SHIFT;
+		range->len = (u64)total << PAGE_SHIFT;
 	return err;
 }
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 358214e9f707..a2fbe6f427d3 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage)
 
 	f2fs_bug_on(F2FS_P_SB(page), page->index);
 
-	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
 	/* Copy the whole inline data block */
 	src_addr = inline_data_addr(ipage);
@@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
 	}
 
 	if (page->index)
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 	else
 		read_inline_data(page, ipage);
 
@@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 		goto out;
 
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
 	dentry_blk = kmap_atomic(page);
 
@@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
 
-	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
-		i_size_write(dir, PAGE_CACHE_SIZE);
+	if (i_size_read(dir) < PAGE_SIZE) {
+		i_size_write(dir, PAGE_SIZE);
 		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7876f1052101..013e57932d61 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 		goto errout;
 	}
 
-	/* this is broken symlink case */
-	if (unlikely(cstr.name[0] == 0)) {
-		res = -ENOENT;
-		goto errout;
-	}
-
 	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
 		/* Symlink data on the disk is corrupted */
 		res = -EIO;
@@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 	if (res < 0)
 		goto errout;
 
+	/* this is broken symlink case */
+	if (unlikely(pstr.name[0] == 0)) {
+		res = -ENOENT;
+		goto errout;
+	}
+
 	paddr = pstr.name;
 
 	/* Null-terminate the name */
 	paddr[res] = '\0';
 
-	page_cache_release(cpage);
+	put_page(cpage);
 	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
 errout:
 	fscrypt_fname_free_buffer(&pstr);
-	page_cache_release(cpage);
+	put_page(cpage);
 	return ERR_PTR(res);
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 118321bd1a7f..1a33de9d84b1 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 	 */
 	if (type == FREE_NIDS) {
 		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
-							PAGE_CACHE_SHIFT;
+							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == NAT_ENTRIES) {
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
-							PAGE_CACHE_SHIFT;
+							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == DIRTY_DENTS) {
 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 
 		for (i = 0; i <= UPDATE_INO; i++)
 			mem_size += (sbi->im[i].ino_num *
-				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+				sizeof(struct ino_entry)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else if (type == EXTENT_CACHE) {
 		mem_size = (atomic_read(&sbi->total_ext_tree) *
 				sizeof(struct extent_tree) +
 				atomic_read(&sbi->total_ext_node) *
-				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+				sizeof(struct extent_node)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else {
 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
-	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+	memcpy(dst_addr, src_addr, PAGE_SIZE);
 	set_page_dirty(dst_page);
 	f2fs_put_page(src_page, 1);
 
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 0b30cd2aeebd..011942f94d64 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -591,7 +591,7 @@ out:
 
 	/* truncate meta pages to be used by the recovery */
 	truncate_inode_pages_range(META_MAPPING(sbi),
-			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
 
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6f16b39f0b52..540669d6978e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 		}
 	}
 
-	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
 	if (valid_sum_count <= sum_in_page)
 		return 1;
 	else if ((valid_sum_count - sum_in_page) <=
-		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
 		return 2;
 	return 3;
 }
@@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
 	void *dst = page_address(page);
 
 	if (src)
-		memcpy(dst, src, PAGE_CACHE_SIZE);
+		memcpy(dst, src, PAGE_SIZE);
 	else
-		memset(dst, 0, PAGE_CACHE_SIZE);
+		memset(dst, 0, PAGE_SIZE);
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
 }
@@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 			s = (struct f2fs_summary *)(kaddr + offset);
 			seg_i->sum_blk->entries[j] = *s;
 			offset += SUMMARY_SIZE;
-			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
 						SUM_FOOTER_SIZE)
 				continue;
 
@@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
 			*summary = seg_i->sum_blk->entries[j];
 			written_size += SUMMARY_SIZE;
 
-			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
 						SUM_FOOTER_SIZE)
 				continue;
 
@@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
-	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+	memcpy(dst_addr, src_addr, PAGE_SIZE);
 
 	set_page_dirty(dst_page);
 	f2fs_put_page(src_page, 1);
@@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 
 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
 		mutex_init(&array[i].curseg_mutex);
-		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!array[i].sum_blk)
 			return -ENOMEM;
 		init_rwsem(&array[i].journal_rwsem);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 15bb81f8dac2..006f87d69921 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -984,9 +984,25 @@ static loff_t max_file_blocks(void)
 	return result;
 }
 
+static int __f2fs_commit_super(struct buffer_head *bh,
+			struct f2fs_super_block *super)
+{
+	lock_buffer(bh);
+	if (super)
+		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+	set_buffer_uptodate(bh);
+	set_buffer_dirty(bh);
+	unlock_buffer(bh);
+
+	/* it's rare case, we can do fua all the time */
+	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
 static inline bool sanity_check_area_boundary(struct super_block *sb,
-					struct f2fs_super_block *raw_super)
+					struct buffer_head *bh)
 {
+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+					(bh->b_data + F2FS_SUPER_OFFSET);
 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+	u64 main_end_blkaddr = main_blkaddr +
+				(segment_count_main << log_blocks_per_seg);
+	u64 seg_end_blkaddr = segment0_blkaddr +
+				(segment_count << log_blocks_per_seg);
 
 	if (segment0_blkaddr != cp_blkaddr) {
 		f2fs_msg(sb, KERN_INFO,
@@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
 		return true;
 	}
 
-	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
-		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+	if (main_end_blkaddr > seg_end_blkaddr) {
 		f2fs_msg(sb, KERN_INFO,
-			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
 			main_blkaddr,
-			segment0_blkaddr + (segment_count << log_blocks_per_seg),
+			segment0_blkaddr +
+				(segment_count << log_blocks_per_seg),
 			segment_count_main << log_blocks_per_seg);
 		return true;
+	} else if (main_end_blkaddr < seg_end_blkaddr) {
+		int err = 0;
+		char *res;
+
+		/* fix in-memory information all the time */
+		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+				segment0_blkaddr) >> log_blocks_per_seg);
+
+		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+			res = "internally";
+		} else {
+			err = __f2fs_commit_super(bh, NULL);
+			res = err ? "failed" : "done";
+		}
+		f2fs_msg(sb, KERN_INFO,
+			"Fix alignment : %s, start(%u) end(%u) block(%u)",
+			res, main_blkaddr,
+			segment0_blkaddr +
+				(segment_count << log_blocks_per_seg),
+			segment_count_main << log_blocks_per_seg);
+		if (err)
+			return true;
 	}
-
 	return false;
 }
 
 static int sanity_check_raw_super(struct super_block *sb,
-					struct f2fs_super_block *raw_super)
+					struct buffer_head *bh)
 {
+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+					(bh->b_data + F2FS_SUPER_OFFSET);
 	unsigned int blocksize;
 
 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb,
 	}
 
 	/* Currently, support only 4KB page cache size */
-	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+	if (F2FS_BLKSIZE != PAGE_SIZE) {
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid page_cache_size (%lu), supports only 4KB\n",
-			PAGE_CACHE_SIZE);
+			PAGE_SIZE);
 		return 1;
 	}
 
@@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb,
 	}
 
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
-	if (sanity_check_area_boundary(sb, raw_super))
+	if (sanity_check_area_boundary(sb, bh))
 		return 1;
 
 	return 0;
@@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb,
 {
 	int block;
 	struct buffer_head *bh;
-	struct f2fs_super_block *super, *buf;
+	struct f2fs_super_block *super;
 	int err = 0;
 
 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
@@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb,
 			continue;
 		}
 
-		buf = (struct f2fs_super_block *)
-				(bh->b_data + F2FS_SUPER_OFFSET);
-
 		/* sanity checking of raw super */
-		if (sanity_check_raw_super(sb, buf)) {
+		if (sanity_check_raw_super(sb, bh)) {
 			f2fs_msg(sb, KERN_ERR,
 				"Can't find valid F2FS filesystem in %dth superblock",
 				block + 1);
@@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb,
 		}
 
 		if (!*raw_super) {
-			memcpy(super, buf, sizeof(*super));
+			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+							sizeof(*super));
 			*valid_super_block = block;
 			*raw_super = super;
 		}
@@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb,
 	return err;
 }
 
-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 {
-	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
 	struct buffer_head *bh;
 	int err;
 
-	bh = sb_getblk(sbi->sb, block);
+	/* write back-up superblock first */
+	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
 	if (!bh)
 		return -EIO;
-
-	lock_buffer(bh);
-	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
-	set_buffer_uptodate(bh);
-	set_buffer_dirty(bh);
-	unlock_buffer(bh);
-
-	/* it's rare case, we can do fua all the time */
-	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
 	brelse(bh);
 
-	return err;
-}
-
-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
-{
-	int err;
-
-	/* write back-up superblock first */
-	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
-
 	/* if we are in recovery path, skip writing valid superblock */
 	if (recover || err)
 		return err;
 
 	/* write current valid superblock */
-	return __f2fs_commit_super(sbi, sbi->valid_super_block);
+	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+	if (!bh)
+		return -EIO;
+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+	brelse(bh);
+	return err;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1442,7 +1470,7 @@ try_onemore:
 	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
 	if (__exist_node_summaries(sbi))
 		sbi->kbytes_written =
-			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);
+			le64_to_cpu(seg_i->journal->info.kbytes_written);
 
 	build_gc_manager(sbi);
 
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index cb84f0fcc72a..bfc780c682fb 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -66,11 +66,11 @@ static int
 vxfs_immed_readpage(struct file *fp, struct page *pp)
 {
 	struct vxfs_inode_info	*vip = VXFS_INO(pp->mapping->host);
-	u_int64_t		offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT;
+	u_int64_t		offset = (u_int64_t)pp->index << PAGE_SHIFT;
 	caddr_t			kaddr;
 
 	kaddr = kmap(pp);
-	memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);
+	memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
 	kunmap(pp);
 
 	flush_dcache_page(pp);
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 1cff72df0389..a49e0cfbb686 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -45,7 +45,7 @@
 /*
  * Number of VxFS blocks per page.
  */
-#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
+#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_SIZE / (sbp)->s_blocksize))
 
 
 static struct dentry *	vxfs_lookup(struct inode *, struct dentry *, unsigned int);
@@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
 	if (de) {
 		ino = de->d_ino;
 		kunmap(pp);
-		page_cache_release(pp);
+		put_page(pp);
 	}
 
 	return (ino);
@@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
 	nblocks = dir_blocks(ip);
 	pblocks = VXFS_BLOCK_PER_PAGE(sbp);
 
-	page = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ~PAGE_CACHE_MASK;
+	page = pos >> PAGE_SHIFT;
+	offset = pos & ~PAGE_MASK;
 	block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
 
 	for (; page < npages; page++, block = 0) {
@@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
 				continue;
 
 			offset = (char *)de - kaddr;
-			ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+			ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
 			if (!dir_emit(ctx, de->d_name, de->d_namelen,
 					de->d_ino, DT_UNKNOWN)) {
 				vxfs_put_page(pp);
@@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
 		vxfs_put_page(pp);
 		offset = 0;
 	}
-	ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+	ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
 	return 0;
 }
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 5d318c44f855..e806694d4145 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -50,7 +50,7 @@ inline void
 vxfs_put_page(struct page *pp)
 {
 	kunmap(pp);
-	page_cache_release(pp);
+	put_page(pp);
 }
 
 /**
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fee81e8768c9..592cea54cea0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -33,7 +33,7 @@
 /*
  * 4MB minimal write chunk size
  */
-#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
 
 struct wb_completion {
 	atomic_t		cnt;
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4860a0..3078b679fcd1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -113,7 +113,7 @@ try_again:
 
 	wake_up_bit(&cookie->flags, 0);
 	if (xpage)
-		page_cache_release(xpage);
+		put_page(xpage);
 	__fscache_uncache_page(cookie, page);
 	return true;
 
@@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object,
 	}
 	spin_unlock(&object->lock);
 	if (xpage)
-		page_cache_release(xpage);
+		put_page(xpage);
 }
 
 /*
@@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
 		spin_unlock(&cookie->stores_lock);
 
 		for (i = n - 1; i >= 0; i--)
-			page_cache_release(results[i]);
+			put_page(results[i]);
 	}
 
 	_leave("");
@@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 
 	radix_tree_tag_set(&cookie->stores, page->index,
 			   FSCACHE_COOKIE_PENDING_TAG);
-	page_cache_get(page);
+	get_page(page);
 
 	/* we only want one writer at a time, but we do need to queue new
 	 * writers after exclusive ops */
@@ -1026,7 +1026,7 @@ submit_failed:
 	radix_tree_delete(&cookie->stores, page->index);
 	spin_unlock(&cookie->stores_lock);
 	wake_cookie = __fscache_unuse_cookie(cookie);
-	page_cache_release(page);
+	put_page(page);
 	ret = -ENOBUFS;
 	goto nobufs;
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ebb5e37455a0..cbece1221417 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 		return err;
 	}
 
-	page_cache_get(newpage);
+	get_page(newpage);
 
 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
 		lru_cache_add_file(newpage);
@@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 
 	if (err) {
 		unlock_page(newpage);
-		page_cache_release(newpage);
+		put_page(newpage);
 		return err;
 	}
 
 	unlock_page(oldpage);
-	page_cache_release(oldpage);
+	put_page(oldpage);
 	cs->len = 0;
 
 	return 0;
@@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
 		fuse_copy_finish(cs);
 
 	buf = cs->pipebufs;
-	page_cache_get(page);
+	get_page(page);
 	buf->page = page;
 	buf->offset = offset;
 	buf->len = count;
@@ -1435,7 +1435,7 @@ out_unlock:
 
 out:
 	for (; page_nr < cs.nr_segs; page_nr++)
-		page_cache_release(bufs[page_nr].page);
+		put_page(bufs[page_nr].page);
 
 	kfree(bufs);
 	return ret;
@@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
 		goto out_up_killsb;
 
 	mapping = inode->i_mapping;
-	index = outarg.offset >> PAGE_CACHE_SHIFT;
-	offset = outarg.offset & ~PAGE_CACHE_MASK;
+	index = outarg.offset >> PAGE_SHIFT;
+	offset = outarg.offset & ~PAGE_MASK;
 	file_size = i_size_read(inode);
 	end = outarg.offset + outarg.size;
 	if (end > file_size) {
@@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
 		if (!page)
 			goto out_iput;
 
-		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
 		if (!err && offset == 0 &&
-		    (this_num == PAGE_CACHE_SIZE || file_size == end))
+		    (this_num == PAGE_SIZE || file_size == end))
 			SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		if (err)
 			goto out_iput;
@@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 	size_t total_len = 0;
 	int num_pages;
 
-	offset = outarg->offset & ~PAGE_CACHE_MASK;
+	offset = outarg->offset & ~PAGE_MASK;
 	file_size = i_size_read(inode);
 
 	num = outarg->size;
@@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 	req->page_descs[0].offset = offset;
 	req->end = fuse_retrieve_end;
 
-	index = outarg->offset >> PAGE_CACHE_SHIFT;
+	index = outarg->offset >> PAGE_SHIFT;
 
 	while (num && req->num_pages < num_pages) {
 		struct page *page;
@@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 		if (!page)
 			break;
 
-		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		req->pages[req->num_pages] = page;
 		req->page_descs[req->num_pages].length = this_num;
 		req->num_pages++;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9dde38f12c07..719924d6c706 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
348 pgoff_t curr_index; 348 pgoff_t curr_index;
349 349
350 BUG_ON(req->inode != inode); 350 BUG_ON(req->inode != inode);
351 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 351 curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
352 if (idx_from < curr_index + req->num_pages && 352 if (idx_from < curr_index + req->num_pages &&
353 curr_index <= idx_to) { 353 curr_index <= idx_to) {
354 found = true; 354 found = true;
@@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
683 * present there. 683 * present there.
684 */ 684 */
685 int i; 685 int i;
686 int start_idx = num_read >> PAGE_CACHE_SHIFT; 686 int start_idx = num_read >> PAGE_SHIFT;
687 size_t off = num_read & (PAGE_CACHE_SIZE - 1); 687 size_t off = num_read & (PAGE_SIZE - 1);
688 688
689 for (i = start_idx; i < req->num_pages; i++) { 689 for (i = start_idx; i < req->num_pages; i++) {
690 zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); 690 zero_user_segment(req->pages[i], off, PAGE_SIZE);
691 off = 0; 691 off = 0;
692 } 692 }
693 } else { 693 } else {
@@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page)
704 struct fuse_req *req; 704 struct fuse_req *req;
705 size_t num_read; 705 size_t num_read;
706 loff_t pos = page_offset(page); 706 loff_t pos = page_offset(page);
707 size_t count = PAGE_CACHE_SIZE; 707 size_t count = PAGE_SIZE;
708 u64 attr_ver; 708 u64 attr_ver;
709 int err; 709 int err;
710 710
@@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
789 else 789 else
790 SetPageError(page); 790 SetPageError(page);
791 unlock_page(page); 791 unlock_page(page);
792 page_cache_release(page); 792 put_page(page);
793 } 793 }
794 if (req->ff) 794 if (req->ff)
795 fuse_file_put(req->ff, false); 795 fuse_file_put(req->ff, false);
@@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
800 struct fuse_file *ff = file->private_data; 800 struct fuse_file *ff = file->private_data;
801 struct fuse_conn *fc = ff->fc; 801 struct fuse_conn *fc = ff->fc;
802 loff_t pos = page_offset(req->pages[0]); 802 loff_t pos = page_offset(req->pages[0]);
803 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 803 size_t count = req->num_pages << PAGE_SHIFT;
804 804
805 req->out.argpages = 1; 805 req->out.argpages = 1;
806 req->out.page_zeroing = 1; 806 req->out.page_zeroing = 1;
@@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
836 836
837 if (req->num_pages && 837 if (req->num_pages &&
838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
839 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 839 (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
840 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 840 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
841 int nr_alloc = min_t(unsigned, data->nr_pages, 841 int nr_alloc = min_t(unsigned, data->nr_pages,
842 FUSE_MAX_PAGES_PER_REQ); 842 FUSE_MAX_PAGES_PER_REQ);
@@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
858 return -EIO; 858 return -EIO;
859 } 859 }
860 860
861 page_cache_get(page); 861 get_page(page);
862 req->pages[req->num_pages] = page; 862 req->pages[req->num_pages] = page;
863 req->page_descs[req->num_pages].length = PAGE_SIZE; 863 req->page_descs[req->num_pages].length = PAGE_SIZE;
864 req->num_pages++; 864 req->num_pages++;
@@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
1003 for (i = 0; i < req->num_pages; i++) { 1003 for (i = 0; i < req->num_pages; i++) {
1004 struct page *page = req->pages[i]; 1004 struct page *page = req->pages[i];
1005 1005
1006 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) 1006 if (!req->out.h.error && !offset && count >= PAGE_SIZE)
1007 SetPageUptodate(page); 1007 SetPageUptodate(page);
1008 1008
1009 if (count > PAGE_CACHE_SIZE - offset) 1009 if (count > PAGE_SIZE - offset)
1010 count -= PAGE_CACHE_SIZE - offset; 1010 count -= PAGE_SIZE - offset;
1011 else 1011 else
1012 count = 0; 1012 count = 0;
1013 offset = 0; 1013 offset = 0;
1014 1014
1015 unlock_page(page); 1015 unlock_page(page);
1016 page_cache_release(page); 1016 put_page(page);
1017 } 1017 }
1018 1018
1019 return res; 1019 return res;
@@ -1024,7 +1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1024 struct iov_iter *ii, loff_t pos) 1024 struct iov_iter *ii, loff_t pos)
1025{ 1025{
1026 struct fuse_conn *fc = get_fuse_conn(mapping->host); 1026 struct fuse_conn *fc = get_fuse_conn(mapping->host);
1027 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 1027 unsigned offset = pos & (PAGE_SIZE - 1);
1028 size_t count = 0; 1028 size_t count = 0;
1029 int err; 1029 int err;
1030 1030
@@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1034 do { 1034 do {
1035 size_t tmp; 1035 size_t tmp;
1036 struct page *page; 1036 struct page *page;
1037 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1037 pgoff_t index = pos >> PAGE_SHIFT;
1038 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, 1038 size_t bytes = min_t(size_t, PAGE_SIZE - offset,
1039 iov_iter_count(ii)); 1039 iov_iter_count(ii));
1040 1040
1041 bytes = min_t(size_t, bytes, fc->max_write - count); 1041 bytes = min_t(size_t, bytes, fc->max_write - count);
@@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1059 iov_iter_advance(ii, tmp); 1059 iov_iter_advance(ii, tmp);
1060 if (!tmp) { 1060 if (!tmp) {
1061 unlock_page(page); 1061 unlock_page(page);
1062 page_cache_release(page); 1062 put_page(page);
1063 bytes = min(bytes, iov_iter_single_seg_count(ii)); 1063 bytes = min(bytes, iov_iter_single_seg_count(ii));
1064 goto again; 1064 goto again;
1065 } 1065 }
@@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1072 count += tmp; 1072 count += tmp;
1073 pos += tmp; 1073 pos += tmp;
1074 offset += tmp; 1074 offset += tmp;
1075 if (offset == PAGE_CACHE_SIZE) 1075 if (offset == PAGE_SIZE)
1076 offset = 0; 1076 offset = 0;
1077 1077
1078 if (!fc->big_writes) 1078 if (!fc->big_writes)
@@ -1086,8 +1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1086static inline unsigned fuse_wr_pages(loff_t pos, size_t len) 1086static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
1087{ 1087{
1088 return min_t(unsigned, 1088 return min_t(unsigned,
1089 ((pos + len - 1) >> PAGE_CACHE_SHIFT) - 1089 ((pos + len - 1) >> PAGE_SHIFT) -
1090 (pos >> PAGE_CACHE_SHIFT) + 1, 1090 (pos >> PAGE_SHIFT) + 1,
1091 FUSE_MAX_PAGES_PER_REQ); 1091 FUSE_MAX_PAGES_PER_REQ);
1092} 1092}
1093 1093
@@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1205 goto out; 1205 goto out;
1206 1206
1207 invalidate_mapping_pages(file->f_mapping, 1207 invalidate_mapping_pages(file->f_mapping,
1208 pos >> PAGE_CACHE_SHIFT, 1208 pos >> PAGE_SHIFT,
1209 endbyte >> PAGE_CACHE_SHIFT); 1209 endbyte >> PAGE_SHIFT);
1210 1210
1211 written += written_buffered; 1211 written += written_buffered;
1212 iocb->ki_pos = pos + written_buffered; 1212 iocb->ki_pos = pos + written_buffered;
@@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1315 size_t nmax = write ? fc->max_write : fc->max_read; 1315 size_t nmax = write ? fc->max_write : fc->max_read;
1316 loff_t pos = *ppos; 1316 loff_t pos = *ppos;
1317 size_t count = iov_iter_count(iter); 1317 size_t count = iov_iter_count(iter);
1318 pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT; 1318 pgoff_t idx_from = pos >> PAGE_SHIFT;
1319 pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT; 1319 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
1320 ssize_t res = 0; 1320 ssize_t res = 0;
1321 struct fuse_req *req; 1321 struct fuse_req *req;
1322 int err = 0; 1322 int err = 0;
@@ -1466,7 +1466,7 @@ __acquires(fc->lock)
1466{ 1466{
1467 struct fuse_inode *fi = get_fuse_inode(req->inode); 1467 struct fuse_inode *fi = get_fuse_inode(req->inode);
1468 struct fuse_write_in *inarg = &req->misc.write.in; 1468 struct fuse_write_in *inarg = &req->misc.write.in;
1469 __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; 1469 __u64 data_size = req->num_pages * PAGE_SIZE;
1470 1470
1471 if (!fc->connected) 1471 if (!fc->connected)
1472 goto out_free; 1472 goto out_free;
@@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1727 list_del(&new_req->writepages_entry); 1727 list_del(&new_req->writepages_entry);
1728 list_for_each_entry(old_req, &fi->writepages, writepages_entry) { 1728 list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
1729 BUG_ON(old_req->inode != new_req->inode); 1729 BUG_ON(old_req->inode != new_req->inode);
1730 curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 1730 curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
1731 if (curr_index <= page->index && 1731 if (curr_index <= page->index &&
1732 page->index < curr_index + old_req->num_pages) { 1732 page->index < curr_index + old_req->num_pages) {
1733 found = true; 1733 found = true;
@@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1742 new_req->num_pages = 1; 1742 new_req->num_pages = 1;
1743 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { 1743 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
1744 BUG_ON(tmp->inode != new_req->inode); 1744 BUG_ON(tmp->inode != new_req->inode);
1745 curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; 1745 curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
1746 if (tmp->num_pages == 1 && 1746 if (tmp->num_pages == 1 &&
1747 curr_index == page->index) { 1747 curr_index == page->index) {
1748 old_req = tmp; 1748 old_req = tmp;
@@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page,
1799 1799
1800 if (req && req->num_pages && 1800 if (req && req->num_pages &&
1801 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || 1801 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
1802 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || 1802 (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
1803 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { 1803 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
1804 fuse_writepages_send(data); 1804 fuse_writepages_send(data);
1805 data->req = NULL; 1805 data->req = NULL;
@@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
1924 loff_t pos, unsigned len, unsigned flags, 1924 loff_t pos, unsigned len, unsigned flags,
1925 struct page **pagep, void **fsdata) 1925 struct page **pagep, void **fsdata)
1926{ 1926{
1927 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1927 pgoff_t index = pos >> PAGE_SHIFT;
1928 struct fuse_conn *fc = get_fuse_conn(file_inode(file)); 1928 struct fuse_conn *fc = get_fuse_conn(file_inode(file));
1929 struct page *page; 1929 struct page *page;
1930 loff_t fsize; 1930 loff_t fsize;
@@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
1938 1938
1939 fuse_wait_on_page_writeback(mapping->host, page->index); 1939 fuse_wait_on_page_writeback(mapping->host, page->index);
1940 1940
1941 if (PageUptodate(page) || len == PAGE_CACHE_SIZE) 1941 if (PageUptodate(page) || len == PAGE_SIZE)
1942 goto success; 1942 goto success;
1943 /* 1943 /*
1944 * Check if the start of this page comes after the end of file, in which 1944 * Check if the start of this page comes after the end of file, in which
1945 * case the readpage can be optimized away. 1945 * case the readpage can be optimized away.
1946 */ 1946 */
1947 fsize = i_size_read(mapping->host); 1947 fsize = i_size_read(mapping->host);
1948 if (fsize <= (pos & PAGE_CACHE_MASK)) { 1948 if (fsize <= (pos & PAGE_MASK)) {
1949 size_t off = pos & ~PAGE_CACHE_MASK; 1949 size_t off = pos & ~PAGE_MASK;
1950 if (off) 1950 if (off)
1951 zero_user_segment(page, 0, off); 1951 zero_user_segment(page, 0, off);
1952 goto success; 1952 goto success;
@@ -1960,7 +1960,7 @@ success:
1960 1960
1961cleanup: 1961cleanup:
1962 unlock_page(page); 1962 unlock_page(page);
1963 page_cache_release(page); 1963 put_page(page);
1964error: 1964error:
1965 return err; 1965 return err;
1966} 1966}
@@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1973 1973
1974 if (!PageUptodate(page)) { 1974 if (!PageUptodate(page)) {
1975 /* Zero any unwritten bytes at the end of the page */ 1975 /* Zero any unwritten bytes at the end of the page */
1976 size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; 1976 size_t endoff = (pos + copied) & ~PAGE_MASK;
1977 if (endoff) 1977 if (endoff)
1978 zero_user_segment(page, endoff, PAGE_CACHE_SIZE); 1978 zero_user_segment(page, endoff, PAGE_SIZE);
1979 SetPageUptodate(page); 1979 SetPageUptodate(page);
1980 } 1980 }
1981 1981
1982 fuse_write_update_size(inode, pos + copied); 1982 fuse_write_update_size(inode, pos + copied);
1983 set_page_dirty(page); 1983 set_page_dirty(page);
1984 unlock_page(page); 1984 unlock_page(page);
1985 page_cache_release(page); 1985 put_page(page);
1986 1986
1987 return copied; 1987 return copied;
1988} 1988}
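Two substitutions repeat through every hunk in this series: page_cache_release() becomes put_page(), and the PAGE_CACHE_{SIZE,SHIFT,MASK} macros become the PAGE_* twins they were already defined equal to. A sketch of the index/offset identities the fuse paths lean on, assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SHIFT 12                 /* assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long pos = 3 * PAGE_SIZE + 123;

    assert((pos >> PAGE_SHIFT) == 3);            /* page index */
    assert((pos & ~PAGE_MASK) == 123);           /* offset within the page */
    assert((pos & PAGE_MASK) == 3 * PAGE_SIZE);  /* start of the page */

    /* fuse_write_end(): a copy ending exactly on a page boundary leaves
     * endoff == 0, so no tail needs zeroing. */
    assert(((4 * PAGE_SIZE) & ~PAGE_MASK) == 0);
    return 0;
}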
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4d69d5c0bedc..1ce67668a8e1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
339 339
340 fuse_invalidate_attr(inode); 340 fuse_invalidate_attr(inode);
341 if (offset >= 0) { 341 if (offset >= 0) {
342 pg_start = offset >> PAGE_CACHE_SHIFT; 342 pg_start = offset >> PAGE_SHIFT;
343 if (len <= 0) 343 if (len <= 0)
344 pg_end = -1; 344 pg_end = -1;
345 else 345 else
346 pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; 346 pg_end = (offset + len - 1) >> PAGE_SHIFT;
347 invalidate_inode_pages2_range(inode->i_mapping, 347 invalidate_inode_pages2_range(inode->i_mapping,
348 pg_start, pg_end); 348 pg_start, pg_end);
349 } 349 }
@@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
864 process_init_limits(fc, arg); 864 process_init_limits(fc, arg);
865 865
866 if (arg->minor >= 6) { 866 if (arg->minor >= 6) {
867 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; 867 ra_pages = arg->max_readahead / PAGE_SIZE;
868 if (arg->flags & FUSE_ASYNC_READ) 868 if (arg->flags & FUSE_ASYNC_READ)
869 fc->async_read = 1; 869 fc->async_read = 1;
870 if (!(arg->flags & FUSE_POSIX_LOCKS)) 870 if (!(arg->flags & FUSE_POSIX_LOCKS))
@@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
901 if (arg->time_gran && arg->time_gran <= 1000000000) 901 if (arg->time_gran && arg->time_gran <= 1000000000)
902 fc->sb->s_time_gran = arg->time_gran; 902 fc->sb->s_time_gran = arg->time_gran;
903 } else { 903 } else {
904 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 904 ra_pages = fc->max_read / PAGE_SIZE;
905 fc->no_lock = 1; 905 fc->no_lock = 1;
906 fc->no_flock = 1; 906 fc->no_flock = 1;
907 } 907 }
@@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
922 922
923 arg->major = FUSE_KERNEL_VERSION; 923 arg->major = FUSE_KERNEL_VERSION;
924 arg->minor = FUSE_KERNEL_MINOR_VERSION; 924 arg->minor = FUSE_KERNEL_MINOR_VERSION;
925 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 925 arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
926 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | 926 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
927 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | 927 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
955 int err; 955 int err;
956 956
957 fc->bdi.name = "fuse"; 957 fc->bdi.name = "fuse";
958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
959 /* fuse does its own writeback accounting */ 959 /* fuse does its own writeback accounting */
960 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT; 960 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
961 961
@@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1053 goto err; 1053 goto err;
1054#endif 1054#endif
1055 } else { 1055 } else {
1056 sb->s_blocksize = PAGE_CACHE_SIZE; 1056 sb->s_blocksize = PAGE_SIZE;
1057 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1057 sb->s_blocksize_bits = PAGE_SHIFT;
1058 } 1058 }
1059 sb->s_magic = FUSE_SUPER_MAGIC; 1059 sb->s_magic = FUSE_SUPER_MAGIC;
1060 sb->s_op = &fuse_super_operations; 1060 sb->s_op = &fuse_super_operations;
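The inode.c hunks cover the readahead plumbing, which round-trips between bytes and pages. A sketch of that round trip; VM_MAX_READAHEAD (128 kbytes) is taken from this era's <linux/mm.h> and is an assumption of the example:

#include <assert.h>

#define PAGE_SIZE 4096UL              /* assumed */
#define VM_MAX_READAHEAD 128          /* kbytes; assumed from <linux/mm.h> */

int main(void)
{
    /* fuse_bdi_init(): default readahead window, in pages. */
    unsigned long ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
    assert(ra_pages == 32);

    /* fuse_send_init() advertises the window to the daemon in bytes... */
    unsigned long max_readahead = ra_pages * PAGE_SIZE;
    /* ...and process_init_reply() converts the reply back to pages. */
    assert(max_readahead / PAGE_SIZE == ra_pages);
    return 0;
}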
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aa016e4b8bec..1bbbee945f46 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page,
101 struct gfs2_inode *ip = GFS2_I(inode); 101 struct gfs2_inode *ip = GFS2_I(inode);
102 struct gfs2_sbd *sdp = GFS2_SB(inode); 102 struct gfs2_sbd *sdp = GFS2_SB(inode);
103 loff_t i_size = i_size_read(inode); 103 loff_t i_size = i_size_read(inode);
104 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 104 pgoff_t end_index = i_size >> PAGE_SHIFT;
105 unsigned offset; 105 unsigned offset;
106 106
107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) 107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
@@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page,
109 if (current->journal_info) 109 if (current->journal_info)
110 goto redirty; 110 goto redirty;
111 /* Is the page fully outside i_size? (truncate in progress) */ 111 /* Is the page fully outside i_size? (truncate in progress) */
112 offset = i_size & (PAGE_CACHE_SIZE-1); 112 offset = i_size & (PAGE_SIZE-1);
113 if (page->index > end_index || (page->index == end_index && !offset)) { 113 if (page->index > end_index || (page->index == end_index && !offset)) {
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
115 goto out; 115 goto out;
116 } 116 }
117 return 1; 117 return 1;
@@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
238{ 238{
239 struct inode *inode = mapping->host; 239 struct inode *inode = mapping->host;
240 struct gfs2_sbd *sdp = GFS2_SB(inode); 240 struct gfs2_sbd *sdp = GFS2_SB(inode);
241 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 241 unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
242 int i; 242 int i;
243 int ret; 243 int ret;
244 244
@@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
366 cycled = 0; 366 cycled = 0;
367 end = -1; 367 end = -1;
368 } else { 368 } else {
369 index = wbc->range_start >> PAGE_CACHE_SHIFT; 369 index = wbc->range_start >> PAGE_SHIFT;
370 end = wbc->range_end >> PAGE_CACHE_SHIFT; 370 end = wbc->range_end >> PAGE_SHIFT;
371 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 371 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
372 range_whole = 1; 372 range_whole = 1;
373 cycled = 1; /* ignore range_cyclic tests */ 373 cycled = 1; /* ignore range_cyclic tests */
@@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
458 * so we need to supply one here. It doesn't happen often. 458 * so we need to supply one here. It doesn't happen often.
459 */ 459 */
460 if (unlikely(page->index)) { 460 if (unlikely(page->index)) {
461 zero_user(page, 0, PAGE_CACHE_SIZE); 461 zero_user(page, 0, PAGE_SIZE);
462 SetPageUptodate(page); 462 SetPageUptodate(page);
463 return 0; 463 return 0;
464 } 464 }
@@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
471 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) 471 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
472 dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); 472 dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
473 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 473 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
474 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); 474 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
475 kunmap_atomic(kaddr); 475 kunmap_atomic(kaddr);
476 flush_dcache_page(page); 476 flush_dcache_page(page);
477 brelse(dibh); 477 brelse(dibh);
@@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
560 unsigned size) 560 unsigned size)
561{ 561{
562 struct address_space *mapping = ip->i_inode.i_mapping; 562 struct address_space *mapping = ip->i_inode.i_mapping;
563 unsigned long index = *pos / PAGE_CACHE_SIZE; 563 unsigned long index = *pos / PAGE_SIZE;
564 unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); 564 unsigned offset = *pos & (PAGE_SIZE - 1);
565 unsigned copied = 0; 565 unsigned copied = 0;
566 unsigned amt; 566 unsigned amt;
567 struct page *page; 567 struct page *page;
@@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
569 569
570 do { 570 do {
571 amt = size - copied; 571 amt = size - copied;
572 if (offset + size > PAGE_CACHE_SIZE) 572 if (offset + size > PAGE_SIZE)
573 amt = PAGE_CACHE_SIZE - offset; 573 amt = PAGE_SIZE - offset;
574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL); 574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
575 if (IS_ERR(page)) 575 if (IS_ERR(page))
576 return PTR_ERR(page); 576 return PTR_ERR(page);
577 p = kmap_atomic(page); 577 p = kmap_atomic(page);
578 memcpy(buf + copied, p + offset, amt); 578 memcpy(buf + copied, p + offset, amt);
579 kunmap_atomic(p); 579 kunmap_atomic(p);
580 page_cache_release(page); 580 put_page(page);
581 copied += amt; 581 copied += amt;
582 index++; 582 index++;
583 offset = 0; 583 offset = 0;
@@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
651 unsigned requested = 0; 651 unsigned requested = 0;
652 int alloc_required; 652 int alloc_required;
653 int error = 0; 653 int error = 0;
654 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 654 pgoff_t index = pos >> PAGE_SHIFT;
655 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 655 unsigned from = pos & (PAGE_SIZE - 1);
656 struct page *page; 656 struct page *page;
657 657
658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); 658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
697 rblocks += gfs2_rg_blocks(ip, requested); 697 rblocks += gfs2_rg_blocks(ip, requested);
698 698
699 error = gfs2_trans_begin(sdp, rblocks, 699 error = gfs2_trans_begin(sdp, rblocks,
700 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 700 PAGE_SIZE/sdp->sd_sb.sb_bsize);
701 if (error) 701 if (error)
702 goto out_trans_fail; 702 goto out_trans_fail;
703 703
@@ -727,7 +727,7 @@ out:
727 return 0; 727 return 0;
728 728
729 unlock_page(page); 729 unlock_page(page);
730 page_cache_release(page); 730 put_page(page);
731 731
732 gfs2_trans_end(sdp); 732 gfs2_trans_end(sdp);
733 if (pos + len > ip->i_inode.i_size) 733 if (pos + len > ip->i_inode.i_size)
@@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
827 if (!PageUptodate(page)) 827 if (!PageUptodate(page))
828 SetPageUptodate(page); 828 SetPageUptodate(page);
829 unlock_page(page); 829 unlock_page(page);
830 page_cache_release(page); 830 put_page(page);
831 831
832 if (copied) { 832 if (copied) {
833 if (inode->i_size < to) 833 if (inode->i_size < to)
@@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
877 struct gfs2_sbd *sdp = GFS2_SB(inode); 877 struct gfs2_sbd *sdp = GFS2_SB(inode);
878 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 878 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
879 struct buffer_head *dibh; 879 struct buffer_head *dibh;
880 unsigned int from = pos & (PAGE_CACHE_SIZE - 1); 880 unsigned int from = pos & (PAGE_SIZE - 1);
881 unsigned int to = from + len; 881 unsigned int to = from + len;
882 int ret; 882 int ret;
883 struct gfs2_trans *tr = current->journal_info; 883 struct gfs2_trans *tr = current->journal_info;
@@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
888 ret = gfs2_meta_inode_buffer(ip, &dibh); 888 ret = gfs2_meta_inode_buffer(ip, &dibh);
889 if (unlikely(ret)) { 889 if (unlikely(ret)) {
890 unlock_page(page); 890 unlock_page(page);
891 page_cache_release(page); 891 put_page(page);
892 goto failed; 892 goto failed;
893 } 893 }
894 894
@@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
992{ 992{
993 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); 993 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
994 unsigned int stop = offset + length; 994 unsigned int stop = offset + length;
995 int partial_page = (offset || length < PAGE_CACHE_SIZE); 995 int partial_page = (offset || length < PAGE_SIZE);
996 struct buffer_head *bh, *head; 996 struct buffer_head *bh, *head;
997 unsigned long pos = 0; 997 unsigned long pos = 0;
998 998
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
1082 * the first place, mapping->nr_pages will always be zero. 1082 * the first place, mapping->nr_pages will always be zero.
1083 */ 1083 */
1084 if (mapping->nrpages) { 1084 if (mapping->nrpages) {
1085 loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1); 1085 loff_t lstart = offset & ~(PAGE_SIZE - 1);
1086 loff_t len = iov_iter_count(iter); 1086 loff_t len = iov_iter_count(iter);
1087 loff_t end = PAGE_ALIGN(offset + len) - 1; 1087 loff_t end = PAGE_ALIGN(offset + len) - 1;
1088 1088
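gfs2_writepage_common() carries the canonical end-of-file check repeated across these hunks: a page is skipped only if it lies wholly beyond i_size, and the partial last page is detected by masking i_size. A standalone rendering, assuming 4 KiB pages:

#include <assert.h>
#include <stdbool.h>

#define PAGE_SHIFT 12                 /* assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Is the page at 'index' fully outside a file of 'i_size' bytes? */
static bool page_outside_eof(unsigned long index, unsigned long long i_size)
{
    unsigned long end_index = i_size >> PAGE_SHIFT;
    unsigned offset = i_size & (PAGE_SIZE - 1);

    return index > end_index || (index == end_index && !offset);
}

int main(void)
{
    assert(!page_outside_eof(0, 1));            /* one byte keeps page 0 live */
    assert(page_outside_eof(1, 1));             /* page 1 is past EOF */
    assert(page_outside_eof(1, PAGE_SIZE));     /* aligned EOF: page 1 empty */
    assert(!page_outside_eof(1, PAGE_SIZE + 1));
    return 0;
}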
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 0860f0b5b3f1..24ce1cdd434a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
75 dsize = dibh->b_size - sizeof(struct gfs2_dinode); 75 dsize = dibh->b_size - sizeof(struct gfs2_dinode);
76 76
77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
78 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); 78 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
79 kunmap(page); 79 kunmap(page);
80 80
81 SetPageUptodate(page); 81 SetPageUptodate(page);
@@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
98 98
99 if (release) { 99 if (release) {
100 unlock_page(page); 100 unlock_page(page);
101 page_cache_release(page); 101 put_page(page);
102 } 102 }
103 103
104 return 0; 104 return 0;
@@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
932{ 932{
933 struct inode *inode = mapping->host; 933 struct inode *inode = mapping->host;
934 struct gfs2_inode *ip = GFS2_I(inode); 934 struct gfs2_inode *ip = GFS2_I(inode);
935 unsigned long index = from >> PAGE_CACHE_SHIFT; 935 unsigned long index = from >> PAGE_SHIFT;
936 unsigned offset = from & (PAGE_CACHE_SIZE-1); 936 unsigned offset = from & (PAGE_SIZE-1);
937 unsigned blocksize, iblock, length, pos; 937 unsigned blocksize, iblock, length, pos;
938 struct buffer_head *bh; 938 struct buffer_head *bh;
939 struct page *page; 939 struct page *page;
@@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
945 945
946 blocksize = inode->i_sb->s_blocksize; 946 blocksize = inode->i_sb->s_blocksize;
947 length = blocksize - (offset & (blocksize - 1)); 947 length = blocksize - (offset & (blocksize - 1));
948 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 948 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
949 949
950 if (!page_has_buffers(page)) 950 if (!page_has_buffers(page))
951 create_empty_buffers(page, blocksize, 0); 951 create_empty_buffers(page, blocksize, 0);
@@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
989 mark_buffer_dirty(bh); 989 mark_buffer_dirty(bh);
990unlock: 990unlock:
991 unlock_page(page); 991 unlock_page(page);
992 page_cache_release(page); 992 put_page(page);
993 return err; 993 return err;
994} 994}
995 995
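gfs2_block_truncate_page() turns a byte offset into a page index and then into the first filesystem block of that page; the shift difference is log2 of blocks-per-page. A worked example, assuming 4 KiB pages over 1 KiB blocks:

#include <assert.h>

#define PAGE_SHIFT 12                 /* assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned blocksize_bits = 10;     /* assumed: 1 KiB filesystem blocks */
    unsigned long from = 5 * PAGE_SIZE + 2500;       /* truncate point */

    unsigned long index  = from >> PAGE_SHIFT;       /* page 5 */
    unsigned     offset  = from & (PAGE_SIZE - 1);   /* byte 2500 in the page */
    unsigned long iblock = index << (PAGE_SHIFT - blocksize_bits);

    assert(index == 5 && offset == 2500);
    assert(iblock == 20);             /* 4 blocks per page: page 5 -> block 20 */
    return 0;
}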
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index c9384f932975..208efc70ad49 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page)
354{ 354{
355 struct inode *inode = page->mapping->host; 355 struct inode *inode = page->mapping->host;
356 struct buffer_head bh; 356 struct buffer_head bh;
357 unsigned long size = PAGE_CACHE_SIZE; 357 unsigned long size = PAGE_SIZE;
358 u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 358 u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
359 359
360 do { 360 do {
361 bh.b_state = 0; 361 bh.b_state = 0;
@@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
386 struct gfs2_sbd *sdp = GFS2_SB(inode); 386 struct gfs2_sbd *sdp = GFS2_SB(inode);
387 struct gfs2_alloc_parms ap = { .aflags = 0, }; 387 struct gfs2_alloc_parms ap = { .aflags = 0, };
388 unsigned long last_index; 388 unsigned long last_index;
389 u64 pos = page->index << PAGE_CACHE_SHIFT; 389 u64 pos = page->index << PAGE_SHIFT;
390 unsigned int data_blocks, ind_blocks, rblocks; 390 unsigned int data_blocks, ind_blocks, rblocks;
391 struct gfs2_holder gh; 391 struct gfs2_holder gh;
392 loff_t size; 392 loff_t size;
@@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
401 if (ret) 401 if (ret)
402 goto out; 402 goto out;
403 403
404 gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); 404 gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
405 405
406 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 406 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
407 ret = gfs2_glock_nq(&gh); 407 ret = gfs2_glock_nq(&gh);
@@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
411 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 411 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
412 set_bit(GIF_SW_PAGED, &ip->i_flags); 412 set_bit(GIF_SW_PAGED, &ip->i_flags);
413 413
414 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { 414 if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
415 lock_page(page); 415 lock_page(page);
416 if (!PageUptodate(page) || page->mapping != inode->i_mapping) { 416 if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
417 ret = -EAGAIN; 417 ret = -EAGAIN;
@@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
424 if (ret) 424 if (ret)
425 goto out_unlock; 425 goto out_unlock;
426 426
427 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); 427 gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
428 ap.target = data_blocks + ind_blocks; 428 ap.target = data_blocks + ind_blocks;
429 ret = gfs2_quota_lock_check(ip, &ap); 429 ret = gfs2_quota_lock_check(ip, &ap);
430 if (ret) 430 if (ret)
@@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
447 lock_page(page); 447 lock_page(page);
448 ret = -EINVAL; 448 ret = -EINVAL;
449 size = i_size_read(inode); 449 size = i_size_read(inode);
450 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 450 last_index = (size - 1) >> PAGE_SHIFT;
451 /* Check page index against inode size */ 451 /* Check page index against inode size */
452 if (size == 0 || (page->index > last_index)) 452 if (size == 0 || (page->index > last_index))
453 goto out_trans_end; 453 goto out_trans_end;
@@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
873 rblocks += data_blocks ? data_blocks : 1; 873 rblocks += data_blocks ? data_blocks : 1;
874 874
875 error = gfs2_trans_begin(sdp, rblocks, 875 error = gfs2_trans_begin(sdp, rblocks,
876 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 876 PAGE_SIZE/sdp->sd_sb.sb_bsize);
877 if (error) 877 if (error)
878 goto out_trans_fail; 878 goto out_trans_fail;
879 879
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index e137d96f1b17..0448524c11bc 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
124 if (mapping == NULL) 124 if (mapping == NULL)
125 mapping = &sdp->sd_aspace; 125 mapping = &sdp->sd_aspace;
126 126
127 shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift; 127 shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
128 index = blkno >> shift; /* convert block to page */ 128 index = blkno >> shift; /* convert block to page */
129 bufnum = blkno - (index << shift); /* block buf index within page */ 129 bufnum = blkno - (index << shift); /* block buf index within page */
130 130
@@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
154 map_bh(bh, sdp->sd_vfs, blkno); 154 map_bh(bh, sdp->sd_vfs, blkno);
155 155
156 unlock_page(page); 156 unlock_page(page);
157 page_cache_release(page); 157 put_page(page);
158 158
159 return bh; 159 return bh;
160} 160}
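gfs2_getbuf() goes the other way, splitting a metadata block number into a page index and a buffer slot within the page. Same assumptions as above:

#include <assert.h>

#define PAGE_SHIFT 12                 /* assumed */

int main(void)
{
    unsigned bsize_shift = 10;        /* assumed: 1 KiB metadata blocks */
    unsigned shift = PAGE_SHIFT - bsize_shift;   /* 4 blocks per page */
    unsigned long long blkno = 37;

    unsigned long index = blkno >> shift;             /* block 37 -> page 9 */
    unsigned bufnum = blkno - ((unsigned long long)index << shift);

    assert(index == 9 && bufnum == 1);                /* slot 1 of page 9 */
    return 0;
}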
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a39891344259..ce7d69a2fdc0 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
701 unsigned to_write = bytes, pg_off = off; 701 unsigned to_write = bytes, pg_off = off;
702 int done = 0; 702 int done = 0;
703 703
704 blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift); 704 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
705 boff = off % bsize; 705 boff = off % bsize;
706 706
707 page = find_or_create_page(mapping, index, GFP_NOFS); 707 page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
753 flush_dcache_page(page); 753 flush_dcache_page(page);
754 kunmap_atomic(kaddr); 754 kunmap_atomic(kaddr);
755 unlock_page(page); 755 unlock_page(page);
756 page_cache_release(page); 756 put_page(page);
757 757
758 return 0; 758 return 0;
759 759
760unlock_out: 760unlock_out:
761 unlock_page(page); 761 unlock_page(page);
762 page_cache_release(page); 762 put_page(page);
763 return -EIO; 763 return -EIO;
764} 764}
765 765
@@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
773 773
774 nbytes = sizeof(struct gfs2_quota); 774 nbytes = sizeof(struct gfs2_quota);
775 775
776 pg_beg = loc >> PAGE_CACHE_SHIFT; 776 pg_beg = loc >> PAGE_SHIFT;
777 pg_off = loc % PAGE_CACHE_SIZE; 777 pg_off = loc % PAGE_SIZE;
778 778
779 /* If the quota straddles a page boundary, split the write in two */ 779 /* If the quota straddles a page boundary, split the write in two */
780 if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { 780 if ((pg_off + nbytes) > PAGE_SIZE) {
781 pg_oflow = 1; 781 pg_oflow = 1;
782 overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; 782 overflow = (pg_off + nbytes) - PAGE_SIZE;
783 } 783 }
784 784
785 ptr = qp; 785 ptr = qp;
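The quota hunk also shows the page-straddle split: a record crossing a page boundary is written in two parts, the second part being whatever portion of pg_off + nbytes extends past PAGE_SIZE. A sketch; the 88-byte record matches sizeof(struct gfs2_quota) in this tree but is hard-coded here as an assumption:

#include <assert.h>

#define PAGE_SIZE 4096UL              /* assumed */

int main(void)
{
    unsigned nbytes = 88;             /* assumed sizeof(struct gfs2_quota) */
    unsigned long long loc = 2 * PAGE_SIZE - 40;     /* record near a boundary */

    unsigned long pg_beg = loc / PAGE_SIZE;          /* first page: 1 */
    unsigned pg_off = loc % PAGE_SIZE;               /* 4056 bytes in */
    unsigned overflow = 0;

    if (pg_off + nbytes > PAGE_SIZE)                 /* straddles the boundary */
        overflow = (pg_off + nbytes) - PAGE_SIZE;

    assert(pg_beg == 1 && overflow == 48);
    /* First write: nbytes - overflow (40) bytes at pg_off; second write:
     * 'overflow' (48) bytes at offset 0 of the next page. */
    return 0;
}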
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 07c0265aa195..99a0bdac8796 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip)
918 goto fail; 918 goto fail;
919 919
920 rgd->rd_gl->gl_object = rgd; 920 rgd->rd_gl->gl_object = rgd;
921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK; 921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
922 rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr + 922 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
923 rgd->rd_length) * bsize) - 1;
924 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; 923 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
925 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED); 924 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
926 if (rgd->rd_data > sdp->sd_max_rg_data) 925 if (rgd->rd_data > sdp->sd_max_rg_data)
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719eac5de..d77d844b668b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
278 278
279 mapping = tree->inode->i_mapping; 279 mapping = tree->inode->i_mapping;
280 off = (loff_t)cnid * tree->node_size; 280 off = (loff_t)cnid * tree->node_size;
281 block = off >> PAGE_CACHE_SHIFT; 281 block = off >> PAGE_SHIFT;
282 node->page_offset = off & ~PAGE_CACHE_MASK; 282 node->page_offset = off & ~PAGE_MASK;
283 for (i = 0; i < tree->pages_per_bnode; i++) { 283 for (i = 0; i < tree->pages_per_bnode; i++) {
284 page = read_mapping_page(mapping, block++, NULL); 284 page = read_mapping_page(mapping, block++, NULL);
285 if (IS_ERR(page)) 285 if (IS_ERR(page))
286 goto fail; 286 goto fail;
287 if (PageError(page)) { 287 if (PageError(page)) {
288 page_cache_release(page); 288 put_page(page);
289 goto fail; 289 goto fail;
290 } 290 }
291 node->page[i] = page; 291 node->page[i] = page;
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
401 401
402 for (i = 0; i < node->tree->pages_per_bnode; i++) 402 for (i = 0; i < node->tree->pages_per_bnode; i++)
403 if (node->page[i]) 403 if (node->page[i])
404 page_cache_release(node->page[i]); 404 put_page(node->page[i]);
405 kfree(node); 405 kfree(node);
406} 406}
407 407
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
429 429
430 pagep = node->page; 430 pagep = node->page;
431 memset(kmap(*pagep) + node->page_offset, 0, 431 memset(kmap(*pagep) + node->page_offset, 0,
432 min((int)PAGE_CACHE_SIZE, (int)tree->node_size)); 432 min((int)PAGE_SIZE, (int)tree->node_size));
433 set_page_dirty(*pagep); 433 set_page_dirty(*pagep);
434 kunmap(*pagep); 434 kunmap(*pagep);
435 for (i = 1; i < tree->pages_per_bnode; i++) { 435 for (i = 1; i < tree->pages_per_bnode; i++) {
436 memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); 436 memset(kmap(*++pagep), 0, PAGE_SIZE);
437 set_page_dirty(*pagep); 437 set_page_dirty(*pagep);
438 kunmap(*pagep); 438 kunmap(*pagep);
439 } 439 }
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e69..37cdd955eceb 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
116 } 116 }
117 117
118 tree->node_size_shift = ffs(size) - 1; 118 tree->node_size_shift = ffs(size) - 1;
119 tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 119 tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
120 120
121 kunmap(page); 121 kunmap(page);
122 page_cache_release(page); 122 put_page(page);
123 return tree; 123 return tree;
124 124
125fail_page: 125fail_page:
126 page_cache_release(page); 126 put_page(page);
127free_inode: 127free_inode:
128 tree->inode->i_mapping->a_ops = &hfs_aops; 128 tree->inode->i_mapping->a_ops = &hfs_aops;
129 iput(tree->inode); 129 iput(tree->inode);
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
257 off = off16; 257 off = off16;
258 258
259 off += node->page_offset; 259 off += node->page_offset;
260 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 260 pagep = node->page + (off >> PAGE_SHIFT);
261 data = kmap(*pagep); 261 data = kmap(*pagep);
262 off &= ~PAGE_CACHE_MASK; 262 off &= ~PAGE_MASK;
263 idx = 0; 263 idx = 0;
264 264
265 for (;;) { 265 for (;;) {
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
279 } 279 }
280 } 280 }
281 } 281 }
282 if (++off >= PAGE_CACHE_SIZE) { 282 if (++off >= PAGE_SIZE) {
283 kunmap(*pagep); 283 kunmap(*pagep);
284 data = kmap(*++pagep); 284 data = kmap(*++pagep);
285 off = 0; 285 off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
302 len = hfs_brec_lenoff(node, 0, &off16); 302 len = hfs_brec_lenoff(node, 0, &off16);
303 off = off16; 303 off = off16;
304 off += node->page_offset; 304 off += node->page_offset;
305 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 305 pagep = node->page + (off >> PAGE_SHIFT);
306 data = kmap(*pagep); 306 data = kmap(*pagep);
307 off &= ~PAGE_CACHE_MASK; 307 off &= ~PAGE_MASK;
308 } 308 }
309} 309}
310 310
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
348 len = hfs_brec_lenoff(node, 0, &off); 348 len = hfs_brec_lenoff(node, 0, &off);
349 } 349 }
350 off += node->page_offset + nidx / 8; 350 off += node->page_offset + nidx / 8;
351 page = node->page[off >> PAGE_CACHE_SHIFT]; 351 page = node->page[off >> PAGE_SHIFT];
352 data = kmap(page); 352 data = kmap(page);
353 off &= ~PAGE_CACHE_MASK; 353 off &= ~PAGE_MASK;
354 m = 1 << (~nidx & 7); 354 m = 1 << (~nidx & 7);
355 byte = data[off]; 355 byte = data[off];
356 if (!(byte & m)) { 356 if (!(byte & m)) {
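hfs_btree_open() sizes pages_per_bnode with the standard round-up idiom, (size + PAGE_SIZE - 1) >> PAGE_SHIFT, which this patch merely renames. In isolation:

#include <assert.h>

#define PAGE_SHIFT 12                 /* assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Pages needed to hold 'size' bytes, rounding up. */
static unsigned long pages_for(unsigned long size)
{
    return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
    assert(pages_for(512)  == 1);     /* HFS nodes are often sub-page */
    assert(pages_for(4096) == 1);     /* an exact fit does not round up */
    assert(pages_for(8192) == 2);
    assert(pages_for(8193) == 3);
    return 0;
}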
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6686bf39a5b5..cb1e5faa2fb7 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
91 if (!tree) 91 if (!tree)
92 return 0; 92 return 0;
93 93
94 if (tree->node_size >= PAGE_CACHE_SIZE) { 94 if (tree->node_size >= PAGE_SIZE) {
95 nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT); 95 nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
96 spin_lock(&tree->hash_lock); 96 spin_lock(&tree->hash_lock);
97 node = hfs_bnode_findhash(tree, nidx); 97 node = hfs_bnode_findhash(tree, nidx);
98 if (!node) 98 if (!node)
@@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
105 } 105 }
106 spin_unlock(&tree->hash_lock); 106 spin_unlock(&tree->hash_lock);
107 } else { 107 } else {
108 nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift); 108 nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
109 i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); 109 i = 1 << (PAGE_SHIFT - tree->node_size_shift);
110 spin_lock(&tree->hash_lock); 110 spin_lock(&tree->hash_lock);
111 do { 111 do {
112 node = hfs_bnode_findhash(tree, nidx++); 112 node = hfs_bnode_findhash(tree, nidx++);
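hfs_releasepage() converts between page and node indices in whichever direction the sizes demand: shift right when one node spans several pages, shift left and probe 1 << diff candidates when several nodes share a page. A sketch with assumed node sizes:

#include <assert.h>

#define PAGE_SHIFT 12                 /* assumed */

int main(void)
{
    /* Case 1: 16 KiB nodes (shift 14), so each node covers 4 pages. */
    unsigned node_shift = 14;
    assert((7UL >> (node_shift - PAGE_SHIFT)) == 1);  /* page 7 -> node 1 */

    /* Case 2: 512-byte nodes (shift 9), so 8 nodes share each page. */
    node_shift = 9;
    unsigned long first = 3UL << (PAGE_SHIFT - node_shift);
    unsigned count = 1U << (PAGE_SHIFT - node_shift);
    assert(first == 24 && count == 8);   /* page 3 holds nodes 24..31 */
    return 0;
}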
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index d2954451519e..c0ae274c0a22 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -13,7 +13,7 @@
13#include "hfsplus_fs.h" 13#include "hfsplus_fs.h"
14#include "hfsplus_raw.h" 14#include "hfsplus_raw.h"
15 15
16#define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8) 16#define PAGE_CACHE_BITS (PAGE_SIZE * 8)
17 17
18int hfsplus_block_allocate(struct super_block *sb, u32 size, 18int hfsplus_block_allocate(struct super_block *sb, u32 size,
19 u32 offset, u32 *max) 19 u32 offset, u32 *max)
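hfsplus keeps its private PAGE_CACHE_BITS name but now derives it from PAGE_SIZE: one page of the bitmap file tracks PAGE_SIZE * 8 allocation blocks. Locating a block's bit, under the usual 4 KiB assumption:

#include <assert.h>

#define PAGE_SIZE 4096UL                     /* assumed */
#define PAGE_CACHE_BITS (PAGE_SIZE * 8)      /* bits held by one bitmap page */

int main(void)
{
    unsigned long block = 100000;            /* allocation block to look up */

    unsigned long page = block / PAGE_CACHE_BITS;   /* bitmap page 3 */
    unsigned long bit  = block % PAGE_CACHE_BITS;   /* bit 1696 within it */

    assert(PAGE_CACHE_BITS == 32768);
    assert(page == 3 && bit == 1696);
    return 0;
}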
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 63924662aaf3..ce014ceb89ef 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
24 int l; 24 int l;
25 25
26 off += node->page_offset; 26 off += node->page_offset;
27 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 27 pagep = node->page + (off >> PAGE_SHIFT);
28 off &= ~PAGE_CACHE_MASK; 28 off &= ~PAGE_MASK;
29 29
30 l = min_t(int, len, PAGE_CACHE_SIZE - off); 30 l = min_t(int, len, PAGE_SIZE - off);
31 memcpy(buf, kmap(*pagep) + off, l); 31 memcpy(buf, kmap(*pagep) + off, l);
32 kunmap(*pagep); 32 kunmap(*pagep);
33 33
34 while ((len -= l) != 0) { 34 while ((len -= l) != 0) {
35 buf += l; 35 buf += l;
36 l = min_t(int, len, PAGE_CACHE_SIZE); 36 l = min_t(int, len, PAGE_SIZE);
37 memcpy(buf, kmap(*++pagep), l); 37 memcpy(buf, kmap(*++pagep), l);
38 kunmap(*pagep); 38 kunmap(*pagep);
39 } 39 }
@@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
77 int l; 77 int l;
78 78
79 off += node->page_offset; 79 off += node->page_offset;
80 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 80 pagep = node->page + (off >> PAGE_SHIFT);
81 off &= ~PAGE_CACHE_MASK; 81 off &= ~PAGE_MASK;
82 82
83 l = min_t(int, len, PAGE_CACHE_SIZE - off); 83 l = min_t(int, len, PAGE_SIZE - off);
84 memcpy(kmap(*pagep) + off, buf, l); 84 memcpy(kmap(*pagep) + off, buf, l);
85 set_page_dirty(*pagep); 85 set_page_dirty(*pagep);
86 kunmap(*pagep); 86 kunmap(*pagep);
87 87
88 while ((len -= l) != 0) { 88 while ((len -= l) != 0) {
89 buf += l; 89 buf += l;
90 l = min_t(int, len, PAGE_CACHE_SIZE); 90 l = min_t(int, len, PAGE_SIZE);
91 memcpy(kmap(*++pagep), buf, l); 91 memcpy(kmap(*++pagep), buf, l);
92 set_page_dirty(*pagep); 92 set_page_dirty(*pagep);
93 kunmap(*pagep); 93 kunmap(*pagep);
@@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
107 int l; 107 int l;
108 108
109 off += node->page_offset; 109 off += node->page_offset;
110 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 110 pagep = node->page + (off >> PAGE_SHIFT);
111 off &= ~PAGE_CACHE_MASK; 111 off &= ~PAGE_MASK;
112 112
113 l = min_t(int, len, PAGE_CACHE_SIZE - off); 113 l = min_t(int, len, PAGE_SIZE - off);
114 memset(kmap(*pagep) + off, 0, l); 114 memset(kmap(*pagep) + off, 0, l);
115 set_page_dirty(*pagep); 115 set_page_dirty(*pagep);
116 kunmap(*pagep); 116 kunmap(*pagep);
117 117
118 while ((len -= l) != 0) { 118 while ((len -= l) != 0) {
119 l = min_t(int, len, PAGE_CACHE_SIZE); 119 l = min_t(int, len, PAGE_SIZE);
120 memset(kmap(*++pagep), 0, l); 120 memset(kmap(*++pagep), 0, l);
121 set_page_dirty(*pagep); 121 set_page_dirty(*pagep);
122 kunmap(*pagep); 122 kunmap(*pagep);
@@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
136 tree = src_node->tree; 136 tree = src_node->tree;
137 src += src_node->page_offset; 137 src += src_node->page_offset;
138 dst += dst_node->page_offset; 138 dst += dst_node->page_offset;
139 src_page = src_node->page + (src >> PAGE_CACHE_SHIFT); 139 src_page = src_node->page + (src >> PAGE_SHIFT);
140 src &= ~PAGE_CACHE_MASK; 140 src &= ~PAGE_MASK;
141 dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT); 141 dst_page = dst_node->page + (dst >> PAGE_SHIFT);
142 dst &= ~PAGE_CACHE_MASK; 142 dst &= ~PAGE_MASK;
143 143
144 if (src == dst) { 144 if (src == dst) {
145 l = min_t(int, len, PAGE_CACHE_SIZE - src); 145 l = min_t(int, len, PAGE_SIZE - src);
146 memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); 146 memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
147 kunmap(*src_page); 147 kunmap(*src_page);
148 set_page_dirty(*dst_page); 148 set_page_dirty(*dst_page);
149 kunmap(*dst_page); 149 kunmap(*dst_page);
150 150
151 while ((len -= l) != 0) { 151 while ((len -= l) != 0) {
152 l = min_t(int, len, PAGE_CACHE_SIZE); 152 l = min_t(int, len, PAGE_SIZE);
153 memcpy(kmap(*++dst_page), kmap(*++src_page), l); 153 memcpy(kmap(*++dst_page), kmap(*++src_page), l);
154 kunmap(*src_page); 154 kunmap(*src_page);
155 set_page_dirty(*dst_page); 155 set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
161 do { 161 do {
162 src_ptr = kmap(*src_page) + src; 162 src_ptr = kmap(*src_page) + src;
163 dst_ptr = kmap(*dst_page) + dst; 163 dst_ptr = kmap(*dst_page) + dst;
164 if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) { 164 if (PAGE_SIZE - src < PAGE_SIZE - dst) {
165 l = PAGE_CACHE_SIZE - src; 165 l = PAGE_SIZE - src;
166 src = 0; 166 src = 0;
167 dst += l; 167 dst += l;
168 } else { 168 } else {
169 l = PAGE_CACHE_SIZE - dst; 169 l = PAGE_SIZE - dst;
170 src += l; 170 src += l;
171 dst = 0; 171 dst = 0;
172 } 172 }
@@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
195 dst += node->page_offset; 195 dst += node->page_offset;
196 if (dst > src) { 196 if (dst > src) {
197 src += len - 1; 197 src += len - 1;
198 src_page = node->page + (src >> PAGE_CACHE_SHIFT); 198 src_page = node->page + (src >> PAGE_SHIFT);
199 src = (src & ~PAGE_CACHE_MASK) + 1; 199 src = (src & ~PAGE_MASK) + 1;
200 dst += len - 1; 200 dst += len - 1;
201 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); 201 dst_page = node->page + (dst >> PAGE_SHIFT);
202 dst = (dst & ~PAGE_CACHE_MASK) + 1; 202 dst = (dst & ~PAGE_MASK) + 1;
203 203
204 if (src == dst) { 204 if (src == dst) {
205 while (src < len) { 205 while (src < len) {
@@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
208 set_page_dirty(*dst_page); 208 set_page_dirty(*dst_page);
209 kunmap(*dst_page); 209 kunmap(*dst_page);
210 len -= src; 210 len -= src;
211 src = PAGE_CACHE_SIZE; 211 src = PAGE_SIZE;
212 src_page--; 212 src_page--;
213 dst_page--; 213 dst_page--;
214 } 214 }
@@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
226 dst_ptr = kmap(*dst_page) + dst; 226 dst_ptr = kmap(*dst_page) + dst;
227 if (src < dst) { 227 if (src < dst) {
228 l = src; 228 l = src;
229 src = PAGE_CACHE_SIZE; 229 src = PAGE_SIZE;
230 dst -= l; 230 dst -= l;
231 } else { 231 } else {
232 l = dst; 232 l = dst;
233 src -= l; 233 src -= l;
234 dst = PAGE_CACHE_SIZE; 234 dst = PAGE_SIZE;
235 } 235 }
236 l = min(len, l); 236 l = min(len, l);
237 memmove(dst_ptr - l, src_ptr - l, l); 237 memmove(dst_ptr - l, src_ptr - l, l);
238 kunmap(*src_page); 238 kunmap(*src_page);
239 set_page_dirty(*dst_page); 239 set_page_dirty(*dst_page);
240 kunmap(*dst_page); 240 kunmap(*dst_page);
241 if (dst == PAGE_CACHE_SIZE) 241 if (dst == PAGE_SIZE)
242 dst_page--; 242 dst_page--;
243 else 243 else
244 src_page--; 244 src_page--;
245 } while ((len -= l)); 245 } while ((len -= l));
246 } 246 }
247 } else { 247 } else {
248 src_page = node->page + (src >> PAGE_CACHE_SHIFT); 248 src_page = node->page + (src >> PAGE_SHIFT);
249 src &= ~PAGE_CACHE_MASK; 249 src &= ~PAGE_MASK;
250 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); 250 dst_page = node->page + (dst >> PAGE_SHIFT);
251 dst &= ~PAGE_CACHE_MASK; 251 dst &= ~PAGE_MASK;
252 252
253 if (src == dst) { 253 if (src == dst) {
254 l = min_t(int, len, PAGE_CACHE_SIZE - src); 254 l = min_t(int, len, PAGE_SIZE - src);
255 memmove(kmap(*dst_page) + src, 255 memmove(kmap(*dst_page) + src,
256 kmap(*src_page) + src, l); 256 kmap(*src_page) + src, l);
257 kunmap(*src_page); 257 kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
259 kunmap(*dst_page); 259 kunmap(*dst_page);
260 260
261 while ((len -= l) != 0) { 261 while ((len -= l) != 0) {
262 l = min_t(int, len, PAGE_CACHE_SIZE); 262 l = min_t(int, len, PAGE_SIZE);
263 memmove(kmap(*++dst_page), 263 memmove(kmap(*++dst_page),
264 kmap(*++src_page), l); 264 kmap(*++src_page), l);
265 kunmap(*src_page); 265 kunmap(*src_page);
@@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
272 do { 272 do {
273 src_ptr = kmap(*src_page) + src; 273 src_ptr = kmap(*src_page) + src;
274 dst_ptr = kmap(*dst_page) + dst; 274 dst_ptr = kmap(*dst_page) + dst;
275 if (PAGE_CACHE_SIZE - src < 275 if (PAGE_SIZE - src <
276 PAGE_CACHE_SIZE - dst) { 276 PAGE_SIZE - dst) {
277 l = PAGE_CACHE_SIZE - src; 277 l = PAGE_SIZE - src;
278 src = 0; 278 src = 0;
279 dst += l; 279 dst += l;
280 } else { 280 } else {
281 l = PAGE_CACHE_SIZE - dst; 281 l = PAGE_SIZE - dst;
282 src += l; 282 src += l;
283 dst = 0; 283 dst = 0;
284 } 284 }
@@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
444 444
445 mapping = tree->inode->i_mapping; 445 mapping = tree->inode->i_mapping;
446 off = (loff_t)cnid << tree->node_size_shift; 446 off = (loff_t)cnid << tree->node_size_shift;
447 block = off >> PAGE_CACHE_SHIFT; 447 block = off >> PAGE_SHIFT;
448 node->page_offset = off & ~PAGE_CACHE_MASK; 448 node->page_offset = off & ~PAGE_MASK;
449 for (i = 0; i < tree->pages_per_bnode; block++, i++) { 449 for (i = 0; i < tree->pages_per_bnode; block++, i++) {
450 page = read_mapping_page(mapping, block, NULL); 450 page = read_mapping_page(mapping, block, NULL);
451 if (IS_ERR(page)) 451 if (IS_ERR(page))
452 goto fail; 452 goto fail;
453 if (PageError(page)) { 453 if (PageError(page)) {
454 page_cache_release(page); 454 put_page(page);
455 goto fail; 455 goto fail;
456 } 456 }
457 node->page[i] = page; 457 node->page[i] = page;
@@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
569 569
570 for (i = 0; i < node->tree->pages_per_bnode; i++) 570 for (i = 0; i < node->tree->pages_per_bnode; i++)
571 if (node->page[i]) 571 if (node->page[i])
572 page_cache_release(node->page[i]); 572 put_page(node->page[i]);
573 kfree(node); 573 kfree(node);
574} 574}
575 575
@@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
597 597
598 pagep = node->page; 598 pagep = node->page;
599 memset(kmap(*pagep) + node->page_offset, 0, 599 memset(kmap(*pagep) + node->page_offset, 0,
600 min_t(int, PAGE_CACHE_SIZE, tree->node_size)); 600 min_t(int, PAGE_SIZE, tree->node_size));
601 set_page_dirty(*pagep); 601 set_page_dirty(*pagep);
602 kunmap(*pagep); 602 kunmap(*pagep);
603 for (i = 1; i < tree->pages_per_bnode; i++) { 603 for (i = 1; i < tree->pages_per_bnode; i++) {
604 memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); 604 memset(kmap(*++pagep), 0, PAGE_SIZE);
605 set_page_dirty(*pagep); 605 set_page_dirty(*pagep);
606 kunmap(*pagep); 606 kunmap(*pagep);
607 } 607 }
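All the hfsplus bnode helpers above share one loop shape: clamp the first chunk to what remains of the current page, then consume whole pages until len runs out. A userspace rendering of the read loop, with a flat array standing in for the kmap'd page array and the page size shrunk to 64 bytes for readability:

#include <assert.h>
#include <string.h>

#define PAGE_SIZE 64U        /* tiny "page" so the example stays short */

/* Copy 'len' bytes starting at byte 'off' out of an array of
 * PAGE_SIZE-byte pages, crossing page boundaries as needed. */
static void bnode_read(char *buf, char pages[][PAGE_SIZE],
                       unsigned off, unsigned len)
{
    char (*pagep)[PAGE_SIZE] = pages + off / PAGE_SIZE;
    unsigned l;

    off %= PAGE_SIZE;
    l = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;  /* first, partial chunk */
    memcpy(buf, *pagep + off, l);

    while ((len -= l) != 0) {                           /* then whole pages */
        buf += l;
        l = len < PAGE_SIZE ? len : PAGE_SIZE;
        memcpy(buf, *++pagep, l);
    }
}

int main(void)
{
    char pages[3][PAGE_SIZE], out[100];

    for (unsigned i = 0; i < sizeof(pages); i++)
        ((char *)pages)[i] = (char)i;

    bnode_read(out, pages, 40, 100);     /* spans pages 0, 1 and 2 */
    for (unsigned i = 0; i < 100; i++)
        assert(out[i] == (char)(40 + i));
    return 0;
}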
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 3345c7553edc..d9d1a36ba826 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
236 tree->node_size_shift = ffs(size) - 1; 236 tree->node_size_shift = ffs(size) - 1;
237 237
238 tree->pages_per_bnode = 238 tree->pages_per_bnode =
239 (tree->node_size + PAGE_CACHE_SIZE - 1) >> 239 (tree->node_size + PAGE_SIZE - 1) >>
240 PAGE_CACHE_SHIFT; 240 PAGE_SHIFT;
241 241
242 kunmap(page); 242 kunmap(page);
243 page_cache_release(page); 243 put_page(page);
244 return tree; 244 return tree;
245 245
246 fail_page: 246 fail_page:
247 page_cache_release(page); 247 put_page(page);
248 free_inode: 248 free_inode:
249 tree->inode->i_mapping->a_ops = &hfsplus_aops; 249 tree->inode->i_mapping->a_ops = &hfsplus_aops;
250 iput(tree->inode); 250 iput(tree->inode);
@@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
380 off = off16; 380 off = off16;
381 381
382 off += node->page_offset; 382 off += node->page_offset;
383 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 383 pagep = node->page + (off >> PAGE_SHIFT);
384 data = kmap(*pagep); 384 data = kmap(*pagep);
385 off &= ~PAGE_CACHE_MASK; 385 off &= ~PAGE_MASK;
386 idx = 0; 386 idx = 0;
387 387
388 for (;;) { 388 for (;;) {
@@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
403 } 403 }
404 } 404 }
405 } 405 }
406 if (++off >= PAGE_CACHE_SIZE) { 406 if (++off >= PAGE_SIZE) {
407 kunmap(*pagep); 407 kunmap(*pagep);
408 data = kmap(*++pagep); 408 data = kmap(*++pagep);
409 off = 0; 409 off = 0;
@@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
426 len = hfs_brec_lenoff(node, 0, &off16); 426 len = hfs_brec_lenoff(node, 0, &off16);
427 off = off16; 427 off = off16;
428 off += node->page_offset; 428 off += node->page_offset;
429 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 429 pagep = node->page + (off >> PAGE_SHIFT);
430 data = kmap(*pagep); 430 data = kmap(*pagep);
431 off &= ~PAGE_CACHE_MASK; 431 off &= ~PAGE_MASK;
432 } 432 }
433} 433}
434 434
@@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
475 len = hfs_brec_lenoff(node, 0, &off); 475 len = hfs_brec_lenoff(node, 0, &off);
476 } 476 }
477 off += node->page_offset + nidx / 8; 477 off += node->page_offset + nidx / 8;
478 page = node->page[off >> PAGE_CACHE_SHIFT]; 478 page = node->page[off >> PAGE_SHIFT];
479 data = kmap(page); 479 data = kmap(page);
480 off &= ~PAGE_CACHE_MASK; 480 off &= ~PAGE_MASK;
481 m = 1 << (~nidx & 7); 481 m = 1 << (~nidx & 7);
482 byte = data[off]; 482 byte = data[off];
483 if (!(byte & m)) { 483 if (!(byte & m)) {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1a6394cdb54e..b28f39865c3a 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
87 } 87 }
88 if (!tree) 88 if (!tree)
89 return 0; 89 return 0;
90 if (tree->node_size >= PAGE_CACHE_SIZE) { 90 if (tree->node_size >= PAGE_SIZE) {
91 nidx = page->index >> 91 nidx = page->index >>
92 (tree->node_size_shift - PAGE_CACHE_SHIFT); 92 (tree->node_size_shift - PAGE_SHIFT);
93 spin_lock(&tree->hash_lock); 93 spin_lock(&tree->hash_lock);
94 node = hfs_bnode_findhash(tree, nidx); 94 node = hfs_bnode_findhash(tree, nidx);
95 if (!node) 95 if (!node)
@@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
103 spin_unlock(&tree->hash_lock); 103 spin_unlock(&tree->hash_lock);
104 } else { 104 } else {
105 nidx = page->index << 105 nidx = page->index <<
106 (PAGE_CACHE_SHIFT - tree->node_size_shift); 106 (PAGE_SHIFT - tree->node_size_shift);
107 i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); 107 i = 1 << (PAGE_SHIFT - tree->node_size_shift);
108 spin_lock(&tree->hash_lock); 108 spin_lock(&tree->hash_lock);
109 do { 109 do {
110 node = hfs_bnode_findhash(tree, nidx++); 110 node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 5d54490a136d..c35911362ff9 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
438 err = -EFBIG; 438 err = -EFBIG;
439 last_fs_block = sbi->total_blocks - 1; 439 last_fs_block = sbi->total_blocks - 1;
440 last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> 440 last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
441 PAGE_CACHE_SHIFT; 441 PAGE_SHIFT;
442 442
443 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || 443 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
444 (last_fs_page > (pgoff_t)(~0ULL))) { 444 (last_fs_page > (pgoff_t)(~0ULL))) {
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index ab01530b4930..70e445ff0cff 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -220,7 +220,7 @@ check_attr_tree_state_again:
220 220
221 index = 0; 221 index = 0;
222 written = 0; 222 written = 0;
223 for (; written < node_size; index++, written += PAGE_CACHE_SIZE) { 223 for (; written < node_size; index++, written += PAGE_SIZE) {
224 void *kaddr; 224 void *kaddr;
225 225
226 page = read_mapping_page(mapping, index, NULL); 226 page = read_mapping_page(mapping, index, NULL);
@@ -231,11 +231,11 @@ check_attr_tree_state_again:
231 231
232 kaddr = kmap_atomic(page); 232 kaddr = kmap_atomic(page);
233 memcpy(kaddr, buf + written, 233 memcpy(kaddr, buf + written,
234 min_t(size_t, PAGE_CACHE_SIZE, node_size - written)); 234 min_t(size_t, PAGE_SIZE, node_size - written));
235 kunmap_atomic(kaddr); 235 kunmap_atomic(kaddr);
236 236
237 set_page_dirty(page); 237 set_page_dirty(page);
238 page_cache_release(page); 238 put_page(page);
239 } 239 }
240 240
241 hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY); 241 hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d1abbee281d1..7016653f3e41 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
410 struct inode *inode = mapping->host; 410 struct inode *inode = mapping->host;
411 char *buffer; 411 char *buffer;
412 loff_t base = page_offset(page); 412 loff_t base = page_offset(page);
413 int count = PAGE_CACHE_SIZE; 413 int count = PAGE_SIZE;
414 int end_index = inode->i_size >> PAGE_CACHE_SHIFT; 414 int end_index = inode->i_size >> PAGE_SHIFT;
415 int err; 415 int err;
416 416
417 if (page->index >= end_index) 417 if (page->index >= end_index)
418 count = inode->i_size & (PAGE_CACHE_SIZE-1); 418 count = inode->i_size & (PAGE_SIZE-1);
419 419
420 buffer = kmap(page); 420 buffer = kmap(page);
421 421
@@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
447 447
448 buffer = kmap(page); 448 buffer = kmap(page);
449 bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, 449 bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
450 PAGE_CACHE_SIZE); 450 PAGE_SIZE);
451 if (bytes_read < 0) { 451 if (bytes_read < 0) {
452 ClearPageUptodate(page); 452 ClearPageUptodate(page);
453 SetPageError(page); 453 SetPageError(page);
@@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
455 goto out; 455 goto out;
456 } 456 }
457 457
458 memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); 458 memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
459 459
460 ClearPageError(page); 460 ClearPageError(page);
461 SetPageUptodate(page); 461 SetPageUptodate(page);
@@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
471 loff_t pos, unsigned len, unsigned flags, 471 loff_t pos, unsigned len, unsigned flags,
472 struct page **pagep, void **fsdata) 472 struct page **pagep, void **fsdata)
473{ 473{
474 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 474 pgoff_t index = pos >> PAGE_SHIFT;
475 475
476 *pagep = grab_cache_page_write_begin(mapping, index, flags); 476 *pagep = grab_cache_page_write_begin(mapping, index, flags);
477 if (!*pagep) 477 if (!*pagep)
@@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
485{ 485{
486 struct inode *inode = mapping->host; 486 struct inode *inode = mapping->host;
487 void *buffer; 487 void *buffer;
488 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 488 unsigned from = pos & (PAGE_SIZE - 1);
489 int err; 489 int err;
490 490
491 buffer = kmap(page); 491 buffer = kmap(page);
492 err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); 492 err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
493 kunmap(page); 493 kunmap(page);
494 494
495 if (!PageUptodate(page) && err == PAGE_CACHE_SIZE) 495 if (!PageUptodate(page) && err == PAGE_SIZE)
496 SetPageUptodate(page); 496 SetPageUptodate(page);
497 497
498 /* 498 /*
@@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
502 if (err > 0 && (pos > inode->i_size)) 502 if (err > 0 && (pos > inode->i_size))
503 inode->i_size = pos; 503 inode->i_size = pos;
504 unlock_page(page); 504 unlock_page(page);
505 page_cache_release(page); 505 put_page(page);
506 506
507 return err; 507 return err;
508} 508}
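hostfs_writepage() trims the write on the file's final page with the same mask arithmetic. Reduced to a pure function, assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SHIFT 12                 /* assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Bytes of page 'index' that lie inside a file of 'i_size' bytes. */
static unsigned long write_count(unsigned long index, unsigned long long i_size)
{
    unsigned long end_index = i_size >> PAGE_SHIFT;

    if (index >= end_index)           /* final, partial page */
        return i_size & (PAGE_SIZE - 1);
    return PAGE_SIZE;                 /* interior page: all of it */
}

int main(void)
{
    assert(write_count(0, 2 * PAGE_SIZE + 10) == PAGE_SIZE);
    assert(write_count(2, 2 * PAGE_SIZE + 10) == 10);
    return 0;
}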
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e1f465a389d5..4ea71eba40a5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
213 int i, chunksize; 213 int i, chunksize;
214 214
215 /* Find which 4k chunk and offset within that chunk */ 215 /* Find which 4k chunk and offset within that chunk */
216 i = offset >> PAGE_CACHE_SHIFT; 216 i = offset >> PAGE_SHIFT;
217 offset = offset & ~PAGE_CACHE_MASK; 217 offset = offset & ~PAGE_MASK;
218 218
219 while (size) { 219 while (size) {
220 size_t n; 220 size_t n;
221 chunksize = PAGE_CACHE_SIZE; 221 chunksize = PAGE_SIZE;
222 if (offset) 222 if (offset)
223 chunksize -= offset; 223 chunksize -= offset;
224 if (chunksize > size) 224 if (chunksize > size)
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
237/* 237/*
238 * Support for read() - Find the page attached to f_mapping and copy out the 238 * Support for read() - Find the page attached to f_mapping and copy out the
239 * data. It's *very* similar to do_generic_mapping_read(), we can't use that 239 * data. It's *very* similar to do_generic_mapping_read(), we can't use that
240 * since it has PAGE_CACHE_SIZE assumptions. 240 * since it has PAGE_SIZE assumptions.
241 */ 241 */
242static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) 242static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
243{ 243{
@@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
285 * We have the page, copy it to user space buffer. 285 * We have the page, copy it to user space buffer.
286 */ 286 */
287 copied = hugetlbfs_read_actor(page, offset, to, nr); 287 copied = hugetlbfs_read_actor(page, offset, to, nr);
288 page_cache_release(page); 288 put_page(page);
289 } 289 }
290 offset += copied; 290 offset += copied;
291 retval += copied; 291 retval += copied;
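hugetlbfs_read_actor() copies a huge page out in base-page chunks, so here PAGE_CACHE_SIZE always meant the 4 KiB unit and the rename changes nothing; only the first chunk can start mid-page. Counting the chunks it would issue:

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12                 /* assumed base page: 4 KiB */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Count the copy chunks hugetlbfs_read_actor() would issue for 'size'
 * bytes starting 'offset' bytes into a huge page. */
static unsigned chunks(unsigned long offset, size_t size)
{
    unsigned n = 0;

    offset &= PAGE_SIZE - 1;          /* offset within the first 4k chunk */
    while (size) {
        size_t chunk = PAGE_SIZE - offset;
        if (chunk > size)
            chunk = size;
        size -= chunk;
        offset = 0;                   /* later chunks start page-aligned */
        n++;
    }
    return n;
}

int main(void)
{
    assert(chunks(0, PAGE_SIZE) == 1);
    assert(chunks(100, PAGE_SIZE) == 2);        /* misaligned start splits it */
    assert(chunks(0, 3 * PAGE_SIZE + 1) == 4);
    return 0;
}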
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index f311bf084015..2e4e834d1a98 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -26,7 +26,7 @@
26#include "zisofs.h" 26#include "zisofs.h"
27 27
28/* This should probably be global. */ 28/* This should probably be global. */
29static char zisofs_sink_page[PAGE_CACHE_SIZE]; 29static char zisofs_sink_page[PAGE_SIZE];
30 30
31/* 31/*
32 * This contains the zlib memory allocation and the mutex for the 32 * This contains the zlib memory allocation and the mutex for the
@@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 		for ( i = 0 ; i < pcount ; i++ ) {
 			if (!pages[i])
 				continue;
-			memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+			memset(page_address(pages[i]), 0, PAGE_SIZE);
 			flush_dcache_page(pages[i]);
 			SetPageUptodate(pages[i]);
 		}
-		return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
+		return ((loff_t)pcount) << PAGE_SHIFT;
 	}
 
 	/* Because zlib is not thread-safe, do all the I/O at the top. */
@@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 			if (pages[curpage]) {
 				stream.next_out = page_address(pages[curpage])
 						+ poffset;
-				stream.avail_out = PAGE_CACHE_SIZE - poffset;
+				stream.avail_out = PAGE_SIZE - poffset;
 				poffset = 0;
 			} else {
 				stream.next_out = (void *)&zisofs_sink_page;
-				stream.avail_out = PAGE_CACHE_SIZE;
+				stream.avail_out = PAGE_SIZE;
 			}
 		}
 		if (!stream.avail_in) {
@@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 	 * pages with the data we have anyway...
 	 */
 	start_off = page_offset(pages[full_page]);
-	end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+	end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
 
 	cstart_block = start_off >> zisofs_block_shift;
 	cend_block = (end_off + (1 << zisofs_block_shift) - 1)
 		>> zisofs_block_shift;
 
-	WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
-		((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+	WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
+		((cstart_block << zisofs_block_shift) & PAGE_MASK));
 
 	/* Find the pointer to this specific chunk */
 	/* Note: we're not using isonum_731() here because the data is known aligned */
@@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 		ret = zisofs_uncompress_block(inode, block_start, block_end,
 					      pcount, pages, poffset, &err);
 		poffset += ret;
-		pages += poffset >> PAGE_CACHE_SHIFT;
-		pcount -= poffset >> PAGE_CACHE_SHIFT;
-		full_page -= poffset >> PAGE_CACHE_SHIFT;
-		poffset &= ~PAGE_CACHE_MASK;
+		pages += poffset >> PAGE_SHIFT;
+		pcount -= poffset >> PAGE_SHIFT;
+		full_page -= poffset >> PAGE_SHIFT;
+		poffset &= ~PAGE_MASK;
 
 		if (err) {
 			brelse(bh);
@@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 
 	if (poffset && *pages) {
 		memset(page_address(*pages) + poffset, 0,
-		       PAGE_CACHE_SIZE - poffset);
+		       PAGE_SIZE - poffset);
 		flush_dcache_page(*pages);
 		SetPageUptodate(*pages);
 	}
@@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
 	int i, pcount, full_page;
 	unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
 	unsigned int zisofs_pages_per_cblock =
-		PAGE_CACHE_SHIFT <= zisofs_block_shift ?
-		(1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+		PAGE_SHIFT <= zisofs_block_shift ?
+		(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
 	struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
 	pgoff_t index = page->index, end_index;
 
-	end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	/*
 	 * If this page is wholly outside i_size we just return zero;
 	 * do_generic_file_read() will handle this for us
@@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
 		return 0;
 	}
 
-	if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+	if (PAGE_SHIFT <= zisofs_block_shift) {
 		/* We have already been given one page, this is the one
 		   we must do. */
 		full_page = index & (zisofs_pages_per_cblock - 1);
@@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
 			kunmap(pages[i]);
 			unlock_page(pages[i]);
 			if (i != full_page)
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 		}
 	}
 
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bcd2d41b318a..131dedc920d8 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
 	 * the page with useless information without generating any
 	 * I/O errors.
 	 */
-	if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
+	if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
 		printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
 			__func__, b_off,
 			(unsigned long long)inode->i_size);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 517f2de784cf..2ad98d6e19f4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh)
 	if (!trylock_page(page))
 		goto nope;
 
-	page_cache_get(page);
+	get_page(page);
 	__brelse(bh);
 	try_to_free_buffers(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return;
 
 nope:
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index de73a9516a54..435f0b26ac20 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal)
 
 int jbd2_journal_blocks_per_page(struct inode *inode)
 {
-	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+	return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 }
 
 /*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 01e4652d88f6..67c103867bf8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
 	struct buffer_head *head, *bh, *next;
 	unsigned int stop = offset + length;
 	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
+	int partial_page = (offset || length < PAGE_SIZE);
 	int may_free = 1;
 	int ret = 0;
 
@@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
 	if (!page_has_buffers(page))
 		return 0;
 
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1090eb64b90d..9d26b1b9fc01 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
 			   rather than mucking around with actually reading the node
 			   and checking the compression type, which is the real way
 			   to tell a hole node. */
-			if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
-					&& frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+			if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
+					&& frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
 				JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
 					ref_offset(fn->raw));
 				bitched = 1;
 			}
 
-			if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
-					&& frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+			if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
+					&& frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
 				JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
 					ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
 				bitched = 1;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index cad86bac3453..0e62dec3effc 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
 	int ret;
 
 	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
-		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
+		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);
 
 	BUG_ON(!PageLocked(pg));
 
 	pg_buf = kmap(pg);
 	/* FIXME: Can kmap fail? */
 
-	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+				     PAGE_SIZE);
 
 	if (ret) {
 		ClearPageUptodate(pg);
@@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 	struct page *pg;
 	struct inode *inode = mapping->host;
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
+	uint32_t pageofs = index << PAGE_SHIFT;
 	int ret = 0;
 
 	pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 
 out_page:
 	unlock_page(pg);
-	page_cache_release(pg);
+	put_page(pg);
 	return ret;
 }
 
@@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
 	struct jffs2_raw_inode *ri;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned end = start + copied;
 	unsigned aligned_start = start & ~3;
 	int ret = 0;
 	uint32_t writtenlen = 0;
 
 	jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
-		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+		  __func__, inode->i_ino, pg->index << PAGE_SHIFT,
 		  start, end, pg->flags);
 
 	/* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	   to re-lock it. */
 	BUG_ON(!PageUptodate(pg));
 
-	if (end == PAGE_CACHE_SIZE) {
+	if (end == PAGE_SIZE) {
 		/* When writing out the end of a page, write out the
 		   _whole_ page. This helps to reduce the number of
 		   nodes in files which have many short writes, like
@@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 			jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
 				  __func__);
 			unlock_page(pg);
-			page_cache_release(pg);
+			put_page(pg);
 			return -ENOMEM;
 		}
 
@@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	kmap(pg);
 
 	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
-				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+				      (pg->index << PAGE_SHIFT) + aligned_start,
 				      end - aligned_start, &writtenlen);
 
 	kunmap(pg);
@@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
 	jffs2_dbg(1, "%s() returning %d\n",
 		  __func__, writtenlen > 0 ? writtenlen : ret);
 	unlock_page(pg);
-	page_cache_release(pg);
+	put_page(pg);
 	return writtenlen > 0 ? writtenlen : ret;
 }
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bead25ae8fe4..ae2ebb26b446 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 		goto out_root;
 
 	sb->s_maxbytes = 0xFFFFFFFF;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = JFFS2_SUPER_MAGIC;
 	if (!(sb->s_flags & MS_RDONLY))
 		jffs2_start_garbage_collect_thread(c);
@@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
 	struct page *pg;
 
-	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+	pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
 			     (void *)jffs2_do_readpage_unlock, inode);
 	if (IS_ERR(pg))
 		return (void *)pg;
@@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
 	struct page *pg = (void *)*priv;
 
 	kunmap(pg);
-	page_cache_release(pg);
+	put_page(pg);
 }
 
 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 7e553f286775..9ed0f26cf023 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -552,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
 		goto upnout;
 	}
 	/* We found a datanode. Do the GC */
-	if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+	if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
 		/* It crosses a page boundary. Therefore, it must be a hole. */
 		ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
 	} else {
@@ -1192,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		struct jffs2_node_frag *frag;
 		uint32_t min, max;
 
-		min = start & ~(PAGE_CACHE_SIZE-1);
-		max = min + PAGE_CACHE_SIZE;
+		min = start & ~(PAGE_SIZE-1);
+		max = min + PAGE_SIZE;
 
 		frag = jffs2_lookup_node_frag(&f->fragtree, start);
 
@@ -1351,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
 		datalen = end - offset;
 
-		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+		writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
 
 		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
 
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9a5449bc3afb..b86c78d178c6 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list,
 
 	/* If the last fragment starts at the RAM page boundary, it is
 	 * REF_PRISTINE irrespective of its size. */
-	if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
 		dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
 			frag->ofs, frag->ofs + frag->size);
 		frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
@@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
 	   If so, both 'this' and the new node get marked REF_NORMAL so
 	   the GC can take a look.
 	*/
-	if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+	if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
 		if (this->node)
 			mark_ref_normal(this->node->raw);
 		mark_ref_normal(newfrag->node->raw);
@@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
 
 	/* If we now share a page with other nodes, mark either previous
 	   or next node REF_NORMAL, as appropriate. */
-	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+	if (newfrag->ofs & (PAGE_SIZE-1)) {
 		struct jffs2_node_frag *prev = frag_prev(newfrag);
 
 		mark_ref_normal(fn->raw);
@@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
 			mark_ref_normal(prev->node->raw);
 	}
 
-	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+	if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
 		struct jffs2_node_frag *next = frag_next(newfrag);
 
 		if (next) {
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index b634de4c8101..7fb187ab2682 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
 	   beginning of a page and runs to the end of the file, or if
 	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
 	*/
-	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
-	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+	if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
+	    ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
 	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
 		flash_ofs |= REF_PRISTINE;
 	} else {
@@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 			break;
 		}
 		mutex_lock(&f->sem);
-		datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+		datalen = min_t(uint32_t, writelen,
+				PAGE_SIZE - (offset & (PAGE_SIZE-1)));
 		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
 
 		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index a3eb316b1ac3..b60e015cc757 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp)
 static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
-#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
+#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
 
 #if MPS_PER_PAGE > 1
 
@@ -316,7 +316,7 @@ static void last_write_complete(struct page *page)
 	struct metapage *mp;
 	unsigned int offset;
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 		if (mp && test_bit(META_io, &mp->flag)) {
 			if (mp->lsn)
@@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 	int bad_blocks = 0;
 
 	page_start = (sector_t)page->index <<
-		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		     (PAGE_SHIFT - inode->i_blkbits);
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 
 		if (!mp || !test_bit(META_dirty, &mp->flag))
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 			bio = NULL;
 		} else
 			inc_io(page);
-		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
+		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
 		pblock = metapage_get_blocks(inode, lblock, &xlen);
 		if (!pblock) {
 			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 	struct inode *inode = page->mapping->host;
 	struct bio *bio = NULL;
 	int block_offset;
-	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
 	sector_t page_start;	/* address of page in fs blocks */
 	sector_t pblock;
 	int xlen;
@@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
 	BUG_ON(!PageLocked(page));
 	page_start = (sector_t)page->index <<
-		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		     (PAGE_SHIFT - inode->i_blkbits);
 
 	block_offset = 0;
 	while (block_offset < blocks_per_page) {
@@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 	int ret = 1;
 	int offset;
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 
 		if (!mp)
@@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 static void metapage_invalidatepage(struct page *page, unsigned int offset,
 				    unsigned int length)
 {
-	BUG_ON(offset || length < PAGE_CACHE_SIZE);
+	BUG_ON(offset || length < PAGE_SIZE);
 
 	BUG_ON(PageWriteback(page));
 
@@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
 		 inode->i_ino, lblock, absolute);
 
 	l2bsize = inode->i_blkbits;
-	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
 	page_index = lblock >> l2BlocksPerPage;
 	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
-	if ((page_offset + size) > PAGE_CACHE_SIZE) {
+	if ((page_offset + size) > PAGE_SIZE) {
 		jfs_err("MetaData crosses page boundary!!");
 		jfs_err("lblock = %lx, size = %d", lblock, size);
 		dump_stack();
@@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
 		mapping = inode->i_mapping;
 	}
 
-	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
+	if (new && (PSIZE == PAGE_SIZE)) {
 		page = grab_cache_page(mapping, page_index);
 		if (!page) {
 			jfs_err("grab_cache_page failed!");
@@ -693,7 +693,7 @@ unlock:
 void grab_metapage(struct metapage * mp)
 {
 	jfs_info("grab_metapage: mp = 0x%p", mp);
-	page_cache_get(mp->page);
+	get_page(mp->page);
 	lock_page(mp->page);
 	mp->count++;
 	lock_metapage(mp);
@@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp)
 	jfs_info("force_metapage: mp = 0x%p", mp);
 	set_bit(META_forcewrite, &mp->flag);
 	clear_bit(META_sync, &mp->flag);
-	page_cache_get(page);
+	get_page(page);
 	lock_page(page);
 	set_page_dirty(page);
 	write_one_page(page, 1);
 	clear_bit(META_forcewrite, &mp->flag);
-	page_cache_release(page);
+	put_page(page);
 }
 
 void hold_metapage(struct metapage *mp)
@@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp)
 		unlock_page(mp->page);
 		return;
 	}
-	page_cache_get(mp->page);
+	get_page(mp->page);
 	mp->count++;
 	lock_metapage(mp);
 	unlock_page(mp->page);
@@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp)
 	assert(mp->count);
 	if (--mp->count || mp->nohomeok) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return;
 	}
 
@@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp)
 	drop_metapage(page, mp);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 {
 	sector_t lblock;
-	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
+	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
 	int BlocksPerPage = 1 << l2BlocksPerPage;
 	/* All callers are interested in block device's mapping */
 	struct address_space *mapping =
@@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
 		if (!page)
 			continue;
-		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 			mp = page_to_mp(page, offset);
 			if (!mp)
 				continue;
@@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 				remove_from_logsync(mp);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 337e9e51ac06..a869fb4a20d6 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
 	lock_page(page);
 	if (!mp->nohomeok++) {
 		mark_metapage_dirty(mp);
-		page_cache_get(page);
+		get_page(page);
 		wait_on_page_writeback(page);
 	}
 	unlock_page(page);
@@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
 static inline void _metapage_homeok(struct metapage *mp)
 {
 	if (!--mp->nohomeok)
-		page_cache_release(mp->page);
+		put_page(mp->page);
 }
 
 static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f5d85ba8e23..78d599198bf5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
 	 * Page cache is indexed by long.
 	 * I would use MAX_LFS_FILESIZE, but it's only half as big
 	 */
-	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
+	sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
 			     (u64)sb->s_maxbytes);
 #endif
 	sb->s_time_gran = 1;
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index b67dbccdaf88..f73541fbe7af 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
 	struct dentry *root;
 
 	info->sb = sb;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = magic;
 	sb->s_op = &kernfs_sops;
 	sb->s_time_gran = 1;
diff --git a/fs/libfs.c b/fs/libfs.c
index 0ca80b2af420..f3fa82ce9b70 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
 {
 	struct inode *inode = d_inode(dentry);
 	generic_fillattr(inode, stat);
-	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
+	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
 	return 0;
 }
 EXPORT_SYMBOL(simple_getattr);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr);
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	buf->f_type = dentry->d_sb->s_magic;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	return 0;
 }
@@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
 	struct page *page;
 	pgoff_t index;
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
 
 	*pagep = page;
 
-	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
+		unsigned from = pos & (PAGE_SIZE - 1);
 
-		zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+		zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
 	}
 	return 0;
 }
@@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
 	/* zero the stale part of the page if we did a short copy */
 	if (copied < len) {
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned from = pos & (PAGE_SIZE - 1);
 
 		zero_user(page, from + copied, len - copied);
 	}
@@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
@@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
 	struct dentry *dentry;
 	int i;
 
-	s->s_blocksize = PAGE_CACHE_SIZE;
-	s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	s->s_blocksize = PAGE_SIZE;
+	s->s_blocksize_bits = PAGE_SHIFT;
 	s->s_magic = magic;
 	s->s_op = &simple_super_operations;
 	s->s_time_gran = 1;
@@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
 {
 	u64 last_fs_block = num_blocks - 1;
 	u64 last_fs_page =
-		last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
+		last_fs_block >> (PAGE_SHIFT - blocksize_bits);
 
 	if (unlikely(num_blocks == 0))
 		return 0;
 
-	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
+	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
 		return -EINVAL;
 
 	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index a709d80c8ebc..cc26f8f215f5 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		end_page_writeback(bvec->bv_page);
-		page_cache_release(bvec->bv_page);
+		put_page(bvec->bv_page);
 	}
 	bio_put(bio);
 	if (atomic_dec_and_test(&super->s_pending_writes))
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 9c501449450d..b76a62b1978f 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -46,9 +46,9 @@ static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
 
 	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
 	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
-	BUG_ON(len > PAGE_CACHE_SIZE);
-	page_start = ofs & PAGE_CACHE_MASK;
-	page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
+	BUG_ON(len > PAGE_SIZE);
+	page_start = ofs & PAGE_MASK;
+	page_end = PAGE_ALIGN(ofs + len) - 1;
 	ret = mtd_write(mtd, ofs, len, &retlen, buf);
 	if (ret || (retlen != len))
 		return -EIO;
@@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
 		if (!page)
 			continue;
 		memset(page_address(page), 0xFF, PAGE_SIZE);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return 0;
 }
@@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
 		err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
 					page_address(page));
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (err)
 			return err;
 	}
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 542468e9bfb4..ddbed2be5366 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
 		if (name->len != be16_to_cpu(dd->namelen) ||
 		    memcmp(name->name, dd->name, name->len)) {
 			kunmap_atomic(dd);
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry)
 		return PTR_ERR(page);
 	}
 	index = page->index;
-	page_cache_release(page);
+	put_page(page);
 
 	mutex_lock(&super->s_dirop_mutex);
 	logfs_add_transaction(dir, ta);
@@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
 				be16_to_cpu(dd->namelen),
 				be64_to_cpu(dd->ino), dd->type);
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 		if (full)
 			break;
 	}
@@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
 	dd = kmap_atomic(page);
 	ino = be64_to_cpu(dd->ino);
 	kunmap_atomic(dd);
-	page_cache_release(page);
+	put_page(page);
 
 	inode = logfs_iget(dir->i_sb, ino);
 	if (IS_ERR(inode))
@@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
 
 	err = logfs_write_buf(dir, page, WF_LOCK);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	if (!err)
 		grow_dir(dir, index);
 	return err;
@@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
 	map = kmap_atomic(page);
 	memcpy(dd, map, sizeof(*dd));
 	kunmap_atomic(map);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 61eaeb1b6cac..f01ddfb1a03b 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -15,21 +15,21 @@ static int logfs_write_begin(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
-	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+	if ((len == PAGE_SIZE) || PageUptodate(page))
 		return 0;
-	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+		unsigned start = pos & (PAGE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
-		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+		zero_user_segments(page, 0, start, end, PAGE_SIZE);
 		return 0;
 	}
 	return logfs_readpage_nolock(page);
@@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 	pgoff_t index = page->index;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned end = start + copied;
 	int ret = 0;
 
-	BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
+	BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
 	BUG_ON(page->index > I3_BLOCKS);
 
 	if (copied < len) {
@@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
 	if (copied == 0)
 		goto out; /* FIXME: do we need to update inode? */
 
-	if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
-		i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
+	if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
+		i_size_write(inode, (index << PAGE_SHIFT) + end);
 		mark_inode_dirty_sync(inode);
 	}
 
@@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
 	}
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return ret ? ret : copied;
 }
 
@@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct inode *inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 	u64 bix;
 	level_t level;
@@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
 		return __logfs_writepage(page);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (bix > end_index || offset == 0) {
 		unlock_page(page);
 		return 0; /* don't care */
@@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	return __logfs_writepage(page);
 }
 
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 20973c9e52f8..3fb8c6d67303 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
 static void logfs_put_read_page(struct page *page)
 {
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static void logfs_lock_write_page(struct page *page)
@@ -323,7 +323,7 @@ repeat:
 			return NULL;
 		err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			if (err == -EEXIST)
 				goto repeat;
 			return NULL;
@@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page)
 static void logfs_put_write_page(struct page *page)
 {
 	logfs_unlock_write_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
@@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb,
 
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
-		page_cache_release(page);
+		put_page(page);
 		set_page_private(page, 0);
 	}
 	__free_block(sb, block);
@@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page)
 	block->page = page;
 
 	SetPagePrivate(page);
-	page_cache_get(page);
+	get_page(page);
 	set_page_private(page, (unsigned long) block);
 
 	block->ops = &indirect_block_ops;
@@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index)
 
 static int logfs_read_empty(struct page *page)
 {
-	zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+	zero_user_segment(page, 0, PAGE_SIZE);
 	return 0;
 }
 
@@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page,
 	if (err)
 		return err;
 
-	zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
+	zero_user_segment(page, size - pageofs, PAGE_SIZE);
 	return logfs_segment_write(inode, page, shadow);
 }
 
@@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page)
 	block->page = NULL;
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
-		page_cache_release(page);
+		put_page(page);
 		set_page_private(page, 0);
 	}
 }
@@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode)
 
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long) block);
 	}
 
@@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode)
 	logfs_disk_to_inode(di, inode);
 	kunmap_atomic(di);
 	move_page_to_inode(inode, page);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index d270e4b2ab6b..1efd6055f4b0 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
 
 		if (!PagePrivate(page)) {
 			SetPagePrivate(page);
-			page_cache_get(page);
+			get_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 
 		buf += copylen;
 		len -= copylen;
@@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area)
 		memset(page_address(page) + offset, 0xff, len);
 		if (!PagePrivate(page)) {
 			SetPagePrivate(page);
-			page_cache_get(page);
+			get_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area)
 	struct logfs_super *super = logfs_super(sb);
 	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
 	u32 len = super->s_segsize - area->a_used_bytes;
-	pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
-	pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+	pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
+	pgoff_t no_indizes = len >> PAGE_SHIFT;
 	struct page *page;
 
 	while (no_indizes) {
 		page = get_mapping_page(sb, index, 0);
 		BUG_ON(!page);	/* FIXME: reserve a pool */
 		SetPageUptodate(page);
-		memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+		memset(page_address(page), 0xff, PAGE_SIZE);
 		if (!PagePrivate(page)) {
 			SetPagePrivate(page);
-			page_cache_get(page);
+			get_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 		index++;
 		no_indizes--;
 	}
@@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 		memcpy(buf, page_address(page) + offset, copylen);
-		page_cache_release(page);
+		put_page(page);
 
 		buf += copylen;
 		len -= copylen;
@@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page,
 
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long) block);
 	}
 	block->ops = &indirect_block_ops;
@@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page)
 
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
-		page_cache_release(page);
+		put_page(page);
 		set_page_private(page, 0);
 	}
 	block->ops = &btree_block_ops;
@@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno)
 			continue;
 		if (PagePrivate(page)) {
 			ClearPagePrivate(page);
-			page_cache_release(page);
+			put_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 54360293bcb5..5751082dba52 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -48,7 +48,7 @@ void emergency_read_end(struct page *page)
 	if (page == emergency_page)
 		mutex_unlock(&emergency_mutex);
 	else
-		page_cache_release(page);
+		put_page(page);
 }
 
 static void dump_segfile(struct super_block *sb)
@@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb,
 	logfs_set_segment_erased(sb, segno, ec, 0);
 	logfs_write_ds(sb, ds, segno, ec);
 	err = super->s_devops->write_sb(sb, page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb)
 		return NULL;
 	last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
 	if (!last || IS_ERR(last)) {
-		page_cache_release(first);
+		put_page(first);
 		return NULL;
 	}
 
 	if (!logfs_check_ds(page_address(first))) {
-		page_cache_release(last);
+		put_page(last);
 		return first;
 	}
 
 	/* First one didn't work, try the second superblock */
 	if (!logfs_check_ds(page_address(last))) {
-		page_cache_release(first);
+		put_page(first);
 		return last;
 	}
 
 	/* Neither worked, sorry folks */
-	page_cache_release(first);
-	page_cache_release(last);
+	put_page(first);
+	put_page(last);
 	return NULL;
 }
 
@@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb)
 	super->s_data_levels = ds->ds_data_levels;
 	super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
 			+ super->s_data_levels;
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index d19ac258105a..33957c07cd11 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = {
 static inline void dir_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -38,10 +38,10 @@ static inline void dir_put_page(struct page *page)
 static unsigned
 minix_last_byte(struct inode *inode, unsigned long page_nr)
 {
-	unsigned last_byte = PAGE_CACHE_SIZE;
+	unsigned last_byte = PAGE_SIZE;
 
-	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
-		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
+	if (page_nr == (inode->i_size >> PAGE_SHIFT))
+		last_byte = inode->i_size & (PAGE_SIZE - 1);
 	return last_byte;
 }
 
@@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
 	if (pos >= inode->i_size)
 		return 0;
 
-	offset = pos & ~PAGE_CACHE_MASK;
-	n = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_MASK;
+	n = pos >> PAGE_SHIFT;
 
 	for ( ; n < npages; n++, offset = 0) {
 		char *p, *kaddr, *limit;
@@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
 		lock_page(page);
 		kaddr = (char*)page_address(page);
 		dir_end = kaddr + minix_last_byte(dir, n);
-		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
+		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
 			de = (minix_dirent *)p;
 			de3 = (minix3_dirent *)p;
@@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 	}
 
 	kaddr = kmap_atomic(page);
-	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	memset(kaddr, 0, PAGE_SIZE);
 
 	if (sbi->s_version == MINIX_V3) {
 		minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 
 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index a795a11e50c7..2887d1d95ce2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/mpage.c b/fs/mpage.c
index 6bd9fd90964e..eedc644b78d7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 	 * don't make any buffers if there is only one buffer on
 	 * the page and the page just needs to be set up to date
 	 */
-	if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
+	if (inode->i_blkbits == PAGE_SHIFT &&
 	    buffer_uptodate(bh)) {
 		SetPageUptodate(page);
 		return;
@@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
@@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	if (page_has_buffers(page))
 		goto confused;
 
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 	last_block = block_in_file + nr_pages * blocks_per_page;
 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 	if (last_block > last_block_in_file)
@@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	}
 
 	if (first_hole != blocks_per_page) {
-		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
+		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
 		if (first_hole == 0) {
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -331,7 +331,7 @@ confused:
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  * BH_Boundary explanation:
  *
@@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 					&first_logical_block,
 					get_block, gfp);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 	BUG_ON(!list_empty(pages));
 	if (bio)
@@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
 	unsigned long end_index;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	sector_t last_block;
 	sector_t block_in_file;
 	sector_t blocks[MAX_BUF_PER_PAGE];
@@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 	 * The page has no buffers: map it to disk
 	 */
 	BUG_ON(!PageUptodate(page));
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 	last_block = (i_size - 1) >> blkbits;
 	map_bh.b_page = page;
 	for (page_block = 0; page_block < blocks_per_page; ) {
@@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 	first_unmapped = page_block;
 
 page_is_mapped:
-	end_index = i_size >> PAGE_CACHE_SHIFT;
+	end_index = i_size >> PAGE_SHIFT;
 	if (page->index >= end_index) {
 		/*
 		 * The page straddles i_size. It must be zeroed out on each
@@ -584,11 +584,11 @@ page_is_mapped:
 		 * is zeroed when mapped, and writes to that region are not
 		 * written out to the file."
 		 */
-		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
+		unsigned offset = i_size & (PAGE_SIZE - 1);
 
 		if (page->index > end_index || !offset)
 			goto confused;
-		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+		zero_user_segment(page, offset, PAGE_SIZE);
 	}
 
 	/*
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b7f8eaeea5d8..bfdad003ee56 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
 			kunmap(ctl.page);
 			SetPageUptodate(ctl.page);
 			unlock_page(ctl.page);
-			page_cache_release(ctl.page);
+			put_page(ctl.page);
 			ctl.page = NULL;
 		}
 		ctl.idx = 0;
@@ -520,7 +520,7 @@ invalid_cache:
 	if (ctl.page) {
 		kunmap(ctl.page);
 		unlock_page(ctl.page);
-		page_cache_release(ctl.page);
+		put_page(ctl.page);
 		ctl.page = NULL;
 	}
 	ctl.cache = cache;
@@ -554,14 +554,14 @@ finished:
 		kunmap(ctl.page);
 		SetPageUptodate(ctl.page);
 		unlock_page(ctl.page);
-		page_cache_release(ctl.page);
+		put_page(ctl.page);
 	}
 	if (page) {
 		cache->head = ctl.head;
 		kunmap(page);
 		SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 out:
 	return result;
@@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
 			kunmap(ctl.page);
 			SetPageUptodate(ctl.page);
 			unlock_page(ctl.page);
-			page_cache_release(ctl.page);
+			put_page(ctl.page);
 		}
 		ctl.cache = NULL;
 		ctl.idx -= NCP_DIRCACHE_SIZE;
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 5233fbc1747a..17cfb743b5bf 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -191,7 +191,7 @@ struct ncp_cache_head {
 	int		eof;
 };
 
-#define NCP_DIRCACHE_SIZE	((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
+#define NCP_DIRCACHE_SIZE	((int)(PAGE_SIZE/sizeof(struct dentry *)))
 union ncp_dir_cache {
 	struct ncp_cache_head	head;
 	struct dentry		*dentry[NCP_DIRCACHE_SIZE];
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 02e4d87d2ed3..17a42e4eb872 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
 	size_t bytes_left = header->args.count;
 	unsigned int pg_offset = header->args.pgbase, pg_len;
 	struct page **pages = header->args.pages;
-	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+	int pg_index = header->args.pgbase >> PAGE_SHIFT;
 	const bool is_dio = (header->dreq != NULL);
 	struct blk_plug plug;
 	int i;
@@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header)
 		}
 
 		if (is_dio) {
-			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
-				pg_len = PAGE_CACHE_SIZE - pg_offset;
+			if (pg_offset + bytes_left > PAGE_SIZE)
+				pg_len = PAGE_SIZE - pg_offset;
 			else
 				pg_len = bytes_left;
 		} else {
 			BUG_ON(pg_offset != 0);
-			pg_len = PAGE_CACHE_SIZE;
+			pg_len = PAGE_SIZE;
 		}
 
 		if (is_hole(&be)) {
@@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work)
 
 	if (likely(!hdr->pnfs_error)) {
 		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
-		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
+		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
 		u64 end = (hdr->args.offset + hdr->args.count +
-			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;
+			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
 		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
 					(end - start) >> SECTOR_SHIFT);
@@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
 	loff_t offset = header->args.offset;
 	size_t count = header->args.count;
 	struct page **pages = header->args.pages;
-	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+	int pg_index = header->args.pgbase >> PAGE_SHIFT;
 	unsigned int pg_len;
 	struct blk_plug plug;
 	int i;
@@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
 	blk_start_plug(&plug);
 
 	/* we always write out the whole page */
-	offset = offset & (loff_t)PAGE_CACHE_MASK;
+	offset = offset & (loff_t)PAGE_MASK;
 	isect = offset >> SECTOR_SHIFT;
 
 	for (i = pg_index; i < header->page_array.npages; i++) {
@@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
 			extent_length = be.be_length - (isect - be.be_f_offset);
 		}
 
-		pg_len = PAGE_CACHE_SIZE;
+		pg_len = PAGE_SIZE;
 		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
 					 WRITE, isect, pages[i], &map, &be,
 					 bl_end_io_write, par,
@@ -820,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
 	pgoff_t end;
 
 	/* Optimize common case that writes from 0 to end of file */
-	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (end != inode->i_mapping->nrpages) {
 		rcu_read_lock();
 		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
@@ -828,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
 	}
 
 	if (!end)
-		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
+		return i_size_read(inode) - (idx << PAGE_SHIFT);
 	else
-		return (end - idx) << PAGE_CACHE_SHIFT;
+		return (end - idx) << PAGE_SHIFT;
 }
 
 static void
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index bc21205309e0..18e6fd0b9506 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -40,8 +40,8 @@
 #include "../pnfs.h"
 #include "../netns.h"
 
-#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
-#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 
 struct pnfs_block_dev;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d6d5d2a48e83..0c96528db94a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
 		server->rsize = max_rpc_payload;
 	if (server->rsize > NFS_MAX_FILE_IO_SIZE)
 		server->rsize = NFS_MAX_FILE_IO_SIZE;
-	server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	server->backing_dev_info.name = "nfs";
 	server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
@@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
 		server->wsize = max_rpc_payload;
 	if (server->wsize > NFS_MAX_FILE_IO_SIZE)
 		server->wsize = NFS_MAX_FILE_IO_SIZE;
-	server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
 
 	server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
-	if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
-		server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
+	if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES)
+		server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES;
 	if (server->dtsize > server->rsize)
 		server->dtsize = server->rsize;
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4bfa7d8bcade..adef506c5786 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
 {
 	if (!desc->page->mapping)
 		nfs_readdir_clear_array(desc->page);
-	page_cache_release(desc->page);
+	put_page(desc->page);
 	desc->page = NULL;
 }
 
@@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
 		 * add_to_page_cache_lru() grabs an extra page refcount.
 		 * Drop it here to avoid leaking this page later.
 		 */
-		page_cache_release(page);
+		put_page(page);
 	} else
 		__free_page(page);
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 7a0cfd3266e5..c93826e4a8c6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 {
 	unsigned int i;
 	for (i = 0; i < npages; i++)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 }
 
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
@@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 			iov_iter_count(iter));
 
 	pos = iocb->ki_pos;
-	end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
+	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 
 	inode_lock(inode);
 
@@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
 	if (mapping->nrpages) {
 		result = invalidate_inode_pages2_range(mapping,
-					pos >> PAGE_CACHE_SHIFT, end);
+					pos >> PAGE_SHIFT, end);
 		if (result)
 			goto out_unlock;
 	}
@@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					pos >> PAGE_CACHE_SHIFT, end);
+					pos >> PAGE_SHIFT, end);
 	}
 
 	inode_unlock(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 89bf093d342a..be01095b97ae 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -320,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page,
 		loff_t pos, unsigned len)
 {
 	unsigned int pglen = nfs_page_length(page);
-	unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned int offset = pos & (PAGE_SIZE - 1);
 	unsigned int end = offset + len;
 
 	if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 			struct page **pagep, void **fsdata)
 {
 	int ret;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int once_thru = 0;
 
@@ -380,12 +380,12 @@ start:
 	ret = nfs_flush_incompatible(file, page);
 	if (ret) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	} else if (!once_thru &&
 		   nfs_want_read_modify_write(file, page, pos, len)) {
 		once_thru = 1;
 		ret = nfs_readpage(file, page);
-		page_cache_release(page);
+		put_page(page);
 		if (!ret)
 			goto start;
 	}
@@ -396,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned offset = pos & (PAGE_SIZE - 1);
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	int status;
 
@@ -413,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 
 		if (pglen == 0) {
 			zero_user_segments(page, 0, offset,
-					end, PAGE_CACHE_SIZE);
+					end, PAGE_SIZE);
 			SetPageUptodate(page);
 		} else if (end >= pglen) {
-			zero_user_segment(page, end, PAGE_CACHE_SIZE);
+			zero_user_segment(page, end, PAGE_SIZE);
 			if (offset == 0)
 				SetPageUptodate(page);
 		} else
-			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+			zero_user_segment(page, pglen, PAGE_SIZE);
 	}
 
 	status = nfs_updatepage(file, page, offset, copied);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (status < 0)
 		return status;
@@ -454,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
 	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
 		 page, offset, length);
 
-	if (offset != 0 || length < PAGE_CACHE_SIZE)
+	if (offset != 0 || length < PAGE_SIZE)
 		return;
 	/* Cancel any unstarted writes on this page */
 	nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 565f8135ae1f..f1d1d2c472e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -638,11 +638,11 @@ unsigned int nfs_page_length(struct page *page)
 
 	if (i_size > 0) {
 		pgoff_t page_index = page_file_index(page);
-		pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+		pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
 		if (page_index < end_index)
-			return PAGE_CACHE_SIZE;
+			return PAGE_SIZE;
 		if (page_index == end_index)
-			return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+			return ((i_size - 1) & ~PAGE_MASK) + 1;
 	}
 	return 0;
 }
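
Note: the nfs_page_length() hunk above depends on the identity that ((i_size - 1) & ~PAGE_MASK) + 1 gives the number of valid bytes in the page holding a file's last byte. A small self-checking sketch, assuming 4 KiB pages (illustrative only, not kernel code):

#include <assert.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Bytes of valid data in the page that holds the last byte of the file. */
static unsigned long tail_page_bytes(unsigned long i_size)
{
	return ((i_size - 1) & ~PAGE_MASK) + 1;
}

int main(void)
{
	assert(tail_page_bytes(8192) == 4096);	/* i_size ends exactly on a page boundary */
	assert(tail_page_bytes(8193) == 1);	/* one byte spills into the next page */
	return 0;
}
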
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e4441216804..88474a4fc669 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr,
 		blocksize = be32_to_cpup(p);
 		maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
 	}
-	maxsize >>= PAGE_CACHE_SHIFT;
+	maxsize >>= PAGE_SHIFT;
 	*pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
 	return 0;
 out_overflow:
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9aebffb40505..049c1b1f2932 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page)
 	dprintk("%s: index=0x%lx\n", __func__,
 		(page == ZERO_PAGE(0)) ? -1UL : page->index);
 	if (ZERO_PAGE(0) != page)
-		page_cache_release(page);
+		put_page(page);
 	return;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8ce4f61cbaa5..1f6db4231057 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 	 * update_nfs_request below if the region is not locked. */
 	req->wb_page = page;
 	req->wb_index = page_file_index(page);
-	page_cache_get(page);
+	get_page(page);
 	req->wb_offset = offset;
 	req->wb_pgbase = offset;
 	req->wb_bytes = count;
@@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req)
 	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 
 	if (page != NULL) {
-		page_cache_release(page);
+		put_page(page);
 		req->wb_page = NULL;
 	}
 	if (l_ctx != NULL) {
@@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 			return false;
 		} else {
 			if (req->wb_pgbase != 0 ||
-			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
 				return false;
 		}
 	}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2fa483e6dbe2..89a5ef4df08a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 
 	i_size = i_size_read(ino);
 
-	lgp->args.minlength = PAGE_CACHE_SIZE;
+	lgp->args.minlength = PAGE_SIZE;
 	if (lgp->args.minlength > range->length)
 		lgp->args.minlength = range->length;
 	if (range->iomode == IOMODE_READ) {
@@ -1618,13 +1618,13 @@ lookup_again:
 		spin_unlock(&clp->cl_lock);
 	}
 
-	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+	pg_offset = arg.offset & ~PAGE_MASK;
 	if (pg_offset) {
 		arg.offset -= pg_offset;
 		arg.length += pg_offset;
 	}
 	if (arg.length != NFS4_MAX_UINT64)
-		arg.length = PAGE_CACHE_ALIGN(arg.length);
+		arg.length = PAGE_ALIGN(arg.length);
 
 	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
 	atomic_dec(&lo->plh_outstanding);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb31e23e7def..6776d7a7839e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
 static
 int nfs_return_empty_page(struct page *page)
 {
-	zero_user(page, 0, PAGE_CACHE_SIZE);
+	zero_user(page, 0, PAGE_SIZE);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
@@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		unlock_page(page);
 		return PTR_ERR(new);
 	}
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 
 	nfs_pageio_init_read(&pgio, inode, false,
 			     &nfs_async_read_completion_ops);
@@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
 	int		error;
 
 	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
-		page, PAGE_CACHE_SIZE, page_file_index(page));
+		page, PAGE_SIZE, page_file_index(page));
 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
 	nfs_add_stats(inode, NFSIOS_READPAGES, 1);
 
@@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page)
 	if (IS_ERR(new))
 		goto out_error;
 
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 	if (!nfs_pageio_add_request(desc->pgio, new)) {
 		nfs_list_remove_request(new);
 		nfs_readpage_release(new);
@@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
 
 	pgm = &pgio.pg_mirrors[0];
 	NFS_I(inode)->read_io += pgm->pg_bytes_written;
-	npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
-		 PAGE_CACHE_SHIFT;
+	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
+		 PAGE_SHIFT;
 	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 read_complete:
 	put_nfs_open_context(desc.ctx);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5754835a2886..5f4fd53e5764 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
 
 	spin_lock(&inode->i_lock);
 	i_size = i_size_read(inode);
-	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size - 1) >> PAGE_SHIFT;
 	if (i_size > 0 && page_file_index(page) < end_index)
 		goto out;
 	end = page_file_offset(page) + ((loff_t)offset+count);
@@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
 	loff_t range_start = page_file_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 27f75bcbeb30..a9fb3636c142 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
 	struct buffer_head *pbh;
 	__u64 key;
 
-	key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
+	key = page_index(bh->b_page) << (PAGE_SHIFT -
 					 bmap->b_inode->i_blkbits);
 	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
 		key++;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35e6932..e0c9daf9aa22 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 	set_buffer_uptodate(bh);
 
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	return bh;
 }
 
@@ -128,7 +128,7 @@ found:
 
 out_locked:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
 	pgoff_t index = page_index(page);
 	int still_dirty;
 
-	page_cache_get(page);
+	get_page(page);
 	lock_page(page);
 	wait_on_page_writeback(page);
 
@@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
 	still_dirty = PageDirty(page);
 	mapping = page->mapping;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (!still_dirty && mapping)
 		invalidate_inode_pages2_range(mapping, index, index);
@@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 	obh = ctxt->bh;
 	ctxt->newbh = NULL;
 
-	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
+	if (inode->i_blkbits == PAGE_SHIFT) {
 		lock_page(obh->b_page);
 		/*
 		 * We cannot call radix_tree_preload for the kernels older
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 6b8b92b19cec..e08f064e4bd7 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode)
 static inline void nilfs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page)
 	unsigned chunk_size = nilfs_chunk_size(dir);
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	struct nilfs_dir_entry *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -161,7 +161,7 @@ Espan:
 bad_entry:
 	nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
 		    "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-		    dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		    dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		    (unsigned long) le64_to_cpu(p->inode),
 		    rec_len, p->name_len);
 	goto fail;
@@ -170,7 +170,7 @@ Eend:
 	nilfs_error(sb, "nilfs_check_page",
 		    "entry in directory #%lu spans the page boundary"
 		    "offset=%lu, inode=%lu",
-		    dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		    dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 		    (unsigned long) le64_to_cpu(p->inode));
 fail:
 	SetPageChecked(page);
@@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 /* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
 
@@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
 		if (IS_ERR(page)) {
 			nilfs_error(sb, __func__, "bad page in #%lu",
 				    inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return -EIO;
 		}
 		kaddr = page_address(page);
@@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
 		if (++n >= npages)
 			n = 0;
 		/* next page is past the blocks we've got */
-		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
 			nilfs_error(dir->i_sb, __func__,
 				    "dir %lu size %lld exceeds block count %llu",
 				    dir->i_ino, dir->i_size,
@@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 	if (de) {
 		res = le64_to_cpu(de->inode);
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return res;
 }
@@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + nilfs_last_byte(dir, n);
 		de = (struct nilfs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 	kunmap_atomic(kaddr);
 	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 748ca238915a..0224b7826ace 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 
  failed:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	return err;
 }
 
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 21a1e2e0d92f..534631358b13 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page)
 		if (nr_dirty)
 			nilfs_set_file_dirty(inode, nr_dirty);
 	} else if (ret) {
-		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 
 		nilfs_set_file_dirty(inode, nr_dirty);
 	}
@@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 			struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned nr_dirty;
 	int err;
 
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40233ff..f6982b9153d5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
 
  failed_bh:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	brelse(bh);
 
  failed_unlock:
@@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
 
  failed_bh:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	brelse(bh);
  failed:
 	return ret;
@@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
 int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 {
 	pgoff_t index = (pgoff_t)block >>
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
+		(PAGE_SHIFT - inode->i_blkbits);
 	struct page *page;
 	unsigned long first_block;
 	int ret = 0;
@@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 	wait_on_page_writeback(page);
 
 	first_block = (unsigned long)index <<
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
+		(PAGE_SHIFT - inode->i_blkbits);
 	if (page_has_buffers(page)) {
 		struct buffer_head *bh;
 
@@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 	}
 	still_dirty = PageDirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (still_dirty ||
 	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
@@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
@@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
 			bh_frozen = nilfs_page_get_nth_block(page, n);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return bh_frozen;
 }
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 7ccdb961eea9..151bc19d47c0 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	nilfs_transaction_abort(old_dir->i_sb);
 	return err;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index c20df77eff99..489391561cda 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, 1 << blkbits, b_state);
 
-	first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
+	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
 	bh = nilfs_page_get_nth_block(page, block - first_block);
 
 	touch_buffer(bh);
@@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 			    unsigned long b_state)
 {
 	int blkbits = inode->i_blkbits;
-	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
+	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
 	struct page *page;
 	struct buffer_head *bh;
 
@@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
 	if (unlikely(!bh)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return NULL;
 	}
 	return bh;
@@ -288,7 +288,7 @@ repeat:
 		__set_page_dirty_nobuffers(dpage);
 
 		unlock_page(dpage);
-		page_cache_release(dpage);
+		put_page(dpage);
 		unlock_page(page);
 	}
 	pagevec_release(&pvec);
@@ -333,7 +333,7 @@ repeat:
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
-			page_cache_release(dpage);
+			put_page(dpage);
 		} else {
 			struct page *page2;
 
@@ -350,7 +350,7 @@ repeat:
 			if (unlikely(err < 0)) {
 				WARN_ON(err == -EEXIST);
 				page->mapping = NULL;
-				page_cache_release(page); /* for cache */
+				put_page(page); /* for cache */
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
@@ -523,8 +523,8 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
 	if (inode->i_mapping->nrpages == 0)
 		return 0;
 
-	index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
+	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
 
 	pagevec_init(&pvec, 0);
 
@@ -537,7 +537,7 @@ repeat:
 	if (length > 0 && pvec.pages[0]->index > index)
 		goto out;
 
-	b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
 	i = 0;
 	do {
 		page = pvec.pages[i];
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 9b4f205d1173..5afa77fadc11 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
 					blocksize, page, NULL);
 
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		(*nr_salvaged_blocks)++;
 		goto next;
 
  failed_page:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
  failed_inode:
 		printk(KERN_WARNING
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65adaae7e4..4317f72568e6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		goto failed_to_write;
 
 	if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
-	    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+	    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
 		/*
 		 * At this point, we avoid double buffering
 		 * for blocksize < pagesize because page dirty
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7521e11db728..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 
 	set_buffer_uptodate(bh);
 
-	file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
+	file_ofs = ((s64)page->index << PAGE_SHIFT) +
 			bh_offset(bh);
 	read_lock_irqsave(&ni->size_lock, flags);
 	init_size = ni->initialized_size;
@@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		u32 rec_size;
 
 		rec_size = ni->itype.index.block_size;
-		recs = PAGE_CACHE_SIZE / rec_size;
+		recs = PAGE_SIZE / rec_size;
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
 		local_irq_save(flags);
@@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)
 	 * fully truncated, truncate will throw it away as soon as we unlock
 	 * it so no need to worry what we do with it.
 	 */
-	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 	read_lock_irqsave(&ni->size_lock, flags);
 	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
 	init_size = ni->initialized_size;
@@ -412,9 +412,9 @@ retry_readpage:
412 vi = page->mapping->host; 412 vi = page->mapping->host;
413 i_size = i_size_read(vi); 413 i_size = i_size_read(vi);
414 /* Is the page fully outside i_size? (truncate in progress) */ 414 /* Is the page fully outside i_size? (truncate in progress) */
415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 415 if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
416 PAGE_CACHE_SHIFT)) { 416 PAGE_SHIFT)) {
417 zero_user(page, 0, PAGE_CACHE_SIZE); 417 zero_user(page, 0, PAGE_SIZE);
418 ntfs_debug("Read outside i_size - truncated?"); 418 ntfs_debug("Read outside i_size - truncated?");
419 goto done; 419 goto done;
420 } 420 }
@@ -463,7 +463,7 @@ retry_readpage:
463 * ok to ignore the compressed flag here. 463 * ok to ignore the compressed flag here.
464 */ 464 */
465 if (unlikely(page->index > 0)) { 465 if (unlikely(page->index > 0)) {
466 zero_user(page, 0, PAGE_CACHE_SIZE); 466 zero_user(page, 0, PAGE_SIZE);
467 goto done; 467 goto done;
468 } 468 }
469 if (!NInoAttr(ni)) 469 if (!NInoAttr(ni))
@@ -509,7 +509,7 @@ retry_readpage:
509 le16_to_cpu(ctx->attr->data.resident.value_offset), 509 le16_to_cpu(ctx->attr->data.resident.value_offset),
510 attr_len); 510 attr_len);
511 /* Zero the remainder of the page. */ 511 /* Zero the remainder of the page. */
512 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 512 memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
513 flush_dcache_page(page); 513 flush_dcache_page(page);
514 kunmap_atomic(addr); 514 kunmap_atomic(addr);
515 put_unm_err_out: 515 put_unm_err_out:
@@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
599 /* NOTE: Different naming scheme to ntfs_read_block()! */ 599 /* NOTE: Different naming scheme to ntfs_read_block()! */
600 600
601 /* The first block in the page. */ 601 /* The first block in the page. */
602 block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); 602 block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
603 603
604 read_lock_irqsave(&ni->size_lock, flags); 604 read_lock_irqsave(&ni->size_lock, flags);
605 i_size = i_size_read(vi); 605 i_size = i_size_read(vi);
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
674 // in the inode. 674 // in the inode.
675 // Again, for each page do: 675 // Again, for each page do:
676 // __set_page_dirty_buffers(); 676 // __set_page_dirty_buffers();
677 // page_cache_release() 677 // put_page()
678 // We don't need to wait on the writes. 678 // We don't need to wait on the writes.
679 // Update iblock. 679 // Update iblock.
680 } 680 }
@@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,
925 ntfs_volume *vol = ni->vol; 925 ntfs_volume *vol = ni->vol;
926 u8 *kaddr; 926 u8 *kaddr;
927 unsigned int rec_size = ni->itype.index.block_size; 927 unsigned int rec_size = ni->itype.index.block_size;
928 ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; 928 ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
929 struct buffer_head *bh, *head, *tbh, *rec_start_bh; 929 struct buffer_head *bh, *head, *tbh, *rec_start_bh;
930 struct buffer_head *bhs[MAX_BUF_PER_PAGE]; 930 struct buffer_head *bhs[MAX_BUF_PER_PAGE];
931 runlist_element *rl; 931 runlist_element *rl;
@@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,
949 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); 949 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
950 bh_size = vol->sb->s_blocksize; 950 bh_size = vol->sb->s_blocksize;
951 bh_size_bits = vol->sb->s_blocksize_bits; 951 bh_size_bits = vol->sb->s_blocksize_bits;
952 max_bhs = PAGE_CACHE_SIZE / bh_size; 952 max_bhs = PAGE_SIZE / bh_size;
953 BUG_ON(!max_bhs); 953 BUG_ON(!max_bhs);
954 BUG_ON(max_bhs > MAX_BUF_PER_PAGE); 954 BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
955 955
@@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,
961 BUG_ON(!bh); 961 BUG_ON(!bh);
962 962
963 rec_size_bits = ni->itype.index.block_size_bits; 963 rec_size_bits = ni->itype.index.block_size_bits;
964 BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); 964 BUG_ON(!(PAGE_SIZE >> rec_size_bits));
965 bhs_per_rec = rec_size >> bh_size_bits; 965 bhs_per_rec = rec_size >> bh_size_bits;
966 BUG_ON(!bhs_per_rec); 966 BUG_ON(!bhs_per_rec);
967 967
968 /* The first block in the page. */ 968 /* The first block in the page. */
969 rec_block = block = (sector_t)page->index << 969 rec_block = block = (sector_t)page->index <<
970 (PAGE_CACHE_SHIFT - bh_size_bits); 970 (PAGE_SHIFT - bh_size_bits);
971 971
972 /* The first out of bounds block for the data size. */ 972 /* The first out of bounds block for the data size. */
973 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; 973 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@ lock_retry_remap:
1133 unsigned long mft_no; 1133 unsigned long mft_no;
1134 1134
1135 /* Get the mft record number. */ 1135 /* Get the mft record number. */
1136 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) 1136 mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1137 >> rec_size_bits; 1137 >> rec_size_bits;
1138 /* Check whether to write this mft record. */ 1138 /* Check whether to write this mft record. */
1139 tni = NULL; 1139 tni = NULL;
@@ -1249,7 +1249,7 @@ do_mirror:
1249 continue; 1249 continue;
1250 ofs = bh_offset(tbh); 1250 ofs = bh_offset(tbh);
1251 /* Get the mft record number. */ 1251 /* Get the mft record number. */
1252 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) 1252 mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1253 >> rec_size_bits; 1253 >> rec_size_bits;
1254 if (mft_no < vol->mftmirr_size) 1254 if (mft_no < vol->mftmirr_size)
1255 ntfs_sync_mft_mirror(vol, mft_no, 1255 ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@ done:
1300 * Set page error if there is only one ntfs record in the page. 1300 * Set page error if there is only one ntfs record in the page.
1301 * Otherwise we would lose per-record granularity. 1301 * Otherwise we would lose per-record granularity.
1302 */ 1302 */
1303 if (ni->itype.index.block_size == PAGE_CACHE_SIZE) 1303 if (ni->itype.index.block_size == PAGE_SIZE)
1304 SetPageError(page); 1304 SetPageError(page);
1305 NVolSetErrors(vol); 1305 NVolSetErrors(vol);
1306 } 1306 }
@@ -1308,7 +1308,7 @@ done:
1308 ntfs_debug("Page still contains one or more dirty ntfs " 1308 ntfs_debug("Page still contains one or more dirty ntfs "
1309 "records. Redirtying the page starting at " 1309 "records. Redirtying the page starting at "
1310 "record 0x%lx.", page->index << 1310 "record 0x%lx.", page->index <<
1311 (PAGE_CACHE_SHIFT - rec_size_bits)); 1311 (PAGE_SHIFT - rec_size_bits));
1312 redirty_page_for_writepage(wbc, page); 1312 redirty_page_for_writepage(wbc, page);
1313 unlock_page(page); 1313 unlock_page(page);
1314 } else { 1314 } else {
@@ -1365,13 +1365,13 @@ retry_writepage:
1365 BUG_ON(!PageLocked(page)); 1365 BUG_ON(!PageLocked(page));
1366 i_size = i_size_read(vi); 1366 i_size = i_size_read(vi);
1367 /* Is the page fully outside i_size? (truncate in progress) */ 1367 /* Is the page fully outside i_size? (truncate in progress) */
1368 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 1368 if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
1369 PAGE_CACHE_SHIFT)) { 1369 PAGE_SHIFT)) {
1370 /* 1370 /*
1371 * The page may have dirty, unmapped buffers. Make them 1371 * The page may have dirty, unmapped buffers. Make them
1372 * freeable here, so the page does not leak. 1372 * freeable here, so the page does not leak.
1373 */ 1373 */
1374 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1374 block_invalidatepage(page, 0, PAGE_SIZE);
1375 unlock_page(page); 1375 unlock_page(page);
1376 ntfs_debug("Write outside i_size - truncated?"); 1376 ntfs_debug("Write outside i_size - truncated?");
1377 return 0; 1377 return 0;
@@ -1414,10 +1414,10 @@ retry_writepage:
1414 /* NInoNonResident() == NInoIndexAllocPresent() */ 1414 /* NInoNonResident() == NInoIndexAllocPresent() */
1415 if (NInoNonResident(ni)) { 1415 if (NInoNonResident(ni)) {
1416 /* We have to zero every time due to mmap-at-end-of-file. */ 1416 /* We have to zero every time due to mmap-at-end-of-file. */
1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { 1417 if (page->index >= (i_size >> PAGE_SHIFT)) {
1418 /* The page straddles i_size. */ 1418 /* The page straddles i_size. */
1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; 1419 unsigned int ofs = i_size & ~PAGE_MASK;
1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE); 1420 zero_user_segment(page, ofs, PAGE_SIZE);
1421 } 1421 }
1422 /* Handle mst protected attributes. */ 1422 /* Handle mst protected attributes. */
1423 if (NInoMstProtected(ni)) 1423 if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@ retry_writepage:
1500 le16_to_cpu(ctx->attr->data.resident.value_offset), 1500 le16_to_cpu(ctx->attr->data.resident.value_offset),
1501 addr, attr_len); 1501 addr, attr_len);
1502 /* Zero out of bounds area in the page cache page. */ 1502 /* Zero out of bounds area in the page cache page. */
1503 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1503 memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
1504 kunmap_atomic(addr); 1504 kunmap_atomic(addr);
1505 flush_dcache_page(page); 1505 flush_dcache_page(page);
1506 flush_dcache_mft_record_page(ctx->ntfs_ino); 1506 flush_dcache_mft_record_page(ctx->ntfs_ino);
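
Several aops.c hunks recompute the page-fully-outside-i_size test with PAGE_SIZE. A standalone example of how that round-up behaves, with made-up sizes (PAGE_SHIFT assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        int64_t i_size = 10000;          /* file ends inside page 2 */
        /* Number of pages needed to cover i_size, rounded up. */
        uint64_t end_index = (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (uint64_t index = 0; index < 4; index++)
                printf("page %llu: %s\n", (unsigned long long)index,
                       index >= end_index ? "fully outside i_size"
                                          : "contains file data");
        return 0;
}

A page at or beyond end_index is handed back zeroed (read) or invalidated (write), exactly the two truncate-race branches shown above.
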
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index caecc58f529c..820d6eabf60f 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -40,7 +40,7 @@
40 static inline void ntfs_unmap_page(struct page *page) 40 static inline void ntfs_unmap_page(struct page *page)
41 { 41 {
42 kunmap(page); 42 kunmap(page);
43 page_cache_release(page); 43 put_page(page);
44 } 44 }
45 45
46 /** 46 /**
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
49 * @index: index into the page cache for @mapping of the page to map 49 * @index: index into the page cache for @mapping of the page to map
50 * 50 *
51 * Read a page from the page cache of the address space @mapping at position 51 * Read a page from the page cache of the address space @mapping at position
52 * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes. 52 * @index, where @index is in units of PAGE_SIZE, and not in bytes.
53 * 53 *
54 * If the page is not in memory it is loaded from disk first using the readpage 54 * If the page is not in memory it is loaded from disk first using the readpage
55 * method defined in the address space operations of @mapping and the page is 55 * method defined in the address space operations of @mapping and the page is
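
As the aops.h comment notes, ntfs_map_page() takes its index in page units rather than bytes. A small sketch of the conversion a caller performs before mapping (the byte offset and PAGE_SHIFT are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t byte_ofs = 123456;
        uint64_t index = byte_ofs >> PAGE_SHIFT;        /* page units */
        unsigned in_page = byte_ofs & (PAGE_SIZE - 1);  /* byte within page */
        printf("byte %llu -> page %llu + %u\n",
               (unsigned long long)byte_ofs,
               (unsigned long long)index, in_page);
        return 0;
}
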
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 250ed5b20c8f..44a39a099b54 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
152 if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino != 152 if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
153 old_ctx.base_ntfs_ino) { 153 old_ctx.base_ntfs_ino) {
154 put_this_page = old_ctx.ntfs_ino->page; 154 put_this_page = old_ctx.ntfs_ino->page;
155 page_cache_get(put_this_page); 155 get_page(put_this_page);
156 } 156 }
157 /* 157 /*
158 * Reinitialize the search context so we can lookup the 158 * Reinitialize the search context so we can lookup the
@@ -275,7 +275,7 @@ retry_map:
275 * the pieces anyway. 275 * the pieces anyway.
276 */ 276 */
277 if (put_this_page) 277 if (put_this_page)
278 page_cache_release(put_this_page); 278 put_page(put_this_page);
279 } 279 }
280 return err; 280 return err;
281 } 281 }
@@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1660 memcpy(kaddr, (u8*)a + 1660 memcpy(kaddr, (u8*)a +
1661 le16_to_cpu(a->data.resident.value_offset), 1661 le16_to_cpu(a->data.resident.value_offset),
1662 attr_size); 1662 attr_size);
1663 memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); 1663 memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
1664 kunmap_atomic(kaddr); 1664 kunmap_atomic(kaddr);
1665 flush_dcache_page(page); 1665 flush_dcache_page(page);
1666 SetPageUptodate(page); 1666 SetPageUptodate(page);
@@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1748 if (page) { 1748 if (page) {
1749 set_page_dirty(page); 1749 set_page_dirty(page);
1750 unlock_page(page); 1750 unlock_page(page);
1751 page_cache_release(page); 1751 put_page(page);
1752 } 1752 }
1753 ntfs_debug("Done."); 1753 ntfs_debug("Done.");
1754 return 0; 1754 return 0;
@@ -1835,7 +1835,7 @@ rl_err_out:
1835 ntfs_free(rl); 1835 ntfs_free(rl);
1836 page_err_out: 1836 page_err_out:
1837 unlock_page(page); 1837 unlock_page(page);
1838 page_cache_release(page); 1838 put_page(page);
1839 } 1839 }
1840 if (err == -EINVAL) 1840 if (err == -EINVAL)
1841 err = -EIO; 1841 err = -EIO;
@@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2513 BUG_ON(NInoEncrypted(ni)); 2513 BUG_ON(NInoEncrypted(ni));
2514 mapping = VFS_I(ni)->i_mapping; 2514 mapping = VFS_I(ni)->i_mapping;
2515 /* Work out the starting index and page offset. */ 2515 /* Work out the starting index and page offset. */
2516 idx = ofs >> PAGE_CACHE_SHIFT; 2516 idx = ofs >> PAGE_SHIFT;
2517 start_ofs = ofs & ~PAGE_CACHE_MASK; 2517 start_ofs = ofs & ~PAGE_MASK;
2518 /* Work out the ending index and page offset. */ 2518 /* Work out the ending index and page offset. */
2519 end = ofs + cnt; 2519 end = ofs + cnt;
2520 end_ofs = end & ~PAGE_CACHE_MASK; 2520 end_ofs = end & ~PAGE_MASK;
2521 /* If the end is outside the inode size return -ESPIPE. */ 2521 /* If the end is outside the inode size return -ESPIPE. */
2522 if (unlikely(end > i_size_read(VFS_I(ni)))) { 2522 if (unlikely(end > i_size_read(VFS_I(ni)))) {
2523 ntfs_error(vol->sb, "Request exceeds end of attribute."); 2523 ntfs_error(vol->sb, "Request exceeds end of attribute.");
2524 return -ESPIPE; 2524 return -ESPIPE;
2525 } 2525 }
2526 end >>= PAGE_CACHE_SHIFT; 2526 end >>= PAGE_SHIFT;
2527 /* If there is a first partial page, need to do it the slow way. */ 2527 /* If there is a first partial page, need to do it the slow way. */
2528 if (start_ofs) { 2528 if (start_ofs) {
2529 page = read_mapping_page(mapping, idx, NULL); 2529 page = read_mapping_page(mapping, idx, NULL);
@@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2536 * If the last page is the same as the first page, need to 2536 * If the last page is the same as the first page, need to
2537 * limit the write to the end offset. 2537 * limit the write to the end offset.
2538 */ 2538 */
2539 size = PAGE_CACHE_SIZE; 2539 size = PAGE_SIZE;
2540 if (idx == end) 2540 if (idx == end)
2541 size = end_ofs; 2541 size = end_ofs;
2542 kaddr = kmap_atomic(page); 2542 kaddr = kmap_atomic(page);
@@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2544 flush_dcache_page(page); 2544 flush_dcache_page(page);
2545 kunmap_atomic(kaddr); 2545 kunmap_atomic(kaddr);
2546 set_page_dirty(page); 2546 set_page_dirty(page);
2547 page_cache_release(page); 2547 put_page(page);
2548 balance_dirty_pages_ratelimited(mapping); 2548 balance_dirty_pages_ratelimited(mapping);
2549 cond_resched(); 2549 cond_resched();
2550 if (idx == end) 2550 if (idx == end)
@@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2561 return -ENOMEM; 2561 return -ENOMEM;
2562 } 2562 }
2563 kaddr = kmap_atomic(page); 2563 kaddr = kmap_atomic(page);
2564 memset(kaddr, val, PAGE_CACHE_SIZE); 2564 memset(kaddr, val, PAGE_SIZE);
2565 flush_dcache_page(page); 2565 flush_dcache_page(page);
2566 kunmap_atomic(kaddr); 2566 kunmap_atomic(kaddr);
2567 /* 2567 /*
@@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2585 set_page_dirty(page); 2585 set_page_dirty(page);
2586 /* Finally unlock and release the page. */ 2586 /* Finally unlock and release the page. */
2587 unlock_page(page); 2587 unlock_page(page);
2588 page_cache_release(page); 2588 put_page(page);
2589 balance_dirty_pages_ratelimited(mapping); 2589 balance_dirty_pages_ratelimited(mapping);
2590 cond_resched(); 2590 cond_resched();
2591 } 2591 }
@@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2602 flush_dcache_page(page); 2602 flush_dcache_page(page);
2603 kunmap_atomic(kaddr); 2603 kunmap_atomic(kaddr);
2604 set_page_dirty(page); 2604 set_page_dirty(page);
2605 page_cache_release(page); 2605 put_page(page);
2606 balance_dirty_pages_ratelimited(mapping); 2606 balance_dirty_pages_ratelimited(mapping);
2607 cond_resched(); 2607 cond_resched();
2608 } 2608 }
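
ntfs_attr_set() walks a byte range as a leading partial page, a run of whole pages, and a trailing partial page, which is why the hunk touches both the shift and the mask macros. A hedged userspace rendering of that split; memset_page() is a made-up stand-in for the kmap/memset/set_page_dirty sequence, and the constants are assumptions:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Hypothetical per-page worker standing in for the real page I/O. */
static void memset_page(uint64_t idx, unsigned from, unsigned to)
{
        printf("page %llu: set bytes [%u, %u)\n",
               (unsigned long long)idx, from, to);
}

int main(void)
{
        int64_t ofs = 1000, cnt = 10000, end = ofs + cnt;
        uint64_t idx = ofs >> PAGE_SHIFT;
        unsigned start_ofs = ofs & ~PAGE_MASK;   /* offset in first page */
        unsigned end_ofs = end & ~PAGE_MASK;     /* offset in last page */
        uint64_t end_idx = end >> PAGE_SHIFT;

        if (start_ofs) {                         /* leading partial page */
                unsigned to = (idx == end_idx) ? end_ofs : PAGE_SIZE;
                memset_page(idx++, start_ofs, to);
        }
        while (idx < end_idx)                    /* whole pages */
                memset_page(idx++, 0, PAGE_SIZE);
        if (idx == end_idx && end_ofs)           /* trailing partial page */
                memset_page(idx, 0, end_ofs);
        return 0;
}
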
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 0809cf876098..ec130c588d2b 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
67 * Calculate the indices for the pages containing the first and last 67 * Calculate the indices for the pages containing the first and last
68 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively. 68 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
69 */ 69 */
70 index = start_bit >> (3 + PAGE_CACHE_SHIFT); 70 index = start_bit >> (3 + PAGE_SHIFT);
71 end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT); 71 end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
72 72
73 /* Get the page containing the first bit (@start_bit). */ 73 /* Get the page containing the first bit (@start_bit). */
74 mapping = vi->i_mapping; 74 mapping = vi->i_mapping;
@@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
82 kaddr = page_address(page); 82 kaddr = page_address(page);
83 83
84 /* Set @pos to the position of the byte containing @start_bit. */ 84 /* Set @pos to the position of the byte containing @start_bit. */
85 pos = (start_bit >> 3) & ~PAGE_CACHE_MASK; 85 pos = (start_bit >> 3) & ~PAGE_MASK;
86 86
87 /* Calculate the position of @start_bit in the first byte. */ 87 /* Calculate the position of @start_bit in the first byte. */
88 bit = start_bit & 7; 88 bit = start_bit & 7;
@@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
108 * Depending on @value, modify all remaining whole bytes in the page up 108 * Depending on @value, modify all remaining whole bytes in the page up
109 * to @cnt. 109 * to @cnt.
110 */ 110 */
111 len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos); 111 len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
112 memset(kaddr + pos, value ? 0xff : 0, len); 112 memset(kaddr + pos, value ? 0xff : 0, len);
113 cnt -= len << 3; 113 cnt -= len << 3;
114 114
@@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
132 * Depending on @value, modify all remaining whole bytes in the 132 * Depending on @value, modify all remaining whole bytes in the
133 * page up to @cnt. 133 * page up to @cnt.
134 */ 134 */
135 len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE); 135 len = min_t(s64, cnt >> 3, PAGE_SIZE);
136 memset(kaddr, value ? 0xff : 0, len); 136 memset(kaddr, value ? 0xff : 0, len);
137 cnt -= len << 3; 137 cnt -= len << 3;
138 } 138 }
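
The bitmap.c hunks rely on start_bit >> (3 + PAGE_SHIFT): the 3 converts bits to bytes, PAGE_SHIFT converts bytes to pages. A tiny check of that arithmetic (all constants assumed for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        int64_t start_bit = 100000;
        uint64_t page = start_bit >> (3 + PAGE_SHIFT); /* bits->bytes->pages */
        unsigned byte_in_page = (start_bit >> 3) & (PAGE_SIZE - 1);
        unsigned bit_in_byte = start_bit & 7;
        printf("bit %lld lives in page %llu, byte %u, bit %u "
               "(%lu bits per page)\n",
               (long long)start_bit, (unsigned long long)page,
               byte_in_page, bit_in_byte, PAGE_SIZE * 8);
        return 0;
}
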
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c35e78..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
104 unsigned int kp_ofs; 104 unsigned int kp_ofs;
105 105
106 ntfs_debug("Zeroing page region outside initialized size."); 106 ntfs_debug("Zeroing page region outside initialized size.");
107 if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { 107 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
108 /*
109 * FIXME: Using clear_page() will become wrong when we get
110 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
111 */
112 clear_page(kp); 108 clear_page(kp);
113 return; 109 return;
114 } 110 }
115 kp_ofs = initialized_size & ~PAGE_CACHE_MASK; 111 kp_ofs = initialized_size & ~PAGE_MASK;
116 memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); 112 memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
117 return; 113 return;
118 } 114 }
119 115
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
123static inline void handle_bounds_compressed_page(struct page *page, 119static inline void handle_bounds_compressed_page(struct page *page,
124 const loff_t i_size, const s64 initialized_size) 120 const loff_t i_size, const s64 initialized_size)
125{ 121{
126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && 122 if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
127 (initialized_size < i_size)) 123 (initialized_size < i_size))
128 zero_partial_compressed_page(page, initialized_size); 124 zero_partial_compressed_page(page, initialized_size);
129 return; 125 return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
160 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was 156 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
161 * completed during the decompression of the compression block (@cb_start). 157 * completed during the decompression of the compression block (@cb_start).
162 * 158 *
163 * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up 159 * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
164 unpredictably! You have been warned! 160 unpredictably! You have been warned!
165 * 161 *
166 * Note to hackers: This function may not sleep until it has finished accessing 162 * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
241 if (di == xpage) 237 if (di == xpage)
242 *xpage_done = 1; 238 *xpage_done = 1;
243 else 239 else
244 page_cache_release(dp); 240 put_page(dp);
245 dest_pages[di] = NULL; 241 dest_pages[di] = NULL;
246 } 242 }
247 } 243 }
@@ -274,7 +270,7 @@ return_error:
274 cb = cb_sb_end; 270 cb = cb_sb_end;
275 271
276 /* Advance destination position to next sub-block. */ 272 /* Advance destination position to next sub-block. */
277 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK; 273 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
278 if (!*dest_ofs && (++*dest_index > dest_max_index)) 274 if (!*dest_ofs && (++*dest_index > dest_max_index))
279 goto return_overflow; 275 goto return_overflow;
280 goto do_next_sb; 276 goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
301 297
302 /* Advance destination position to next sub-block. */ 298 /* Advance destination position to next sub-block. */
303 *dest_ofs += NTFS_SB_SIZE; 299 *dest_ofs += NTFS_SB_SIZE;
304 if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) { 300 if (!(*dest_ofs &= ~PAGE_MASK)) {
305 finalize_page: 301 finalize_page:
306 /* 302 /*
307 * First stage: add current page index to array of 303 * First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
335 *dest_ofs += nr_bytes; 331 *dest_ofs += nr_bytes;
336 } 332 }
337 /* We have finished the current sub-block. */ 333 /* We have finished the current sub-block. */
338 if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) 334 if (!(*dest_ofs &= ~PAGE_MASK))
339 goto finalize_page; 335 goto finalize_page;
340 goto do_next_sb; 336 goto do_next_sb;
341 } 337 }
@@ -462,7 +458,7 @@ return_overflow:
462 * have been written to so that we would lose data if we were to just overwrite 458 * have been written to so that we would lose data if we were to just overwrite
463 * them with the out-of-date uncompressed data. 459 * them with the out-of-date uncompressed data.
464 * 460 *
465 * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at 461 * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
466 * the end of the file I think. We need to detect this case and zero the out 462 * the end of the file I think. We need to detect this case and zero the out
467 * of bounds remainder of the page in question and mark it as handled. At the 463 * of bounds remainder of the page in question and mark it as handled. At the
468 * moment we would just return -EIO on such a page. This bug will only become 464 * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
470 * clusters so is probably not going to be seen by anyone. Still this should 466 * clusters so is probably not going to be seen by anyone. Still this should
471 * be fixed. (AIA) 467 * be fixed. (AIA)
472 * 468 *
473 * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in 469 * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
474 * handling sparse and compressed cbs. (AIA) 470 * handling sparse and compressed cbs. (AIA)
475 * 471 *
476 * FIXME: At the moment we don't do any zeroing out in the case that 472 * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
497 u64 cb_size_mask = cb_size - 1UL; 493 u64 cb_size_mask = cb_size - 1UL;
498 VCN vcn; 494 VCN vcn;
499 LCN lcn; 495 LCN lcn;
500 /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ 496 /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
501 VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> 497 VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
502 vol->cluster_size_bits; 498 vol->cluster_size_bits;
503 /* 499 /*
504 * The first vcn after the last wanted vcn (minimum alignment is again 500 * The first vcn after the last wanted vcn (minimum alignment is again
505 * PAGE_CACHE_SIZE). 501 * PAGE_SIZE).
506 */ 502 */
507 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) 503 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
508 & ~cb_size_mask) >> vol->cluster_size_bits; 504 & ~cb_size_mask) >> vol->cluster_size_bits;
509 /* Number of compression blocks (cbs) in the wanted vcn range. */ 505 /* Number of compression blocks (cbs) in the wanted vcn range. */
510 unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits 506 unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
515 * guarantees of start_vcn and end_vcn, no need to round up here. 511 * guarantees of start_vcn and end_vcn, no need to round up here.
516 */ 512 */
517 unsigned int nr_pages = (end_vcn - start_vcn) << 513 unsigned int nr_pages = (end_vcn - start_vcn) <<
518 vol->cluster_size_bits >> PAGE_CACHE_SHIFT; 514 vol->cluster_size_bits >> PAGE_SHIFT;
519 unsigned int xpage, max_page, cur_page, cur_ofs, i; 515 unsigned int xpage, max_page, cur_page, cur_ofs, i;
520 unsigned int cb_clusters, cb_max_ofs; 516 unsigned int cb_clusters, cb_max_ofs;
521 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0; 517 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
549 * We have already been given one page, this is the one we must do. 545 * We have already been given one page, this is the one we must do.
550 * Once again, the alignment guarantees keep it simple. 546 * Once again, the alignment guarantees keep it simple.
551 */ 547 */
552 offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; 548 offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
553 xpage = index - offset; 549 xpage = index - offset;
554 pages[xpage] = page; 550 pages[xpage] = page;
555 /* 551 /*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
560 i_size = i_size_read(VFS_I(ni)); 556 i_size = i_size_read(VFS_I(ni));
561 initialized_size = ni->initialized_size; 557 initialized_size = ni->initialized_size;
562 read_unlock_irqrestore(&ni->size_lock, flags); 558 read_unlock_irqrestore(&ni->size_lock, flags);
563 max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 559 max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
564 offset; 560 offset;
565 /* Is the page fully outside i_size? (truncate in progress) */ 561 /* Is the page fully outside i_size? (truncate in progress) */
566 if (xpage >= max_page) { 562 if (xpage >= max_page) {
567 kfree(bhs); 563 kfree(bhs);
568 kfree(pages); 564 kfree(pages);
569 zero_user(page, 0, PAGE_CACHE_SIZE); 565 zero_user(page, 0, PAGE_SIZE);
570 ntfs_debug("Compressed read outside i_size - truncated?"); 566 ntfs_debug("Compressed read outside i_size - truncated?");
571 SetPageUptodate(page); 567 SetPageUptodate(page);
572 unlock_page(page); 568 unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
591 continue; 587 continue;
592 } 588 }
593 unlock_page(page); 589 unlock_page(page);
594 page_cache_release(page); 590 put_page(page);
595 pages[i] = NULL; 591 pages[i] = NULL;
596 } 592 }
597 } 593 }
@@ -735,9 +731,9 @@ lock_retry_remap:
735 ntfs_debug("Successfully read the compression block."); 731 ntfs_debug("Successfully read the compression block.");
736 732
737 /* The last page and maximum offset within it for the current cb. */ 733 /* The last page and maximum offset within it for the current cb. */
738 cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size; 734 cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
739 cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK; 735 cb_max_ofs = cb_max_page & ~PAGE_MASK;
740 cb_max_page >>= PAGE_CACHE_SHIFT; 736 cb_max_page >>= PAGE_SHIFT;
741 737
742 /* Catch end of file inside a compression block. */ 738 /* Catch end of file inside a compression block. */
743 if (cb_max_page > max_page) 739 if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
753 for (; cur_page < cb_max_page; cur_page++) { 749 for (; cur_page < cb_max_page; cur_page++) {
754 page = pages[cur_page]; 750 page = pages[cur_page];
755 if (page) { 751 if (page) {
756 /*
757 * FIXME: Using clear_page() will become wrong
758 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
759 * for now there is no problem.
760 */
761 if (likely(!cur_ofs)) 752 if (likely(!cur_ofs))
762 clear_page(page_address(page)); 753 clear_page(page_address(page));
763 else 754 else
764 memset(page_address(page) + cur_ofs, 0, 755 memset(page_address(page) + cur_ofs, 0,
765 PAGE_CACHE_SIZE - 756 PAGE_SIZE -
766 cur_ofs); 757 cur_ofs);
767 flush_dcache_page(page); 758 flush_dcache_page(page);
768 kunmap(page); 759 kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
771 if (cur_page == xpage) 762 if (cur_page == xpage)
772 xpage_done = 1; 763 xpage_done = 1;
773 else 764 else
774 page_cache_release(page); 765 put_page(page);
775 pages[cur_page] = NULL; 766 pages[cur_page] = NULL;
776 } 767 }
777 cb_pos += PAGE_CACHE_SIZE - cur_ofs; 768 cb_pos += PAGE_SIZE - cur_ofs;
778 cur_ofs = 0; 769 cur_ofs = 0;
779 if (cb_pos >= cb_end) 770 if (cb_pos >= cb_end)
780 break; 771 break;
@@ -807,7 +798,7 @@ lock_retry_remap:
807 * synchronous io for the majority of pages. 798 * synchronous io for the majority of pages.
808 * Or if we choose not to do the read-ahead/-behind stuff, we 799 * Or if we choose not to do the read-ahead/-behind stuff, we
809 * could just return block_read_full_page(pages[xpage]) as long 800 * could just return block_read_full_page(pages[xpage]) as long
810 * as PAGE_CACHE_SIZE <= cb_size. 801 * as PAGE_SIZE <= cb_size.
811 */ 802 */
812 if (cb_max_ofs) 803 if (cb_max_ofs)
813 cb_max_page--; 804 cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
816 page = pages[cur_page]; 807 page = pages[cur_page];
817 if (page) 808 if (page)
818 memcpy(page_address(page) + cur_ofs, cb_pos, 809 memcpy(page_address(page) + cur_ofs, cb_pos,
819 PAGE_CACHE_SIZE - cur_ofs); 810 PAGE_SIZE - cur_ofs);
820 cb_pos += PAGE_CACHE_SIZE - cur_ofs; 811 cb_pos += PAGE_SIZE - cur_ofs;
821 cur_ofs = 0; 812 cur_ofs = 0;
822 if (cb_pos >= cb_end) 813 if (cb_pos >= cb_end)
823 break; 814 break;
@@ -850,10 +841,10 @@ lock_retry_remap:
850 if (cur2_page == xpage) 841 if (cur2_page == xpage)
851 xpage_done = 1; 842 xpage_done = 1;
852 else 843 else
853 page_cache_release(page); 844 put_page(page);
854 pages[cur2_page] = NULL; 845 pages[cur2_page] = NULL;
855 } 846 }
856 cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; 847 cb_pos2 += PAGE_SIZE - cur_ofs2;
857 cur_ofs2 = 0; 848 cur_ofs2 = 0;
858 if (cb_pos2 >= cb_end) 849 if (cb_pos2 >= cb_end)
859 break; 850 break;
@@ -884,7 +875,7 @@ lock_retry_remap:
884 kunmap(page); 875 kunmap(page);
885 unlock_page(page); 876 unlock_page(page);
886 if (prev_cur_page != xpage) 877 if (prev_cur_page != xpage)
887 page_cache_release(page); 878 put_page(page);
888 pages[prev_cur_page] = NULL; 879 pages[prev_cur_page] = NULL;
889 } 880 }
890 } 881 }
@@ -914,7 +905,7 @@ lock_retry_remap:
914 kunmap(page); 905 kunmap(page);
915 unlock_page(page); 906 unlock_page(page);
916 if (cur_page != xpage) 907 if (cur_page != xpage)
917 page_cache_release(page); 908 put_page(page);
918 pages[cur_page] = NULL; 909 pages[cur_page] = NULL;
919 } 910 }
920 } 911 }
@@ -961,7 +952,7 @@ err_out:
961 kunmap(page); 952 kunmap(page);
962 unlock_page(page); 953 unlock_page(page);
963 if (i != xpage) 954 if (i != xpage)
964 page_cache_release(page); 955 put_page(page);
965 } 956 }
966 } 957 }
967 kfree(pages); 958 kfree(pages);
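
In compress.c the end of each compression block is computed as a page index plus an in-page offset, using exactly the shift-and-mask pattern the hunks rename. A standalone restatement of that computation with example numbers (all values invented):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1U << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned cur_page = 2, cur_ofs = 1024, cb_size = 16384;
        /* End of the current compression block, as page + offset. */
        unsigned cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
        unsigned cb_max_ofs = cb_max_page & ~PAGE_MASK;
        cb_max_page >>= PAGE_SHIFT;
        printf("cb ends at page %u, offset %u\n", cb_max_page, cb_max_ofs);
        return 0;
}
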
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index b2eff5816adc..a18613579001 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,11 +315,11 @@ found_it:
315 descend_into_child_node: 315 descend_into_child_node:
316 /* 316 /*
317 * Convert vcn to index into the index allocation attribute in units 317 * Convert vcn to index into the index allocation attribute in units
318 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 318 * of PAGE_SIZE and map the page cache page, reading it from
319 * disk if necessary. 319 * disk if necessary.
320 */ 320 */
321 page = ntfs_map_page(ia_mapping, vcn << 321 page = ntfs_map_page(ia_mapping, vcn <<
322 dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 322 dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
323 if (IS_ERR(page)) { 323 if (IS_ERR(page)) {
324 ntfs_error(sb, "Failed to map directory index page, error %ld.", 324 ntfs_error(sb, "Failed to map directory index page, error %ld.",
325 -PTR_ERR(page)); 325 -PTR_ERR(page));
@@ -331,9 +331,9 @@ descend_into_child_node:
331 fast_descend_into_child_node: 331 fast_descend_into_child_node:
332 /* Get to the index allocation block. */ 332 /* Get to the index allocation block. */
333 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 333 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
334 dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 334 dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
335 /* Bounds checks. */ 335 /* Bounds checks. */
336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
337 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 337 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
338 "inode 0x%lx or driver bug.", dir_ni->mft_no); 338 "inode 0x%lx or driver bug.", dir_ni->mft_no);
339 goto unm_err_out; 339 goto unm_err_out;
@@ -366,7 +366,7 @@ fast_descend_into_child_node:
366 goto unm_err_out; 366 goto unm_err_out;
367 } 367 }
368 index_end = (u8*)ia + dir_ni->itype.index.block_size; 368 index_end = (u8*)ia + dir_ni->itype.index.block_size;
369 if (index_end > kaddr + PAGE_CACHE_SIZE) { 369 if (index_end > kaddr + PAGE_SIZE) {
370 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 370 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
371 "0x%lx crosses page boundary. Impossible! " 371 "0x%lx crosses page boundary. Impossible! "
372 "Cannot access! This is probably a bug in the " 372 "Cannot access! This is probably a bug in the "
@@ -559,9 +559,9 @@ found_it2:
559 /* If vcn is in the same page cache page as old_vcn we 559 /* If vcn is in the same page cache page as old_vcn we
560 * recycle the mapped page. */ 560 * recycle the mapped page. */
561 if (old_vcn << vol->cluster_size_bits >> 561 if (old_vcn << vol->cluster_size_bits >>
562 PAGE_CACHE_SHIFT == vcn << 562 PAGE_SHIFT == vcn <<
563 vol->cluster_size_bits >> 563 vol->cluster_size_bits >>
564 PAGE_CACHE_SHIFT) 564 PAGE_SHIFT)
565 goto fast_descend_into_child_node; 565 goto fast_descend_into_child_node;
566 unlock_page(page); 566 unlock_page(page);
567 ntfs_unmap_page(page); 567 ntfs_unmap_page(page);
@@ -793,11 +793,11 @@ found_it:
793 descend_into_child_node: 793 descend_into_child_node:
794 /* 794 /*
795 * Convert vcn to index into the index allocation attribute in units 795 * Convert vcn to index into the index allocation attribute in units
796 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 796 * of PAGE_SIZE and map the page cache page, reading it from
797 * disk if necessary. 797 * disk if necessary.
798 */ 798 */
799 page = ntfs_map_page(ia_mapping, vcn << 799 page = ntfs_map_page(ia_mapping, vcn <<
800 dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 800 dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
801 if (IS_ERR(page)) { 801 if (IS_ERR(page)) {
802 ntfs_error(sb, "Failed to map directory index page, error %ld.", 802 ntfs_error(sb, "Failed to map directory index page, error %ld.",
803 -PTR_ERR(page)); 803 -PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
809 fast_descend_into_child_node: 809 fast_descend_into_child_node:
810 /* Get to the index allocation block. */ 810 /* Get to the index allocation block. */
811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
812 dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 812 dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
813 /* Bounds checks. */ 813 /* Bounds checks. */
814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
816 "inode 0x%lx or driver bug.", dir_ni->mft_no); 816 "inode 0x%lx or driver bug.", dir_ni->mft_no);
817 goto unm_err_out; 817 goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
844 goto unm_err_out; 844 goto unm_err_out;
845 } 845 }
846 index_end = (u8*)ia + dir_ni->itype.index.block_size; 846 index_end = (u8*)ia + dir_ni->itype.index.block_size;
847 if (index_end > kaddr + PAGE_CACHE_SIZE) { 847 if (index_end > kaddr + PAGE_SIZE) {
848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
849 "0x%lx crosses page boundary. Impossible! " 849 "0x%lx crosses page boundary. Impossible! "
850 "Cannot access! This is probably a bug in the " 850 "Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
968 /* If vcn is in the same page cache page as old_vcn we 968 /* If vcn is in the same page cache page as old_vcn we
969 * recycle the mapped page. */ 969 * recycle the mapped page. */
970 if (old_vcn << vol->cluster_size_bits >> 970 if (old_vcn << vol->cluster_size_bits >>
971 PAGE_CACHE_SHIFT == vcn << 971 PAGE_SHIFT == vcn <<
972 vol->cluster_size_bits >> 972 vol->cluster_size_bits >>
973 PAGE_CACHE_SHIFT) 973 PAGE_SHIFT)
974 goto fast_descend_into_child_node; 974 goto fast_descend_into_child_node;
975 unlock_page(page); 975 unlock_page(page);
976 ntfs_unmap_page(page); 976 ntfs_unmap_page(page);
@@ -1246,15 +1246,15 @@ skip_index_root:
1246 goto iput_err_out; 1246 goto iput_err_out;
1247 } 1247 }
1248 /* Get the starting bit position in the current bitmap page. */ 1248 /* Get the starting bit position in the current bitmap page. */
1249 cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); 1249 cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
1250 bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); 1250 bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
1251 get_next_bmp_page: 1251 get_next_bmp_page:
1252 ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx", 1252 ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
1253 (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT), 1253 (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
1254 (unsigned long long)bmp_pos & 1254 (unsigned long long)bmp_pos &
1255 (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); 1255 (unsigned long long)((PAGE_SIZE * 8) - 1));
1256 bmp_page = ntfs_map_page(bmp_mapping, 1256 bmp_page = ntfs_map_page(bmp_mapping,
1257 bmp_pos >> (3 + PAGE_CACHE_SHIFT)); 1257 bmp_pos >> (3 + PAGE_SHIFT));
1258 if (IS_ERR(bmp_page)) { 1258 if (IS_ERR(bmp_page)) {
1259 ntfs_error(sb, "Reading index bitmap failed."); 1259 ntfs_error(sb, "Reading index bitmap failed.");
1260 err = PTR_ERR(bmp_page); 1260 err = PTR_ERR(bmp_page);
1270 find_next_index_buffer: 1270 find_next_index_buffer:
1270 * If we have reached the end of the bitmap page, get the next 1270 * If we have reached the end of the bitmap page, get the next
1271 * page, and put away the old one. 1271 * page, and put away the old one.
1272 */ 1272 */
1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { 1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
1274 ntfs_unmap_page(bmp_page); 1274 ntfs_unmap_page(bmp_page);
1275 bmp_pos += PAGE_CACHE_SIZE * 8; 1275 bmp_pos += PAGE_SIZE * 8;
1276 cur_bmp_pos = 0; 1276 cur_bmp_pos = 0;
1277 goto get_next_bmp_page; 1277 goto get_next_bmp_page;
1278 } 1278 }
@@ -1285,8 +1285,8 @@ find_next_index_buffer:
1285 ntfs_debug("Handling index buffer 0x%llx.", 1285 ntfs_debug("Handling index buffer 0x%llx.",
1286 (unsigned long long)bmp_pos + cur_bmp_pos); 1286 (unsigned long long)bmp_pos + cur_bmp_pos);
1287 /* If the current index buffer is in the same page we reuse the page. */ 1287 /* If the current index buffer is in the same page we reuse the page. */
1288 if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) != 1288 if ((prev_ia_pos & (s64)PAGE_MASK) !=
1289 (ia_pos & (s64)PAGE_CACHE_MASK)) { 1289 (ia_pos & (s64)PAGE_MASK)) {
1290 prev_ia_pos = ia_pos; 1290 prev_ia_pos = ia_pos;
1291 if (likely(ia_page != NULL)) { 1291 if (likely(ia_page != NULL)) {
1292 unlock_page(ia_page); 1292 unlock_page(ia_page);
@@ -1296,7 +1296,7 @@ find_next_index_buffer:
1296 * Map the page cache page containing the current ia_pos, 1296 * Map the page cache page containing the current ia_pos,
1297 * reading it from disk if necessary. 1297 * reading it from disk if necessary.
1298 */ 1298 */
1299 ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT); 1299 ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
1300 if (IS_ERR(ia_page)) { 1300 if (IS_ERR(ia_page)) {
1301 ntfs_error(sb, "Reading index allocation data failed."); 1301 ntfs_error(sb, "Reading index allocation data failed.");
1302 err = PTR_ERR(ia_page); 1302 err = PTR_ERR(ia_page);
@@ -1307,10 +1307,10 @@ find_next_index_buffer:
1307 kaddr = (u8*)page_address(ia_page); 1307 kaddr = (u8*)page_address(ia_page);
1308 } 1308 }
1309 /* Get the current index buffer. */ 1309 /* Get the current index buffer. */
1310 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & 1310 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
1311 ~(s64)(ndir->itype.index.block_size - 1))); 1311 ~(s64)(ndir->itype.index.block_size - 1)));
1312 /* Bounds checks. */ 1312 /* Bounds checks. */
1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { 1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
1314 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 1314 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
1315 "inode 0x%lx or driver bug.", vdir->i_ino); 1315 "inode 0x%lx or driver bug.", vdir->i_ino);
1316 goto err_out; 1316 goto err_out;
@@ -1348,7 +1348,7 @@ find_next_index_buffer:
1348 goto err_out; 1348 goto err_out;
1349 } 1349 }
1350 index_end = (u8*)ia + ndir->itype.index.block_size; 1350 index_end = (u8*)ia + ndir->itype.index.block_size;
1351 if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) { 1351 if (unlikely(index_end > kaddr + PAGE_SIZE)) {
1352 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 1352 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
1353 "0x%lx crosses page boundary. Impossible! " 1353 "0x%lx crosses page boundary. Impossible! "
1354 "Cannot access! This is probably a bug in the " 1354 "Cannot access! This is probably a bug in the "
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index bed4d427dfae..91117ada8528 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -220,8 +220,8 @@ do_non_resident_extend:
220 m = NULL; 220 m = NULL;
221 } 221 }
222 mapping = vi->i_mapping; 222 mapping = vi->i_mapping;
223 index = old_init_size >> PAGE_CACHE_SHIFT; 223 index = old_init_size >> PAGE_SHIFT;
224 end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 224 end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
225 do { 225 do {
226 /* 226 /*
227 * Read the page. If the page is not present, this will zero 227 * Read the page. If the page is not present, this will zero
@@ -233,7 +233,7 @@ do_non_resident_extend:
233 goto init_err_out; 233 goto init_err_out;
234 } 234 }
235 if (unlikely(PageError(page))) { 235 if (unlikely(PageError(page))) {
236 page_cache_release(page); 236 put_page(page);
237 err = -EIO; 237 err = -EIO;
238 goto init_err_out; 238 goto init_err_out;
239 } 239 }
@@ -242,13 +242,13 @@ do_non_resident_extend:
242 * enough to make ntfs_writepage() work. 242 * enough to make ntfs_writepage() work.
243 */ 243 */
244 write_lock_irqsave(&ni->size_lock, flags); 244 write_lock_irqsave(&ni->size_lock, flags);
245 ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT; 245 ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
246 if (ni->initialized_size > new_init_size) 246 if (ni->initialized_size > new_init_size)
247 ni->initialized_size = new_init_size; 247 ni->initialized_size = new_init_size;
248 write_unlock_irqrestore(&ni->size_lock, flags); 248 write_unlock_irqrestore(&ni->size_lock, flags);
249 /* Set the page dirty so it gets written out. */ 249 /* Set the page dirty so it gets written out. */
250 set_page_dirty(page); 250 set_page_dirty(page);
251 page_cache_release(page); 251 put_page(page);
252 /* 252 /*
253 * Play nice with the vm and the rest of the system. This is 253 * Play nice with the vm and the rest of the system. This is
254 * very much needed as we can potentially be modifying the 254 * very much needed as we can potentially be modifying the
@@ -543,7 +543,7 @@ out:
543 err_out: 543 err_out:
544 while (nr > 0) { 544 while (nr > 0) {
545 unlock_page(pages[--nr]); 545 unlock_page(pages[--nr]);
546 page_cache_release(pages[nr]); 546 put_page(pages[nr]);
547 } 547 }
548 goto out; 548 goto out;
549 } 549 }
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
573 * only partially being written to. 573 * only partially being written to.
574 * 574 *
575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is 575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
576 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside 576 * greater than PAGE_SIZE, that all pages in @pages are entirely inside
577 * the same cluster and that they are the entirety of that cluster, and that 577 * the same cluster and that they are the entirety of that cluster, and that
578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
579 * 579 *
@@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
653 u = 0; 653 u = 0;
654 do_next_page: 654 do_next_page:
655 page = pages[u]; 655 page = pages[u];
656 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 656 bh_pos = (s64)page->index << PAGE_SHIFT;
657 bh = head = page_buffers(page); 657 bh = head = page_buffers(page);
658 do { 658 do {
659 VCN cdelta; 659 VCN cdelta;
@@ -810,11 +810,11 @@ map_buffer_cached:
810 810
811 kaddr = kmap_atomic(page); 811 kaddr = kmap_atomic(page);
812 if (bh_pos < pos) { 812 if (bh_pos < pos) {
813 pofs = bh_pos & ~PAGE_CACHE_MASK; 813 pofs = bh_pos & ~PAGE_MASK;
814 memset(kaddr + pofs, 0, pos - bh_pos); 814 memset(kaddr + pofs, 0, pos - bh_pos);
815 } 815 }
816 if (bh_end > end) { 816 if (bh_end > end) {
817 pofs = end & ~PAGE_CACHE_MASK; 817 pofs = end & ~PAGE_MASK;
818 memset(kaddr + pofs, 0, bh_end - end); 818 memset(kaddr + pofs, 0, bh_end - end);
819 } 819 }
820 kunmap_atomic(kaddr); 820 kunmap_atomic(kaddr);
@@ -942,7 +942,7 @@ rl_not_mapped_enoent:
942 * unmapped. This can only happen when the cluster size is 942 * unmapped. This can only happen when the cluster size is
943 * less than the page cache size. 943 * less than the page cache size.
944 */ 944 */
945 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { 945 if (unlikely(vol->cluster_size < PAGE_SIZE)) {
946 bh_cend = (bh_end + vol->cluster_size - 1) >> 946 bh_cend = (bh_end + vol->cluster_size - 1) >>
947 vol->cluster_size_bits; 947 vol->cluster_size_bits;
948 if ((bh_cend <= cpos || bh_cpos >= cend)) { 948 if ((bh_cend <= cpos || bh_cpos >= cend)) {
@@ -1208,7 +1208,7 @@ rl_not_mapped_enoent:
1208 wait_on_buffer(bh); 1208 wait_on_buffer(bh);
1209 if (likely(buffer_uptodate(bh))) { 1209 if (likely(buffer_uptodate(bh))) {
1210 page = bh->b_page; 1210 page = bh->b_page;
1211 bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + 1211 bh_pos = ((s64)page->index << PAGE_SHIFT) +
1212 bh_offset(bh); 1212 bh_offset(bh);
1213 /* 1213 /*
1214 * If the buffer overflows the initialized size, need 1214 * If the buffer overflows the initialized size, need
@@ -1350,7 +1350,7 @@ rl_not_mapped_enoent:
1350 bh = head = page_buffers(page); 1350 bh = head = page_buffers(page);
1351 do { 1351 do {
1352 if (u == nr_pages && 1352 if (u == nr_pages &&
1353 ((s64)page->index << PAGE_CACHE_SHIFT) + 1353 ((s64)page->index << PAGE_SHIFT) +
1354 bh_offset(bh) >= end) 1354 bh_offset(bh) >= end)
1355 break; 1355 break;
1356 if (!buffer_new(bh)) 1356 if (!buffer_new(bh))
@@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
1422 bool partial; 1422 bool partial;
1423 1423
1424 page = pages[u]; 1424 page = pages[u];
1425 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 1425 bh_pos = (s64)page->index << PAGE_SHIFT;
1426 bh = head = page_buffers(page); 1426 bh = head = page_buffers(page);
1427 partial = false; 1427 partial = false;
1428 do { 1428 do {
@@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
1639 if (end < attr_len) 1639 if (end < attr_len)
1640 memcpy(kaddr + end, kattr + end, attr_len - end); 1640 memcpy(kaddr + end, kattr + end, attr_len - end);
1641 /* Zero the region outside the end of the attribute value. */ 1641 /* Zero the region outside the end of the attribute value. */
1642 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1642 memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
1643 flush_dcache_page(page); 1643 flush_dcache_page(page);
1644 SetPageUptodate(page); 1644 SetPageUptodate(page);
1645 } 1645 }
@@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
1706 unsigned len, copied; 1706 unsigned len, copied;
1707 1707
1708 do { 1708 do {
1709 len = PAGE_CACHE_SIZE - ofs; 1709 len = PAGE_SIZE - ofs;
1710 if (len > bytes) 1710 if (len > bytes)
1711 len = bytes; 1711 len = bytes;
1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, 1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
@@ -1724,14 +1724,14 @@ out:
1724 return total; 1724 return total;
1725 err: 1725 err:
1726 /* Zero the rest of the target like __copy_from_user(). */ 1726 /* Zero the rest of the target like __copy_from_user(). */
1727 len = PAGE_CACHE_SIZE - copied; 1727 len = PAGE_SIZE - copied;
1728 do { 1728 do {
1729 if (len > bytes) 1729 if (len > bytes)
1730 len = bytes; 1730 len = bytes;
1731 zero_user(*pages, copied, len); 1731 zero_user(*pages, copied, len);
1732 bytes -= len; 1732 bytes -= len;
1733 copied = 0; 1733 copied = 0;
1734 len = PAGE_CACHE_SIZE; 1734 len = PAGE_SIZE;
1735 } while (++pages < last_page); 1735 } while (++pages < last_page);
1736 goto out; 1736 goto out;
1737 } 1737 }
@@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1787 * attributes. 1787 * attributes.
1788 */ 1788 */
1789 nr_pages = 1; 1789 nr_pages = 1;
1790 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) 1790 if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
1791 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; 1791 nr_pages = vol->cluster_size >> PAGE_SHIFT;
1792 last_vcn = -1; 1792 last_vcn = -1;
1793 do { 1793 do {
1794 VCN vcn; 1794 VCN vcn;
@@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1796 unsigned ofs, do_pages, u; 1796 unsigned ofs, do_pages, u;
1797 size_t copied; 1797 size_t copied;
1798 1798
1799 start_idx = idx = pos >> PAGE_CACHE_SHIFT; 1799 start_idx = idx = pos >> PAGE_SHIFT;
1800 ofs = pos & ~PAGE_CACHE_MASK; 1800 ofs = pos & ~PAGE_MASK;
1801 bytes = PAGE_CACHE_SIZE - ofs; 1801 bytes = PAGE_SIZE - ofs;
1802 do_pages = 1; 1802 do_pages = 1;
1803 if (nr_pages > 1) { 1803 if (nr_pages > 1) {
1804 vcn = pos >> vol->cluster_size_bits; 1804 vcn = pos >> vol->cluster_size_bits;
@@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1832 if (lcn == LCN_HOLE) { 1832 if (lcn == LCN_HOLE) {
1833 start_idx = (pos & ~(s64) 1833 start_idx = (pos & ~(s64)
1834 vol->cluster_size_mask) 1834 vol->cluster_size_mask)
1835 >> PAGE_CACHE_SHIFT; 1835 >> PAGE_SHIFT;
1836 bytes = vol->cluster_size - (pos & 1836 bytes = vol->cluster_size - (pos &
1837 vol->cluster_size_mask); 1837 vol->cluster_size_mask);
1838 do_pages = nr_pages; 1838 do_pages = nr_pages;
@@ -1871,12 +1871,12 @@ again:
1871 if (unlikely(status)) { 1871 if (unlikely(status)) {
1872 do { 1872 do {
1873 unlock_page(pages[--do_pages]); 1873 unlock_page(pages[--do_pages]);
1874 page_cache_release(pages[do_pages]); 1874 put_page(pages[do_pages]);
1875 } while (do_pages); 1875 } while (do_pages);
1876 break; 1876 break;
1877 } 1877 }
1878 } 1878 }
1879 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; 1879 u = (pos >> PAGE_SHIFT) - pages[0]->index;
1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, 1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
1881 i, bytes); 1881 i, bytes);
1882 ntfs_flush_dcache_pages(pages + u, do_pages - u); 1882 ntfs_flush_dcache_pages(pages + u, do_pages - u);
@@ -1889,7 +1889,7 @@ again:
1889 } 1889 }
1890 do { 1890 do {
1891 unlock_page(pages[--do_pages]); 1891 unlock_page(pages[--do_pages]);
1892 page_cache_release(pages[do_pages]); 1892 put_page(pages[do_pages]);
1893 } while (do_pages); 1893 } while (do_pages);
1894 if (unlikely(status < 0)) 1894 if (unlikely(status < 0))
1895 break; 1895 break;
@@ -1921,7 +1921,7 @@ again:
1921 } 1921 }
1922 } while (iov_iter_count(i)); 1922 } while (iov_iter_count(i));
1923 if (cached_page) 1923 if (cached_page)
1924 page_cache_release(cached_page); 1924 put_page(cached_page);
1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", 1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
1926 written ? "written" : "status", (unsigned long)written, 1926 written ? "written" : "status", (unsigned long)written,
1927 (long)status); 1927 (long)status);
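
ntfs_perform_write() sizes its page batch from the cluster size, so a volume whose clusters exceed one page locks several pages at once, as the hunk around nr_pages shows. A hedged sketch of that sizing rule (the 64 KiB cluster is an example, not a value from the commit):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1U << PAGE_SHIFT)

int main(void)
{
        /* Example volume with 64 KiB clusters, larger than a page. */
        unsigned cluster_size = 65536;
        unsigned nr_pages = 1;
        if (cluster_size > PAGE_SIZE)    /* non-resident case in the hunk */
                nr_pages = cluster_size >> PAGE_SHIFT;
        printf("write path locks %u page(s) per batch\n", nr_pages);
        return 0;
}
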
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 096c135691ae..0d645f357930 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,11 +272,11 @@ done:
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
-			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+			idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ntfs_error(sb, "Failed to map index page, error %ld.",
 				-PTR_ERR(page));
@@ -288,9 +288,9 @@ descend_into_child_node:
 fast_descend_into_child_node:
 	/* Get to the index allocation block. */
 	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+			idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
 	/* Bounds checks. */
-	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
 				"0x%lx or driver bug.", idx_ni->mft_no);
 		goto unm_err_out;
@@ -323,7 +323,7 @@ fast_descend_into_child_node:
 		goto unm_err_out;
 	}
 	index_end = (u8*)ia + idx_ni->itype.index.block_size;
-	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+	if (index_end > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
 				"crosses page boundary. Impossible! Cannot "
 				"access! This is probably a bug in the "
@@ -427,9 +427,9 @@ ia_done:
 	 * the mapped page.
 	 */
 	if (old_vcn << vol->cluster_size_bits >>
-			PAGE_CACHE_SHIFT == vcn <<
+			PAGE_SHIFT == vcn <<
 			vol->cluster_size_bits >>
-			PAGE_CACHE_SHIFT)
+			PAGE_SHIFT)
 		goto fast_descend_into_child_node;
 	unlock_page(page);
 	ntfs_unmap_page(page);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d284f07eda77..f40972d6df90 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -868,12 +868,12 @@ skip_attr_list_load:
 					ni->itype.index.block_size);
 			goto unm_err_out;
 		}
-		if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
+		if (ni->itype.index.block_size > PAGE_SIZE) {
 			ntfs_error(vi->i_sb, "Index block size (%u) > "
-					"PAGE_CACHE_SIZE (%ld) is not "
+					"PAGE_SIZE (%ld) is not "
 					"supported. Sorry.",
 					ni->itype.index.block_size,
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 			err = -EOPNOTSUPP;
 			goto unm_err_out;
 		}
@@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
 				"two.", ni->itype.index.block_size);
 		goto unm_err_out;
 	}
-	if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
-		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+	if (ni->itype.index.block_size > PAGE_SIZE) {
+		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
 				"(%ld) is not supported. Sorry.",
-				ni->itype.index.block_size, PAGE_CACHE_SIZE);
+				ni->itype.index.block_size, PAGE_SIZE);
 		err = -EOPNOTSUPP;
 		goto unm_err_out;
 	}
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 1711b710b641..27a24a42f712 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
 			ntfs_unmap_page(page);
 		}
 		page = ntfs_map_page(mapping, last_read_pos >>
-				PAGE_CACHE_SHIFT);
+				PAGE_SHIFT);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
 			ntfs_error(vol->sb, "Failed to map page.");
 			goto out;
 		}
-		buf_size = last_read_pos & ~PAGE_CACHE_MASK;
+		buf_size = last_read_pos & ~PAGE_MASK;
 		buf = page_address(page) + buf_size;
-		buf_size = PAGE_CACHE_SIZE - buf_size;
+		buf_size = PAGE_SIZE - buf_size;
 		if (unlikely(last_read_pos + buf_size > i_size))
 			buf_size = i_size - last_read_pos;
 		buf_size <<= 3;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index c71de292c5ad..9d71213ca81e 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
 	 * completely inside @rp, just copy it from there. Otherwise map all
 	 * the required pages and copy the data from them.
 	 */
-	size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK);
+	size = PAGE_SIZE - (pos & ~PAGE_MASK);
 	if (size >= le32_to_cpu(rp->system_page_size)) {
 		memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
 	} else {
@@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
 		/* Copy the remaining data one page at a time. */
 		have_read = size;
 		to_read = le32_to_cpu(rp->system_page_size) - size;
-		idx = (pos + size) >> PAGE_CACHE_SHIFT;
-		BUG_ON((pos + size) & ~PAGE_CACHE_MASK);
+		idx = (pos + size) >> PAGE_SHIFT;
+		BUG_ON((pos + size) & ~PAGE_MASK);
 		do {
 			page = ntfs_map_page(vi->i_mapping, idx);
 			if (IS_ERR(page)) {
@@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
 				err = -EIO;
 				goto err_out;
 			}
-			size = min_t(int, to_read, PAGE_CACHE_SIZE);
+			size = min_t(int, to_read, PAGE_SIZE);
 			memcpy((u8*)trp + have_read, page_address(page), size);
 			ntfs_unmap_page(page);
 			have_read += size;
@@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
 	 * log page size if the page cache size is between the default log page
 	 * size and twice that.
 	 */
-	if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <=
+	if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
 			DefaultLogPageSize * 2)
 		log_page_size = DefaultLogPageSize;
 	else
-		log_page_size = PAGE_CACHE_SIZE;
+		log_page_size = PAGE_SIZE;
 	log_page_mask = log_page_size - 1;
 	/*
 	 * Use ntfs_ffs() instead of ffs() to enable the compiler to
@@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
 	 * to be empty.
 	 */
 	for (pos = 0; pos < size; pos <<= 1) {
-		pgoff_t idx = pos >> PAGE_CACHE_SHIFT;
+		pgoff_t idx = pos >> PAGE_SHIFT;
 		if (!page || page->index != idx) {
 			if (page)
 				ntfs_unmap_page(page);
@@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
 				goto err_out;
 			}
 		}
-		kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);
+		kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
 		/*
 		 * A non-empty block means the logfile is not empty while an
 		 * empty block after a non-empty block has been encountered
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 3014a36a255b..37b2501caaa4 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
 	 * here if the volume was that big...
 	 */
 	index = (u64)ni->mft_no << vol->mft_record_size_bits >>
-			PAGE_CACHE_SHIFT;
-	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+			PAGE_SHIFT;
+	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 
 	i_size = i_size_read(mft_vi);
 	/* The maximum valid index into the page cache for $MFT's data. */
-	end_index = i_size >> PAGE_CACHE_SHIFT;
+	end_index = i_size >> PAGE_SHIFT;
 
 	/* If the wanted index is out of bounds the mft record doesn't exist. */
 	if (unlikely(index >= end_index)) {
-		if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
+		if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
 				vol->mft_record_size) {
 			page = ERR_PTR(-ENOENT);
 			ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
@@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 	}
 	/* Get the page containing the mirror copy of the mft record @m. */
 	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
-			(PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
+			(PAGE_SHIFT - vol->mft_record_size_bits));
 	if (IS_ERR(page)) {
 		ntfs_error(vol->sb, "Failed to map mft mirror page.");
 		err = PTR_ERR(page);
@@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 	BUG_ON(!PageUptodate(page));
 	ClearPageUptodate(page);
 	/* Offset of the mft mirror record inside the page. */
-	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 	/* The address in the page of the mirror copy of the mft record @m. */
 	kmirr = page_address(page) + page_ofs;
 	/* Copy the mst protected mft record to the mirror. */
@@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
 	for (; pass <= 2;) {
 		/* Cap size to pass_end. */
 		ofs = data_pos >> 3;
-		page_ofs = ofs & ~PAGE_CACHE_MASK;
-		size = PAGE_CACHE_SIZE - page_ofs;
+		page_ofs = ofs & ~PAGE_MASK;
+		size = PAGE_SIZE - page_ofs;
 		ll = ((pass_end + 7) >> 3) - ofs;
 		if (size > ll)
 			size = ll;
@@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
 		 */
 		if (size) {
 			page = ntfs_map_page(mftbmp_mapping,
-					ofs >> PAGE_CACHE_SHIFT);
+					ofs >> PAGE_SHIFT);
 			if (IS_ERR(page)) {
 				ntfs_error(vol->sb, "Failed to read mft "
 						"bitmap, aborting.");
@@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
 	 */
 	ll = lcn >> 3;
 	page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
-			ll >> PAGE_CACHE_SHIFT);
+			ll >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		up_write(&mftbmp_ni->runlist.lock);
 		ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
 		return PTR_ERR(page);
 	}
-	b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
+	b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
 	tb = 1 << (lcn & 7ull);
 	down_write(&vol->lcnbmp_lock);
 	if (*b != 0xff && !(*b & tb)) {
@@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
 	 * The index into the page cache and the offset within the page cache
 	 * page of the wanted mft record.
 	 */
-	index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
-	ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
+	ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 	/* The maximum valid index into the page cache for $MFT's data. */
 	i_size = i_size_read(mft_vi);
-	end_index = i_size >> PAGE_CACHE_SHIFT;
+	end_index = i_size >> PAGE_SHIFT;
 	if (unlikely(index >= end_index)) {
 		if (unlikely(index > end_index || ofs + vol->mft_record_size >=
-				(i_size & ~PAGE_CACHE_MASK))) {
+				(i_size & ~PAGE_MASK))) {
 			ntfs_error(vol->sb, "Tried to format non-existing mft "
 					"record 0x%llx.", (long long)mft_no);
 			return -ENOENT;
@@ -2515,8 +2515,8 @@ mft_rec_already_initialized:
 	 * We now have allocated and initialized the mft record. Calculate the
 	 * index of and the offset within the page cache page the record is in.
 	 */
-	index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
-	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
+	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
 	/* Read, map, and pin the page containing the mft record. */
 	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
 	if (IS_ERR(page)) {
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index c581e26a350d..12de47b96ca9 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -43,7 +43,7 @@ typedef enum {
 	NTFS_MAX_NAME_LEN = 255,
 	NTFS_MAX_ATTR_NAME_LEN = 255,
 	NTFS_MAX_CLUSTER_SIZE = 64 * 1024,	/* 64kiB */
-	NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE,
+	NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE,
 } NTFS_CONSTANTS;
 
 /* Global variables. */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1b38abdaa3ed..ecb49870a680 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
 	ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
 			vol->mft_record_size_bits, vol->mft_record_size_bits);
 	/*
-	 * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+	 * We cannot support mft record sizes above the PAGE_SIZE since
 	 * we store $MFT/$DATA, the table of mft records in the page cache.
 	 */
-	if (vol->mft_record_size > PAGE_CACHE_SIZE) {
+	if (vol->mft_record_size > PAGE_SIZE) {
 		ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
-				"PAGE_CACHE_SIZE on your system (%lu). "
+				"PAGE_SIZE on your system (%lu). "
 				"This is not supported. Sorry.",
-				vol->mft_record_size, PAGE_CACHE_SIZE);
+				vol->mft_record_size, PAGE_SIZE);
 		return false;
 	}
 	/* We cannot support mft record sizes below the sector size. */
@@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol)
 
 	ntfs_debug("Entering.");
 	/* Compare contents of $MFT and $MFTMirr. */
-	mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size;
+	mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
 	BUG_ON(!mrecs_per_page);
 	BUG_ON(!vol->mftmirr_size);
 	mft_page = mirr_page = NULL;
@@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol)
 	if (!vol->attrdef)
 		goto iput_failed;
 	index = 0;
-	max_index = i_size >> PAGE_CACHE_SHIFT;
-	size = PAGE_CACHE_SIZE;
+	max_index = i_size >> PAGE_SHIFT;
+	size = PAGE_SIZE;
 	while (index < max_index) {
 		/* Read the attrdef table and copy it into the linear buffer. */
 read_partial_attrdef_page:
 		page = ntfs_map_page(ino->i_mapping, index);
 		if (IS_ERR(page))
 			goto free_iput_failed;
-		memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT),
+		memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
 				page_address(page), size);
 		ntfs_unmap_page(page);
 	};
-	if (size == PAGE_CACHE_SIZE) {
-		size = i_size & ~PAGE_CACHE_MASK;
+	if (size == PAGE_SIZE) {
+		size = i_size & ~PAGE_MASK;
 		if (size)
 			goto read_partial_attrdef_page;
 	}
@@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol)
 	if (!vol->upcase)
 		goto iput_upcase_failed;
 	index = 0;
-	max_index = i_size >> PAGE_CACHE_SHIFT;
-	size = PAGE_CACHE_SIZE;
+	max_index = i_size >> PAGE_SHIFT;
+	size = PAGE_SIZE;
 	while (index < max_index) {
 		/* Read the upcase table and copy it into the linear buffer. */
 read_partial_upcase_page:
 		page = ntfs_map_page(ino->i_mapping, index);
 		if (IS_ERR(page))
 			goto iput_upcase_failed;
-		memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT),
+		memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
 				page_address(page), size);
 		ntfs_unmap_page(page);
 	};
-	if (size == PAGE_CACHE_SIZE) {
-		size = i_size & ~PAGE_CACHE_MASK;
+	if (size == PAGE_SIZE) {
+		size = i_size & ~PAGE_MASK;
 		if (size)
 			goto read_partial_upcase_page;
 	}
@@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	down_read(&vol->lcnbmp_lock);
 	/*
 	 * Convert the number of bits into bytes rounded up, then convert into
-	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * multiples of PAGE_SIZE, rounding up so that if we have one
 	 * full and one partial page max_index = 2.
 	 */
-	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
+			PAGE_SHIFT;
+	/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
-			max_index, PAGE_CACHE_SIZE / 4);
+			max_index, PAGE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
 		unsigned long *kaddr;
 
@@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 		if (IS_ERR(page)) {
 			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
-			nr_free -= PAGE_CACHE_SIZE * 8;
+			nr_free -= PAGE_SIZE * 8;
 			continue;
 		}
 		kaddr = kmap_atomic(page);
@@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 		 * ntfs_readpage().
 		 */
 		nr_free -= bitmap_weight(kaddr,
-					PAGE_CACHE_SIZE * BITS_PER_BYTE);
+					PAGE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr);
-		page_cache_release(page);
+		put_page(page);
 	}
 	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
 	/*
@@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 	pgoff_t index;
 
 	ntfs_debug("Entering.");
-	/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+	/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
-			"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
+			"0x%lx.", max_index, PAGE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
 		unsigned long *kaddr;
 
@@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		if (IS_ERR(page)) {
 			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
-			nr_free -= PAGE_CACHE_SIZE * 8;
+			nr_free -= PAGE_SIZE * 8;
 			continue;
 		}
 		kaddr = kmap_atomic(page);
@@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		 * ntfs_readpage().
 		 */
 		nr_free -= bitmap_weight(kaddr,
-					PAGE_CACHE_SIZE * BITS_PER_BYTE);
+					PAGE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr);
-		page_cache_release(page);
+		put_page(page);
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
 			index - 1);
@@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
 	/* Type of filesystem. */
 	sfs->f_type = NTFS_SB_MAGIC;
 	/* Optimal transfer block size. */
-	sfs->f_bsize = PAGE_CACHE_SIZE;
+	sfs->f_bsize = PAGE_SIZE;
 	/*
 	 * Total data blocks in filesystem in units of f_bsize and since
 	 * inodes are also stored in data blocs ($MFT is a file) this is just
 	 * the total clusters.
 	 */
 	sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
-				PAGE_CACHE_SHIFT;
+				PAGE_SHIFT;
 	/* Free data blocks in filesystem in units of f_bsize. */
 	size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
-				PAGE_CACHE_SHIFT;
+				PAGE_SHIFT;
 	if (size < 0LL)
 		size = 0LL;
 	/* Free blocks avail to non-superuser, same as above on NTFS. */
@@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
 	size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
 	/*
 	 * Convert the maximum number of set bits into bytes rounded up, then
-	 * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+	 * convert into multiples of PAGE_SIZE, rounding up so that if we
 	 * have one full and one partial page max_index = 2.
 	 */
 	max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
-			+ 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+			+ 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	read_unlock_irqrestore(&mft_ni->size_lock, flags);
 	/* Number of inodes in filesystem (at this point in time). */
 	sfs->f_files = size;
@@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
 	if (!parse_options(vol, (char*)opt))
 		goto err_out_now;
 
-	/* We support sector sizes up to the PAGE_CACHE_SIZE. */
-	if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
+	/* We support sector sizes up to the PAGE_SIZE. */
+	if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
 		if (!silent)
 			ntfs_error(sb, "Device has unsupported sector size "
 					"(%i). The maximum supported sector "
 					"size on this architecture is %lu "
 					"bytes.",
 					bdev_logical_block_size(sb->s_bdev),
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 		goto err_out_now;
 	}
 	/*
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 70907d638b60..e361d1a0ca09 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6671,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
 {
 	int i;
 	struct page *page;
-	unsigned int from, to = PAGE_CACHE_SIZE;
+	unsigned int from, to = PAGE_SIZE;
 	struct super_block *sb = inode->i_sb;
 
 	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
@@ -6679,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
 	if (numpages == 0)
 		goto out;
 
-	to = PAGE_CACHE_SIZE;
+	to = PAGE_SIZE;
 	for(i = 0; i < numpages; i++) {
 		page = pages[i];
 
-		from = start & (PAGE_CACHE_SIZE - 1);
-		if ((end >> PAGE_CACHE_SHIFT) == page->index)
-			to = end & (PAGE_CACHE_SIZE - 1);
+		from = start & (PAGE_SIZE - 1);
+		if ((end >> PAGE_SHIFT) == page->index)
+			to = end & (PAGE_SIZE - 1);
 
-		BUG_ON(from > PAGE_CACHE_SIZE);
-		BUG_ON(to > PAGE_CACHE_SIZE);
+		BUG_ON(from > PAGE_SIZE);
+		BUG_ON(to > PAGE_SIZE);
 
 		ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
 					 &phys);
 
-		start = (page->index + 1) << PAGE_CACHE_SHIFT;
+		start = (page->index + 1) << PAGE_SHIFT;
 	}
 out:
 	if (pages)
@@ -6712,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
 
 	numpages = 0;
 	last_page_bytes = PAGE_ALIGN(end);
-	index = start >> PAGE_CACHE_SHIFT;
+	index = start >> PAGE_SHIFT;
 	do {
 		pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
 		if (!pages[numpages]) {
@@ -6723,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
 
 		numpages++;
 		index++;
-	} while (index < (last_page_bytes >> PAGE_CACHE_SHIFT));
+	} while (index < (last_page_bytes >> PAGE_SHIFT));
 
 out:
 	if (ret != 0) {
@@ -6950,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
 	 * to do that now.
 	 */
 	if (!ocfs2_sparse_alloc(osb) &&
-	    PAGE_CACHE_SIZE < osb->s_clustersize)
-		end = PAGE_CACHE_SIZE;
+	    PAGE_SIZE < osb->s_clustersize)
+		end = PAGE_SIZE;
 
 	ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
 	if (ret) {
@@ -6971,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
 		goto out_unlock;
 	}
 
-	page_end = PAGE_CACHE_SIZE;
-	if (PAGE_CACHE_SIZE > osb->s_clustersize)
+	page_end = PAGE_SIZE;
+	if (PAGE_SIZE > osb->s_clustersize)
 		page_end = osb->s_clustersize;
 
 	for (i = 0; i < num_pages; i++)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1581240a7ca0..ad1577348a92 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
 
 	size = i_size_read(inode);
 
-	if (size > PAGE_CACHE_SIZE ||
+	if (size > PAGE_SIZE ||
 	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
 		ocfs2_error(inode->i_sb,
 			    "Inode %llu has with inline data has bad size: %Lu\n",
@@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
 	if (size)
 		memcpy(kaddr, di->id2.i_data.id_data, size);
 	/* Clear the remaining part of the page */
-	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
+	memset(kaddr + size, 0, PAGE_SIZE - size);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
 
@@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
 {
 	struct inode *inode = page->mapping->host;
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
-	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t start = (loff_t)page->index << PAGE_SHIFT;
 	int ret, unlock = 1;
 
 	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
@@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
 	 * drop out in that case as it's not worth handling here.
 	 */
 	last = list_entry(pages->prev, struct page, lru);
-	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
+	start = (loff_t)last->index << PAGE_SHIFT;
 	if (start >= i_size_read(inode))
 		goto out_unlock;
 
@@ -511,12 +511,12 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
 					    unsigned int *start,
 					    unsigned int *end)
 {
-	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;
+	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
 
-	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
+	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
 		unsigned int cpp;
 
-		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);
+		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
 
 		cluster_start = cpos % cpp;
 		cluster_start = cluster_start << osb->s_clustersize_bits;
@@ -684,13 +684,13 @@ next_bh:
 	return ret;
 }
 
-#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
 #define OCFS2_MAX_CTXT_PAGES	1
 #else
-#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
+#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
 #endif
 
-#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)
+#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
 
 struct ocfs2_unwritten_extent {
 	struct list_head ue_node;
@@ -785,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
 		if (pages[i]) {
 			unlock_page(pages[i]);
 			mark_page_accessed(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 	}
 }
@@ -808,7 +808,7 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
 			}
 		}
 		mark_page_accessed(wc->w_target_page);
-		page_cache_release(wc->w_target_page);
+		put_page(wc->w_target_page);
 	}
 	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
 }
@@ -857,7 +857,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
 	wc->w_di_bh = di_bh;
 	wc->w_type = type;
 
-	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
+	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
 		wc->w_large_pages = 1;
 	else
 		wc->w_large_pages = 0;
@@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode,
 			       loff_t user_pos, unsigned user_len)
 {
 	int i;
-	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
+	unsigned from = user_pos & (PAGE_SIZE - 1),
 		to = user_pos + user_len;
 	struct page *tmppage;
 
@@ -960,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
 	       (page_offset(page) <= user_pos));
 
 	if (page == wc->w_target_page) {
-		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
+		map_from = user_pos & (PAGE_SIZE - 1);
 		map_to = map_from + user_len;
 
 		if (new)
@@ -1034,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	loff_t last_byte;
 
-	target_index = user_pos >> PAGE_CACHE_SHIFT;
+	target_index = user_pos >> PAGE_SHIFT;
 
 	/*
 	 * Figure out how many pages we'll be manipulating here. For
@@ -1053,14 +1053,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 		 */
 		last_byte = max(user_pos + user_len, i_size_read(inode));
 		BUG_ON(last_byte < 1);
-		end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
 		if ((start + wc->w_num_pages) > end_index)
 			wc->w_num_pages = end_index - start;
 	} else {
 		wc->w_num_pages = 1;
 		start = target_index;
 	}
-	end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
 
 	for(i = 0; i < wc->w_num_pages; i++) {
 		index = start + i;
@@ -1082,7 +1082,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 			goto out;
 		}
 
-		page_cache_get(mmap_page);
+		get_page(mmap_page);
 		wc->w_pages[i] = mmap_page;
 		wc->w_target_locked = true;
 	} else if (index >= target_index && index <= end_index &&
@@ -1272,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 {
 	struct ocfs2_write_cluster_desc *desc;
 
-	wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
+	wc->w_target_from = pos & (PAGE_SIZE - 1);
 	wc->w_target_to = wc->w_target_from + len;
 
 	if (alloc == 0)
@@ -1309,7 +1309,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 						&wc->w_target_to);
 	} else {
 		wc->w_target_from = 0;
-		wc->w_target_to = PAGE_CACHE_SIZE;
+		wc->w_target_to = PAGE_SIZE;
 	}
 }
 
@@ -1981,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 			   struct page *page, void *fsdata)
 {
 	int i, ret;
-	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from, to, start = pos & (PAGE_SIZE - 1);
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_write_ctxt *wc = fsdata;
@@ -2027,8 +2027,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 			from = wc->w_target_from;
 			to = wc->w_target_to;
 
-			BUG_ON(from > PAGE_CACHE_SIZE ||
-			       to > PAGE_CACHE_SIZE ||
+			BUG_ON(from > PAGE_SIZE ||
+			       to > PAGE_SIZE ||
 			       to < from);
 		} else {
 			/*
@@ -2037,7 +2037,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 			 * to flush their entire range.
 			 */
 			from = 0;
-			to = PAGE_CACHE_SIZE;
+			to = PAGE_SIZE;
 		}
 
 		if (page_has_buffers(tmppage)) {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index bd15929b5f92..1934abb6b680 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;
 
-	vec_start = (cs << bits) % PAGE_CACHE_SIZE;
+	vec_start = (cs << bits) % PAGE_SIZE;
 	while(cs < max_slots) {
 		current_page = cs / spp;
 		page = reg->hr_slot_data[current_page];
 
-		vec_len = min(PAGE_CACHE_SIZE - vec_start,
-			      (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );
+		vec_len = min(PAGE_SIZE - vec_start,
+			      (max_slots-cs) * (PAGE_SIZE/spp) );
 
 		mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
 		     current_page, vec_len, vec_start);
@@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 		len = bio_add_page(bio, page, vec_len, vec_start);
 		if (len != vec_len) break;
 
-		cs += vec_len / (PAGE_CACHE_SIZE/spp);
+		cs += vec_len / (PAGE_SIZE/spp);
 		vec_start = 0;
 	}
 
@@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
 
 static void o2hb_init_region_params(struct o2hb_region *reg)
 {
-	reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
+	reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
 	reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
 
 	mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 03768bb3aab1..47b3b2d4e775 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb,
 			    int silent)
 {
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = DLMFS_MAGIC;
 	sb->s_op = &dlmfs_ops;
 	sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c18ab45f8d21..5308841756be 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
+	unsigned long index = abs_from >> PAGE_SHIFT;
 	handle_t *handle;
 	int ret = 0;
 	unsigned zero_from, zero_to, block_start, block_end;
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
 	BUG_ON(abs_from >= abs_to);
-	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
 	BUG_ON(abs_from & (inode->i_blkbits - 1));
 
 	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
@@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 	}
 
 	/* Get the offsets within the page that we want to zero */
-	zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
-	zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+	zero_from = abs_from & (PAGE_SIZE - 1);
+	zero_to = abs_to & (PAGE_SIZE - 1);
 	if (!zero_to)
-		zero_to = PAGE_CACHE_SIZE;
+		zero_to = PAGE_SIZE;
 
 	trace_ocfs2_write_zero_page(
 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 
 out_unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out_commit_trans:
 	if (handle)
 		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -959,7 +959,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 	BUG_ON(range_start >= range_end);
 
 	while (zero_pos < range_end) {
-		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
 		if (next_pos > range_end)
 			next_pos = range_end;
 		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9ea081f4e6e4..71545ad4628c 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
 	struct inode *inode = file_inode(file);
 	struct address_space *mapping = inode->i_mapping;
 	loff_t pos = page_offset(page);
-	unsigned int len = PAGE_CACHE_SIZE;
+	unsigned int len = PAGE_SIZE;
 	pgoff_t last_index;
 	struct page *locked_page = NULL;
 	void *fsdata;
 	loff_t size = i_size_read(inode);
 
-	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+	last_index = (size - 1) >> PAGE_SHIFT;
 
 	/*
 	 * There are cases that lead to the page no longer bebongs to the
@@ -102,7 +102,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
 	 * because the "write" would invalidate their data.
 	 */
 	if (page->index == last_index)
-		len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
+		len = ((size - 1) & ~PAGE_MASK) + 1;
 
 	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
 				       &locked_page, &fsdata, di_bh, page);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6cf6538a0651..e63af7ddfe68 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -822,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
 	u32 clusters = pg_index;
 	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
 
-	if (unlikely(PAGE_CACHE_SHIFT > cbits))
-		clusters = pg_index << (PAGE_CACHE_SHIFT - cbits);
-	else if (PAGE_CACHE_SHIFT < cbits)
-		clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT);
+	if (unlikely(PAGE_SHIFT > cbits))
+		clusters = pg_index << (PAGE_SHIFT - cbits);
+	else if (PAGE_SHIFT < cbits)
+		clusters = pg_index >> (cbits - PAGE_SHIFT);
 
 	return clusters;
 }
@@ -839,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
 	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
 	pgoff_t index = clusters;
 
-	if (PAGE_CACHE_SHIFT > cbits) {
-		index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits);
-	} else if (PAGE_CACHE_SHIFT < cbits) {
-		index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT);
+	if (PAGE_SHIFT > cbits) {
+		index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
+	} else if (PAGE_SHIFT < cbits) {
+		index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
 	}
 
 	return index;
@@ -853,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
 	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
 	unsigned int pages_per_cluster = 1;
 
-	if (PAGE_CACHE_SHIFT < cbits)
-		pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT);
+	if (PAGE_SHIFT < cbits)
+		pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
 
 	return pages_per_cluster;
 }
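The three ocfs2.h helpers above are pure shifts between page-index and cluster units, so the rename is again behavior-neutral. A worked example under assumed sizes (4 KiB pages, so PAGE_SHIFT == 12, and 8 KiB clusters, so cbits == 13): ocfs2_page_index_to_clusters() maps pg_index 10 to cluster 10 >> (13 - 12) == 5, ocfs2_align_clusters_to_page_index() maps cluster 5 back to page index 5 << 1 == 10, and ocfs2_pages_per_cluster() yields 1 << 1 == 2 pages per cluster.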
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3892f3c079ca..ab6a6cdcf91c 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
 	int status = 0;
 
 	trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+	if (!sb_has_quota_loaded(sb, type)) {
+		status = -ESRCH;
+		goto out;
+	}
 	status = ocfs2_lock_global_qf(info, 0);
 	if (status < 0)
 		goto out;
@@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
 out_global:
 	ocfs2_unlock_global_qf(info, 0);
 out:
-	/* Avoid logging ENOENT since it just means there isn't next ID */
-	if (status && status != -ENOENT)
+	/*
+	 * Avoid logging ENOENT since it just means there isn't next ID and
+	 * ESRCH which means quota isn't enabled for the filesystem.
+	 */
+	if (status && status != -ENOENT && status != -ESRCH)
 		mlog_errno(status);
 	return status;
 }
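Unlike the mechanical renames elsewhere in this range, the two quota_global.c hunks above change behavior: ocfs2_get_next_id() now bails out with -ESRCH before taking the global quota-file lock when quota is not loaded for the requested type, and -ESRCH joins -ENOENT as an expected, unlogged result. A minimal sketch of the resulting control flow (paraphrased, not the verbatim function; lock_and_find_next_id() is a hypothetical stand-in for the real lock/search sequence):

static int get_next_id_sketch(struct super_block *sb, struct kqid *qid)
{
	int type = qid->type;
	int status;

	if (!sb_has_quota_loaded(sb, type))
		return -ESRCH;	/* quota off: there is no "next ID" */
	/* hypothetical helper standing in for ocfs2_lock_global_qf() + search */
	status = lock_and_find_next_id(sb, qid);
	/* -ENOENT (ran past the last ID) and -ESRCH are expected results */
	if (status && status != -ENOENT && status != -ESRCH)
		mlog_errno(status);
	return status;
}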
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3eff031aaf26..744d5d90c363 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 	end = i_size_read(inode);
 
 	while (offset < end) {
-		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+		page_index = offset >> PAGE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
 		/* from, to is the offset within the page. */
-		from = offset & (PAGE_CACHE_SIZE - 1);
-		to = PAGE_CACHE_SIZE;
-		if (map_end & (PAGE_CACHE_SIZE - 1))
-			to = map_end & (PAGE_CACHE_SIZE - 1);
+		from = offset & (PAGE_SIZE - 1);
+		to = PAGE_SIZE;
+		if (map_end & (PAGE_SIZE - 1))
+			to = map_end & (PAGE_SIZE - 1);
 
 		page = find_or_create_page(mapping, page_index, GFP_NOFS);
 		if (!page) {
@@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 		}
 
 		/*
-		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+		 * In case PAGE_SIZE <= CLUSTER_SIZE, This page
 		 * can't be dirtied before we CoW it out.
 		 */
-		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
 			BUG_ON(PageDirty(page));
 
 		if (!PageUptodate(page)) {
@@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 		mark_page_accessed(page);
 unlock:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		offset = map_end;
 		if (ret)
@@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
 	}
 
 	while (offset < end) {
-		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+		page_index = offset >> PAGE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
@@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
 		mark_page_accessed(page);
 
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		offset = map_end;
 		if (ret)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7db631e1c8b0..d7cae3327de5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -605,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
 	/*
 	 * We might be limited by page cache size.
 	 */
-	if (bytes > PAGE_CACHE_SIZE) {
-		bytes = PAGE_CACHE_SIZE;
+	if (bytes > PAGE_SIZE) {
+		bytes = PAGE_SIZE;
 		trim = 1;
 		/*
 		 * Shift by 31 here so that we don't get larger than
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 2382e267b49e..0166faabf8f2 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -18,8 +18,8 @@ static int read_one_page(struct page *page)
18 int max_block; 18 int max_block;
19 ssize_t bytes_read = 0; 19 ssize_t bytes_read = 0;
20 struct inode *inode = page->mapping->host; 20 struct inode *inode = page->mapping->host;
21 const __u32 blocksize = PAGE_CACHE_SIZE; /* inode->i_blksize */ 21 const __u32 blocksize = PAGE_SIZE; /* inode->i_blksize */
22 const __u32 blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */ 22 const __u32 blockbits = PAGE_SHIFT; /* inode->i_blkbits */
23 struct iov_iter to; 23 struct iov_iter to;
24 struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE}; 24 struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
25 25
@@ -86,7 +86,7 @@ static int orangefs_readpages(struct file *file,
86 "failure adding page to cache, read_one_page returned: %d\n", 86 "failure adding page to cache, read_one_page returned: %d\n",
87 ret); 87 ret);
88 } else { 88 } else {
89 page_cache_release(page); 89 put_page(page);
90 } 90 }
91 } 91 }
92 BUG_ON(!list_empty(pages)); 92 BUG_ON(!list_empty(pages));
@@ -328,7 +328,7 @@ static int orangefs_init_iops(struct inode *inode)
328 case S_IFREG: 328 case S_IFREG:
329 inode->i_op = &orangefs_file_inode_operations; 329 inode->i_op = &orangefs_file_inode_operations;
330 inode->i_fop = &orangefs_file_operations; 330 inode->i_fop = &orangefs_file_operations;
331 inode->i_blkbits = PAGE_CACHE_SHIFT; 331 inode->i_blkbits = PAGE_SHIFT;
332 break; 332 break;
333 case S_IFLNK: 333 case S_IFLNK:
334 inode->i_op = &orangefs_symlink_inode_operations; 334 inode->i_op = &orangefs_symlink_inode_operations;
@@ -456,7 +456,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
456 inode->i_uid = current_fsuid(); 456 inode->i_uid = current_fsuid();
457 inode->i_gid = current_fsgid(); 457 inode->i_gid = current_fsgid();
458 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 458 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
459 inode->i_size = PAGE_CACHE_SIZE; 459 inode->i_size = PAGE_SIZE;
460 inode->i_rdev = dev; 460 inode->i_rdev = dev;
461 461
462 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); 462 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 1f8acc9f9a88..75375e90a63f 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -170,7 +170,7 @@ orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
170 int i; 170 int i;
171 171
172 for (i = 0; i < bufmap->page_count; i++) 172 for (i = 0; i < bufmap->page_count; i++)
173 page_cache_release(bufmap->page_array[i]); 173 put_page(bufmap->page_array[i]);
174} 174}
175 175
176static void 176static void
@@ -299,7 +299,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
299 299
300 for (i = 0; i < ret; i++) { 300 for (i = 0; i < ret; i++) {
301 SetPageError(bufmap->page_array[i]); 301 SetPageError(bufmap->page_array[i]);
302 page_cache_release(bufmap->page_array[i]); 302 put_page(bufmap->page_array[i]);
303 } 303 }
304 return -ENOMEM; 304 return -ENOMEM;
305 } 305 }
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 40f5163b56aa..8277aba65e87 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -303,7 +303,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
303 } 303 }
304 break; 304 break;
305 case S_IFDIR: 305 case S_IFDIR:
306 inode->i_size = PAGE_CACHE_SIZE; 306 inode->i_size = PAGE_SIZE;
307 orangefs_inode->blksize = (1 << inode->i_blkbits); 307 orangefs_inode->blksize = (1 << inode->i_blkbits);
308 spin_lock(&inode->i_lock); 308 spin_lock(&inode->i_lock);
309 inode_set_bytes(inode, inode->i_size); 309 inode_set_bytes(inode, inode->i_size);
diff --git a/fs/pipe.c b/fs/pipe.c
index ab8dad3ccb6a..0d3f5165cb0b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
134 if (page_count(page) == 1 && !pipe->tmp_page) 134 if (page_count(page) == 1 && !pipe->tmp_page)
135 pipe->tmp_page = page; 135 pipe->tmp_page = page;
136 else 136 else
137 page_cache_release(page); 137 put_page(page);
138} 138}
139 139
140/** 140/**
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
180 */ 180 */
181void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) 181void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
182{ 182{
183 page_cache_get(buf->page); 183 get_page(buf->page);
184} 184}
185EXPORT_SYMBOL(generic_pipe_buf_get); 185EXPORT_SYMBOL(generic_pipe_buf_get);
186 186
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm);
211void generic_pipe_buf_release(struct pipe_inode_info *pipe, 211void generic_pipe_buf_release(struct pipe_inode_info *pipe,
212 struct pipe_buffer *buf) 212 struct pipe_buffer *buf)
213{ 213{
214 page_cache_release(buf->page); 214 put_page(buf->page);
215} 215}
216EXPORT_SYMBOL(generic_pipe_buf_release); 216EXPORT_SYMBOL(generic_pipe_buf_release);
217 217
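
The pipe hunks above are pure renames: page_cache_get()/page_cache_release() become get_page()/put_page() with identical reference-count semantics. A toy refcount model of the pairing these buffer ops rely on; struct page here is a stand-in, not the kernel's:

#include <assert.h>
#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p)
{
        assert(p->refcount > 0);
        if (--p->refcount == 0)
                printf("page freed\n");
}

int main(void)
{
        struct page pg = { .refcount = 1 };  /* owner's reference */

        get_page(&pg);  /* generic_pipe_buf_get(): pipe takes a ref */
        put_page(&pg);  /* generic_pipe_buf_release(): pipe drops it */
        put_page(&pg);  /* owner drops the last ref: page freed */
        return 0;
}
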
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9df431642042..229cb546bee0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
553 if (radix_tree_exceptional_entry(page)) 553 if (radix_tree_exceptional_entry(page))
554 mss->swap += PAGE_SIZE; 554 mss->swap += PAGE_SIZE;
555 else 555 else
556 page_cache_release(page); 556 put_page(page);
557 557
558 return; 558 return;
559 } 559 }
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 55bb57e6a30d..8afe10cf7df8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
279 if (!page) 279 if (!page)
280 return VM_FAULT_OOM; 280 return VM_FAULT_OOM;
281 if (!PageUptodate(page)) { 281 if (!PageUptodate(page)) {
282 offset = (loff_t) index << PAGE_CACHE_SHIFT; 282 offset = (loff_t) index << PAGE_SHIFT;
283 buf = __va((page_to_pfn(page) << PAGE_SHIFT)); 283 buf = __va((page_to_pfn(page) << PAGE_SHIFT));
284 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); 284 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
285 if (rc < 0) { 285 if (rc < 0) {
286 unlock_page(page); 286 unlock_page(page);
287 page_cache_release(page); 287 put_page(page);
288 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 288 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
289 } 289 }
290 SetPageUptodate(page); 290 SetPageUptodate(page);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index dc645b66cd79..45d6110744cb 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
420 pstore_sb = sb; 420 pstore_sb = sb;
421 421
422 sb->s_maxbytes = MAX_LFS_FILESIZE; 422 sb->s_maxbytes = MAX_LFS_FILESIZE;
423 sb->s_blocksize = PAGE_CACHE_SIZE; 423 sb->s_blocksize = PAGE_SIZE;
424 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 424 sb->s_blocksize_bits = PAGE_SHIFT;
425 sb->s_magic = PSTOREFS_MAGIC; 425 sb->s_magic = PSTOREFS_MAGIC;
426 sb->s_op = &pstore_ops; 426 sb->s_op = &pstore_ops;
427 sb->s_time_gran = 1; 427 sb->s_time_gran = 1;
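
pstore (above) and ramfs (below) both advertise one filesystem "block" per page. A trivial sketch of those defaults with simplified stand-in types, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct super_block {
        unsigned long s_blocksize;
        unsigned char s_blocksize_bits;
};

int main(void)
{
        struct super_block sb;

        sb.s_blocksize = PAGE_SIZE;        /* was PAGE_CACHE_SIZE */
        sb.s_blocksize_bits = PAGE_SHIFT;  /* was PAGE_CACHE_SHIFT */
        printf("blocksize=%lu bits=%u\n", sb.s_blocksize,
               sb.s_blocksize_bits);
        return 0;
}
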
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index e1f37278cf97..144ceda4948e 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
35static unsigned last_entry(struct inode *inode, unsigned long page_nr) 35static unsigned last_entry(struct inode *inode, unsigned long page_nr)
36{ 36{
37 unsigned long last_byte = inode->i_size; 37 unsigned long last_byte = inode->i_size;
38 last_byte -= page_nr << PAGE_CACHE_SHIFT; 38 last_byte -= page_nr << PAGE_SHIFT;
39 if (last_byte > PAGE_CACHE_SIZE) 39 if (last_byte > PAGE_SIZE)
40 last_byte = PAGE_CACHE_SIZE; 40 last_byte = PAGE_SIZE;
41 return last_byte / QNX6_DIR_ENTRY_SIZE; 41 return last_byte / QNX6_DIR_ENTRY_SIZE;
42} 42}
43 43
@@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
47{ 47{
48 struct qnx6_sb_info *sbi = QNX6_SB(sb); 48 struct qnx6_sb_info *sbi = QNX6_SB(sb);
49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ 49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
50 u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */ 50 u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
51 /* within page */ 51 /* within page */
52 u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK; 52 u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
53 struct address_space *mapping = sbi->longfile->i_mapping; 53 struct address_space *mapping = sbi->longfile->i_mapping;
54 struct page *page = read_mapping_page(mapping, n, NULL); 54 struct page *page = read_mapping_page(mapping, n, NULL);
55 if (IS_ERR(page)) 55 if (IS_ERR(page))
@@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
115 struct qnx6_sb_info *sbi = QNX6_SB(s); 115 struct qnx6_sb_info *sbi = QNX6_SB(s);
116 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); 116 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
117 unsigned long npages = dir_pages(inode); 117 unsigned long npages = dir_pages(inode);
118 unsigned long n = pos >> PAGE_CACHE_SHIFT; 118 unsigned long n = pos >> PAGE_SHIFT;
119 unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; 119 unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
120 bool done = false; 120 bool done = false;
121 121
122 ctx->pos = pos; 122 ctx->pos = pos;
@@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
131 131
132 if (IS_ERR(page)) { 132 if (IS_ERR(page)) {
133 pr_err("%s(): read failed\n", __func__); 133 pr_err("%s(): read failed\n", __func__);
134 ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; 134 ctx->pos = (n + 1) << PAGE_SHIFT;
135 return PTR_ERR(page); 135 return PTR_ERR(page);
136 } 136 }
137 de = ((struct qnx6_dir_entry *)page_address(page)) + start; 137 de = ((struct qnx6_dir_entry *)page_address(page)) + start;
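
A standalone model of last_entry() above: how many directory entries live on page page_nr of a directory of size i_size. PAGE_SHIFT is assumed 12 and the 32-byte entry size matches qnx6's on-disk format; treat both as illustrative constants.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define QNX6_DIR_ENTRY_SIZE 32

static unsigned last_entry(unsigned long i_size, unsigned long page_nr)
{
        unsigned long last_byte = i_size - (page_nr << PAGE_SHIFT);

        if (last_byte > PAGE_SIZE)
                last_byte = PAGE_SIZE;          /* full page */
        return last_byte / QNX6_DIR_ENTRY_SIZE;
}

int main(void)
{
        printf("%u\n", last_entry(10000, 0));   /* 128: full page */
        printf("%u\n", last_entry(10000, 2));   /* 56: tail page */
        return 0;
}
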
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 47bb1de07155..1192422a1c56 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
542 iget_failed(inode); 542 iget_failed(inode);
543 return ERR_PTR(-EIO); 543 return ERR_PTR(-EIO);
544 } 544 }
545 n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); 545 n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
546 offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); 546 offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
547 mapping = sbi->inodes->i_mapping; 547 mapping = sbi->inodes->i_mapping;
548 page = read_mapping_page(mapping, n, NULL); 548 page = read_mapping_page(mapping, n, NULL);
549 if (IS_ERR(page)) { 549 if (IS_ERR(page)) {
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index d3fb2b698800..f23b5c4a66ad 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
128static inline void qnx6_put_page(struct page *page) 128static inline void qnx6_put_page(struct page *page)
129{ 129{
130 kunmap(page); 130 kunmap(page);
131 page_cache_release(page); 131 put_page(page);
132} 132}
133 133
134extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, 134extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ba827daea5a0..ff21980d0119 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2047 struct quota_info *dqopt = sb_dqopt(sb); 2047 struct quota_info *dqopt = sb_dqopt(sb);
2048 int err; 2048 int err;
2049 2049
2050 if (!dqopt->ops[qid->type]->get_next_id) 2050 mutex_lock(&dqopt->dqonoff_mutex);
2051 return -ENOSYS; 2051 if (!sb_has_quota_active(sb, qid->type)) {
2052 err = -ESRCH;
2053 goto out;
2054 }
2055 if (!dqopt->ops[qid->type]->get_next_id) {
2056 err = -ENOSYS;
2057 goto out;
2058 }
2052 mutex_lock(&dqopt->dqio_mutex); 2059 mutex_lock(&dqopt->dqio_mutex);
2053 err = dqopt->ops[qid->type]->get_next_id(sb, qid); 2060 err = dqopt->ops[qid->type]->get_next_id(sb, qid);
2054 mutex_unlock(&dqopt->dqio_mutex); 2061 mutex_unlock(&dqopt->dqio_mutex);
2062out:
2063 mutex_unlock(&dqopt->dqonoff_mutex);
2055 2064
2056 return err; 2065 return err;
2057} 2066}
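
This is the one hunk above that changes behavior rather than spelling: dquot_get_next_id() now holds dqonoff_mutex so quota state can't flip mid-call, returns ESRCH when quota isn't active and ENOSYS when the format lacks get_next_id, and only then takes dqio_mutex. A userspace sketch of that lock ordering (build with -pthread); the mutex names mirror the hunk, everything else is a stand-in:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dqonoff_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dqio_mutex = PTHREAD_MUTEX_INITIALIZER;

static int quota_active;                  /* sb_has_quota_active() */
static int (*get_next_id_op)(unsigned *); /* may be NULL */

static int dquot_get_next_id(unsigned *qid)
{
        int err;

        pthread_mutex_lock(&dqonoff_mutex);
        if (!quota_active) {
                err = -ESRCH;
                goto out;
        }
        if (!get_next_id_op) {
                err = -ENOSYS;
                goto out;
        }
        pthread_mutex_lock(&dqio_mutex);
        err = get_next_id_op(qid);
        pthread_mutex_unlock(&dqio_mutex);
out:
        pthread_mutex_unlock(&dqonoff_mutex);
        return err;
}

int main(void)
{
        unsigned qid = 0;
        printf("%d\n", dquot_get_next_id(&qid)); /* -ESRCH: inactive */
        return 0;
}
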
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 38981b037524..1ab6e6c2e60e 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
223 return err; 223 return err;
224 224
225 sb->s_maxbytes = MAX_LFS_FILESIZE; 225 sb->s_maxbytes = MAX_LFS_FILESIZE;
226 sb->s_blocksize = PAGE_CACHE_SIZE; 226 sb->s_blocksize = PAGE_SIZE;
227 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 227 sb->s_blocksize_bits = PAGE_SHIFT;
228 sb->s_magic = RAMFS_MAGIC; 228 sb->s_magic = RAMFS_MAGIC;
229 sb->s_op = &ramfs_ops; 229 sb->s_op = &ramfs_ops;
230 sb->s_time_gran = 1; 230 sb->s_time_gran = 1;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9424a4ba93a9..389773711de4 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
180 int partial = 0; 180 int partial = 0;
181 unsigned blocksize; 181 unsigned blocksize;
182 struct buffer_head *bh, *head; 182 struct buffer_head *bh, *head;
183 unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT; 183 unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
184 int new; 184 int new;
185 int logit = reiserfs_file_data_log(inode); 185 int logit = reiserfs_file_data_log(inode);
186 struct super_block *s = inode->i_sb; 186 struct super_block *s = inode->i_sb;
187 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; 187 int bh_per_page = PAGE_SIZE / s->s_blocksize;
188 struct reiserfs_transaction_handle th; 188 struct reiserfs_transaction_handle th;
189 int ret = 0; 189 int ret = 0;
190 190
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae9e5b308cf9..d5c2e9c865de 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
386 goto finished; 386 goto finished;
387 } 387 }
388 /* read file tail into part of page */ 388 /* read file tail into part of page */
389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); 389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
390 copy_item_head(&tmp_ih, ih); 390 copy_item_head(&tmp_ih, ih);
391 391
392 /* 392 /*
@@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode,
587 return -EIO; 587 return -EIO;
588 588
589 /* always try to read until the end of the block */ 589 /* always try to read until the end of the block */
590 tail_start = tail_offset & (PAGE_CACHE_SIZE - 1); 590 tail_start = tail_offset & (PAGE_SIZE - 1);
591 tail_end = (tail_start | (bh_result->b_size - 1)) + 1; 591 tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
592 592
593 index = tail_offset >> PAGE_CACHE_SHIFT; 593 index = tail_offset >> PAGE_SHIFT;
594 /* 594 /*
595 * hole_page can be zero in case of direct_io, we are sure 595 * hole_page can be zero in case of direct_io, we are sure
596 * that we cannot get here if we write with O_DIRECT into tail page 596 * that we cannot get here if we write with O_DIRECT into tail page
@@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode,
629unlock: 629unlock:
630 if (tail_page != hole_page) { 630 if (tail_page != hole_page) {
631 unlock_page(tail_page); 631 unlock_page(tail_page);
632 page_cache_release(tail_page); 632 put_page(tail_page);
633 } 633 }
634out: 634out:
635 return retval; 635 return retval;
@@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode,
2189 * we want the page with the last byte in the file, 2189 * we want the page with the last byte in the file,
2190 * not the page that will hold the next byte for appending 2190 * not the page that will hold the next byte for appending
2191 */ 2191 */
2192 unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; 2192 unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
2193 unsigned long pos = 0; 2193 unsigned long pos = 0;
2194 unsigned long start = 0; 2194 unsigned long start = 0;
2195 unsigned long blocksize = inode->i_sb->s_blocksize; 2195 unsigned long blocksize = inode->i_sb->s_blocksize;
2196 unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); 2196 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
2197 struct buffer_head *bh; 2197 struct buffer_head *bh;
2198 struct buffer_head *head; 2198 struct buffer_head *head;
2199 struct page *page; 2199 struct page *page;
@@ -2251,7 +2251,7 @@ out:
2251 2251
2252unlock: 2252unlock:
2253 unlock_page(page); 2253 unlock_page(page);
2254 page_cache_release(page); 2254 put_page(page);
2255 return error; 2255 return error;
2256} 2256}
2257 2257
@@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2265{ 2265{
2266 struct reiserfs_transaction_handle th; 2266 struct reiserfs_transaction_handle th;
2267 /* we want the offset for the first byte after the end of the file */ 2267 /* we want the offset for the first byte after the end of the file */
2268 unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2268 unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
2269 unsigned blocksize = inode->i_sb->s_blocksize; 2269 unsigned blocksize = inode->i_sb->s_blocksize;
2270 unsigned length; 2270 unsigned length;
2271 struct page *page = NULL; 2271 struct page *page = NULL;
@@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2345 } 2345 }
2346 } 2346 }
2347 unlock_page(page); 2347 unlock_page(page);
2348 page_cache_release(page); 2348 put_page(page);
2349 } 2349 }
2350 2350
2351 reiserfs_write_unlock(inode->i_sb); 2351 reiserfs_write_unlock(inode->i_sb);
@@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2354out: 2354out:
2355 if (page) { 2355 if (page) {
2356 unlock_page(page); 2356 unlock_page(page);
2357 page_cache_release(page); 2357 put_page(page);
2358 } 2358 }
2359 2359
2360 reiserfs_write_unlock(inode->i_sb); 2360 reiserfs_write_unlock(inode->i_sb);
@@ -2426,7 +2426,7 @@ research:
2426 } else if (is_direct_le_ih(ih)) { 2426 } else if (is_direct_le_ih(ih)) {
2427 char *p; 2427 char *p;
2428 p = page_address(bh_result->b_page); 2428 p = page_address(bh_result->b_page);
2429 p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1); 2429 p += (byte_offset - 1) & (PAGE_SIZE - 1);
2430 copy_size = ih_item_len(ih) - pos_in_item; 2430 copy_size = ih_item_len(ih) - pos_in_item;
2431 2431
2432 fs_gen = get_generation(inode->i_sb); 2432 fs_gen = get_generation(inode->i_sb);
@@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page,
2525 struct writeback_control *wbc) 2525 struct writeback_control *wbc)
2526{ 2526{
2527 struct inode *inode = page->mapping->host; 2527 struct inode *inode = page->mapping->host;
2528 unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; 2528 unsigned long end_index = inode->i_size >> PAGE_SHIFT;
2529 int error = 0; 2529 int error = 0;
2530 unsigned long block; 2530 unsigned long block;
2531 sector_t last_block; 2531 sector_t last_block;
@@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page,
2535 int checked = PageChecked(page); 2535 int checked = PageChecked(page);
2536 struct reiserfs_transaction_handle th; 2536 struct reiserfs_transaction_handle th;
2537 struct super_block *s = inode->i_sb; 2537 struct super_block *s = inode->i_sb;
2538 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; 2538 int bh_per_page = PAGE_SIZE / s->s_blocksize;
2539 th.t_trans_id = 0; 2539 th.t_trans_id = 0;
2540 2540
2541 /* no logging allowed when nonblocking or from PF_MEMALLOC */ 2541 /* no logging allowed when nonblocking or from PF_MEMALLOC */
@@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page,
2564 if (page->index >= end_index) { 2564 if (page->index >= end_index) {
2565 unsigned last_offset; 2565 unsigned last_offset;
2566 2566
2567 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2567 last_offset = inode->i_size & (PAGE_SIZE - 1);
2568 /* no file contents in this page */ 2568 /* no file contents in this page */
2569 if (page->index >= end_index + 1 || !last_offset) { 2569 if (page->index >= end_index + 1 || !last_offset) {
2570 unlock_page(page); 2570 unlock_page(page);
2571 return 0; 2571 return 0;
2572 } 2572 }
2573 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE); 2573 zero_user_segment(page, last_offset, PAGE_SIZE);
2574 } 2574 }
2575 bh = head; 2575 bh = head;
2576 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2576 block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
2577 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 2577 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2578 /* first map all the buffers, logging any direct items we find */ 2578 /* first map all the buffers, logging any direct items we find */
2579 do { 2579 do {
@@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file,
2774 *fsdata = (void *)(unsigned long)flags; 2774 *fsdata = (void *)(unsigned long)flags;
2775 } 2775 }
2776 2776
2777 index = pos >> PAGE_CACHE_SHIFT; 2777 index = pos >> PAGE_SHIFT;
2778 page = grab_cache_page_write_begin(mapping, index, flags); 2778 page = grab_cache_page_write_begin(mapping, index, flags);
2779 if (!page) 2779 if (!page)
2780 return -ENOMEM; 2780 return -ENOMEM;
@@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file,
2822 } 2822 }
2823 if (ret) { 2823 if (ret) {
2824 unlock_page(page); 2824 unlock_page(page);
2825 page_cache_release(page); 2825 put_page(page);
2826 /* Truncate allocated blocks */ 2826 /* Truncate allocated blocks */
2827 reiserfs_truncate_failed_write(inode); 2827 reiserfs_truncate_failed_write(inode);
2828 } 2828 }
@@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2909 else 2909 else
2910 th = NULL; 2910 th = NULL;
2911 2911
2912 start = pos & (PAGE_CACHE_SIZE - 1); 2912 start = pos & (PAGE_SIZE - 1);
2913 if (unlikely(copied < len)) { 2913 if (unlikely(copied < len)) {
2914 if (!PageUptodate(page)) 2914 if (!PageUptodate(page))
2915 copied = 0; 2915 copied = 0;
@@ -2974,7 +2974,7 @@ out:
2974 if (locked) 2974 if (locked)
2975 reiserfs_write_unlock(inode->i_sb); 2975 reiserfs_write_unlock(inode->i_sb);
2976 unlock_page(page); 2976 unlock_page(page);
2977 page_cache_release(page); 2977 put_page(page);
2978 2978
2979 if (pos + len > inode->i_size) 2979 if (pos + len > inode->i_size)
2980 reiserfs_truncate_failed_write(inode); 2980 reiserfs_truncate_failed_write(inode);
@@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2996 unsigned from, unsigned to) 2996 unsigned from, unsigned to)
2997{ 2997{
2998 struct inode *inode = page->mapping->host; 2998 struct inode *inode = page->mapping->host;
2999 loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to; 2999 loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
3000 int ret = 0; 3000 int ret = 0;
3001 int update_sd = 0; 3001 int update_sd = 0;
3002 struct reiserfs_transaction_handle *th = NULL; 3002 struct reiserfs_transaction_handle *th = NULL;
@@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
3181 struct inode *inode = page->mapping->host; 3181 struct inode *inode = page->mapping->host;
3182 unsigned int curr_off = 0; 3182 unsigned int curr_off = 0;
3183 unsigned int stop = offset + length; 3183 unsigned int stop = offset + length;
3184 int partial_page = (offset || length < PAGE_CACHE_SIZE); 3184 int partial_page = (offset || length < PAGE_SIZE);
3185 int ret = 1; 3185 int ret = 1;
3186 3186
3187 BUG_ON(!PageLocked(page)); 3187 BUG_ON(!PageLocked(page));
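
The reiserfs_write_full_page() hunk above decides what to do with the page that straddles EOF: pages wholly past end_index are skipped, and on the EOF page the bytes past i_size are zeroed before writeback. A standalone model, with zero_fill() standing in for zero_user_segment() and PAGE_SHIFT assumed 12:

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void zero_fill(char *page, unsigned from, unsigned to)
{
        memset(page + from, 0, to - from);
}

static int prepare_page(char *page, unsigned long index,
                        unsigned long long i_size)
{
        unsigned long end_index = i_size >> PAGE_SHIFT;
        unsigned last_offset = i_size & (PAGE_SIZE - 1);

        if (index >= end_index) {
                /* no file contents at all on this page */
                if (index >= end_index + 1 || !last_offset)
                        return 0;
                zero_fill(page, last_offset, PAGE_SIZE);
        }
        return 1;   /* page (at least partially) holds file data */
}

int main(void)
{
        static char page[PAGE_SIZE];
        printf("%d\n", prepare_page(page, 2, 10000)); /* EOF page: 1 */
        printf("%d\n", prepare_page(page, 3, 10000)); /* past EOF: 0 */
        return 0;
}
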
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 036a1fc0a8c3..57045f423893 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
203 * __reiserfs_write_begin on that page. This will force a 203 * __reiserfs_write_begin on that page. This will force a
204 * reiserfs_get_block to unpack the tail for us. 204 * reiserfs_get_block to unpack the tail for us.
205 */ 205 */
206 index = inode->i_size >> PAGE_CACHE_SHIFT; 206 index = inode->i_size >> PAGE_SHIFT;
207 mapping = inode->i_mapping; 207 mapping = inode->i_mapping;
208 page = grab_cache_page(mapping, index); 208 page = grab_cache_page(mapping, index);
209 retval = -ENOMEM; 209 retval = -ENOMEM;
@@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
221 221
222out_unlock: 222out_unlock:
223 unlock_page(page); 223 unlock_page(page);
224 page_cache_release(page); 224 put_page(page);
225 225
226out: 226out:
227 inode_unlock(inode); 227 inode_unlock(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 44c2bdced1c8..2ace90e981f0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s,
599 * This does a check to see if the buffer belongs to one of these 599 * This does a check to see if the buffer belongs to one of these
600 * lost pages before doing the final put_bh. If page->mapping was 600 * lost pages before doing the final put_bh. If page->mapping was
601 * null, it tries to free buffers on the page, which should make the 601 * null, it tries to free buffers on the page, which should make the
602 * final page_cache_release drop the page from the lru. 602 * final put_page drop the page from the lru.
603 */ 603 */
604static void release_buffer_page(struct buffer_head *bh) 604static void release_buffer_page(struct buffer_head *bh)
605{ 605{
606 struct page *page = bh->b_page; 606 struct page *page = bh->b_page;
607 if (!page->mapping && trylock_page(page)) { 607 if (!page->mapping && trylock_page(page)) {
608 page_cache_get(page); 608 get_page(page);
609 put_bh(bh); 609 put_bh(bh);
610 if (!page->mapping) 610 if (!page->mapping)
611 try_to_free_buffers(page); 611 try_to_free_buffers(page);
612 unlock_page(page); 612 unlock_page(page);
613 page_cache_release(page); 613 put_page(page);
614 } else { 614 } else {
615 put_bh(bh); 615 put_bh(bh);
616 } 616 }
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 24cbe013240f..5feacd689241 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1342 */ 1342 */
1343 1343
1344 data = kmap_atomic(un_bh->b_page); 1344 data = kmap_atomic(un_bh->b_page);
1345 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); 1345 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1));
1346 memcpy(data + off, 1346 memcpy(data + off,
1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih), 1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
1348 ret_value); 1348 ret_value);
@@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos)
1511 1511
1512 if (page) { 1512 if (page) {
1513 if (page_has_buffers(page)) { 1513 if (page_has_buffers(page)) {
1514 tail_index = pos & (PAGE_CACHE_SIZE - 1); 1514 tail_index = pos & (PAGE_SIZE - 1);
1515 cur_index = 0; 1515 cur_index = 0;
1516 head = page_buffers(page); 1516 head = page_buffers(page);
1517 bh = head; 1517 bh = head;
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index f41e19b4bb42..2d5489b0a269 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
151 */ 151 */
152 if (up_to_date_bh) { 152 if (up_to_date_bh) {
153 unsigned pgoff = 153 unsigned pgoff =
154 (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); 154 (tail_offset + total_tail - 1) & (PAGE_SIZE - 1);
155 char *kaddr = kmap_atomic(up_to_date_bh->b_page); 155 char *kaddr = kmap_atomic(up_to_date_bh->b_page);
156 memset(kaddr + pgoff, 0, blk_size - total_tail); 156 memset(kaddr + pgoff, 0, blk_size - total_tail);
157 kunmap_atomic(kaddr); 157 kunmap_atomic(kaddr);
@@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
271 * the page was locked and this part of the page was up to date when 271 * the page was locked and this part of the page was up to date when
272 * indirect2direct was called, so we know the bytes are still valid 272 * indirect2direct was called, so we know the bytes are still valid
273 */ 273 */
274 tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); 274 tail = tail + (pos & (PAGE_SIZE - 1));
275 275
276 PATH_LAST_POSITION(path)++; 276 PATH_LAST_POSITION(path)++;
277 277
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 57e0b2310532..28f5f8b11370 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -415,7 +415,7 @@ out:
415static inline void reiserfs_put_page(struct page *page) 415static inline void reiserfs_put_page(struct page *page)
416{ 416{
417 kunmap(page); 417 kunmap(page);
418 page_cache_release(page); 418 put_page(page);
419} 419}
420 420
421static struct page *reiserfs_get_page(struct inode *dir, size_t n) 421static struct page *reiserfs_get_page(struct inode *dir, size_t n)
@@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
427 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this 427 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
428 */ 428 */
429 mapping_set_gfp_mask(mapping, GFP_NOFS); 429 mapping_set_gfp_mask(mapping, GFP_NOFS);
430 page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL); 430 page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
431 if (!IS_ERR(page)) { 431 if (!IS_ERR(page)) {
432 kmap(page); 432 kmap(page);
433 if (PageError(page)) 433 if (PageError(page))
@@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
526 while (buffer_pos < buffer_size || buffer_pos == 0) { 526 while (buffer_pos < buffer_size || buffer_pos == 0) {
527 size_t chunk; 527 size_t chunk;
528 size_t skip = 0; 528 size_t skip = 0;
529 size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); 529 size_t page_offset = (file_pos & (PAGE_SIZE - 1));
530 530
531 if (buffer_size - buffer_pos > PAGE_CACHE_SIZE) 531 if (buffer_size - buffer_pos > PAGE_SIZE)
532 chunk = PAGE_CACHE_SIZE; 532 chunk = PAGE_SIZE;
533 else 533 else
534 chunk = buffer_size - buffer_pos; 534 chunk = buffer_size - buffer_pos;
535 535
@@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
546 struct reiserfs_xattr_header *rxh; 546 struct reiserfs_xattr_header *rxh;
547 547
548 skip = file_pos = sizeof(struct reiserfs_xattr_header); 548 skip = file_pos = sizeof(struct reiserfs_xattr_header);
549 if (chunk + skip > PAGE_CACHE_SIZE) 549 if (chunk + skip > PAGE_SIZE)
550 chunk = PAGE_CACHE_SIZE - skip; 550 chunk = PAGE_SIZE - skip;
551 rxh = (struct reiserfs_xattr_header *)data; 551 rxh = (struct reiserfs_xattr_header *)data;
552 rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC); 552 rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
553 rxh->h_hash = cpu_to_le32(xahash); 553 rxh->h_hash = cpu_to_le32(xahash);
@@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
675 char *data; 675 char *data;
676 size_t skip = 0; 676 size_t skip = 0;
677 677
678 if (isize - file_pos > PAGE_CACHE_SIZE) 678 if (isize - file_pos > PAGE_SIZE)
679 chunk = PAGE_CACHE_SIZE; 679 chunk = PAGE_SIZE;
680 else 680 else
681 chunk = isize - file_pos; 681 chunk = isize - file_pos;
682 682
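
The reiserfs xattr hunks above chunk a buffer into page-sized pieces, with the first page reserving room for the xattr header. A simplified model of that chunking; the 8-byte header size is illustrative, not the real sizeof(struct reiserfs_xattr_header):

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed page size */
#define XATTR_HEADER_SIZE 8UL   /* stands in for the rxh header */

int main(void)
{
        unsigned long buffer_size = 10000, buffer_pos = 0, file_pos = 0;

        while (buffer_pos < buffer_size || buffer_pos == 0) {
                unsigned long skip = 0;
                unsigned long chunk = buffer_size - buffer_pos;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;

                if (file_pos == 0) {
                        /* first page: leave room for the header */
                        skip = file_pos = XATTR_HEADER_SIZE;
                        if (chunk + skip > PAGE_SIZE)
                                chunk = PAGE_SIZE - skip;
                }
                printf("write %lu bytes at file offset %lu\n",
                       chunk, file_pos);
                file_pos += chunk;
                buffer_pos += chunk;
        }
        return 0;
}
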
diff --git a/fs/splice.c b/fs/splice.c
index 9947b5c69664..b018eb485019 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -88,7 +88,7 @@ out_unlock:
88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, 88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
89 struct pipe_buffer *buf) 89 struct pipe_buffer *buf)
90{ 90{
91 page_cache_release(buf->page); 91 put_page(buf->page);
92 buf->flags &= ~PIPE_BUF_FLAG_LRU; 92 buf->flags &= ~PIPE_BUF_FLAG_LRU;
93} 93}
94 94
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
268 268
269void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) 269void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
270{ 270{
271 page_cache_release(spd->pages[i]); 271 put_page(spd->pages[i]);
272} 272}
273 273
274/* 274/*
@@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
328 if (splice_grow_spd(pipe, &spd)) 328 if (splice_grow_spd(pipe, &spd))
329 return -ENOMEM; 329 return -ENOMEM;
330 330
331 index = *ppos >> PAGE_CACHE_SHIFT; 331 index = *ppos >> PAGE_SHIFT;
332 loff = *ppos & ~PAGE_CACHE_MASK; 332 loff = *ppos & ~PAGE_MASK;
333 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 333 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
334 nr_pages = min(req_pages, spd.nr_pages_max); 334 nr_pages = min(req_pages, spd.nr_pages_max);
335 335
336 /* 336 /*
@@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
365 error = add_to_page_cache_lru(page, mapping, index, 365 error = add_to_page_cache_lru(page, mapping, index,
366 mapping_gfp_constraint(mapping, GFP_KERNEL)); 366 mapping_gfp_constraint(mapping, GFP_KERNEL));
367 if (unlikely(error)) { 367 if (unlikely(error)) {
368 page_cache_release(page); 368 put_page(page);
369 if (error == -EEXIST) 369 if (error == -EEXIST)
370 continue; 370 continue;
371 break; 371 break;
@@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
385 * Now loop over the map and see if we need to start IO on any 385 * Now loop over the map and see if we need to start IO on any
386 * pages, fill in the partial map, etc. 386 * pages, fill in the partial map, etc.
387 */ 387 */
388 index = *ppos >> PAGE_CACHE_SHIFT; 388 index = *ppos >> PAGE_SHIFT;
389 nr_pages = spd.nr_pages; 389 nr_pages = spd.nr_pages;
390 spd.nr_pages = 0; 390 spd.nr_pages = 0;
391 for (page_nr = 0; page_nr < nr_pages; page_nr++) { 391 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
@@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
397 /* 397 /*
398 * this_len is the max we'll use from this page 398 * this_len is the max we'll use from this page
399 */ 399 */
400 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 400 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
401 page = spd.pages[page_nr]; 401 page = spd.pages[page_nr];
402 402
403 if (PageReadahead(page)) 403 if (PageReadahead(page))
@@ -426,7 +426,7 @@ retry_lookup:
426 error = -ENOMEM; 426 error = -ENOMEM;
427 break; 427 break;
428 } 428 }
429 page_cache_release(spd.pages[page_nr]); 429 put_page(spd.pages[page_nr]);
430 spd.pages[page_nr] = page; 430 spd.pages[page_nr] = page;
431 } 431 }
432 /* 432 /*
@@ -456,7 +456,7 @@ fill_it:
456 * i_size must be checked after PageUptodate. 456 * i_size must be checked after PageUptodate.
457 */ 457 */
458 isize = i_size_read(mapping->host); 458 isize = i_size_read(mapping->host);
459 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 459 end_index = (isize - 1) >> PAGE_SHIFT;
460 if (unlikely(!isize || index > end_index)) 460 if (unlikely(!isize || index > end_index))
461 break; 461 break;
462 462
@@ -470,7 +470,7 @@ fill_it:
470 /* 470 /*
471 * max good bytes in this page 471 * max good bytes in this page
472 */ 472 */
473 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 473 plen = ((isize - 1) & ~PAGE_MASK) + 1;
474 if (plen <= loff) 474 if (plen <= loff)
475 break; 475 break;
476 476
@@ -494,8 +494,8 @@ fill_it:
494 * we got, 'nr_pages' is how many pages are in the map. 494 * we got, 'nr_pages' is how many pages are in the map.
495 */ 495 */
496 while (page_nr < nr_pages) 496 while (page_nr < nr_pages)
497 page_cache_release(spd.pages[page_nr++]); 497 put_page(spd.pages[page_nr++]);
498 in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 498 in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
499 499
500 if (spd.nr_pages) 500 if (spd.nr_pages)
501 error = splice_to_pipe(pipe, &spd); 501 error = splice_to_pipe(pipe, &spd);
@@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
636 goto shrink_ret; 636 goto shrink_ret;
637 } 637 }
638 638
639 offset = *ppos & ~PAGE_CACHE_MASK; 639 offset = *ppos & ~PAGE_MASK;
640 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 640 nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
641 641
642 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { 642 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
643 struct page *page; 643 struct page *page;
@@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
647 if (!page) 647 if (!page)
648 goto err; 648 goto err;
649 649
650 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); 650 this_len = min_t(size_t, len, PAGE_SIZE - offset);
651 vec[i].iov_base = (void __user *) page_address(page); 651 vec[i].iov_base = (void __user *) page_address(page);
652 vec[i].iov_len = this_len; 652 vec[i].iov_len = this_len;
653 spd.pages[i] = page; 653 spd.pages[i] = page;
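
Both splice read paths above start by converting a file position and length into a page window. A standalone model of that computation, assuming PAGE_SHIFT is 12:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long long ppos = 10000;   /* file position */
        unsigned long len = 20000;         /* bytes requested */

        unsigned long index = ppos >> PAGE_SHIFT;
        unsigned long loff = ppos & ~PAGE_MASK;  /* offset in page */
        unsigned long req_pages =
                (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("index=%lu loff=%lu pages=%lu\n", index, loff, req_pages);
        return 0;
}
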
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9236d0..2c2618410d51 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
181 in = min(bytes, msblk->devblksize - offset); 181 in = min(bytes, msblk->devblksize - offset);
182 bytes -= in; 182 bytes -= in;
183 while (in) { 183 while (in) {
184 if (pg_offset == PAGE_CACHE_SIZE) { 184 if (pg_offset == PAGE_SIZE) {
185 data = squashfs_next_page(output); 185 data = squashfs_next_page(output);
186 pg_offset = 0; 186 pg_offset = 0;
187 } 187 }
188 avail = min_t(int, in, PAGE_CACHE_SIZE - 188 avail = min_t(int, in, PAGE_SIZE -
189 pg_offset); 189 pg_offset);
190 memcpy(data + pg_offset, bh[k]->b_data + offset, 190 memcpy(data + pg_offset, bh[k]->b_data + offset,
191 avail); 191 avail);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0b2168..23813c078cc9 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
30 * access the metadata and fragment caches. 30 * access the metadata and fragment caches.
31 * 31 *
32 * To avoid out of memory and fragmentation issues with vmalloc the cache 32 * To avoid out of memory and fragmentation issues with vmalloc the cache
33 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. 33 * uses sequences of kmalloced PAGE_SIZE buffers.
34 * 34 *
35 * It should be noted that the cache is not used for file datablocks, these 35 * It should be noted that the cache is not used for file datablocks, these
36 * are decompressed and cached in the page-cache in the normal way. The 36 * are decompressed and cached in the page-cache in the normal way. The
@@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
231/* 231/*
232 * Initialise cache allocating the specified number of entries, each of 232 * Initialise cache allocating the specified number of entries, each of
233 * size block_size. To avoid vmalloc fragmentation issues each entry 233 * size block_size. To avoid vmalloc fragmentation issues each entry
234 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. 234 * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
235 */ 235 */
236struct squashfs_cache *squashfs_cache_init(char *name, int entries, 236struct squashfs_cache *squashfs_cache_init(char *name, int entries,
237 int block_size) 237 int block_size)
@@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
255 cache->unused = entries; 255 cache->unused = entries;
256 cache->entries = entries; 256 cache->entries = entries;
257 cache->block_size = block_size; 257 cache->block_size = block_size;
258 cache->pages = block_size >> PAGE_CACHE_SHIFT; 258 cache->pages = block_size >> PAGE_SHIFT;
259 cache->pages = cache->pages ? cache->pages : 1; 259 cache->pages = cache->pages ? cache->pages : 1;
260 cache->name = name; 260 cache->name = name;
261 cache->num_waiters = 0; 261 cache->num_waiters = 0;
@@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
275 } 275 }
276 276
277 for (j = 0; j < cache->pages; j++) { 277 for (j = 0; j < cache->pages; j++) {
278 entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 278 entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
279 if (entry->data[j] == NULL) { 279 if (entry->data[j] == NULL) {
280 ERROR("Failed to allocate %s buffer\n", name); 280 ERROR("Failed to allocate %s buffer\n", name);
281 goto cleanup; 281 goto cleanup;
@@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
314 return min(length, entry->length - offset); 314 return min(length, entry->length - offset);
315 315
316 while (offset < entry->length) { 316 while (offset < entry->length) {
317 void *buff = entry->data[offset / PAGE_CACHE_SIZE] 317 void *buff = entry->data[offset / PAGE_SIZE]
318 + (offset % PAGE_CACHE_SIZE); 318 + (offset % PAGE_SIZE);
319 int bytes = min_t(int, entry->length - offset, 319 int bytes = min_t(int, entry->length - offset,
320 PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); 320 PAGE_SIZE - (offset % PAGE_SIZE));
321 321
322 if (bytes >= remaining) { 322 if (bytes >= remaining) {
323 memcpy(buffer, buff, remaining); 323 memcpy(buffer, buff, remaining);
@@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
415 */ 415 */
416void *squashfs_read_table(struct super_block *sb, u64 block, int length) 416void *squashfs_read_table(struct super_block *sb, u64 block, int length)
417{ 417{
418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 418 int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
419 int i, res; 419 int i, res;
420 void *table, *buffer, **data; 420 void *table, *buffer, **data;
421 struct squashfs_page_actor *actor; 421 struct squashfs_page_actor *actor;
@@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
436 goto failed2; 436 goto failed2;
437 } 437 }
438 438
439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) 439 for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
440 data[i] = buffer; 440 data[i] = buffer;
441 441
442 res = squashfs_read_data(sb, block, length | 442 res = squashfs_read_data(sb, block, length |
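
As the comments in the hunks above say, each squashfs cache entry's data is a sequence of kmalloc'd page-sized buffers, so squashfs_copy_data() splits a byte offset into a buffer index and an offset within that buffer. A self-contained userspace model of that indexing, with PAGE_SIZE assumed 4096:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        size_t length = 10000, offset = 5000, remaining = 3000;
        size_t npages = (length + PAGE_SIZE - 1) / PAGE_SIZE;
        char **data = malloc(npages * sizeof(*data));
        char *out = malloc(remaining), *dst = out;
        size_t i;

        for (i = 0; i < npages; i++) {
                data[i] = malloc(PAGE_SIZE);
                memset(data[i], 'a' + (int)i, PAGE_SIZE);
        }

        while (offset < length && remaining) {
                /* split offset: which buffer, and where inside it */
                char *buff = data[offset / PAGE_SIZE]
                        + (offset % PAGE_SIZE);
                size_t bytes = PAGE_SIZE - (offset % PAGE_SIZE);

                if (bytes > length - offset)
                        bytes = length - offset;
                if (bytes > remaining)
                        bytes = remaining;
                memcpy(dst, buff, bytes);
                dst += bytes;
                remaining -= bytes;
                offset += bytes;
        }
        printf("copied %zu bytes\n", (size_t)(dst - out));

        for (i = 0; i < npages; i++)
                free(data[i]);
        free(data);
        free(out);
        return 0;
}
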
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e9034bf6e5ae..d2bc13636f79 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags)
102 * Read decompressor specific options from file system if present 102 * Read decompressor specific options from file system if present
103 */ 103 */
104 if (SQUASHFS_COMP_OPTS(flags)) { 104 if (SQUASHFS_COMP_OPTS(flags)) {
105 buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 105 buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
106 if (buffer == NULL) { 106 if (buffer == NULL) {
107 comp_opts = ERR_PTR(-ENOMEM); 107 comp_opts = ERR_PTR(-ENOMEM);
108 goto out; 108 goto out;
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689062ba..13d80947bf9e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n,
175{ 175{
176 int err, i; 176 int err, i;
177 long long block = 0; 177 long long block = 0;
178 __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 178 __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
179 179
180 if (blist == NULL) { 180 if (blist == NULL) {
181 ERROR("read_indexes: Failed to allocate block_list\n"); 181 ERROR("read_indexes: Failed to allocate block_list\n");
@@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n,
183 } 183 }
184 184
185 while (n) { 185 while (n) {
186 int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); 186 int blocks = min_t(int, n, PAGE_SIZE >> 2);
187 187
188 err = squashfs_read_metadata(sb, blist, start_block, 188 err = squashfs_read_metadata(sb, blist, start_block,
189 offset, blocks << 2); 189 offset, blocks << 2);
@@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
377 struct inode *inode = page->mapping->host; 377 struct inode *inode = page->mapping->host;
378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
379 void *pageaddr; 379 void *pageaddr;
380 int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 380 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
381 int start_index = page->index & ~mask, end_index = start_index | mask; 381 int start_index = page->index & ~mask, end_index = start_index | mask;
382 382
383 /* 383 /*
384 * Loop copying datablock into pages. As the datablock likely covers 384 * Loop copying datablock into pages. As the datablock likely covers
385 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly 385 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
386 * grab the pages from the page cache, except for the page that we've 386 * grab the pages from the page cache, except for the page that we've
387 * been called to fill. 387 * been called to fill.
388 */ 388 */
389 for (i = start_index; i <= end_index && bytes > 0; i++, 389 for (i = start_index; i <= end_index && bytes > 0; i++,
390 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 390 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
391 struct page *push_page; 391 struct page *push_page;
392 int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; 392 int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
393 393
394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); 394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
395 395
@@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
404 404
405 pageaddr = kmap_atomic(push_page); 405 pageaddr = kmap_atomic(push_page);
406 squashfs_copy_data(pageaddr, buffer, offset, avail); 406 squashfs_copy_data(pageaddr, buffer, offset, avail);
407 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 407 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
408 kunmap_atomic(pageaddr); 408 kunmap_atomic(pageaddr);
409 flush_dcache_page(push_page); 409 flush_dcache_page(push_page);
410 SetPageUptodate(push_page); 410 SetPageUptodate(push_page);
411skip_page: 411skip_page:
412 unlock_page(push_page); 412 unlock_page(push_page);
413 if (i != page->index) 413 if (i != page->index)
414 page_cache_release(push_page); 414 put_page(push_page);
415 } 415 }
416} 416}
417 417
@@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
454{ 454{
455 struct inode *inode = page->mapping->host; 455 struct inode *inode = page->mapping->host;
456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
457 int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); 457 int index = page->index >> (msblk->block_log - PAGE_SHIFT);
458 int file_end = i_size_read(inode) >> msblk->block_log; 458 int file_end = i_size_read(inode) >> msblk->block_log;
459 int res; 459 int res;
460 void *pageaddr; 460 void *pageaddr;
@@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page)
462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", 462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
463 page->index, squashfs_i(inode)->start); 463 page->index, squashfs_i(inode)->start);
464 464
465 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 465 if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
466 PAGE_CACHE_SHIFT)) 466 PAGE_SHIFT))
467 goto out; 467 goto out;
468 468
469 if (index < file_end || squashfs_i(inode)->fragment_block == 469 if (index < file_end || squashfs_i(inode)->fragment_block ==
@@ -487,7 +487,7 @@ error_out:
487 SetPageError(page); 487 SetPageError(page);
488out: 488out:
489 pageaddr = kmap_atomic(page); 489 pageaddr = kmap_atomic(page);
490 memset(pageaddr, 0, PAGE_CACHE_SIZE); 490 memset(pageaddr, 0, PAGE_SIZE);
491 kunmap_atomic(pageaddr); 491 kunmap_atomic(pageaddr);
492 flush_dcache_page(page); 492 flush_dcache_page(page);
493 if (!PageError(page)) 493 if (!PageError(page))
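
squashfs_copy_cache() and squashfs_readpage() above map between datablocks and page indexes with shift/mask arithmetic: a 128 KiB block covers 32 pages of 4 KiB, and the mask rounds a page index down to the first page of its block. A standalone model using those assumed sizes:

#include <stdio.h>

#define PAGE_SHIFT 12           /* assumed 4 KiB pages */
#define BLOCK_LOG  17           /* squashfs default 128 KiB blocks */

int main(void)
{
        unsigned long page_index = 45;
        int mask = (1 << (BLOCK_LOG - PAGE_SHIFT)) - 1;    /* 31 */
        unsigned long start = page_index & ~mask;          /* 32 */
        unsigned long end = start | mask;                  /* 63 */
        unsigned long block = page_index >> (BLOCK_LOG - PAGE_SHIFT);

        printf("block %lu spans pages [%lu, %lu]\n", block, start, end);
        return 0;
}
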
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 43e7a7eddac0..cb485d8e0e91 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
30 struct inode *inode = target_page->mapping->host; 30 struct inode *inode = target_page->mapping->host;
31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
32 32
33 int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 33 int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
34 int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 34 int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
35 int start_index = target_page->index & ~mask; 35 int start_index = target_page->index & ~mask;
36 int end_index = start_index | mask; 36 int end_index = start_index | mask;
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM; 37 int i, n, pages, missing_pages, bytes, res = -ENOMEM;
@@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
68 68
69 if (PageUptodate(page[i])) { 69 if (PageUptodate(page[i])) {
70 unlock_page(page[i]); 70 unlock_page(page[i]);
71 page_cache_release(page[i]); 71 put_page(page[i]);
72 page[i] = NULL; 72 page[i] = NULL;
73 missing_pages++; 73 missing_pages++;
74 } 74 }
@@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
96 goto mark_errored; 96 goto mark_errored;
97 97
98 /* Last page may have trailing bytes not filled */ 98 /* Last page may have trailing bytes not filled */
99 bytes = res % PAGE_CACHE_SIZE; 99 bytes = res % PAGE_SIZE;
100 if (bytes) { 100 if (bytes) {
101 pageaddr = kmap_atomic(page[pages - 1]); 101 pageaddr = kmap_atomic(page[pages - 1]);
102 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); 102 memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
103 kunmap_atomic(pageaddr); 103 kunmap_atomic(pageaddr);
104 } 104 }
105 105
@@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
109 SetPageUptodate(page[i]); 109 SetPageUptodate(page[i]);
110 unlock_page(page[i]); 110 unlock_page(page[i]);
111 if (page[i] != target_page) 111 if (page[i] != target_page)
112 page_cache_release(page[i]); 112 put_page(page[i]);
113 } 113 }
114 114
115 kfree(actor); 115 kfree(actor);
@@ -127,7 +127,7 @@ mark_errored:
127 flush_dcache_page(page[i]); 127 flush_dcache_page(page[i]);
128 SetPageError(page[i]); 128 SetPageError(page[i]);
129 unlock_page(page[i]); 129 unlock_page(page[i]);
130 page_cache_release(page[i]); 130 put_page(page[i]);
131 } 131 }
132 132
133out: 133out:
@@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
153 } 153 }
154 154
155 for (n = 0; n < pages && bytes > 0; n++, 155 for (n = 0; n < pages && bytes > 0; n++,
156 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 156 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
157 int avail = min_t(int, bytes, PAGE_CACHE_SIZE); 157 int avail = min_t(int, bytes, PAGE_SIZE);
158 158
159 if (page[n] == NULL) 159 if (page[n] == NULL)
160 continue; 160 continue;
161 161
162 pageaddr = kmap_atomic(page[n]); 162 pageaddr = kmap_atomic(page[n]);
163 squashfs_copy_data(pageaddr, buffer, offset, avail); 163 squashfs_copy_data(pageaddr, buffer, offset, avail);
164 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 164 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
165 kunmap_atomic(pageaddr); 165 kunmap_atomic(pageaddr);
166 flush_dcache_page(page[n]); 166 flush_dcache_page(page[n]);
167 SetPageUptodate(page[n]); 167 SetPageUptodate(page[n]);
168 unlock_page(page[n]); 168 unlock_page(page[n]);
169 if (page[n] != target_page) 169 if (page[n] != target_page)
170 page_cache_release(page[n]); 170 put_page(page[n]);
171 } 171 }
172 172
173out: 173out:
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index c31e2bc9c081..ff4468bd18b0 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
117 data = squashfs_first_page(output); 117 data = squashfs_first_page(output);
118 buff = stream->output; 118 buff = stream->output;
119 while (data) { 119 while (data) {
120 if (bytes <= PAGE_CACHE_SIZE) { 120 if (bytes <= PAGE_SIZE) {
121 memcpy(data, buff, bytes); 121 memcpy(data, buff, bytes);
122 break; 122 break;
123 } 123 }
124 memcpy(data, buff, PAGE_CACHE_SIZE); 124 memcpy(data, buff, PAGE_SIZE);
125 buff += PAGE_CACHE_SIZE; 125 buff += PAGE_SIZE;
126 bytes -= PAGE_CACHE_SIZE; 126 bytes -= PAGE_SIZE;
127 data = squashfs_next_page(output); 127 data = squashfs_next_page(output);
128 } 128 }
129 squashfs_finish_page(output); 129 squashfs_finish_page(output);
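
The lz4 hunk above (and the near-identical lzo one below) copy a contiguous decompressed buffer into the page actor's pages one page at a time, with a short copy on the last page. A self-contained model of that loop; the dst array stands in for the pages the actor hands out, and PAGE_SIZE is assumed 4096:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL        /* assumed page size */

int main(void)
{
        static char src[10000], dst[3][PAGE_SIZE];
        size_t bytes = sizeof(src), page = 0;
        char *buff = src;

        memset(src, 'x', sizeof(src));
        for (;;) {
                char *data = dst[page];  /* squashfs_next_page() */

                if (bytes <= PAGE_SIZE) {
                        memcpy(data, buff, bytes);  /* short tail */
                        break;
                }
                memcpy(data, buff, PAGE_SIZE);
                buff += PAGE_SIZE;
                bytes -= PAGE_SIZE;
                page++;
        }
        printf("filled %zu pages\n", page + 1);
        return 0;
}
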
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 244b9fbfff7b..934c17e96590 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
102 data = squashfs_first_page(output); 102 data = squashfs_first_page(output);
103 buff = stream->output; 103 buff = stream->output;
104 while (data) { 104 while (data) {
105 if (bytes <= PAGE_CACHE_SIZE) { 105 if (bytes <= PAGE_SIZE) {
106 memcpy(data, buff, bytes); 106 memcpy(data, buff, bytes);
107 break; 107 break;
108 } else { 108 } else {
109 memcpy(data, buff, PAGE_CACHE_SIZE); 109 memcpy(data, buff, PAGE_SIZE);
110 buff += PAGE_CACHE_SIZE; 110 buff += PAGE_SIZE;
111 bytes -= PAGE_CACHE_SIZE; 111 bytes -= PAGE_SIZE;
112 data = squashfs_next_page(output); 112 data = squashfs_next_page(output);
113 } 113 }
114 } 114 }
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 5a1c11f56441..9b7b1b6a7892 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
48 if (actor == NULL) 48 if (actor == NULL)
49 return NULL; 49 return NULL;
50 50
51 actor->length = length ? : pages * PAGE_CACHE_SIZE; 51 actor->length = length ? : pages * PAGE_SIZE;
52 actor->buffer = buffer; 52 actor->buffer = buffer;
53 actor->pages = pages; 53 actor->pages = pages;
54 actor->next_page = 0; 54 actor->next_page = 0;
@@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
88 if (actor == NULL) 88 if (actor == NULL)
89 return NULL; 89 return NULL;
90 90
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; 91 actor->length = length ? : pages * PAGE_SIZE;
92 actor->page = page; 92 actor->page = page;
93 actor->pages = pages; 93 actor->pages = pages;
94 actor->next_page = 0; 94 actor->next_page = 0;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 26dd82008b82..98537eab27e2 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
24 if (actor == NULL) 24 if (actor == NULL)
25 return NULL; 25 return NULL;
26 26
27 actor->length = length ? : pages * PAGE_CACHE_SIZE; 27 actor->length = length ? : pages * PAGE_SIZE;
28 actor->page = page; 28 actor->page = page;
29 actor->pages = pages; 29 actor->pages = pages;
30 actor->next_page = 0; 30 actor->next_page = 0;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5e79bfa4f260..cf01e15a7b16 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	 * Check the system page size is not larger than the filesystem
 	 * block size (by default 128K). This is currently not supported.
 	 */
-	if (PAGE_CACHE_SIZE > msblk->block_size) {
+	if (PAGE_SIZE > msblk->block_size) {
 		ERROR("Page size > filesystem block size (%d). This is "
 			"currently not supported!\n", msblk->block_size);
 		goto failed_mount;
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index dbcc2f54bad4..d688ef42a6a1 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
 	struct inode *inode = page->mapping->host;
 	struct super_block *sb = inode->i_sb;
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int index = page->index << PAGE_CACHE_SHIFT;
+	int index = page->index << PAGE_SHIFT;
 	u64 block = squashfs_i(inode)->start;
 	int offset = squashfs_i(inode)->offset;
-	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
+	int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
 	int bytes, copied;
 	void *pageaddr;
 	struct squashfs_cache_entry *entry;
@@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
 	copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
 				length - bytes);
 	if (copied == length - bytes)
-		memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
+		memset(pageaddr + length, 0, PAGE_SIZE - length);
 	else
 		block = entry->next_index;
 	kunmap_atomic(pageaddr);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c609624e4b8a..6bfaef73d065 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
 	stream->buf.in_pos = 0;
 	stream->buf.in_size = 0;
 	stream->buf.out_pos = 0;
-	stream->buf.out_size = PAGE_CACHE_SIZE;
+	stream->buf.out_size = PAGE_SIZE;
 	stream->buf.out = squashfs_first_page(output);
 
 	do {
@@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
 			stream->buf.out = squashfs_next_page(output);
 			if (stream->buf.out != NULL) {
 				stream->buf.out_pos = 0;
-				total += PAGE_CACHE_SIZE;
+				total += PAGE_SIZE;
 			}
 		}
 
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 8727caba6882..2ec24d128bce 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
 	int zlib_err, zlib_init = 0, k = 0;
 	z_stream *stream = strm;
 
-	stream->avail_out = PAGE_CACHE_SIZE;
+	stream->avail_out = PAGE_SIZE;
 	stream->next_out = squashfs_first_page(output);
 	stream->avail_in = 0;
 
@@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
 		if (stream->avail_out == 0) {
 			stream->next_out = squashfs_next_page(output);
 			if (stream->next_out != NULL)
-				stream->avail_out = PAGE_CACHE_SIZE;
+				stream->avail_out = PAGE_SIZE;
 		}
 
 		if (!zlib_init) {
diff --git a/fs/sync.c b/fs/sync.c
index dd5d1711c7ac..2a54c1f22035 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
 		goto out;
 
 	if (sizeof(pgoff_t) == 4) {
-		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+		if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
 			/*
 			 * The range starts outside a 32 bit machine's
 			 * pagecache addressing capabilities. Let it "succeed"
@@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
 			ret = 0;
 			goto out;
 		}
-		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+		if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
 			/*
 			 * Out to EOF
 			 */
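The two guards above exist because with a 4-byte pgoff_t the page cache can index at most 2^32 pages; any byte offset at or past 0x100000000ULL << PAGE_SHIFT is unreachable, so the syscall reports success without doing any work. With the usual 4 KiB page that boundary is 2^32 * 4 KiB = 16 TiB. A standalone restatement of the check (an illustration; the PAGE_SHIFT value is an assumption):

	#include <stdint.h>
	#include <stdbool.h>

	#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

	/* True if 'offset' lies beyond what a 32-bit page index can address. */
	static bool beyond_32bit_pagecache(uint64_t offset)
	{
		return offset >= (0x100000000ULL << PAGE_SHIFT);	/* 16 TiB */
	}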
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 63c1bcb224ee..c0f0a3e643eb 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = {
 static inline void dir_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
@@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
 	if (pos >= inode->i_size)
 		return 0;
 
-	offset = pos & ~PAGE_CACHE_MASK;
-	n = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_MASK;
+	n = pos >> PAGE_SHIFT;
 
 	for ( ; n < npages; n++, offset = 0) {
 		char *kaddr, *limit;
@@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
 			continue;
 		kaddr = (char *)page_address(page);
 		de = (struct sysv_dir_entry *)(kaddr+offset);
-		limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+		limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
 		for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
 			char *name = de->name;
 
@@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
 		if (!IS_ERR(page)) {
 			kaddr = (char*)page_address(page);
 			de = (struct sysv_dir_entry *) kaddr;
-			kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+			kaddr += PAGE_SIZE - SYSV_DIRSIZE;
 			for ( ; (char *) de <= kaddr ; de++) {
 				if (!de->inode)
 					continue;
@@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
 			goto out;
 		kaddr = (char*)page_address(page);
 		de = (struct sysv_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+		kaddr += PAGE_SIZE - SYSV_DIRSIZE;
 		while ((char *)de <= kaddr) {
 			if (!de->inode)
 				goto got_it;
@@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
 	kmap(page);
 
 	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
+	memset(base, 0, PAGE_SIZE);
 
 	de = (struct sysv_dir_entry *) base;
 	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
@@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
 	kunmap(page);
 	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode)
 
 	kaddr = (char *)page_address(page);
 	de = (struct sysv_dir_entry *)kaddr;
-	kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE;
+	kaddr += PAGE_SIZE-SYSV_DIRSIZE;
 
 	for ( ;(char *)de <= kaddr; de++) {
 		if (!de->inode)
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e83ed0b4bf..90b60c03b588 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 065c88f8e4b8..446753d8ac34 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -121,7 +121,7 @@ static int do_readpage(struct page *page)
 	if (block >= beyond) {
 		/* Reading beyond inode */
 		SetPageChecked(page);
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		memset(addr, 0, PAGE_SIZE);
 		goto out;
 	}
 
@@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct ubifs_budget_req req = { .new_page = 1 };
 	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
 	struct page *page;
@@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping,
 	}
 
 	if (!PageUptodate(page)) {
-		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
+		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
 			SetPageChecked(page);
 		else {
 			err = do_readpage(page);
 			if (err) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				ubifs_release_budget(c, &req);
 				return err;
 			}
@@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
 	int skipped_read = 0;
 	struct page *page;
@@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 
 	if (!PageUptodate(page)) {
 		/* The page is not loaded from the flash */
-		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
+		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
 			/*
 			 * We change whole page so no need to load it. But we
 			 * do not know whether this page exists on the media or
@@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 			err = do_readpage(page);
 			if (err) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				return err;
 			}
 		}
@@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 			mutex_unlock(&ui->ui_mutex);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		return write_begin_slow(mapping, pos, len, pagep, flags);
 	}
@@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
 		inode->i_ino, pos, page->index, len, copied, inode->i_size);
 
-	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
+	if (unlikely(copied < len && len == PAGE_SIZE)) {
 		/*
 		 * VFS copied less data to the page that it intended and
 		 * declared in its '->write_begin()' call via the @len
 		 * argument. If the page was not up-to-date, and @len was
-		 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
+		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
 		 * not load it from the media (for optimization reasons). This
 		 * means that part of the page contains garbage. So read the
 		 * page now.
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return copied;
 }
 
@@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 	addr = zaddr = kmap(page);
 
-	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size - 1) >> PAGE_SHIFT;
 	if (!i_size || page->index > end_index) {
 		hole = 1;
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		memset(addr, 0, PAGE_SIZE);
 		goto out_hole;
 	}
 
@@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 	}
 
 	if (end_index == page->index) {
-		int len = i_size & (PAGE_CACHE_SIZE - 1);
+		int len = i_size & (PAGE_SIZE - 1);
 
 		if (len && len < read)
 			memset(zaddr + len, 0, read - len);
@@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 	isize = i_size_read(inode);
 	if (isize == 0)
 		goto out_free;
-	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+	end_index = ((isize - 1) >> PAGE_SHIFT);
 
 	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
 		pgoff_t page_offset = offset + page_idx;
@@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 		if (!PageUptodate(page))
 			err = populate_page(c, page, bu, &n);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (err)
 			break;
 	}
@@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len)
 #ifdef UBIFS_DEBUG
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	spin_lock(&ui->ui_lock);
-	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
+	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
 	spin_unlock(&ui->ui_lock);
 #endif
 
@@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode = page->mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	loff_t i_size = i_size_read(inode), synced_i_size;
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
-	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
+	int err, len = i_size & (PAGE_SIZE - 1);
 	void *kaddr;
 
 	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
@@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 
 	/* Is the page fully inside @i_size? */
 	if (page->index < end_index) {
-		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
+		if (page->index >= synced_i_size >> PAGE_SHIFT) {
 			err = inode->i_sb->s_op->write_inode(inode, NULL);
 			if (err)
 				goto out_unlock;
@@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 			 * with this.
 			 */
 		}
-		return do_writepage(page, PAGE_CACHE_SIZE);
+		return do_writepage(page, PAGE_SIZE);
 	}
 
 	/*
@@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	 * writes to that region are not written out to the file."
 	 */
 	kaddr = kmap_atomic(page);
-	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
+	memset(kaddr + len, 0, PAGE_SIZE - len);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
 
@@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 	truncate_setsize(inode, new_size);
 
 	if (offset) {
-		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
+		pgoff_t index = new_size >> PAGE_SHIFT;
 		struct page *page;
 
 		page = find_lock_page(inode->i_mapping, index);
@@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 			clear_page_dirty_for_io(page);
 			if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
 				offset = new_size &
-					 (PAGE_CACHE_SIZE - 1);
+					 (PAGE_SIZE - 1);
 			err = do_writepage(page, offset);
-			page_cache_release(page);
+			put_page(page);
 			if (err)
 				goto out_budg;
 			/*
@@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 			 * having to read it.
 			 */
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 }
@@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 	ubifs_assert(PagePrivate(page));
-	if (offset || length < PAGE_CACHE_SIZE)
+	if (offset || length < PAGE_SIZE)
 		/* Partial page remains dirty */
 		return;
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index a233ba913be4..e98c24ee25a1 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2237,12 +2237,12 @@ static int __init ubifs_init(void)
 	BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
 
 	/*
-	 * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+	 * We require that PAGE_SIZE is greater-than-or-equal-to
 	 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
 	 */
-	if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
+	if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
 		pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
-		       current->pid, (unsigned int)PAGE_CACHE_SIZE);
+		       current->pid, (unsigned int)PAGE_SIZE);
 		return -EINVAL;
 	}
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c2a57e193a81..4cd7e569cd00 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -46,8 +46,8 @@
 #define UBIFS_SUPER_MAGIC	0x24051905
 
 /* Number of UBIFS blocks per VFS page */
-#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE)
-#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT)
+#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
+#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)
 
 /* "File system end of life" sequence number watermark */
 #define SQNUM_WARN_WATERMARK	0xFFFFFFFF00000000ULL
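For intuition on the two macros just above: UBIFS addresses file data in UBIFS_BLOCK_SIZE units of 4096 bytes, so on the common 4 KiB-page configuration there is exactly one block per page and the shift is zero; only larger-page architectures see more. A quick computation (an illustration with assumed values, here a 64 KiB-page machine):

	#include <stdio.h>

	#define UBIFS_BLOCK_SHIFT 12	/* 4096-byte UBIFS blocks */
	#define PAGE_SHIFT 16		/* assumed: 64 KiB pages */

	int main(void)
	{
		printf("%u blocks per page\n",
		       1u << (PAGE_SHIFT - UBIFS_BLOCK_SHIFT));	/* prints 16 */
		return 0;
	}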
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1af98963d860..877ba1c9b461 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page)
 
 	kaddr = kmap(page);
 	memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
-	memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+	memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
 	flush_dcache_page(page);
 	SetPageUptodate(page);
 	kunmap(page);
@@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file,
 {
 	struct page *page;
 
-	if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
 		return -EIO;
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
-	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+	if (!PageUptodate(page) && len != PAGE_SIZE)
 		__udf_adinicb_readpage(page);
 	return 0;
 }
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 166d3ed32c39..2dc461eeb415 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode)
 	if (!PageUptodate(page)) {
 		kaddr = kmap(page);
 		memset(kaddr + iinfo->i_lenAlloc, 0x00,
-		       PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
+		       PAGE_SIZE - iinfo->i_lenAlloc);
 		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
 			iinfo->i_lenAlloc);
 		flush_dcache_page(page);
@@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode)
 		inode->i_data.a_ops = &udf_adinicb_aops;
 		up_write(&iinfo->i_data_sem);
 	}
-	page_cache_release(page);
+	put_page(page);
 	mark_inode_dirty(inode);
 
 	return err;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae601c24..0447b949c7f5 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 		   sector_t newb, struct page *locked_page)
 {
 	const unsigned blks_per_page =
-		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		1 << (PAGE_SHIFT - inode->i_blkbits);
 	const unsigned mask = blks_per_page - 1;
 	struct address_space * const mapping = inode->i_mapping;
 	pgoff_t index, cur_index, last_index;
@@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 
 	cur_index = locked_page->index;
 	end = count + beg;
-	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
 	for (i = beg; i < end; i = (i | mask) + 1) {
-		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		index = i >> (PAGE_SHIFT - inode->i_blkbits);
 
 		if (likely(cur_index != index)) {
 			page = ufs_get_locked_page(mapping, index);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 74f2e80288bf..0b1457292734 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
 static inline void ufs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page)
 	struct super_block *sb = dir->i_sb;
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
 	struct ufs_dir_entry *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & chunk_mask)
 			goto Ebadsize;
 		if (!limit)
@@ -170,7 +170,7 @@ Einumber:
 bad_entry:
 	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
 		   "offset=%lu, rec_len=%d, name_len=%d",
-		   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		   rec_len, ufs_get_de_namlen(sb, p));
 	goto fail;
 Eend:
@@ -178,7 +178,7 @@ Eend:
 	ufs_error(sb, __func__,
 		   "entry in directory #%lu spans the page boundary"
 		   "offset=%lu",
-		   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
+		   dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
 fail:
 	SetPageChecked(page);
 	SetPageError(page);
@@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -341,7 +341,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
 		kaddr = page_address(page);
 		dir_end = kaddr + ufs_last_byte(dir, n);
 		de = (struct ufs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
 	int need_revalidate = file->f_version != inode->i_version;
@@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
 			ufs_error(sb, __func__,
 				  "bad page in #%lu",
 				  inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return -EIO;
 		}
 		kaddr = page_address(page);
 		if (unlikely(need_revalidate)) {
 			if (offset) {
 				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 
 	kmap(page);
 	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
+	memset(base, 0, PAGE_SIZE);
 
 	de = (struct ufs_dir_entry *) base;
 
@@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 
 	err = ufs_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index d897e169ab9c..9f49431e798d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
 		lastfrag--;
 
 	lastpage = ufs_get_locked_page(mapping, lastfrag >>
-				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
+				       (PAGE_SHIFT - inode->i_blkbits));
 	if (IS_ERR(lastpage)) {
 		err = -EIO;
 		goto out;
 	}
 
-	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
 	bh = page_buffers(lastpage);
 	for (i = 0; i < end; ++i)
 		bh = bh->b_this_page;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index acf4a3b61b81..a1559f762805 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
 		else {
 			kunmap(dir_page);
-			page_cache_release(dir_page);
+			put_page(dir_page);
 		}
 		inode_dec_link_count(old_dir);
 	}
@@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index b6c2f94e041e..a409e3e7827a 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 		if (unlikely(page->mapping == NULL)) {
 			/* Truncate got there first */
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 			goto out;
 		}
 
 		if (!PageUptodate(page) || PageError(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "can not read page: ino %lu, index: %lu\n",
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 954175928240..b7fbf53dbc81 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping,
 static inline void ufs_put_locked_page(struct page *page)
 {
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 041b6948aecc..ce41d7fe753c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3742,11 +3742,11 @@ xfs_bmap_btalloc(
 		args.prod = align;
 		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
-	} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
+	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
 		args.prod = 1;
 		args.mod = 0;
 	} else {
-		args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
+		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
 		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
 	}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d445a64b979e..e49b2406d15d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -704,7 +704,7 @@ next_buffer:
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
 	return;
 }
 
@@ -925,9 +925,9 @@ xfs_do_writepage(
 	 * ---------------------------------^------------------|
 	 */
 	offset = i_size_read(inode);
-	end_index = offset >> PAGE_CACHE_SHIFT;
+	end_index = offset >> PAGE_SHIFT;
 	if (page->index < end_index)
-		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
+		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
 	else {
 		/*
 		 * Check whether the page to write out is beyond or straddles
@@ -940,7 +940,7 @@ xfs_do_writepage(
 		 * |				|      Straddles	|
 		 * ---------------------------------^-----------|--------|
 		 */
-		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
 
 		/*
 		 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -971,7 +971,7 @@ xfs_do_writepage(
 		 * memory is zeroed when mapped, and writes to that region are
 		 * not written out to the file."
 		 */
-		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
+		zero_user_segment(page, offset_into_page, PAGE_SIZE);
 
 		/* Adjust the end_offset to the end of file */
 		end_offset = offset;
@@ -1475,7 +1475,7 @@ xfs_vm_write_failed(
 	loff_t			block_offset;
 	loff_t			block_start;
 	loff_t			block_end;
-	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
+	loff_t			from = pos & (PAGE_SIZE - 1);
 	loff_t			to = from + len;
 	struct buffer_head	*bh, *head;
 	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
@@ -1491,7 +1491,7 @@ xfs_vm_write_failed(
 	 * start of the page by using shifts rather than masks the mismatch
 	 * problem.
 	 */
-	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+	block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
 
 	ASSERT(block_offset + from == pos);
 
@@ -1558,12 +1558,12 @@ xfs_vm_write_begin(
 	struct page		**pagep,
 	void			**fsdata)
 {
-	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t			index = pos >> PAGE_SHIFT;
 	struct page		*page;
 	int			status;
 	struct xfs_mount	*mp = XFS_I(mapping->host)->i_mount;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -1592,7 +1592,7 @@ xfs_vm_write_begin(
 		truncate_pagecache_range(inode, start, pos + len);
 	}
 
-	page_cache_release(page);
+	put_page(page);
 	page = NULL;
 }
 
@@ -1620,7 +1620,7 @@ xfs_vm_write_end(
 {
 	int			ret;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 	if (unlikely(ret < len)) {
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a32c1dcae2ff..3b6309865c65 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1237,7 +1237,7 @@ xfs_free_file_space(
 	/* wait for the completion of any pending DIOs */
 	inode_dio_wait(VFS_I(ip));
 
-	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
 	ioffset = round_down(offset, rounding);
 	iendoffset = round_up(offset + len, rounding) - 1;
 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
@@ -1466,7 +1466,7 @@ xfs_shift_file_space(
 	if (error)
 		return error;
 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-					offset >> PAGE_CACHE_SHIFT, -1);
+					offset >> PAGE_SHIFT, -1);
 	if (error)
 		return error;
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ac0fd32de31e..569938a4a357 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -106,8 +106,8 @@ xfs_iozero(
 	unsigned offset, bytes;
 	void *fsdata;
 
-	offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-	bytes = PAGE_CACHE_SIZE - offset;
+	offset = (pos & (PAGE_SIZE -1)); /* Within page */
+	bytes = PAGE_SIZE - offset;
 	if (bytes > count)
 		bytes = count;
 
@@ -799,8 +799,8 @@ xfs_file_dio_aio_write(
 	/* see generic_file_direct_write() for why this is necessary */
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_CACHE_SHIFT,
-					      end >> PAGE_CACHE_SHIFT);
+					      pos >> PAGE_SHIFT,
+					      end >> PAGE_SHIFT);
 	}
 
 	if (ret > 0) {
@@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff(
 
 	pagevec_init(&pvec, 0);
 
-	index = startoff >> PAGE_CACHE_SHIFT;
+	index = startoff >> PAGE_SHIFT;
 	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-	end = endoff >> PAGE_CACHE_SHIFT;
+	end = endoff >> PAGE_SHIFT;
 	do {
 		int		want;
 		unsigned	nr_pages;
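In the xfs_iozero() hunk above, offset = pos & (PAGE_SIZE - 1) is the position within the current page and bytes = PAGE_SIZE - offset is how much of that page remains. Worked through once (an illustration; 4 KiB pages assumed): pos = 5000 gives offset = 5000 & 4095 = 904 and bytes = 4096 - 904 = 3192, so the zeroing loop finishes out the second page before continuing page by page.

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096ULL	/* assumed */

	int main(void)
	{
		uint64_t pos = 5000;
		uint64_t offset = pos & (PAGE_SIZE - 1);	/* 904 */
		uint64_t bytes = PAGE_SIZE - offset;		/* 3192 */
		assert(offset == 904 && bytes == 3192);
		return 0;
	}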
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239a0fa9..a8192dc797dc 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -135,7 +135,7 @@ typedef __u32 xfs_nlink_t;
  * Size of block device i/o is parameterized here.
  * Currently the system supports page-sized i/o.
  */
-#define	BLKDEV_IOSHIFT		PAGE_CACHE_SHIFT
+#define	BLKDEV_IOSHIFT		PAGE_SHIFT
 #define	BLKDEV_IOSIZE		(1<<BLKDEV_IOSHIFT)
 /* number of BB's per block device block */
 #define	BLKDEV_BB		BTOBB(BLKDEV_IOSIZE)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536a0ee9cd5a..cfd4210dd015 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count(
 	ASSERT(sbp->sb_blocklog >= BBSHIFT);
 
 	/* Limited by ULONG_MAX of page cache index */
-	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
 		return -EFBIG;
 	return 0;
 }
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bac6b3435591..eafe257b357a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -231,12 +231,12 @@ static inline unsigned long
 xfs_preferred_iosize(xfs_mount_t *mp)
 {
 	if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
-		return PAGE_CACHE_SIZE;
+		return PAGE_SIZE;
 	return (mp->m_swidth ?
 		(mp->m_swidth << mp->m_sb.sb_blocklog) :
 		((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
 		 (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
-		 PAGE_CACHE_SIZE));
+		 PAGE_SIZE));
 }
 
 #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp)	\
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index ade236e90bb3..51ddaf2c2b8c 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -293,8 +293,8 @@ xfs_fs_commit_blocks(
 	 * Make sure reads through the pagecache see the new data.
 	 */
 	error = invalidate_inode_pages2_range(inode->i_mapping,
-					start >> PAGE_CACHE_SHIFT,
-					(end - 1) >> PAGE_CACHE_SHIFT);
+					start >> PAGE_SHIFT,
+					(end - 1) >> PAGE_SHIFT);
 	WARN_ON_ONCE(error);
 
 	error = xfs_iomap_write_unwritten(ip, start, length);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d760934109b5..187e14b696c2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@ xfs_max_file_offset(
 	/* Figure out maximum filesize, on Linux this can depend on
 	 * the filesystem blocksize (on 32 bit platforms).
 	 * __block_write_begin does this in an [unsigned] long...
-	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
+	 *      page->index << (PAGE_SHIFT - bbits)
 	 * So, for page sized blocks (4K on 32 bit platforms),
 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
 	 * Note1: get_block_t takes a long (implicit cast from above)
 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
@@ -570,10 +570,10 @@ xfs_max_file_offset(
 #if BITS_PER_LONG == 32
 # if defined(CONFIG_LBDAF)
 	ASSERT(sizeof(sector_t) == 8);
-	pagefactor = PAGE_CACHE_SIZE;
+	pagefactor = PAGE_SIZE;
 	bitshift = BITS_PER_LONG;
 # else
-	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+	pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
 # endif
 #endif
 
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
 	struct list_head bdi_list;
-	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned int capabilities; /* Device capabilities */
 	congested_fn *congested_fn; /* Function pointer if device is md/dm */
 	void *congested_data;	/* Pointer to aux data for congested func */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..6b7481f62218 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
 #endif
 
 #define BIO_MAX_PAGES		256
-#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_SHIFT)
 #define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e018bea..669e419d6234 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
 
 static inline void put_dev_sector(Sector p)
 {
-	page_cache_release(p.v);
+	put_page(p.v);
 }
 
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052cc5e5..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
  */
 };
 
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
 
 struct page;
 struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
 static inline void attach_page_buffers(struct page *page,
 		struct buffer_head *head)
 {
-	page_cache_get(page);
+	get_page(page);
 	SetPagePrivate(page);
 	set_page_private(page, (unsigned long)head);
 }
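The renames in this file are purely mechanical: page_cache_get() and page_cache_release() were thin aliases for get_page() and put_page(), which raise and drop the page's reference count (the page can be freed once the count falls to zero). attach_page_buffers() takes a reference because the buffer ring it attaches now points at the page. A minimal sketch of the pairing (hypothetical helpers in kernel-style C; queue_inspection() is invented for the example):

	/* Pin a page across deferred work, then release it when done. */
	static void start_work_on_page(struct page *page)
	{
		get_page(page);		/* hold a reference for the async worker */
		queue_inspection(page);	/* hypothetical stand-in for real work */
	}

	static void finish_work_on_page(struct page *page)
	{
		put_page(page);		/* drop our reference; may free the page */
	}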
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4681e1..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
  */
 static inline int calc_pages_for(u64 off, u64 len)
 {
-	return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
-		(off >> PAGE_CACHE_SHIFT);
+	return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+		(off >> PAGE_SHIFT);
 }
 
 extern struct kmem_cache *ceph_inode_cachep;
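calc_pages_for() counts how many pages a byte range touches: round the end of the range up to a page boundary, round the start down, and take the difference in page units. Worked through once (an illustration; 4 KiB pages assumed): off = 4000, len = 200 covers bytes 4000..4199 and crosses the boundary at 4096, so ((4200 + 4095) >> 12) - (4000 >> 12) = 2 - 0 = 2 pages, while off = 0, len = 4096 is exactly 1 page.

	#include <stdint.h>

	#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
	#define PAGE_SIZE (1ULL << PAGE_SHIFT)

	/* Userspace restatement of calc_pages_for() for the examples above. */
	static int pages_for(uint64_t off, uint64_t len)
	{
		return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT)
			- (off >> PAGE_SHIFT);	/* pages_for(4000, 200) == 2 */
	}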
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..eeae401a2412 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
 #define unreachable() __builtin_unreachable()
 
 /* Mark a function definition as prohibited from being cloned. */
-#define __noclone	__attribute__((__noclone__))
+#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
 
 #endif /* GCC_VERSION >= 40500 */
 
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a155e0..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@ struct f2fs_node {
 /*
  * For NAT entries
  */
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
  * Not allow to change this.
  */
 #define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
 
 /*
  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a97194b34b..304991a80e23 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f)
 /* Page cache limit. The filesystems should put that into their s_maxbytes
    limits, otherwise bad things can happen in VM. */
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE	(((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE 	((loff_t)0x7fffffffffffffffLL)
 #endif
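The 32-bit branch of MAX_LFS_FILESIZE encodes the same limit the xfs_max_file_offset() comment earlier refers to: page->index is an unsigned long, so only 2^(BITS_PER_LONG-1) page indexes are safely representable, and with 4 KiB pages that is (4096 << 31) - 1 = 2^43 - 1 bytes, just under 8 TiB. A quick check of the arithmetic (an illustration; the page size is an assumption):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t page_size = 4096;	/* assumed */
		const int bits_per_long = 32;
		uint64_t max = (page_size << (bits_per_long - 1)) - 1;
		/* prints 8796093022207, i.e. 2^43 - 1, just under 8 TiB */
		printf("%llu\n", (unsigned long long)max);
		return 0;
	}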
@@ -2067,7 +2067,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
 #ifdef CONFIG_MANDATORY_FILE_LOCKING
 extern int locks_mandatory_locked(struct file *);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..ffcff53e3b2b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
  *
  * A page may belong to an inode's memory mapping. In this case, page->mapping
  * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
 
 	/* Information about our backing store: */
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
+					   units */
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
 
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
 	struct page		*wb_page;	/* page to read in/write out */
 	struct nfs_open_context	*wb_context;	/* File state context info */
 	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
-	pgoff_t			wb_index;	/* Offset >> PAGE_CACHE_SHIFT */
-	unsigned int		wb_offset,	/* Offset & ~PAGE_CACHE_MASK */
+	pgoff_t			wb_index;	/* Offset >> PAGE_SHIFT */
+	unsigned int		wb_offset,	/* Offset & ~PAGE_MASK */
 				wb_pgbase,	/* Start of page data */
 				wb_bytes;	/* Length of request */
 	struct kref		wb_kref;	/* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
 static inline
 loff_t req_offset(struct nfs_page *req)
 {
-	return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+	return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
 }
 
 #endif /* _LINUX_NFS_PAGE_H */
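The wb_index/wb_offset pair above splits a byte position into a page index plus an intra-page offset, and req_offset() reassembles it. A small standalone demonstration of that lossless round trip — 4 KiB pages assumed, all names local to the example:

#include <assert.h>
#include <stdint.h>

#define XPAGE_SHIFT 12			/* assumes 4 KiB pages */
#define XPAGE_MASK  (~((1UL << XPAGE_SHIFT) - 1))

int main(void)
{
	int64_t byte_pos = 5 * 4096 + 123;		/* arbitrary file position */
	uint64_t index  = byte_pos >> XPAGE_SHIFT;	/* like wb_index */
	unsigned offset = byte_pos & ~XPAGE_MASK;	/* like wb_offset */

	/* req_offset()-style reconstruction must return the original byte. */
	assert((((int64_t)index << XPAGE_SHIFT) + offset) == byte_pos);
	return 0;
}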
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == NILFS_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 nilfs_rec_len_to_disk(unsigned len)
 {
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(NILFS_MAX_REC_LEN);
 	else if (len > (1 << 16))
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 				(__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT	PAGE_SHIFT
-#define PAGE_CACHE_SIZE		PAGE_SIZE
-#define PAGE_CACHE_MASK		PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
 		return page->index << compound_order(page);
 
 	if (likely(!PageTransTail(page)))
-		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		return page->index;
 
 	/*
 	 *  We don't initialize ->index for tail pages: calculate based on
 	 *  head page
 	 */
-	pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = compound_head(page)->index;
 	pgoff += page - compound_head(page);
 	return pgoff;
 }
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
  */
 static inline loff_t page_offset(struct page *page)
 {
-	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page->index) << PAGE_SHIFT;
 }
 
 static inline loff_t page_file_offset(struct page *page)
 {
-	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
 }
 
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 		return linear_hugepage_index(vma, address);
 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
 	pgoff += vma->vm_pgoff;
-	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	return pgoff;
 }
 
 extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 /*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient.  That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
-	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
-			       PAGE_CACHE_SHIFT;
+	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+			       PAGE_SHIFT;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
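This is the heart of the series: the never-realized large-chunk page-cache abstraction is deleted, and with PAGE_CACHE_SHIFT having always been defined as PAGE_SHIFT, every compatibility shift in page_to_pgoff() and linear_page_index() was a shift by zero bits. A trivial standalone check of why dropping it cannot change any computed index (shift values assumed, names local):

#include <assert.h>

int main(void)
{
	/* PAGE_CACHE_SHIFT - PAGE_SHIFT was always 0 in practice. */
	const unsigned shift_diff = 12 - 12;
	unsigned long index = 12345;

	assert((index << shift_diff) == index);
	assert((index >> shift_diff) == index);
	return 0;
}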
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
  *
  * These happen to all be powers of 2, which is not strictly
  * necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
  *
  * For UDP transports, a block plus NFS,RPC, and UDP headers
  * has to fit into the IP datagram limit of 64K.  The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..2b83359c19ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,9 +433,9 @@ struct backing_dev_info;
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
 /* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
 #define free_page_and_swap_cache(page) \
-	page_cache_release(page)
+	put_page(page)
 #define free_pages_and_swap_cache(pages, nr) \
 	release_pages((pages), (nr), false);
 
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 781c1399c6a3..ade739f67f1d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -307,8 +307,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
 	struct inode *inode;
 	struct ipc_namespace *ns = data;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = MQUEUE_MAGIC;
 	sb->s_op = &mqueue_super_ops;
 
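Both fields of the pair move together here, which preserves the superblock invariant that s_blocksize equals 1 << s_blocksize_bits. Restated as a standalone check (4096-byte page assumed, local stand-in variables):

#include <assert.h>

int main(void)
{
	unsigned long s_blocksize = 4096;	/* stands in for PAGE_SIZE */
	unsigned char s_blocksize_bits = 12;	/* stands in for PAGE_SHIFT */

	/* A superblock is inconsistent if these two fields disagree. */
	assert(s_blocksize == (1UL << s_blocksize_bits));
	return 0;
}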
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 220fc17b9718..7edc95edfaee 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -321,7 +321,7 @@ retry:
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
 	ret = __replace_page(vma, vaddr, old_page, new_page);
-	page_cache_release(new_page);
+	put_page(new_page);
 put_old:
 	put_page(old_page);
 
@@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
 	 * see uprobe_register().
 	 */
 	if (mapping->a_ops->readpage)
-		page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 	else
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
 	copy_from_page(page, offset, insn, nbytes);
-	page_cache_release(page);
+	put_page(page);
 
 	return 0;
 }
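__copy_insn() above finds the cache page holding a probed instruction by shifting the file offset down by PAGE_SHIFT; the remainder locates the bytes within that page. The index/remainder split in isolation — 4 KiB page assumed, names local to the sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned page_shift = 12;		/* assumed PAGE_SHIFT */
	uint64_t offset = 3 * 4096 + 200;	/* instruction's file offset */

	uint64_t page_index = offset >> page_shift;		  /* page to read */
	unsigned in_page = offset & ((1U << page_shift) - 1);	  /* where in it */

	assert(page_index == 3 && in_page == 200);
	return 0;
}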
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8a5bc66b0c0..b8024fa7101d 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -97,8 +97,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 		break;
 	case POSIX_FADV_WILLNEED:
 		/* First and last PARTIAL page! */
-		start_index = offset >> PAGE_CACHE_SHIFT;
-		end_index = endbyte >> PAGE_CACHE_SHIFT;
+		start_index = offset >> PAGE_SHIFT;
+		end_index = endbyte >> PAGE_SHIFT;
 
 		/* Careful about overflow on the "+1" */
 		nrpages = end_index - start_index + 1;
@@ -124,8 +124,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 		 * preserved on the expectation that it is better to preserve
 		 * needed memory than to discard unneeded memory.
 		 */
-		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
-		end_index = (endbyte >> PAGE_CACHE_SHIFT);
+		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
+		end_index = (endbyte >> PAGE_SHIFT);
 
 		if (end_index >= start_index) {
 			unsigned long count = invalidate_mapping_pages(mapping,
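Note the asymmetric rounding the two hunks preserve: WILLNEED truncates both endpoints so partial first and last pages are still read in, while DONTNEED rounds the start up so a page that is only partially covered by the advice is never discarded. With an assumed 4 KiB page:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const int64_t psz = 4096, shift = 12;	/* assumed page geometry */
	int64_t offset = 100, endbyte = 9000;	/* advice covers bytes 100..9000 */

	/* POSIX_FADV_WILLNEED: include the partial first and last pages. */
	int64_t will_start = offset >> shift;			/* page 0 */
	int64_t will_end   = endbyte >> shift;			/* page 2 */

	/* POSIX_FADV_DONTNEED: exclude the partial first page. */
	int64_t dont_start = (offset + (psz - 1)) >> shift;	/* page 1 */
	int64_t dont_end   = endbyte >> shift;			/* page 2 */

	assert(will_start == 0 && will_end == 2);
	assert(dont_start == 1 && dont_end == 2);
	return 0;
}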
diff --git a/mm/filemap.c b/mm/filemap.c
index a8c69c8c0a90..f2479af09da9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -265,7 +265,7 @@ void delete_from_page_cache(struct page *page)
 
 	if (freepage)
 		freepage(page);
-	page_cache_release(page);
+	put_page(page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
@@ -352,8 +352,8 @@ EXPORT_SYMBOL(filemap_flush);
 static int __filemap_fdatawait_range(struct address_space *mapping,
 				     loff_t start_byte, loff_t end_byte)
 {
-	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
-	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
+	pgoff_t index = start_byte >> PAGE_SHIFT;
+	pgoff_t end = end_byte >> PAGE_SHIFT;
 	struct pagevec pvec;
 	int nr_pages;
 	int ret = 0;
@@ -550,7 +550,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		pgoff_t offset = old->index;
 		freepage = mapping->a_ops->freepage;
 
-		page_cache_get(new);
+		get_page(new);
 		new->mapping = mapping;
 		new->index = offset;
 
@@ -572,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
-		page_cache_release(old);
+		put_page(old);
 	}
 
 	return error;
@@ -651,7 +651,7 @@ static int __add_to_page_cache_locked(struct page *page,
 		return error;
 	}
 
-	page_cache_get(page);
+	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
 
@@ -675,7 +675,7 @@ err_insert:
 	spin_unlock_irq(&mapping->tree_lock);
 	if (!huge)
 		mem_cgroup_cancel_charge(page, memcg, false);
-	page_cache_release(page);
+	put_page(page);
 	return error;
 }
 
@@ -1083,7 +1083,7 @@ repeat:
 		 * include/linux/pagemap.h for details.
 		 */
 		if (unlikely(page != *pagep)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 	}
@@ -1121,7 +1121,7 @@ repeat:
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1168,7 +1168,7 @@ repeat:
 	if (fgp_flags & FGP_LOCK) {
 		if (fgp_flags & FGP_NOWAIT) {
 			if (!trylock_page(page)) {
-				page_cache_release(page);
+				put_page(page);
 				return NULL;
 			}
 		} else {
@@ -1178,7 +1178,7 @@ repeat:
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1209,7 +1209,7 @@ no_page:
 		err = add_to_page_cache_lru(page, mapping, offset,
 				gfp_mask & GFP_RECLAIM_MASK);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 			if (err == -EEXIST)
 				goto repeat;
@@ -1278,7 +1278,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 export:
@@ -1343,7 +1343,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1405,7 +1405,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1415,7 +1415,7 @@ repeat:
 		 * negatives, which is just confusing to the caller.
 		 */
 		if (page->mapping == NULL || page->index != iter.index) {
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 
@@ -1482,7 +1482,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1549,7 +1549,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 export:
@@ -1610,11 +1610,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 	unsigned int prev_offset;
 	int error = 0;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
-	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	index = *ppos >> PAGE_SHIFT;
+	prev_index = ra->prev_pos >> PAGE_SHIFT;
+	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
+	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
+	offset = *ppos & ~PAGE_MASK;
 
 	for (;;) {
 		struct page *page;
@@ -1648,7 +1648,7 @@ find_page:
 		if (PageUptodate(page))
 			goto page_ok;
 
-		if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+		if (inode->i_blkbits == PAGE_SHIFT ||
 				!mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
 		if (!trylock_page(page))
@@ -1672,18 +1672,18 @@ page_ok:
 		 */
 
 		isize = i_size_read(inode);
-		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		end_index = (isize - 1) >> PAGE_SHIFT;
 		if (unlikely(!isize || index > end_index)) {
-			page_cache_release(page);
+			put_page(page);
 			goto out;
 		}
 
 		/* nr is the maximum number of bytes to copy from this page */
-		nr = PAGE_CACHE_SIZE;
+		nr = PAGE_SIZE;
 		if (index == end_index) {
-			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			nr = ((isize - 1) & ~PAGE_MASK) + 1;
 			if (nr <= offset) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 		}
@@ -1711,11 +1711,11 @@ page_ok:
 
 		ret = copy_page_to_iter(page, offset, nr, iter);
 		offset += ret;
-		index += offset >> PAGE_CACHE_SHIFT;
-		offset &= ~PAGE_CACHE_MASK;
+		index += offset >> PAGE_SHIFT;
+		offset &= ~PAGE_MASK;
 		prev_offset = offset;
 
-		page_cache_release(page);
+		put_page(page);
 		written += ret;
 		if (!iov_iter_count(iter))
 			goto out;
@@ -1735,7 +1735,7 @@ page_not_up_to_date_locked:
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -1757,7 +1757,7 @@ readpage:
 
 		if (unlikely(error)) {
 			if (error == AOP_TRUNCATED_PAGE) {
-				page_cache_release(page);
+				put_page(page);
 				error = 0;
 				goto find_page;
 			}
@@ -1774,7 +1774,7 @@ readpage:
 					 * invalidate_mapping_pages got it
 					 */
 					unlock_page(page);
-					page_cache_release(page);
+					put_page(page);
 					goto find_page;
 				}
 				unlock_page(page);
@@ -1789,7 +1789,7 @@ readpage:
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
-		page_cache_release(page);
+		put_page(page);
 		goto out;
 
 no_cached_page:
@@ -1805,7 +1805,7 @@ no_cached_page:
 		error = add_to_page_cache_lru(page, mapping, index,
 				mapping_gfp_constraint(mapping, GFP_KERNEL));
 		if (error) {
-			page_cache_release(page);
+			put_page(page);
 			if (error == -EEXIST) {
 				error = 0;
 				goto find_page;
@@ -1817,10 +1817,10 @@ no_cached_page:
 
 out:
 	ra->prev_pos = prev_index;
-	ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	ra->prev_pos <<= PAGE_SHIFT;
 	ra->prev_pos |= prev_offset;
 
-	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
 	file_accessed(filp);
 	return written ? written : error;
 }
@@ -1912,7 +1912,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
 		else if (ret == -EEXIST)
 			ret = 0; /* losing race to add is OK */
 
-		page_cache_release(page);
+		put_page(page);
 
 	} while (ret == AOP_TRUNCATED_PAGE);
 
@@ -2022,8 +2022,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	loff_t size;
 	int ret = 0;
 
-	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-	if (offset >= size >> PAGE_CACHE_SHIFT)
+	size = round_up(i_size_read(inode), PAGE_SIZE);
+	if (offset >= size >> PAGE_SHIFT)
 		return VM_FAULT_SIGBUS;
 
 	/*
@@ -2049,7 +2049,7 @@ retry_find:
 	}
 
 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-		page_cache_release(page);
+		put_page(page);
 		return ret | VM_FAULT_RETRY;
 	}
 
@@ -2072,10 +2072,10 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 * We must recheck i_size under page lock.
 	 */
-	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
+	size = round_up(i_size_read(inode), PAGE_SIZE);
+	if (unlikely(offset >= size >> PAGE_SHIFT)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -2120,7 +2120,7 @@ page_not_uptodate:
 		if (!PageUptodate(page))
 			error = -EIO;
 	}
-	page_cache_release(page);
+	put_page(page);
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
 		goto retry_find;
@@ -2164,7 +2164,7 @@ repeat:
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -2178,8 +2178,8 @@ repeat:
 		if (page->mapping != mapping || !PageUptodate(page))
 			goto unlock;
 
-		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
-		if (page->index >= size >> PAGE_CACHE_SHIFT)
+		size = round_up(i_size_read(mapping->host), PAGE_SIZE);
+		if (page->index >= size >> PAGE_SHIFT)
 			goto unlock;
 
 		pte = vmf->pte + page->index - vmf->pgoff;
@@ -2195,7 +2195,7 @@ repeat:
 unlock:
 		unlock_page(page);
 skip:
-		page_cache_release(page);
+		put_page(page);
 next:
 		if (iter.index == vmf->max_pgoff)
 			break;
@@ -2278,7 +2278,7 @@ static struct page *wait_on_page_read(struct page *page)
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
-			page_cache_release(page);
+			put_page(page);
 			page = ERR_PTR(-EIO);
 		}
 	}
@@ -2301,7 +2301,7 @@ repeat:
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, gfp);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			if (err == -EEXIST)
 				goto repeat;
 			/* Presumably ENOMEM for radix tree node */
@@ -2311,7 +2311,7 @@ repeat:
 filler:
 		err = filler(data, page);
 		if (err < 0) {
-			page_cache_release(page);
+			put_page(page);
 			return ERR_PTR(err);
 		}
 
@@ -2364,7 +2364,7 @@ filler:
 	/* Case c or d, restart the operation */
 	if (!page->mapping) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		goto repeat;
 	}
 
@@ -2511,7 +2511,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
 	struct iov_iter data;
 
 	write_len = iov_iter_count(from);
-	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+	end = (pos + write_len - 1) >> PAGE_SHIFT;
 
 	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
 	if (written)
@@ -2525,7 +2525,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
-					pos >> PAGE_CACHE_SHIFT, end);
+					pos >> PAGE_SHIFT, end);
 		/*
 		 * If a page can not be invalidated, return 0 to fall back
 		 * to buffered write.
@@ -2550,7 +2550,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
 	 */
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_CACHE_SHIFT, end);
+					      pos >> PAGE_SHIFT, end);
 	}
 
 	if (written > 0) {
@@ -2611,8 +2611,8 @@ ssize_t generic_perform_write(struct file *file,
 		size_t copied;		/* Bytes copied from user */
 		void *fsdata;
 
-		offset = (pos & (PAGE_CACHE_SIZE - 1));
-		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+		offset = (pos & (PAGE_SIZE - 1));
+		bytes = min_t(unsigned long, PAGE_SIZE - offset,
 						iov_iter_count(i));
 
 again:
@@ -2665,7 +2665,7 @@ again:
 		 * because not all segments in the iov can be copied at
 		 * once without a pagefault.
 		 */
-		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+		bytes = min_t(unsigned long, PAGE_SIZE - offset,
 						iov_iter_single_seg_count(i));
 		goto again;
 	}
@@ -2752,8 +2752,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		iocb->ki_pos = endbyte + 1;
 		written += status;
 		invalidate_mapping_pages(mapping,
-					 pos >> PAGE_CACHE_SHIFT,
-					 endbyte >> PAGE_CACHE_SHIFT);
+					 pos >> PAGE_SHIFT,
+					 endbyte >> PAGE_SHIFT);
 	} else {
 		/*
 		 * We don't know how much we wrote, so just return
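The mm/filemap.c conversions are mechanical, but do_generic_file_read()'s bookkeeping is worth restating: *ppos is carried as a (page index, intra-page offset) pair and advanced by however many bytes copy_page_to_iter() managed, with the offset spilling into the index when it crosses a page boundary. A compact model of that advance — 4096-byte page assumed, names local:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t psz = 4096, shift = 12, mask = ~(psz - 1);
	uint64_t ppos = 2 * psz + 4000;		/* file position mid-read */

	uint64_t index  = ppos >> shift;	/* current page: 2 */
	uint64_t offset = ppos & ~mask;		/* within page: 4000 */

	uint64_t copied = 300;	/* pretend copy_page_to_iter() returned 300 */
	offset += copied;			/* 4300: spills into next page */
	index  += offset >> shift;		/* 3 */
	offset &= ~mask;			/* 204 */

	/* The recombined position matches plain byte arithmetic. */
	assert(((index << shift) + offset) == 2 * psz + 4000 + copied);
	return 0;
}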
diff --git a/mm/gup.c b/mm/gup.c
index 7f1c4fb77cfa..fb87aea9edc8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1107,7 +1107,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
  * @addr: user address
  *
  * Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by page_cache_release() or put_page().
+ * to be freed afterwards by put_page().
  *
  * Returns NULL on any kind of failure - a hole must then be inserted into
  * the corefile, to preserve alignment with its headers; and also returns
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06058eaa173b..19d0d08b396f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3346,7 +3346,7 @@ retry_avoidcopy:
 			old_page != pagecache_page)
 		outside_reserve = 1;
 
-	page_cache_get(old_page);
+	get_page(old_page);
 
 	/*
 	 * Drop page table lock as buddy allocator may be called. It will
@@ -3364,7 +3364,7 @@ retry_avoidcopy:
 		 * may get SIGKILLed if it later faults.
 		 */
 		if (outside_reserve) {
-			page_cache_release(old_page);
+			put_page(old_page);
 			BUG_ON(huge_pte_none(pte));
 			unmap_ref_private(mm, vma, old_page, address);
 			BUG_ON(huge_pte_none(pte));
@@ -3425,9 +3425,9 @@ retry_avoidcopy:
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out_release_all:
-	page_cache_release(new_page);
+	put_page(new_page);
 out_release_old:
-	page_cache_release(old_page);
+	put_page(old_page);
 
 	spin_lock(ptl);	/* Caller expects lock to be held */
 	return ret;
diff --git a/mm/madvise.c b/mm/madvise.c
index a01147359f3b..07427d3fcead 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -170,7 +170,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
 								vma, index);
 		if (page)
-			page_cache_release(page);
+			put_page(page);
 	}
 
 	return 0;
@@ -204,14 +204,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		page = find_get_entry(mapping, index);
 		if (!radix_tree_exceptional_entry(page)) {
 			if (page)
-				page_cache_release(page);
+				put_page(page);
 			continue;
 		}
 		swap = radix_to_swp_entry(page);
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
 								NULL, 0);
 		if (page)
-			page_cache_release(page);
+			put_page(page);
 	}
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a544c6c0717..78f5f2641b91 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -538,7 +538,7 @@ static int delete_from_lru_cache(struct page *p)
 		/*
 		 * drop the page count elevated by isolate_lru_page()
 		 */
-		page_cache_release(p);
+		put_page(p);
 		return 0;
 	}
 	return -EIO;
diff --git a/mm/memory.c b/mm/memory.c
index 098f00d05461..93897f23cc11 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2054,7 +2054,7 @@ static inline int wp_page_reuse(struct mm_struct *mm,
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		mapping = page->mapping;
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		if ((dirtied || page_mkwrite) && mapping) {
 			/*
@@ -2188,7 +2188,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	if (new_page)
-		page_cache_release(new_page);
+		put_page(new_page);
 
 	pte_unmap_unlock(page_table, ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -2203,14 +2203,14 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 			munlock_vma_page(old_page);
 			unlock_page(old_page);
 		}
-		page_cache_release(old_page);
+		put_page(old_page);
 	}
 	return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
-	page_cache_release(new_page);
+	put_page(new_page);
 oom:
 	if (old_page)
-		page_cache_release(old_page);
+		put_page(old_page);
 	return VM_FAULT_OOM;
 }
 
@@ -2258,7 +2258,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	int page_mkwrite = 0;
 
-	page_cache_get(old_page);
+	get_page(old_page);
 
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
@@ -2267,7 +2267,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 		tmp = do_page_mkwrite(vma, old_page, address);
 		if (unlikely(!tmp || (tmp &
 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			page_cache_release(old_page);
+			put_page(old_page);
 			return tmp;
 		}
 		/*
@@ -2281,7 +2281,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (!pte_same(*page_table, orig_pte)) {
 			unlock_page(old_page);
 			pte_unmap_unlock(page_table, ptl);
-			page_cache_release(old_page);
+			put_page(old_page);
 			return 0;
 		}
 		page_mkwrite = 1;
@@ -2341,7 +2341,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (PageAnon(old_page) && !PageKsm(old_page)) {
 		if (!trylock_page(old_page)) {
-			page_cache_get(old_page);
+			get_page(old_page);
 			pte_unmap_unlock(page_table, ptl);
 			lock_page(old_page);
 			page_table = pte_offset_map_lock(mm, pmd, address,
@@ -2349,10 +2349,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
 				pte_unmap_unlock(page_table, ptl);
-				page_cache_release(old_page);
+				put_page(old_page);
 				return 0;
 			}
-			page_cache_release(old_page);
+			put_page(old_page);
 		}
 		if (reuse_swap_page(old_page)) {
 			/*
@@ -2375,7 +2375,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */
-	page_cache_get(old_page);
+	get_page(old_page);
 
 	pte_unmap_unlock(page_table, ptl);
 	return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2400,7 +2400,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 
 		vba = vma->vm_pgoff;
 		vea = vba + vma_pages(vma) - 1;
-		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 		zba = details->first_index;
 		if (zba < vba)
 			zba = vba;
@@ -2619,7 +2618,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * parallel locked swapcache.
 		 */
 		unlock_page(swapcache);
-		page_cache_release(swapcache);
+		put_page(swapcache);
 	}
 
 	if (flags & FAULT_FLAG_WRITE) {
@@ -2641,10 +2640,10 @@ out_nomap:
 out_page:
 	unlock_page(page);
 out_release:
-	page_cache_release(page);
+	put_page(page);
 	if (page != swapcache) {
 		unlock_page(swapcache);
-		page_cache_release(swapcache);
+		put_page(swapcache);
 	}
 	return ret;
 }
@@ -2752,7 +2751,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(page_table, ptl);
 		mem_cgroup_cancel_charge(page, memcg, false);
-		page_cache_release(page);
+		put_page(page);
 		return handle_userfault(vma, address, flags,
 					VM_UFFD_MISSING);
 	}
@@ -2771,10 +2770,10 @@ unlock:
 	return 0;
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
-	page_cache_release(page);
+	put_page(page);
 	goto unlock;
 oom_free_page:
-	page_cache_release(page);
+	put_page(page);
 oom:
 	return VM_FAULT_OOM;
 }
@@ -2807,7 +2806,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf.page);
-		page_cache_release(vmf.page);
+		put_page(vmf.page);
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2996,7 +2995,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 		return ret;
 	}
 	do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3024,7 +3023,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 
 	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
-		page_cache_release(new_page);
+		put_page(new_page);
 		return VM_FAULT_OOM;
 	}
 
@@ -3041,7 +3040,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 		if (fault_page) {
 			unlock_page(fault_page);
-			page_cache_release(fault_page);
+			put_page(fault_page);
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
@@ -3057,7 +3056,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_unmap_unlock(pte, ptl);
 	if (fault_page) {
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 	} else {
 		/*
 		 * The fault handler has no page to lock, so it holds
@@ -3068,7 +3067,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return ret;
 uncharge_out:
 	mem_cgroup_cancel_charge(new_page, memcg, false);
-	page_cache_release(new_page);
+	put_page(new_page);
 	return ret;
 }
 
@@ -3096,7 +3095,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		tmp = do_page_mkwrite(vma, fault_page, address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			page_cache_release(fault_page);
+			put_page(fault_page);
 			return tmp;
 		}
 	}
@@ -3105,7 +3104,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 		return ret;
 	}
 	do_set_pte(vma, address, fault_page, pte, true, false);
@@ -3736,7 +3735,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 					    buf, maddr + offset, bytes);
 		}
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	len -= bytes;
 	buf += bytes;
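The do_wp_page()/wp_page_copy() edits above are pure renames, but the discipline they preserve deserves a note: every extra reference taken with get_page() before dropping the page-table lock must be balanced by exactly one put_page() on every exit path, success or error, or the page leaks. A toy userspace refcount model of that pattern — not kernel code, all names invented for the sketch:

#include <assert.h>

struct toy_page { int refcount; };

static void get_page_t(struct toy_page *p) { p->refcount++; }
static void put_page_t(struct toy_page *p) { assert(p->refcount-- > 0); }

static int cow_fault(struct toy_page *old, int copy_fails)
{
	get_page_t(old);		/* pin across the lock drop */
	if (copy_fails) {
		put_page_t(old);	/* error path must unpin too */
		return -1;
	}
	/* ...copy contents to a new page, remap... */
	put_page_t(old);		/* success path unpins once */
	return 0;
}

int main(void)
{
	struct toy_page p = { .refcount = 1 };
	cow_fault(&p, 0);
	cow_fault(&p, 1);
	assert(p.refcount == 1);	/* balanced on both paths */
	return 0;
}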
diff --git a/mm/mincore.c b/mm/mincore.c
index 563f32045490..c0b5ba965200 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -75,7 +75,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 #endif
 	if (page) {
 		present = PageUptodate(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	return present;
@@ -211,7 +211,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
  * return values:
  *  zero    - success
  *  -EFAULT - vec points to an illegal address
- *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
+ *  -EINVAL - addr is not a multiple of PAGE_SIZE
  *  -ENOMEM - Addresses in the range [addr, addr + len] are
  *		invalid for the address space of this process, or
  *		specify one or more pages which are not currently
@@ -226,14 +226,14 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
 	unsigned char *tmp;
 
 	/* Check the start address: needs to be page-aligned.. */
-	if (start & ~PAGE_CACHE_MASK)
+	if (start & ~PAGE_MASK)
 		return -EINVAL;
 
 	/* ..and we need to be passed a valid user-space range */
 	if (!access_ok(VERIFY_READ, (void __user *) start, len))
 		return -ENOMEM;
 
-	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
+	/* This also avoids any overflows on PAGE_ALIGN */
 	pages = len >> PAGE_SHIFT;
 	pages += (offset_in_page(len)) != 0;
 
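The -EINVAL test above, start & ~PAGE_MASK, is the standard alignment idiom: masking off the page bits leaves zero only for page-aligned addresses. In isolation, with a 4 KiB page assumed:

#include <assert.h>

int main(void)
{
	const unsigned long page_mask = ~(4096UL - 1);	/* assumed PAGE_MASK */

	assert((0x2000UL & ~page_mask) == 0);	/* aligned: would pass */
	assert((0x2001UL & ~page_mask) != 0);	/* misaligned: -EINVAL */
	return 0;
}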
diff --git a/mm/nommu.c b/mm/nommu.c
index de8b6b6580c1..102e257cc6c3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -141,7 +141,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		if (pages) {
 			pages[i] = virt_to_page(start);
 			if (pages[i])
-				page_cache_get(pages[i]);
+				get_page(pages[i]);
 		}
 		if (vmas)
 			vmas[i] = vma;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 11ff8f758631..999792d35ccc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2176,8 +2176,8 @@ int write_cache_pages(struct address_space *mapping,
 			cycled = 0;
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
@@ -2382,14 +2382,14 @@ int write_one_page(struct page *page, int wait)
 		wait_on_page_writeback(page);
 
 	if (clear_page_dirty_for_io(page)) {
-		page_cache_get(page);
+		get_page(page);
 		ret = mapping->a_ops->writepage(page, &wbc);
 		if (ret == 0 && wait) {
 			wait_on_page_writeback(page);
 			if (PageError(page))
 				ret = -EIO;
 		}
-		page_cache_release(page);
+		put_page(page);
 	} else {
 		unlock_page(page);
 	}
@@ -2431,7 +2431,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
-		task_io_account_write(PAGE_CACHE_SIZE);
+		task_io_account_write(PAGE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
 	}
@@ -2450,7 +2450,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		dec_zone_page_state(page, NR_FILE_DIRTY);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
-		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
+		task_io_account_cancelled_write(PAGE_SIZE);
 	}
 }
 
diff --git a/mm/page_io.c b/mm/page_io.c
index 18aac7819cc9..cd92e3d67a32 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -252,7 +252,7 @@ out:
 
 static sector_t swap_page_sector(struct page *page)
 {
-	return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9);
+	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
 }
 
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
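swap_page_sector() converts a page index into a 512-byte sector number; the shift by (PAGE_SHIFT - 9) is the page-to-sector ratio, eight sectors per 4 KiB page. Checking the arithmetic standalone (shift values assumed):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned page_shift = 12, sector_shift = 9;	/* assumed */
	uint64_t page_index = 7;

	uint64_t sector = page_index << (page_shift - sector_shift);
	assert(sector == 56);	/* 7 pages * 8 sectors/page */
	return 0;
}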
diff --git a/mm/readahead.c b/mm/readahead.c
index 20e58e820e44..40be3ae0afe3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -47,11 +47,11 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
47 if (!trylock_page(page)) 47 if (!trylock_page(page))
48 BUG(); 48 BUG();
49 page->mapping = mapping; 49 page->mapping = mapping;
50 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 50 do_invalidatepage(page, 0, PAGE_SIZE);
51 page->mapping = NULL; 51 page->mapping = NULL;
52 unlock_page(page); 52 unlock_page(page);
53 } 53 }
54 page_cache_release(page); 54 put_page(page);
55} 55}
56 56
57/* 57/*
@@ -93,14 +93,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
93 read_cache_pages_invalidate_page(mapping, page); 93 read_cache_pages_invalidate_page(mapping, page);
94 continue; 94 continue;
95 } 95 }
96 page_cache_release(page); 96 put_page(page);
97 97
98 ret = filler(data, page); 98 ret = filler(data, page);
99 if (unlikely(ret)) { 99 if (unlikely(ret)) {
100 read_cache_pages_invalidate_pages(mapping, pages); 100 read_cache_pages_invalidate_pages(mapping, pages);
101 break; 101 break;
102 } 102 }
103 task_io_account_read(PAGE_CACHE_SIZE); 103 task_io_account_read(PAGE_SIZE);
104 } 104 }
105 return ret; 105 return ret;
106} 106}
@@ -130,7 +130,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
130 mapping_gfp_constraint(mapping, GFP_KERNEL))) { 130 mapping_gfp_constraint(mapping, GFP_KERNEL))) {
131 mapping->a_ops->readpage(filp, page); 131 mapping->a_ops->readpage(filp, page);
132 } 132 }
133 page_cache_release(page); 133 put_page(page);
134 } 134 }
135 ret = 0; 135 ret = 0;
136 136
@@ -163,7 +163,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
163 if (isize == 0) 163 if (isize == 0)
164 goto out; 164 goto out;
165 165
166 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); 166 end_index = ((isize - 1) >> PAGE_SHIFT);
167 167
168 /* 168 /*
169 * Preallocate as many pages as we will need. 169 * Preallocate as many pages as we will need.
@@ -216,7 +216,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
216 while (nr_to_read) { 216 while (nr_to_read) {
217 int err; 217 int err;
218 218
219 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE; 219 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
220 220
221 if (this_chunk > nr_to_read) 221 if (this_chunk > nr_to_read)
222 this_chunk = nr_to_read; 222 this_chunk = nr_to_read;
@@ -425,7 +425,7 @@ ondemand_readahead(struct address_space *mapping,
425 * trivial case: (offset - prev_offset) == 1 425 * trivial case: (offset - prev_offset) == 1
426 * unaligned reads: (offset - prev_offset) == 0 426 * unaligned reads: (offset - prev_offset) == 0
427 */ 427 */
428 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT; 428 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
429 if (offset - prev_offset <= 1UL) 429 if (offset - prev_offset <= 1UL)
430 goto initial_readahead; 430 goto initial_readahead;
431 431
@@ -558,8 +558,8 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
558 if (f.file) { 558 if (f.file) {
559 if (f.file->f_mode & FMODE_READ) { 559 if (f.file->f_mode & FMODE_READ) {
560 struct address_space *mapping = f.file->f_mapping; 560 struct address_space *mapping = f.file->f_mapping;
561 pgoff_t start = offset >> PAGE_CACHE_SHIFT; 561 pgoff_t start = offset >> PAGE_SHIFT;
562 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT; 562 pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
563 unsigned long len = end - start + 1; 563 unsigned long len = end - start + 1;
564 ret = do_readahead(mapping, f.file, start, len); 564 ret = do_readahead(mapping, f.file, start, len);
565 } 565 }
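
Two of the mm/readahead.c conversions are easy to sanity-check in userspace: the readahead(2) entry point above turns a byte range into an inclusive page range, and force_page_cache_readahead() caps each pass at 2 MiB worth of pages. A sketch assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t offset = 5000, count = 10000;

        /* Inclusive page range, as in SYSCALL_DEFINE3(readahead) above. */
        uint64_t start = offset >> PAGE_SHIFT;               /* page 1 */
        uint64_t end   = (offset + count - 1) >> PAGE_SHIFT; /* page 3 */
        uint64_t nr_to_read = end - start + 1;               /* 3 pages */

        /* Chunking, as in force_page_cache_readahead() above. */
        while (nr_to_read) {
                uint64_t this_chunk = (2 * 1024 * 1024) / PAGE_SIZE; /* 512 */
                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                printf("read %llu pages starting at page %llu\n",
                       (unsigned long long)this_chunk,
                       (unsigned long long)start);
                start += this_chunk;
                nr_to_read -= this_chunk;
        }
        return 0;
}
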
diff --git a/mm/rmap.c b/mm/rmap.c
index 395e314b7996..307b555024ef 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1541,7 +1541,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1541 1541
1542discard: 1542discard:
1543 page_remove_rmap(page, PageHuge(page)); 1543 page_remove_rmap(page, PageHuge(page));
1544 page_cache_release(page); 1544 put_page(page);
1545 1545
1546out_unmap: 1546out_unmap:
1547 pte_unmap_unlock(pte, ptl); 1547 pte_unmap_unlock(pte, ptl);
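
The try_to_unmap_one() hunk is typical of the whole series: the change is purely textual. Before this conversion, include/linux/pagemap.h carried aliases of roughly the following shape, so every page_cache_release() already compiled down to put_page():

/* Rough shape of the old aliases (removed by this series): */
#define page_cache_get(page)      get_page(page)
#define page_cache_release(page)  put_page(page)
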
diff --git a/mm/shmem.c b/mm/shmem.c
index 9428c51ab2d6..719bd6b88d98 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,8 +75,8 @@ static struct vfsmount *shm_mnt;
75 75
76#include "internal.h" 76#include "internal.h"
77 77
78#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) 78#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
79#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) 79#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
80 80
81/* Pretend that each entry is of this size in directory's i_size */ 81/* Pretend that each entry is of this size in directory's i_size */
82#define BOGO_DIRENT_SIZE 20 82#define BOGO_DIRENT_SIZE 20
@@ -176,13 +176,13 @@ static inline int shmem_reacct_size(unsigned long flags,
176static inline int shmem_acct_block(unsigned long flags) 176static inline int shmem_acct_block(unsigned long flags)
177{ 177{
178 return (flags & VM_NORESERVE) ? 178 return (flags & VM_NORESERVE) ?
179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0; 179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
180} 180}
181 181
182static inline void shmem_unacct_blocks(unsigned long flags, long pages) 182static inline void shmem_unacct_blocks(unsigned long flags, long pages)
183{ 183{
184 if (flags & VM_NORESERVE) 184 if (flags & VM_NORESERVE)
185 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); 185 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
186} 186}
187 187
188static const struct super_operations shmem_ops; 188static const struct super_operations shmem_ops;
@@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struct page *page,
300 VM_BUG_ON_PAGE(!PageLocked(page), page); 300 VM_BUG_ON_PAGE(!PageLocked(page), page);
301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
302 302
303 page_cache_get(page); 303 get_page(page);
304 page->mapping = mapping; 304 page->mapping = mapping;
305 page->index = index; 305 page->index = index;
306 306
@@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struct page *page,
318 } else { 318 } else {
319 page->mapping = NULL; 319 page->mapping = NULL;
320 spin_unlock_irq(&mapping->tree_lock); 320 spin_unlock_irq(&mapping->tree_lock);
321 page_cache_release(page); 321 put_page(page);
322 } 322 }
323 return error; 323 return error;
324} 324}
@@ -338,7 +338,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
338 __dec_zone_page_state(page, NR_FILE_PAGES); 338 __dec_zone_page_state(page, NR_FILE_PAGES);
339 __dec_zone_page_state(page, NR_SHMEM); 339 __dec_zone_page_state(page, NR_SHMEM);
340 spin_unlock_irq(&mapping->tree_lock); 340 spin_unlock_irq(&mapping->tree_lock);
341 page_cache_release(page); 341 put_page(page);
342 BUG_ON(error); 342 BUG_ON(error);
343} 343}
344 344
@@ -474,10 +474,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
474{ 474{
475 struct address_space *mapping = inode->i_mapping; 475 struct address_space *mapping = inode->i_mapping;
476 struct shmem_inode_info *info = SHMEM_I(inode); 476 struct shmem_inode_info *info = SHMEM_I(inode);
477 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 477 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
478 pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT; 478 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
479 unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1); 479 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
480 unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 480 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
481 struct pagevec pvec; 481 struct pagevec pvec;
482 pgoff_t indices[PAGEVEC_SIZE]; 482 pgoff_t indices[PAGEVEC_SIZE];
483 long nr_swaps_freed = 0; 483 long nr_swaps_freed = 0;
@@ -530,7 +530,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
530 struct page *page = NULL; 530 struct page *page = NULL;
531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); 531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
532 if (page) { 532 if (page) {
533 unsigned int top = PAGE_CACHE_SIZE; 533 unsigned int top = PAGE_SIZE;
534 if (start > end) { 534 if (start > end) {
535 top = partial_end; 535 top = partial_end;
536 partial_end = 0; 536 partial_end = 0;
@@ -538,7 +538,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
538 zero_user_segment(page, partial_start, top); 538 zero_user_segment(page, partial_start, top);
539 set_page_dirty(page); 539 set_page_dirty(page);
540 unlock_page(page); 540 unlock_page(page);
541 page_cache_release(page); 541 put_page(page);
542 } 542 }
543 } 543 }
544 if (partial_end) { 544 if (partial_end) {
@@ -548,7 +548,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
548 zero_user_segment(page, 0, partial_end); 548 zero_user_segment(page, 0, partial_end);
549 set_page_dirty(page); 549 set_page_dirty(page);
550 unlock_page(page); 550 unlock_page(page);
551 page_cache_release(page); 551 put_page(page);
552 } 552 }
553 } 553 }
554 if (start >= end) 554 if (start >= end)
@@ -833,7 +833,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
833 mem_cgroup_commit_charge(page, memcg, true, false); 833 mem_cgroup_commit_charge(page, memcg, true, false);
834out: 834out:
835 unlock_page(page); 835 unlock_page(page);
836 page_cache_release(page); 836 put_page(page);
837 return error; 837 return error;
838} 838}
839 839
@@ -1080,7 +1080,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1080 if (!newpage) 1080 if (!newpage)
1081 return -ENOMEM; 1081 return -ENOMEM;
1082 1082
1083 page_cache_get(newpage); 1083 get_page(newpage);
1084 copy_highpage(newpage, oldpage); 1084 copy_highpage(newpage, oldpage);
1085 flush_dcache_page(newpage); 1085 flush_dcache_page(newpage);
1086 1086
@@ -1120,8 +1120,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1120 set_page_private(oldpage, 0); 1120 set_page_private(oldpage, 0);
1121 1121
1122 unlock_page(oldpage); 1122 unlock_page(oldpage);
1123 page_cache_release(oldpage); 1123 put_page(oldpage);
1124 page_cache_release(oldpage); 1124 put_page(oldpage);
1125 return error; 1125 return error;
1126} 1126}
1127 1127
@@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1145 int once = 0; 1145 int once = 0;
1146 int alloced = 0; 1146 int alloced = 0;
1147 1147
1148 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) 1148 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1149 return -EFBIG; 1149 return -EFBIG;
1150repeat: 1150repeat:
1151 swap.val = 0; 1151 swap.val = 0;
@@ -1156,7 +1156,7 @@ repeat:
1156 } 1156 }
1157 1157
1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1159 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1159 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1160 error = -EINVAL; 1160 error = -EINVAL;
1161 goto unlock; 1161 goto unlock;
1162 } 1162 }
@@ -1169,7 +1169,7 @@ repeat:
1169 if (sgp != SGP_READ) 1169 if (sgp != SGP_READ)
1170 goto clear; 1170 goto clear;
1171 unlock_page(page); 1171 unlock_page(page);
1172 page_cache_release(page); 1172 put_page(page);
1173 page = NULL; 1173 page = NULL;
1174 } 1174 }
1175 if (page || (sgp == SGP_READ && !swap.val)) { 1175 if (page || (sgp == SGP_READ && !swap.val)) {
@@ -1327,7 +1327,7 @@ clear:
1327 1327
1328 /* Perhaps the file has been truncated since we checked */ 1328 /* Perhaps the file has been truncated since we checked */
1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1330 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1330 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1331 if (alloced) { 1331 if (alloced) {
1332 ClearPageDirty(page); 1332 ClearPageDirty(page);
1333 delete_from_page_cache(page); 1333 delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@ failed:
1355unlock: 1355unlock:
1356 if (page) { 1356 if (page) {
1357 unlock_page(page); 1357 unlock_page(page);
1358 page_cache_release(page); 1358 put_page(page);
1359 } 1359 }
1360 if (error == -ENOSPC && !once++) { 1360 if (error == -ENOSPC && !once++) {
1361 info = SHMEM_I(inode); 1361 info = SHMEM_I(inode);
@@ -1577,7 +1577,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
1577{ 1577{
1578 struct inode *inode = mapping->host; 1578 struct inode *inode = mapping->host;
1579 struct shmem_inode_info *info = SHMEM_I(inode); 1579 struct shmem_inode_info *info = SHMEM_I(inode);
1580 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1580 pgoff_t index = pos >> PAGE_SHIFT;
1581 1581
1582 /* i_mutex is held by caller */ 1582 /* i_mutex is held by caller */
1583 if (unlikely(info->seals)) { 1583 if (unlikely(info->seals)) {
@@ -1601,16 +1601,16 @@ shmem_write_end(struct file *file, struct address_space *mapping,
1601 i_size_write(inode, pos + copied); 1601 i_size_write(inode, pos + copied);
1602 1602
1603 if (!PageUptodate(page)) { 1603 if (!PageUptodate(page)) {
1604 if (copied < PAGE_CACHE_SIZE) { 1604 if (copied < PAGE_SIZE) {
1605 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1605 unsigned from = pos & (PAGE_SIZE - 1);
1606 zero_user_segments(page, 0, from, 1606 zero_user_segments(page, 0, from,
1607 from + copied, PAGE_CACHE_SIZE); 1607 from + copied, PAGE_SIZE);
1608 } 1608 }
1609 SetPageUptodate(page); 1609 SetPageUptodate(page);
1610 } 1610 }
1611 set_page_dirty(page); 1611 set_page_dirty(page);
1612 unlock_page(page); 1612 unlock_page(page);
1613 page_cache_release(page); 1613 put_page(page);
1614 1614
1615 return copied; 1615 return copied;
1616} 1616}
@@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1635 if (!iter_is_iovec(to)) 1635 if (!iter_is_iovec(to))
1636 sgp = SGP_DIRTY; 1636 sgp = SGP_DIRTY;
1637 1637
1638 index = *ppos >> PAGE_CACHE_SHIFT; 1638 index = *ppos >> PAGE_SHIFT;
1639 offset = *ppos & ~PAGE_CACHE_MASK; 1639 offset = *ppos & ~PAGE_MASK;
1640 1640
1641 for (;;) { 1641 for (;;) {
1642 struct page *page = NULL; 1642 struct page *page = NULL;
@@ -1644,11 +1644,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1644 unsigned long nr, ret; 1644 unsigned long nr, ret;
1645 loff_t i_size = i_size_read(inode); 1645 loff_t i_size = i_size_read(inode);
1646 1646
1647 end_index = i_size >> PAGE_CACHE_SHIFT; 1647 end_index = i_size >> PAGE_SHIFT;
1648 if (index > end_index) 1648 if (index > end_index)
1649 break; 1649 break;
1650 if (index == end_index) { 1650 if (index == end_index) {
1651 nr = i_size & ~PAGE_CACHE_MASK; 1651 nr = i_size & ~PAGE_MASK;
1652 if (nr <= offset) 1652 if (nr <= offset)
1653 break; 1653 break;
1654 } 1654 }
@@ -1666,14 +1666,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1666 * We must evaluate after, since reads (unlike writes) 1666 * We must evaluate after, since reads (unlike writes)
1667 * are called without i_mutex protection against truncate 1667 * are called without i_mutex protection against truncate
1668 */ 1668 */
1669 nr = PAGE_CACHE_SIZE; 1669 nr = PAGE_SIZE;
1670 i_size = i_size_read(inode); 1670 i_size = i_size_read(inode);
1671 end_index = i_size >> PAGE_CACHE_SHIFT; 1671 end_index = i_size >> PAGE_SHIFT;
1672 if (index == end_index) { 1672 if (index == end_index) {
1673 nr = i_size & ~PAGE_CACHE_MASK; 1673 nr = i_size & ~PAGE_MASK;
1674 if (nr <= offset) { 1674 if (nr <= offset) {
1675 if (page) 1675 if (page)
1676 page_cache_release(page); 1676 put_page(page);
1677 break; 1677 break;
1678 } 1678 }
1679 } 1679 }
@@ -1694,7 +1694,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1694 mark_page_accessed(page); 1694 mark_page_accessed(page);
1695 } else { 1695 } else {
1696 page = ZERO_PAGE(0); 1696 page = ZERO_PAGE(0);
1697 page_cache_get(page); 1697 get_page(page);
1698 } 1698 }
1699 1699
1700 /* 1700 /*
@@ -1704,10 +1704,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1704 ret = copy_page_to_iter(page, offset, nr, to); 1704 ret = copy_page_to_iter(page, offset, nr, to);
1705 retval += ret; 1705 retval += ret;
1706 offset += ret; 1706 offset += ret;
1707 index += offset >> PAGE_CACHE_SHIFT; 1707 index += offset >> PAGE_SHIFT;
1708 offset &= ~PAGE_CACHE_MASK; 1708 offset &= ~PAGE_MASK;
1709 1709
1710 page_cache_release(page); 1710 put_page(page);
1711 if (!iov_iter_count(to)) 1711 if (!iov_iter_count(to))
1712 break; 1712 break;
1713 if (ret < nr) { 1713 if (ret < nr) {
@@ -1717,7 +1717,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1717 cond_resched(); 1717 cond_resched();
1718 } 1718 }
1719 1719
1720 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 1720 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
1721 file_accessed(file); 1721 file_accessed(file);
1722 return retval ? retval : error; 1722 return retval ? retval : error;
1723} 1723}
@@ -1755,9 +1755,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1755 if (splice_grow_spd(pipe, &spd)) 1755 if (splice_grow_spd(pipe, &spd))
1756 return -ENOMEM; 1756 return -ENOMEM;
1757 1757
1758 index = *ppos >> PAGE_CACHE_SHIFT; 1758 index = *ppos >> PAGE_SHIFT;
1759 loff = *ppos & ~PAGE_CACHE_MASK; 1759 loff = *ppos & ~PAGE_MASK;
1760 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1760 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
1761 nr_pages = min(req_pages, spd.nr_pages_max); 1761 nr_pages = min(req_pages, spd.nr_pages_max);
1762 1762
1763 spd.nr_pages = find_get_pages_contig(mapping, index, 1763 spd.nr_pages = find_get_pages_contig(mapping, index,
@@ -1774,7 +1774,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1774 index++; 1774 index++;
1775 } 1775 }
1776 1776
1777 index = *ppos >> PAGE_CACHE_SHIFT; 1777 index = *ppos >> PAGE_SHIFT;
1778 nr_pages = spd.nr_pages; 1778 nr_pages = spd.nr_pages;
1779 spd.nr_pages = 0; 1779 spd.nr_pages = 0;
1780 1780
@@ -1784,7 +1784,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1784 if (!len) 1784 if (!len)
1785 break; 1785 break;
1786 1786
1787 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 1787 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
1788 page = spd.pages[page_nr]; 1788 page = spd.pages[page_nr];
1789 1789
1790 if (!PageUptodate(page) || page->mapping != mapping) { 1790 if (!PageUptodate(page) || page->mapping != mapping) {
@@ -1793,19 +1793,19 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1793 if (error) 1793 if (error)
1794 break; 1794 break;
1795 unlock_page(page); 1795 unlock_page(page);
1796 page_cache_release(spd.pages[page_nr]); 1796 put_page(spd.pages[page_nr]);
1797 spd.pages[page_nr] = page; 1797 spd.pages[page_nr] = page;
1798 } 1798 }
1799 1799
1800 isize = i_size_read(inode); 1800 isize = i_size_read(inode);
1801 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1801 end_index = (isize - 1) >> PAGE_SHIFT;
1802 if (unlikely(!isize || index > end_index)) 1802 if (unlikely(!isize || index > end_index))
1803 break; 1803 break;
1804 1804
1805 if (end_index == index) { 1805 if (end_index == index) {
1806 unsigned int plen; 1806 unsigned int plen;
1807 1807
1808 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1808 plen = ((isize - 1) & ~PAGE_MASK) + 1;
1809 if (plen <= loff) 1809 if (plen <= loff)
1810 break; 1810 break;
1811 1811
@@ -1822,7 +1822,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1822 } 1822 }
1823 1823
1824 while (page_nr < nr_pages) 1824 while (page_nr < nr_pages)
1825 page_cache_release(spd.pages[page_nr++]); 1825 put_page(spd.pages[page_nr++]);
1826 1826
1827 if (spd.nr_pages) 1827 if (spd.nr_pages)
1828 error = splice_to_pipe(pipe, &spd); 1828 error = splice_to_pipe(pipe, &spd);
@@ -1904,10 +1904,10 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1904 else if (offset >= inode->i_size) 1904 else if (offset >= inode->i_size)
1905 offset = -ENXIO; 1905 offset = -ENXIO;
1906 else { 1906 else {
1907 start = offset >> PAGE_CACHE_SHIFT; 1907 start = offset >> PAGE_SHIFT;
1908 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1908 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence); 1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1910 new_offset <<= PAGE_CACHE_SHIFT; 1910 new_offset <<= PAGE_SHIFT;
1911 if (new_offset > offset) { 1911 if (new_offset > offset) {
1912 if (new_offset < inode->i_size) 1912 if (new_offset < inode->i_size)
1913 offset = new_offset; 1913 offset = new_offset;
@@ -2203,8 +2203,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2203 goto out; 2203 goto out;
2204 } 2204 }
2205 2205
2206 start = offset >> PAGE_CACHE_SHIFT; 2206 start = offset >> PAGE_SHIFT;
2207 end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 2207 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2208 /* Try to avoid a swapstorm if len is impossible to satisfy */ 2208 /* Try to avoid a swapstorm if len is impossible to satisfy */
2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2210 error = -ENOSPC; 2210 error = -ENOSPC;
@@ -2237,8 +2237,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2237 if (error) { 2237 if (error) {
2238 /* Remove the !PageUptodate pages we added */ 2238 /* Remove the !PageUptodate pages we added */
2239 shmem_undo_range(inode, 2239 shmem_undo_range(inode,
2240 (loff_t)start << PAGE_CACHE_SHIFT, 2240 (loff_t)start << PAGE_SHIFT,
2241 (loff_t)index << PAGE_CACHE_SHIFT, true); 2241 (loff_t)index << PAGE_SHIFT, true);
2242 goto undone; 2242 goto undone;
2243 } 2243 }
2244 2244
@@ -2259,7 +2259,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2259 */ 2259 */
2260 set_page_dirty(page); 2260 set_page_dirty(page);
2261 unlock_page(page); 2261 unlock_page(page);
2262 page_cache_release(page); 2262 put_page(page);
2263 cond_resched(); 2263 cond_resched();
2264 } 2264 }
2265 2265
@@ -2280,7 +2280,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2281 2281
2282 buf->f_type = TMPFS_MAGIC; 2282 buf->f_type = TMPFS_MAGIC;
2283 buf->f_bsize = PAGE_CACHE_SIZE; 2283 buf->f_bsize = PAGE_SIZE;
2284 buf->f_namelen = NAME_MAX; 2284 buf->f_namelen = NAME_MAX;
2285 if (sbinfo->max_blocks) { 2285 if (sbinfo->max_blocks) {
2286 buf->f_blocks = sbinfo->max_blocks; 2286 buf->f_blocks = sbinfo->max_blocks;
@@ -2523,7 +2523,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2523 struct shmem_inode_info *info; 2523 struct shmem_inode_info *info;
2524 2524
2525 len = strlen(symname) + 1; 2525 len = strlen(symname) + 1;
2526 if (len > PAGE_CACHE_SIZE) 2526 if (len > PAGE_SIZE)
2527 return -ENAMETOOLONG; 2527 return -ENAMETOOLONG;
2528 2528
2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
@@ -2562,7 +2562,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2562 SetPageUptodate(page); 2562 SetPageUptodate(page);
2563 set_page_dirty(page); 2563 set_page_dirty(page);
2564 unlock_page(page); 2564 unlock_page(page);
2565 page_cache_release(page); 2565 put_page(page);
2566 } 2566 }
2567 dir->i_size += BOGO_DIRENT_SIZE; 2567 dir->i_size += BOGO_DIRENT_SIZE;
2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -2835,7 +2835,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2835 if (*rest) 2835 if (*rest)
2836 goto bad_val; 2836 goto bad_val;
2837 sbinfo->max_blocks = 2837 sbinfo->max_blocks =
2838 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2838 DIV_ROUND_UP(size, PAGE_SIZE);
2839 } else if (!strcmp(this_char,"nr_blocks")) { 2839 } else if (!strcmp(this_char,"nr_blocks")) {
2840 sbinfo->max_blocks = memparse(value, &rest); 2840 sbinfo->max_blocks = memparse(value, &rest);
2841 if (*rest) 2841 if (*rest)
@@ -2940,7 +2940,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2940 2940
2941 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2941 if (sbinfo->max_blocks != shmem_default_max_blocks())
2942 seq_printf(seq, ",size=%luk", 2942 seq_printf(seq, ",size=%luk",
2943 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2943 sbinfo->max_blocks << (PAGE_SHIFT - 10));
2944 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2944 if (sbinfo->max_inodes != shmem_default_max_inodes())
2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
@@ -3082,8 +3082,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
3082 sbinfo->free_inodes = sbinfo->max_inodes; 3082 sbinfo->free_inodes = sbinfo->max_inodes;
3083 3083
3084 sb->s_maxbytes = MAX_LFS_FILESIZE; 3084 sb->s_maxbytes = MAX_LFS_FILESIZE;
3085 sb->s_blocksize = PAGE_CACHE_SIZE; 3085 sb->s_blocksize = PAGE_SIZE;
3086 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 3086 sb->s_blocksize_bits = PAGE_SHIFT;
3087 sb->s_magic = TMPFS_MAGIC; 3087 sb->s_magic = TMPFS_MAGIC;
3088 sb->s_op = &shmem_ops; 3088 sb->s_op = &shmem_ops;
3089 sb->s_time_gran = 1; 3089 sb->s_time_gran = 1;
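
shmem_undo_range() above rounds the byte range [lstart, lend] to whole pages and keeps the leftover offsets, so the partial head and tail pages can be zeroed rather than dropped. The same arithmetic in a standalone sketch, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t lstart = 1000, lend = 9999;      /* inclusive byte range */

        uint64_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;    /* 1 */
        uint64_t end   = (lend + 1) >> PAGE_SHIFT;                  /* 2 */
        unsigned long partial_start = lstart & (PAGE_SIZE - 1);     /* 1000 */
        unsigned long partial_end   = (lend + 1) & (PAGE_SIZE - 1); /* 1808 */

        /* Pages [start, end) are dropped whole; the pages holding byte
         * lstart and byte lend are only zeroed from/to these offsets. */
        printf("whole pages [%llu, %llu), head from %lu, tail to %lu\n",
               (unsigned long long)start, (unsigned long long)end,
               partial_start, partial_end);
        return 0;
}
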
diff --git a/mm/swap.c b/mm/swap.c
index 09fe5e97714a..a0bc206b4ac6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -114,7 +114,7 @@ void put_pages_list(struct list_head *pages)
114 114
115 victim = list_entry(pages->prev, struct page, lru); 115 victim = list_entry(pages->prev, struct page, lru);
116 list_del(&victim->lru); 116 list_del(&victim->lru);
117 page_cache_release(victim); 117 put_page(victim);
118 } 118 }
119} 119}
120EXPORT_SYMBOL(put_pages_list); 120EXPORT_SYMBOL(put_pages_list);
@@ -142,7 +142,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
142 return seg; 142 return seg;
143 143
144 pages[seg] = kmap_to_page(kiov[seg].iov_base); 144 pages[seg] = kmap_to_page(kiov[seg].iov_base);
145 page_cache_get(pages[seg]); 145 get_page(pages[seg]);
146 } 146 }
147 147
148 return seg; 148 return seg;
@@ -236,7 +236,7 @@ void rotate_reclaimable_page(struct page *page)
236 struct pagevec *pvec; 236 struct pagevec *pvec;
237 unsigned long flags; 237 unsigned long flags;
238 238
239 page_cache_get(page); 239 get_page(page);
240 local_irq_save(flags); 240 local_irq_save(flags);
241 pvec = this_cpu_ptr(&lru_rotate_pvecs); 241 pvec = this_cpu_ptr(&lru_rotate_pvecs);
242 if (!pagevec_add(pvec, page)) 242 if (!pagevec_add(pvec, page))
@@ -294,7 +294,7 @@ void activate_page(struct page *page)
294 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 294 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
295 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); 295 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
296 296
297 page_cache_get(page); 297 get_page(page);
298 if (!pagevec_add(pvec, page)) 298 if (!pagevec_add(pvec, page))
299 pagevec_lru_move_fn(pvec, __activate_page, NULL); 299 pagevec_lru_move_fn(pvec, __activate_page, NULL);
300 put_cpu_var(activate_page_pvecs); 300 put_cpu_var(activate_page_pvecs);
@@ -389,7 +389,7 @@ static void __lru_cache_add(struct page *page)
389{ 389{
390 struct pagevec *pvec = &get_cpu_var(lru_add_pvec); 390 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
391 391
392 page_cache_get(page); 392 get_page(page);
393 if (!pagevec_space(pvec)) 393 if (!pagevec_space(pvec))
394 __pagevec_lru_add(pvec); 394 __pagevec_lru_add(pvec);
395 pagevec_add(pvec, page); 395 pagevec_add(pvec, page);
@@ -646,7 +646,7 @@ void deactivate_page(struct page *page)
646 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { 646 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); 647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
648 648
649 page_cache_get(page); 649 get_page(page);
650 if (!pagevec_add(pvec, page)) 650 if (!pagevec_add(pvec, page))
651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); 651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
652 put_cpu_var(lru_deactivate_pvecs); 652 put_cpu_var(lru_deactivate_pvecs);
@@ -698,7 +698,7 @@ void lru_add_drain_all(void)
698} 698}
699 699
700/** 700/**
701 * release_pages - batched page_cache_release() 701 * release_pages - batched put_page()
702 * @pages: array of pages to release 702 * @pages: array of pages to release
703 * @nr: number of pages 703 * @nr: number of pages
704 * @cold: whether the pages are cache cold 704 * @cold: whether the pages are cache cold
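
Every mm/swap.c call site above follows one shape: get_page() pins the page, pagevec_add() queues it in a per-CPU batch, and the batch is flushed in a single pass once full. A userspace model of that pattern (struct page is stubbed with void *; PAGEVEC_SIZE was 14 in kernels of this era):

#include <stdio.h>

#define PAGEVEC_SIZE 14   /* kernel's batch size at the time */

struct pagevec {
        unsigned int nr;
        void *pages[PAGEVEC_SIZE];
};

/* Returns the space left after adding; 0 means "full, drain now". */
static unsigned int pagevec_add(struct pagevec *pvec, void *page)
{
        pvec->pages[pvec->nr++] = page;
        return PAGEVEC_SIZE - pvec->nr;
}

static void drain(struct pagevec *pvec)
{
        printf("draining %u pages in one batch\n", pvec->nr);
        pvec->nr = 0;   /* the kernel would move them onto the LRU here */
}

int main(void)
{
        struct pagevec pvec = { 0 };
        int dummy[40];

        for (int i = 0; i < 40; i++) {
                /* get_page(page) would pin the page before queueing it. */
                if (!pagevec_add(&pvec, &dummy[i]))
                        drain(&pvec);
        }
        if (pvec.nr)    /* leftover partial batch */
                drain(&pvec);
        return 0;
}
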
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 69cb2464e7dc..366ce3518703 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,7 +85,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
85 VM_BUG_ON_PAGE(PageSwapCache(page), page); 85 VM_BUG_ON_PAGE(PageSwapCache(page), page);
86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
87 87
88 page_cache_get(page); 88 get_page(page);
89 SetPageSwapCache(page); 89 SetPageSwapCache(page);
90 set_page_private(page, entry.val); 90 set_page_private(page, entry.val);
91 91
@@ -109,7 +109,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
109 VM_BUG_ON(error == -EEXIST); 109 VM_BUG_ON(error == -EEXIST);
110 set_page_private(page, 0UL); 110 set_page_private(page, 0UL);
111 ClearPageSwapCache(page); 111 ClearPageSwapCache(page);
112 page_cache_release(page); 112 put_page(page);
113 } 113 }
114 114
115 return error; 115 return error;
@@ -226,7 +226,7 @@ void delete_from_swap_cache(struct page *page)
226 spin_unlock_irq(&address_space->tree_lock); 226 spin_unlock_irq(&address_space->tree_lock);
227 227
228 swapcache_free(entry); 228 swapcache_free(entry);
229 page_cache_release(page); 229 put_page(page);
230} 230}
231 231
232/* 232/*
@@ -252,7 +252,7 @@ static inline void free_swap_cache(struct page *page)
252void free_page_and_swap_cache(struct page *page) 252void free_page_and_swap_cache(struct page *page)
253{ 253{
254 free_swap_cache(page); 254 free_swap_cache(page);
255 page_cache_release(page); 255 put_page(page);
256} 256}
257 257
258/* 258/*
@@ -380,7 +380,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
380 } while (err != -ENOMEM); 380 } while (err != -ENOMEM);
381 381
382 if (new_page) 382 if (new_page)
383 page_cache_release(new_page); 383 put_page(new_page);
384 return found_page; 384 return found_page;
385} 385}
386 386
@@ -495,7 +495,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
495 continue; 495 continue;
496 if (offset != entry_offset) 496 if (offset != entry_offset)
497 SetPageReadahead(page); 497 SetPageReadahead(page);
498 page_cache_release(page); 498 put_page(page);
499 } 499 }
500 blk_finish_plug(&plug); 500 blk_finish_plug(&plug);
501 501
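
The error path in __add_to_swap_cache() above is a standard acquire/rollback pairing: the reference taken on behalf of the swap cache is dropped only if publishing the page fails, and the caller's own reference is never touched. A toy refcount model of that contract (all names here are illustrative, not kernel API):

#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

/* Model of the pattern: pin for the cache, unpin only on failure. */
static int add_to_cache(struct page *p, int insert_ok)
{
        get_page(p);                 /* reference owned by the cache */
        if (insert_ok)
                return 0;            /* success: cache keeps its pin */
        put_page(p);                 /* failure: roll the pin back   */
        return -1;
}

int main(void)
{
        struct page p = { .refcount = 1 };   /* caller's own reference */

        add_to_cache(&p, 0);
        printf("after failed insert: refcount %d\n", p.refcount);     /* 1 */
        add_to_cache(&p, 1);
        printf("after successful insert: refcount %d\n", p.refcount); /* 2 */
        return 0;
}
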
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 560ad380634c..83874eced5bf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -119,7 +119,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
119 ret = try_to_free_swap(page); 119 ret = try_to_free_swap(page);
120 unlock_page(page); 120 unlock_page(page);
121 } 121 }
122 page_cache_release(page); 122 put_page(page);
123 return ret; 123 return ret;
124} 124}
125 125
@@ -1000,7 +1000,7 @@ int free_swap_and_cache(swp_entry_t entry)
1000 page = find_get_page(swap_address_space(entry), 1000 page = find_get_page(swap_address_space(entry),
1001 entry.val); 1001 entry.val);
1002 if (page && !trylock_page(page)) { 1002 if (page && !trylock_page(page)) {
1003 page_cache_release(page); 1003 put_page(page);
1004 page = NULL; 1004 page = NULL;
1005 } 1005 }
1006 } 1006 }
@@ -1017,7 +1017,7 @@ int free_swap_and_cache(swp_entry_t entry)
1017 SetPageDirty(page); 1017 SetPageDirty(page);
1018 } 1018 }
1019 unlock_page(page); 1019 unlock_page(page);
1020 page_cache_release(page); 1020 put_page(page);
1021 } 1021 }
1022 return p != NULL; 1022 return p != NULL;
1023} 1023}
@@ -1518,7 +1518,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
1518 } 1518 }
1519 if (retval) { 1519 if (retval) {
1520 unlock_page(page); 1520 unlock_page(page);
1521 page_cache_release(page); 1521 put_page(page);
1522 break; 1522 break;
1523 } 1523 }
1524 1524
@@ -1570,7 +1570,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
1570 */ 1570 */
1571 SetPageDirty(page); 1571 SetPageDirty(page);
1572 unlock_page(page); 1572 unlock_page(page);
1573 page_cache_release(page); 1573 put_page(page);
1574 1574
1575 /* 1575 /*
1576 * Make sure that we aren't completely killing 1576 * Make sure that we aren't completely killing
@@ -2574,7 +2574,7 @@ bad_swap:
2574out: 2574out:
2575 if (page && !IS_ERR(page)) { 2575 if (page && !IS_ERR(page)) {
2576 kunmap(page); 2576 kunmap(page);
2577 page_cache_release(page); 2577 put_page(page);
2578 } 2578 }
2579 if (name) 2579 if (name)
2580 putname(name); 2580 putname(name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7598b552ae03..b00272810871 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
118 return -EIO; 118 return -EIO;
119 119
120 if (page_has_private(page)) 120 if (page_has_private(page))
121 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 121 do_invalidatepage(page, 0, PAGE_SIZE);
122 122
123 /* 123 /*
124 * Some filesystems seem to re-dirty the page even after 124 * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
159{ 159{
160 if (page_mapped(page)) { 160 if (page_mapped(page)) {
161 unmap_mapping_range(mapping, 161 unmap_mapping_range(mapping,
162 (loff_t)page->index << PAGE_CACHE_SHIFT, 162 (loff_t)page->index << PAGE_SHIFT,
163 PAGE_CACHE_SIZE, 0); 163 PAGE_SIZE, 0);
164 } 164 }
165 return truncate_complete_page(mapping, page); 165 return truncate_complete_page(mapping, page);
166} 166}
@@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
241 return; 241 return;
242 242
243 /* Offsets within partial pages */ 243 /* Offsets within partial pages */
244 partial_start = lstart & (PAGE_CACHE_SIZE - 1); 244 partial_start = lstart & (PAGE_SIZE - 1);
245 partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 245 partial_end = (lend + 1) & (PAGE_SIZE - 1);
246 246
247 /* 247 /*
248 * 'start' and 'end' always covers the range of pages to be fully 248 * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
250 * start of the range and 'partial_end' at the end of the range. 250 * start of the range and 'partial_end' at the end of the range.
251 * Note that 'end' is exclusive while 'lend' is inclusive. 251 * Note that 'end' is exclusive while 'lend' is inclusive.
252 */ 252 */
253 start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 253 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
254 if (lend == -1) 254 if (lend == -1)
255 /* 255 /*
256 * lend == -1 indicates end-of-file so we have to set 'end' 256 * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
259 */ 259 */
260 end = -1; 260 end = -1;
261 else 261 else
262 end = (lend + 1) >> PAGE_CACHE_SHIFT; 262 end = (lend + 1) >> PAGE_SHIFT;
263 263
264 pagevec_init(&pvec, 0); 264 pagevec_init(&pvec, 0);
265 index = start; 265 index = start;
@@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
298 if (partial_start) { 298 if (partial_start) {
299 struct page *page = find_lock_page(mapping, start - 1); 299 struct page *page = find_lock_page(mapping, start - 1);
300 if (page) { 300 if (page) {
301 unsigned int top = PAGE_CACHE_SIZE; 301 unsigned int top = PAGE_SIZE;
302 if (start > end) { 302 if (start > end) {
303 /* Truncation within a single page */ 303 /* Truncation within a single page */
304 top = partial_end; 304 top = partial_end;
@@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
311 do_invalidatepage(page, partial_start, 311 do_invalidatepage(page, partial_start,
312 top - partial_start); 312 top - partial_start);
313 unlock_page(page); 313 unlock_page(page);
314 page_cache_release(page); 314 put_page(page);
315 } 315 }
316 } 316 }
317 if (partial_end) { 317 if (partial_end) {
@@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
324 do_invalidatepage(page, 0, 324 do_invalidatepage(page, 0,
325 partial_end); 325 partial_end);
326 unlock_page(page); 326 unlock_page(page);
327 page_cache_release(page); 327 put_page(page);
328 } 328 }
329 } 329 }
330 /* 330 /*
@@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
538 if (mapping->a_ops->freepage) 538 if (mapping->a_ops->freepage)
539 mapping->a_ops->freepage(page); 539 mapping->a_ops->freepage(page);
540 540
541 page_cache_release(page); /* pagecache ref */ 541 put_page(page); /* pagecache ref */
542 return 1; 542 return 1;
543failed: 543failed:
544 spin_unlock_irqrestore(&mapping->tree_lock, flags); 544 spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
608 * Zap the rest of the file in one hit. 608 * Zap the rest of the file in one hit.
609 */ 609 */
610 unmap_mapping_range(mapping, 610 unmap_mapping_range(mapping,
611 (loff_t)index << PAGE_CACHE_SHIFT, 611 (loff_t)index << PAGE_SHIFT,
612 (loff_t)(1 + end - index) 612 (loff_t)(1 + end - index)
613 << PAGE_CACHE_SHIFT, 613 << PAGE_SHIFT,
614 0); 614 0);
615 did_range_unmap = 1; 615 did_range_unmap = 1;
616 } else { 616 } else {
617 /* 617 /*
618 * Just zap this page 618 * Just zap this page
619 */ 619 */
620 unmap_mapping_range(mapping, 620 unmap_mapping_range(mapping,
621 (loff_t)index << PAGE_CACHE_SHIFT, 621 (loff_t)index << PAGE_SHIFT,
622 PAGE_CACHE_SIZE, 0); 622 PAGE_SIZE, 0);
623 } 623 }
624 } 624 }
625 BUG_ON(page_mapped(page)); 625 BUG_ON(page_mapped(page));
@@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
744 744
745 WARN_ON(to > inode->i_size); 745 WARN_ON(to > inode->i_size);
746 746
747 if (from >= to || bsize == PAGE_CACHE_SIZE) 747 if (from >= to || bsize == PAGE_SIZE)
748 return; 748 return;
749 /* Page straddling @from will not have any hole block created? */ 749 /* Page straddling @from will not have any hole block created? */
750 rounded_from = round_up(from, bsize); 750 rounded_from = round_up(from, bsize);
751 if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) 751 if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
752 return; 752 return;
753 753
754 index = from >> PAGE_CACHE_SHIFT; 754 index = from >> PAGE_SHIFT;
755 page = find_lock_page(inode->i_mapping, index); 755 page = find_lock_page(inode->i_mapping, index);
756 /* Page not cached? Nothing to do */ 756 /* Page not cached? Nothing to do */
757 if (!page) 757 if (!page)
@@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
763 if (page_mkclean(page)) 763 if (page_mkclean(page))
764 set_page_dirty(page); 764 set_page_dirty(page);
765 unlock_page(page); 765 unlock_page(page);
766 page_cache_release(page); 766 put_page(page);
767} 767}
768EXPORT_SYMBOL(pagecache_isize_extended); 768EXPORT_SYMBOL(pagecache_isize_extended);
769 769
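
invalidate_inode_pages2_range() above converts page indexes back into the byte arguments unmap_mapping_range() expects: either exactly one page, or every page from index through end in a single call. The byte math, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t index = 10, end = 25;       /* inclusive page range */

        /* "Just zap this page": one page's worth of bytes. */
        uint64_t one_start = index << PAGE_SHIFT;
        uint64_t one_len   = PAGE_SIZE;

        /* "Zap the rest of the file in one hit": pages index..end. */
        uint64_t all_start = index << PAGE_SHIFT;
        uint64_t all_len   = (1 + end - index) << PAGE_SHIFT;

        printf("one page: bytes [%llu, %llu)\n",
               (unsigned long long)one_start,
               (unsigned long long)(one_start + one_len));
        printf("range:    bytes [%llu, %llu)\n",
               (unsigned long long)all_start,
               (unsigned long long)(all_start + all_len));
        return 0;
}
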
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9f3a0290b273..af817e5060fb 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -93,7 +93,7 @@ out_release_uncharge_unlock:
93 pte_unmap_unlock(dst_pte, ptl); 93 pte_unmap_unlock(dst_pte, ptl);
94 mem_cgroup_cancel_charge(page, memcg, false); 94 mem_cgroup_cancel_charge(page, memcg, false);
95out_release: 95out_release:
96 page_cache_release(page); 96 put_page(page);
97 goto out; 97 goto out;
98} 98}
99 99
@@ -287,7 +287,7 @@ out_unlock:
287 up_read(&dst_mm->mmap_sem); 287 up_read(&dst_mm->mmap_sem);
288out: 288out:
289 if (page) 289 if (page)
290 page_cache_release(page); 290 put_page(page);
291 BUG_ON(copied < 0); 291 BUG_ON(copied < 0);
292 BUG_ON(err > 0); 292 BUG_ON(err > 0);
293 BUG_ON(!copied && !err); 293 BUG_ON(!copied && !err);
diff --git a/mm/zswap.c b/mm/zswap.c
index bf14508afd64..91dad80d068b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -869,7 +869,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
869 869
870 case ZSWAP_SWAPCACHE_EXIST: 870 case ZSWAP_SWAPCACHE_EXIST:
871 /* page is already in the swap cache, ignore for now */ 871 /* page is already in the swap cache, ignore for now */
872 page_cache_release(page); 872 put_page(page);
873 ret = -EEXIST; 873 ret = -EEXIST;
874 goto fail; 874 goto fail;
875 875
@@ -897,7 +897,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
897 897
898 /* start writeback */ 898 /* start writeback */
899 __swap_writepage(page, &wbc, end_swap_bio_write); 899 __swap_writepage(page, &wbc, end_swap_bio_write);
900 page_cache_release(page); 900 put_page(page);
901 zswap_written_back_pages++; 901 zswap_written_back_pages++;
902 902
903 spin_lock(&tree->lock); 903 spin_lock(&tree->lock);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1831f6353622..a5502898ea33 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -269,7 +269,7 @@ static void _ceph_msgr_exit(void)
269 } 269 }
270 270
271 BUG_ON(zero_page == NULL); 271 BUG_ON(zero_page == NULL);
272 page_cache_release(zero_page); 272 put_page(zero_page);
273 zero_page = NULL; 273 zero_page = NULL;
274 274
275 ceph_msgr_slab_exit(); 275 ceph_msgr_slab_exit();
@@ -282,7 +282,7 @@ int ceph_msgr_init(void)
282 282
283 BUG_ON(zero_page != NULL); 283 BUG_ON(zero_page != NULL);
284 zero_page = ZERO_PAGE(0); 284 zero_page = ZERO_PAGE(0);
285 page_cache_get(zero_page); 285 get_page(zero_page);
286 286
287 /* 287 /*
288 * The number of active work items is limited by the number of 288 * The number of active work items is limited by the number of
@@ -1602,7 +1602,7 @@ static int write_partial_skip(struct ceph_connection *con)
1602 1602
1603 dout("%s %p %d left\n", __func__, con, con->out_skip); 1603 dout("%s %p %d left\n", __func__, con, con->out_skip);
1604 while (con->out_skip > 0) { 1604 while (con->out_skip > 0) {
1605 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); 1605 size_t size = min(con->out_skip, (int) PAGE_SIZE);
1606 1606
1607 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); 1607 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1608 if (ret <= 0) 1608 if (ret <= 0)
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index c7c220a736e5..6864007e64fc 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -56,7 +56,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
56 size_t bit = pl->room; 56 size_t bit = pl->room;
57 int ret; 57 int ret;
58 58
59 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), 59 memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
60 buf, bit); 60 buf, bit);
61 pl->length += bit; 61 pl->length += bit;
62 pl->room -= bit; 62 pl->room -= bit;
@@ -67,7 +67,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
67 return ret; 67 return ret;
68 } 68 }
69 69
70 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); 70 memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
71 pl->length += len; 71 pl->length += len;
72 pl->room -= len; 72 pl->room -= len;
73 return 0; 73 return 0;
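
The "& ~PAGE_MASK" idiom in ceph_pagelist_append() deserves spelling out: PAGE_MASK is ~(PAGE_SIZE - 1), so ANDing with its complement keeps only the low PAGE_SHIFT bits, i.e. the byte offset within the current page. A quick check, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))    /* as defined by the kernel */

int main(void)
{
        uint64_t length = 10000;

        uint64_t offset_in_page = length & ~PAGE_MASK;  /* 10000 % 4096 */
        uint64_t page_index     = length >> PAGE_SHIFT; /* 10000 / 4096 */

        printf("byte %llu = page %llu + offset %llu\n",
               (unsigned long long)length,
               (unsigned long long)page_index,
               (unsigned long long)offset_in_page);     /* page 2 + 1808 */
        return 0;
}
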
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 10297f7a89ba..00d2601407c5 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -95,19 +95,19 @@ int ceph_copy_user_to_page_vector(struct page **pages,
95 loff_t off, size_t len) 95 loff_t off, size_t len)
96{ 96{
97 int i = 0; 97 int i = 0;
98 int po = off & ~PAGE_CACHE_MASK; 98 int po = off & ~PAGE_MASK;
99 int left = len; 99 int left = len;
100 int l, bad; 100 int l, bad;
101 101
102 while (left > 0) { 102 while (left > 0) {
103 l = min_t(int, PAGE_CACHE_SIZE-po, left); 103 l = min_t(int, PAGE_SIZE-po, left);
104 bad = copy_from_user(page_address(pages[i]) + po, data, l); 104 bad = copy_from_user(page_address(pages[i]) + po, data, l);
105 if (bad == l) 105 if (bad == l)
106 return -EFAULT; 106 return -EFAULT;
107 data += l - bad; 107 data += l - bad;
108 left -= l - bad; 108 left -= l - bad;
109 po += l - bad; 109 po += l - bad;
110 if (po == PAGE_CACHE_SIZE) { 110 if (po == PAGE_SIZE) {
111 po = 0; 111 po = 0;
112 i++; 112 i++;
113 } 113 }
@@ -121,17 +121,17 @@ void ceph_copy_to_page_vector(struct page **pages,
121 loff_t off, size_t len) 121 loff_t off, size_t len)
122{ 122{
123 int i = 0; 123 int i = 0;
124 size_t po = off & ~PAGE_CACHE_MASK; 124 size_t po = off & ~PAGE_MASK;
125 size_t left = len; 125 size_t left = len;
126 126
127 while (left > 0) { 127 while (left > 0) {
128 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); 128 size_t l = min_t(size_t, PAGE_SIZE-po, left);
129 129
130 memcpy(page_address(pages[i]) + po, data, l); 130 memcpy(page_address(pages[i]) + po, data, l);
131 data += l; 131 data += l;
132 left -= l; 132 left -= l;
133 po += l; 133 po += l;
134 if (po == PAGE_CACHE_SIZE) { 134 if (po == PAGE_SIZE) {
135 po = 0; 135 po = 0;
136 i++; 136 i++;
137 } 137 }
@@ -144,17 +144,17 @@ void ceph_copy_from_page_vector(struct page **pages,
144 loff_t off, size_t len) 144 loff_t off, size_t len)
145{ 145{
146 int i = 0; 146 int i = 0;
147 size_t po = off & ~PAGE_CACHE_MASK; 147 size_t po = off & ~PAGE_MASK;
148 size_t left = len; 148 size_t left = len;
149 149
150 while (left > 0) { 150 while (left > 0) {
151 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); 151 size_t l = min_t(size_t, PAGE_SIZE-po, left);
152 152
153 memcpy(data, page_address(pages[i]) + po, l); 153 memcpy(data, page_address(pages[i]) + po, l);
154 data += l; 154 data += l;
155 left -= l; 155 left -= l;
156 po += l; 156 po += l;
157 if (po == PAGE_CACHE_SIZE) { 157 if (po == PAGE_SIZE) {
158 po = 0; 158 po = 0;
159 i++; 159 i++;
160 } 160 }
@@ -168,25 +168,25 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
168 */ 168 */
169void ceph_zero_page_vector_range(int off, int len, struct page **pages) 169void ceph_zero_page_vector_range(int off, int len, struct page **pages)
170{ 170{
171 int i = off >> PAGE_CACHE_SHIFT; 171 int i = off >> PAGE_SHIFT;
172 172
173 off &= ~PAGE_CACHE_MASK; 173 off &= ~PAGE_MASK;
174 174
175 dout("zero_page_vector_page %u~%u\n", off, len); 175 dout("zero_page_vector_page %u~%u\n", off, len);
176 176
177 /* leading partial page? */ 177 /* leading partial page? */
178 if (off) { 178 if (off) {
179 int end = min((int)PAGE_CACHE_SIZE, off + len); 179 int end = min((int)PAGE_SIZE, off + len);
180 dout("zeroing %d %p head from %d\n", i, pages[i], 180 dout("zeroing %d %p head from %d\n", i, pages[i],
181 (int)off); 181 (int)off);
182 zero_user_segment(pages[i], off, end); 182 zero_user_segment(pages[i], off, end);
183 len -= (end - off); 183 len -= (end - off);
184 i++; 184 i++;
185 } 185 }
186 while (len >= PAGE_CACHE_SIZE) { 186 while (len >= PAGE_SIZE) {
187 dout("zeroing %d %p len=%d\n", i, pages[i], len); 187 dout("zeroing %d %p len=%d\n", i, pages[i], len);
188 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); 188 zero_user_segment(pages[i], 0, PAGE_SIZE);
189 len -= PAGE_CACHE_SIZE; 189 len -= PAGE_SIZE;
190 i++; 190 i++;
191 } 191 }
192 /* trailing partial page? */ 192 /* trailing partial page? */
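
ceph_copy_to_page_vector() and its siblings above share one loop: clamp each copy to the room left in the current page, then advance to the next page when the in-page offset wraps. A self-contained model using heap buffers in place of struct page (the function name is illustrative, not ceph API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096   /* assumption: 4 KiB pages */

/* Model of ceph_copy_to_page_vector(): scatter 'len' bytes starting at
 * logical offset 'off' across an array of page-sized buffers. */
static void copy_to_pages(char **pages, const char *data,
                          size_t off, size_t len)
{
        size_t i  = off / PAGE_SIZE;
        size_t po = off % PAGE_SIZE;         /* offset within pages[i] */

        while (len > 0) {
                size_t l = PAGE_SIZE - po;   /* room left in this page */
                if (l > len)
                        l = len;
                memcpy(pages[i] + po, data, l);
                data += l;
                len  -= l;
                po   += l;
                if (po == PAGE_SIZE) {       /* page full: move on */
                        po = 0;
                        i++;
                }
        }
}

int main(void)
{
        char *pages[2] = { malloc(PAGE_SIZE), malloc(PAGE_SIZE) };
        char payload[64] = "crosses a page boundary";

        /* Start 10 bytes before the end of page 0. */
        copy_to_pages(pages, payload, PAGE_SIZE - 10, sizeof(payload));
        printf("tail of page 0: %.10s...\n", pages[0] + PAGE_SIZE - 10);
        free(pages[0]);
        free(pages[1]);
        return 0;
}
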
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8c6bc795f060..15612ffa8d57 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1728,8 +1728,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
1728 return 0; 1728 return 0;
1729 } 1729 }
1730 1730
1731 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1731 first = snd_buf->page_base >> PAGE_SHIFT;
1732 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; 1732 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
1733 rqstp->rq_enc_pages_num = last - first + 1 + 1; 1733 rqstp->rq_enc_pages_num = last - first + 1 + 1;
1734 rqstp->rq_enc_pages 1734 rqstp->rq_enc_pages
1735 = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), 1735 = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
@@ -1775,10 +1775,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1775 status = alloc_enc_pages(rqstp); 1775 status = alloc_enc_pages(rqstp);
1776 if (status) 1776 if (status)
1777 return status; 1777 return status;
1778 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1778 first = snd_buf->page_base >> PAGE_SHIFT;
1779 inpages = snd_buf->pages + first; 1779 inpages = snd_buf->pages + first;
1780 snd_buf->pages = rqstp->rq_enc_pages; 1780 snd_buf->pages = rqstp->rq_enc_pages;
1781 snd_buf->page_base -= first << PAGE_CACHE_SHIFT; 1781 snd_buf->page_base -= first << PAGE_SHIFT;
1782 /* 1782 /*
1783 * Give the tail its own page, in case we need extra space in the 1783 * Give the tail its own page, in case we need extra space in the
1784 * head when wrapping: 1784 * head when wrapping:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d94a8e1e9f05..045e11ecd332 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -465,7 +465,7 @@ encryptor(struct scatterlist *sg, void *data)
465 page_pos = desc->pos - outbuf->head[0].iov_len; 465 page_pos = desc->pos - outbuf->head[0].iov_len;
466 if (page_pos >= 0 && page_pos < outbuf->page_len) { 466 if (page_pos >= 0 && page_pos < outbuf->page_len) {
467 /* pages are not in place: */ 467 /* pages are not in place: */
468 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; 468 int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
469 in_page = desc->pages[i]; 469 in_page = desc->pages[i];
470 } else { 470 } else {
471 in_page = sg_page(sg); 471 in_page = sg_page(sg);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 765088e4ad84..a737c2da0837 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -79,9 +79,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
79 len -= buf->head[0].iov_len; 79 len -= buf->head[0].iov_len;
80 if (len <= buf->page_len) { 80 if (len <= buf->page_len) {
81 unsigned int last = (buf->page_base + len - 1) 81 unsigned int last = (buf->page_base + len - 1)
82 >>PAGE_CACHE_SHIFT; 82 >>PAGE_SHIFT;
83 unsigned int offset = (buf->page_base + len - 1) 83 unsigned int offset = (buf->page_base + len - 1)
84 & (PAGE_CACHE_SIZE - 1); 84 & (PAGE_SIZE - 1);
85 ptr = kmap_atomic(buf->pages[last]); 85 ptr = kmap_atomic(buf->pages[last]);
86 pad = *(ptr + offset); 86 pad = *(ptr + offset);
87 kunmap_atomic(ptr); 87 kunmap_atomic(ptr);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 008c25d1b9f9..553bf95f7003 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -881,7 +881,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
881 char *kaddr; 881 char *kaddr;
882 ssize_t ret = -ENOMEM; 882 ssize_t ret = -ENOMEM;
883 883
884 if (count >= PAGE_CACHE_SIZE) 884 if (count >= PAGE_SIZE)
885 goto out_slow; 885 goto out_slow;
886 886
887 page = find_or_create_page(mapping, 0, GFP_KERNEL); 887 page = find_or_create_page(mapping, 0, GFP_KERNEL);
@@ -892,7 +892,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
892 ret = cache_do_downcall(kaddr, buf, count, cd); 892 ret = cache_do_downcall(kaddr, buf, count, cd);
893 kunmap(page); 893 kunmap(page);
894 unlock_page(page); 894 unlock_page(page);
895 page_cache_release(page); 895 put_page(page);
896 return ret; 896 return ret;
897out_slow: 897out_slow:
898 return cache_slow_downcall(buf, count, cd); 898 return cache_slow_downcall(buf, count, cd);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 31789ef3e614..fc48eca21fd2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1390,8 +1390,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
1390 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1390 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1391 int err; 1391 int err;
1392 1392
1393 sb->s_blocksize = PAGE_CACHE_SIZE; 1393 sb->s_blocksize = PAGE_SIZE;
1394 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1394 sb->s_blocksize_bits = PAGE_SHIFT;
1395 sb->s_magic = RPCAUTH_GSSMAGIC; 1395 sb->s_magic = RPCAUTH_GSSMAGIC;
1396 sb->s_op = &s_ops; 1396 sb->s_op = &s_ops;
1397 sb->s_d_op = &simple_dentry_operations; 1397 sb->s_d_op = &simple_dentry_operations;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 2df87f78e518..de70c78025d7 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -96,8 +96,8 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
96 if (base || xdr->page_base) { 96 if (base || xdr->page_base) {
97 pglen -= base; 97 pglen -= base;
98 base += xdr->page_base; 98 base += xdr->page_base;
99 ppage += base >> PAGE_CACHE_SHIFT; 99 ppage += base >> PAGE_SHIFT;
100 base &= ~PAGE_CACHE_MASK; 100 base &= ~PAGE_MASK;
101 } 101 }
102 do { 102 do {
103 char *kaddr; 103 char *kaddr;
@@ -113,7 +113,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
113 } 113 }
114 } 114 }
115 115
116 len = PAGE_CACHE_SIZE; 116 len = PAGE_SIZE;
117 kaddr = kmap_atomic(*ppage); 117 kaddr = kmap_atomic(*ppage);
118 if (base) { 118 if (base) {
119 len -= base; 119 len -= base;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4c1b53..6bdb3865212d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
164 * Note: the addresses pgto_base and pgfrom_base are both calculated in 164 * Note: the addresses pgto_base and pgfrom_base are both calculated in
165 * the same way: 165 * the same way:
166 * if a memory area starts at byte 'base' in page 'pages[i]', 166 * if a memory area starts at byte 'base' in page 'pages[i]',
167 * then its address is given as (i << PAGE_CACHE_SHIFT) + base 167 * then its address is given as (i << PAGE_SHIFT) + base
168 * Also note: pgfrom_base must be < pgto_base, but the memory areas 168 * Also note: pgfrom_base must be < pgto_base, but the memory areas
169 * they point to may overlap. 169 * they point to may overlap.
170 */ 170 */
@@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
181 pgto_base += len; 181 pgto_base += len;
182 pgfrom_base += len; 182 pgfrom_base += len;
183 183
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); 184 pgto = pages + (pgto_base >> PAGE_SHIFT);
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); 185 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
186 186
187 pgto_base &= ~PAGE_CACHE_MASK; 187 pgto_base &= ~PAGE_MASK;
188 pgfrom_base &= ~PAGE_CACHE_MASK; 188 pgfrom_base &= ~PAGE_MASK;
189 189
190 do { 190 do {
191 /* Are any pointers crossing a page boundary? */ 191 /* Are any pointers crossing a page boundary? */
192 if (pgto_base == 0) { 192 if (pgto_base == 0) {
193 pgto_base = PAGE_CACHE_SIZE; 193 pgto_base = PAGE_SIZE;
194 pgto--; 194 pgto--;
195 } 195 }
196 if (pgfrom_base == 0) { 196 if (pgfrom_base == 0) {
197 pgfrom_base = PAGE_CACHE_SIZE; 197 pgfrom_base = PAGE_SIZE;
198 pgfrom--; 198 pgfrom--;
199 } 199 }
200 200
@@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
236 char *vto; 236 char *vto;
237 size_t copy; 237 size_t copy;
238 238
239 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); 239 pgto = pages + (pgbase >> PAGE_SHIFT);
240 pgbase &= ~PAGE_CACHE_MASK; 240 pgbase &= ~PAGE_MASK;
241 241
242 for (;;) { 242 for (;;) {
243 copy = PAGE_CACHE_SIZE - pgbase; 243 copy = PAGE_SIZE - pgbase;
244 if (copy > len) 244 if (copy > len)
245 copy = len; 245 copy = len;
246 246
@@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
253 break; 253 break;
254 254
255 pgbase += copy; 255 pgbase += copy;
256 if (pgbase == PAGE_CACHE_SIZE) { 256 if (pgbase == PAGE_SIZE) {
257 flush_dcache_page(*pgto); 257 flush_dcache_page(*pgto);
258 pgbase = 0; 258 pgbase = 0;
259 pgto++; 259 pgto++;
@@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
280 char *vfrom; 280 char *vfrom;
281 size_t copy; 281 size_t copy;
282 282
283 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); 283 pgfrom = pages + (pgbase >> PAGE_SHIFT);
284 pgbase &= ~PAGE_CACHE_MASK; 284 pgbase &= ~PAGE_MASK;
285 285
286 do { 286 do {
287 copy = PAGE_CACHE_SIZE - pgbase; 287 copy = PAGE_SIZE - pgbase;
288 if (copy > len) 288 if (copy > len)
289 copy = len; 289 copy = len;
290 290
@@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
293 kunmap_atomic(vfrom); 293 kunmap_atomic(vfrom);
294 294
295 pgbase += copy; 295 pgbase += copy;
296 if (pgbase == PAGE_CACHE_SIZE) { 296 if (pgbase == PAGE_SIZE) {
297 pgbase = 0; 297 pgbase = 0;
298 pgfrom++; 298 pgfrom++;
299 } 299 }
@@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1038 if (base < buf->page_len) { 1038 if (base < buf->page_len) {
1039 subbuf->page_len = min(buf->page_len - base, len); 1039 subbuf->page_len = min(buf->page_len - base, len);
1040 base += buf->page_base; 1040 base += buf->page_base;
1041 subbuf->page_base = base & ~PAGE_CACHE_MASK; 1041 subbuf->page_base = base & ~PAGE_MASK;
1042 subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; 1042 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1043 len -= subbuf->page_len; 1043 len -= subbuf->page_len;
1044 base = 0; 1044 base = 0;
1045 } else { 1045 } else {
@@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1297 todo -= avail_here; 1297 todo -= avail_here;
1298 1298
1299 base += buf->page_base; 1299 base += buf->page_base;
1300 ppages = buf->pages + (base >> PAGE_CACHE_SHIFT); 1300 ppages = buf->pages + (base >> PAGE_SHIFT);
1301 base &= ~PAGE_CACHE_MASK; 1301 base &= ~PAGE_MASK;
1302 avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base, 1302 avail_page = min_t(unsigned int, PAGE_SIZE - base,
1303 avail_here); 1303 avail_here);
1304 c = kmap(*ppages) + base; 1304 c = kmap(*ppages) + base;
1305 1305
@@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1383 } 1383 }
1384 1384
1385 avail_page = min(avail_here, 1385 avail_page = min(avail_here,
1386 (unsigned int) PAGE_CACHE_SIZE); 1386 (unsigned int) PAGE_SIZE);
1387 } 1387 }
1388 base = buf->page_len; /* align to start of tail */ 1388 base = buf->page_len; /* align to start of tail */
1389 } 1389 }
@@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 		if (page_len > len)
 			page_len = len;
 		len -= page_len;
-		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - page_offset;
+		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
+		i = (offset + buf->page_base) >> PAGE_SHIFT;
+		thislen = PAGE_SIZE - page_offset;
 		do {
 			if (thislen > page_len)
 				thislen = page_len;
@@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 			page_len -= thislen;
 			i++;
 			page_offset = 0;
-			thislen = PAGE_CACHE_SIZE;
+			thislen = PAGE_SIZE;
 		} while (page_len != 0);
 		offset = 0;
 	}
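
The xdr.c hunks above are part of the tree-wide removal of the page-cache macros: PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK were straight aliases of PAGE_SIZE, PAGE_SHIFT and PAGE_MASK, so the substitution is behavior-preserving. A minimal userspace sketch of the index/offset split the hunks keep using follows; the 4 KiB page size and the xdr_locate_page() helper name are assumptions for illustration only.

#include <stdio.h>
#include <stddef.h>

/* Assumed values; in the kernel these come from asm/page.h. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)	/* 4096 */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Split a byte offset into (page index, offset within page), the same
 * arithmetic as "base >> PAGE_SHIFT" and "base & ~PAGE_MASK" above. */
static void xdr_locate_page(size_t base, size_t *index, size_t *offset)
{
	*index  = base >> PAGE_SHIFT;
	*offset = base & ~PAGE_MASK;
}

int main(void)
{
	size_t index, offset;

	xdr_locate_page(10000, &index, &offset);
	printf("index=%zu offset=%zu\n", index, offset);
	return 0;
}

For base = 10000 this prints index=2 offset=1808: the byte lands 1808 bytes into the third page.
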
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index d14bf411515b..a452ad7cec40 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -15,7 +15,6 @@ config SND_USB_AUDIO
 	select SND_RAWMIDI
 	select SND_PCM
 	select BITREVERSE
-	select SND_USB_AUDIO_USE_MEDIA_CONTROLLER if MEDIA_CONTROLLER && (MEDIA_SUPPORT=y || MEDIA_SUPPORT=SND_USB_AUDIO)
 	help
 	  Say Y here to include support for USB audio and USB MIDI
 	  devices.
@@ -23,9 +22,6 @@ config SND_USB_AUDIO
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-usb-audio.
 
-config SND_USB_AUDIO_USE_MEDIA_CONTROLLER
-	bool
-
 config SND_USB_UA101
 	tristate "Edirol UA-101/UA-1000 driver"
 	select SND_PCM
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 8dca3c407f5a..2d2d122b069f 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -15,8 +15,6 @@ snd-usb-audio-objs := card.o \
 			quirks.o \
 			stream.o
 
-snd-usb-audio-$(CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER) += media.o
-
 snd-usbmidi-lib-objs := midi.o
 
 # Toplevel Module Dependency
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 63244bbba8c7..3fc63583a537 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -66,7 +66,6 @@
 #include "format.h"
 #include "power.h"
 #include "stream.h"
-#include "media.h"
 
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("USB Audio");
@@ -612,11 +611,6 @@ static int usb_audio_probe(struct usb_interface *intf,
 	if (err < 0)
 		goto __error;
 
-	if (quirk->media_device) {
-		/* don't want to fail when media_snd_device_create() fails */
-		media_snd_device_create(chip, intf);
-	}
-
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
 	usb_set_intfdata(intf, chip);
@@ -673,14 +667,6 @@ static void usb_audio_disconnect(struct usb_interface *intf)
 	list_for_each(p, &chip->midi_list) {
 		snd_usbmidi_disconnect(p);
 	}
-	/*
-	 * Nice to check quirk && quirk->media_device
-	 * need some special handlings. Doesn't look like
-	 * we have access to quirk here
-	 * Acceses mixer_list
-	 */
-	media_snd_device_delete(chip);
-
 	/* release mixer resources */
 	list_for_each_entry(mixer, &chip->mixer_list, list) {
 		snd_usb_mixer_disconnect(mixer);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 34a0898e2238..71778ca4b26a 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -105,8 +105,6 @@ struct snd_usb_endpoint {
 	struct list_head list;
 };
 
-struct media_ctl;
-
 struct snd_usb_substream {
 	struct snd_usb_stream *stream;
 	struct usb_device *dev;
@@ -158,7 +156,6 @@ struct snd_usb_substream {
 	} dsd_dop;
 
 	bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
-	struct media_ctl *media_ctl;
 };
 
 struct snd_usb_stream {
diff --git a/sound/usb/media.c b/sound/usb/media.c
deleted file mode 100644
index 93a50d01490c..000000000000
--- a/sound/usb/media.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * media.c - Media Controller specific ALSA driver code
- *
- * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- *
- * This file is released under the GPLv2.
- */
-
-/*
- * This file adds Media Controller support to ALSA driver
- * to use the Media Controller API to share tuner with DVB
- * and V4L2 drivers that control media device. Media device
- * is created based on existing quirks framework. Using this
- * approach, the media controller API usage can be added for
- * a specific device.
-*/
-
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-
-#include <sound/pcm.h>
-#include <sound/core.h>
-
-#include "usbaudio.h"
-#include "card.h"
-#include "mixer.h"
-#include "media.h"
-
-static int media_snd_enable_source(struct media_ctl *mctl)
-{
-	if (mctl && mctl->media_dev->enable_source)
-		return mctl->media_dev->enable_source(&mctl->media_entity,
-						      &mctl->media_pipe);
-	return 0;
-}
-
-static void media_snd_disable_source(struct media_ctl *mctl)
-{
-	if (mctl && mctl->media_dev->disable_source)
-		mctl->media_dev->disable_source(&mctl->media_entity);
-}
-
-int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
-			  int stream)
-{
-	struct media_device *mdev;
-	struct media_ctl *mctl;
-	struct device *pcm_dev = &pcm->streams[stream].dev;
-	u32 intf_type;
-	int ret = 0;
-	u16 mixer_pad;
-	struct media_entity *entity;
-
-	mdev = subs->stream->chip->media_dev;
-	if (!mdev)
-		return -ENODEV;
-
-	if (subs->media_ctl)
-		return 0;
-
-	/* allocate media_ctl */
-	mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
-	if (!mctl)
-		return -ENOMEM;
-
-	mctl->media_dev = mdev;
-	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK;
-		mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK;
-		mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE;
-		mixer_pad = 1;
-	} else {
-		intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE;
-		mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE;
-		mctl->media_pad.flags = MEDIA_PAD_FL_SINK;
-		mixer_pad = 2;
-	}
-	mctl->media_entity.name = pcm->name;
-	media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad);
-	ret = media_device_register_entity(mctl->media_dev,
-					   &mctl->media_entity);
-	if (ret)
-		goto free_mctl;
-
-	mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0,
-						  MAJOR(pcm_dev->devt),
-						  MINOR(pcm_dev->devt));
-	if (!mctl->intf_devnode) {
-		ret = -ENOMEM;
-		goto unregister_entity;
-	}
-	mctl->intf_link = media_create_intf_link(&mctl->media_entity,
-						 &mctl->intf_devnode->intf,
-						 MEDIA_LNK_FL_ENABLED);
-	if (!mctl->intf_link) {
-		ret = -ENOMEM;
-		goto devnode_remove;
-	}
-
-	/* create link between mixer and audio */
-	media_device_for_each_entity(entity, mdev) {
-		switch (entity->function) {
-		case MEDIA_ENT_F_AUDIO_MIXER:
-			ret = media_create_pad_link(entity, mixer_pad,
-						    &mctl->media_entity, 0,
-						    MEDIA_LNK_FL_ENABLED);
-			if (ret)
-				goto remove_intf_link;
-			break;
-		}
-	}
-
-	subs->media_ctl = mctl;
-	return 0;
-
-remove_intf_link:
-	media_remove_intf_link(mctl->intf_link);
-devnode_remove:
-	media_devnode_remove(mctl->intf_devnode);
-unregister_entity:
-	media_device_unregister_entity(&mctl->media_entity);
-free_mctl:
-	kfree(mctl);
-	return ret;
-}
-
-void media_snd_stream_delete(struct snd_usb_substream *subs)
-{
-	struct media_ctl *mctl = subs->media_ctl;
-
-	if (mctl && mctl->media_dev) {
-		struct media_device *mdev;
-
-		mdev = subs->stream->chip->media_dev;
-		if (mdev && media_devnode_is_registered(&mdev->devnode)) {
-			media_devnode_remove(mctl->intf_devnode);
-			media_device_unregister_entity(&mctl->media_entity);
-			media_entity_cleanup(&mctl->media_entity);
-		}
-		kfree(mctl);
-		subs->media_ctl = NULL;
-	}
-}
-
-int media_snd_start_pipeline(struct snd_usb_substream *subs)
-{
-	struct media_ctl *mctl = subs->media_ctl;
-
-	if (mctl)
-		return media_snd_enable_source(mctl);
-	return 0;
-}
-
-void media_snd_stop_pipeline(struct snd_usb_substream *subs)
-{
-	struct media_ctl *mctl = subs->media_ctl;
-
-	if (mctl)
-		media_snd_disable_source(mctl);
-}
-
-int media_snd_mixer_init(struct snd_usb_audio *chip)
-{
-	struct device *ctl_dev = &chip->card->ctl_dev;
-	struct media_intf_devnode *ctl_intf;
-	struct usb_mixer_interface *mixer;
-	struct media_device *mdev = chip->media_dev;
-	struct media_mixer_ctl *mctl;
-	u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL;
-	int ret;
-
-	if (!mdev)
-		return -ENODEV;
-
-	ctl_intf = chip->ctl_intf_media_devnode;
-	if (!ctl_intf) {
-		ctl_intf = media_devnode_create(mdev, intf_type, 0,
-						MAJOR(ctl_dev->devt),
-						MINOR(ctl_dev->devt));
-		if (!ctl_intf)
-			return -ENOMEM;
-		chip->ctl_intf_media_devnode = ctl_intf;
-	}
-
-	list_for_each_entry(mixer, &chip->mixer_list, list) {
-
-		if (mixer->media_mixer_ctl)
-			continue;
-
-		/* allocate media_mixer_ctl */
-		mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
-		if (!mctl)
-			return -ENOMEM;
-
-		mctl->media_dev = mdev;
-		mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER;
-		mctl->media_entity.name = chip->card->mixername;
-		mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK;
-		mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE;
-		mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE;
-		media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX,
-				       mctl->media_pad);
-		ret = media_device_register_entity(mctl->media_dev,
-						   &mctl->media_entity);
-		if (ret) {
-			kfree(mctl);
-			return ret;
-		}
-
-		mctl->intf_link = media_create_intf_link(&mctl->media_entity,
-							 &ctl_intf->intf,
-							 MEDIA_LNK_FL_ENABLED);
-		if (!mctl->intf_link) {
-			media_device_unregister_entity(&mctl->media_entity);
-			media_entity_cleanup(&mctl->media_entity);
-			kfree(mctl);
-			return -ENOMEM;
-		}
-		mctl->intf_devnode = ctl_intf;
-		mixer->media_mixer_ctl = mctl;
-	}
-	return 0;
-}
-
-static void media_snd_mixer_delete(struct snd_usb_audio *chip)
-{
-	struct usb_mixer_interface *mixer;
-	struct media_device *mdev = chip->media_dev;
-
-	if (!mdev)
-		return;
-
-	list_for_each_entry(mixer, &chip->mixer_list, list) {
-		struct media_mixer_ctl *mctl;
-
-		mctl = mixer->media_mixer_ctl;
-		if (!mixer->media_mixer_ctl)
-			continue;
-
-		if (media_devnode_is_registered(&mdev->devnode)) {
-			media_device_unregister_entity(&mctl->media_entity);
-			media_entity_cleanup(&mctl->media_entity);
-		}
-		kfree(mctl);
-		mixer->media_mixer_ctl = NULL;
-	}
-	if (media_devnode_is_registered(&mdev->devnode))
-		media_devnode_remove(chip->ctl_intf_media_devnode);
-	chip->ctl_intf_media_devnode = NULL;
-}
-
-int media_snd_device_create(struct snd_usb_audio *chip,
-			    struct usb_interface *iface)
-{
-	struct media_device *mdev;
-	struct usb_device *usbdev = interface_to_usbdev(iface);
-	int ret;
-
-	mdev = media_device_get_devres(&usbdev->dev);
-	if (!mdev)
-		return -ENOMEM;
-	if (!mdev->dev) {
-		/* register media device */
-		mdev->dev = &usbdev->dev;
-		if (usbdev->product)
-			strlcpy(mdev->model, usbdev->product,
-				sizeof(mdev->model));
-		if (usbdev->serial)
-			strlcpy(mdev->serial, usbdev->serial,
-				sizeof(mdev->serial));
-		strcpy(mdev->bus_info, usbdev->devpath);
-		mdev->hw_revision = le16_to_cpu(usbdev->descriptor.bcdDevice);
-		media_device_init(mdev);
-	}
-	if (!media_devnode_is_registered(&mdev->devnode)) {
-		ret = media_device_register(mdev);
-		if (ret) {
-			dev_err(&usbdev->dev,
-				"Couldn't register media device. Error: %d\n",
-				ret);
-			return ret;
-		}
-	}
-
-	/* save media device - avoid lookups */
-	chip->media_dev = mdev;
-
-	/* Create media entities for mixer and control dev */
-	ret = media_snd_mixer_init(chip);
-	if (ret) {
-		dev_err(&usbdev->dev,
-			"Couldn't create media mixer entities. Error: %d\n",
-			ret);
-
-		/* clear saved media_dev */
-		chip->media_dev = NULL;
-
-		return ret;
-	}
-	return 0;
-}
-
-void media_snd_device_delete(struct snd_usb_audio *chip)
-{
-	struct media_device *mdev = chip->media_dev;
-
-	media_snd_mixer_delete(chip);
-
-	if (mdev) {
-		if (media_devnode_is_registered(&mdev->devnode))
-			media_device_unregister(mdev);
-		chip->media_dev = NULL;
-	}
-}
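
The deleted media_snd_stream_init() above is a textbook instance of the kernel's goto-unwind ladder: each failure jumps to a label that releases exactly what was acquired before it, in reverse order, so no error path leaks a registration. A standalone sketch of the same shape follows; acquire() and release() are hypothetical stand-ins for the entity/devnode/link calls.

#include <stdio.h>
#include <stdlib.h>

static void *entity, *devnode, *link_;

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

/* Success returns before the ladder; each failure unwinds only what
 * was already acquired, newest first. */
static int setup(void)
{
	entity = acquire("entity");
	if (!entity)
		return -1;

	devnode = acquire("devnode");
	if (!devnode)
		goto free_entity;

	link_ = acquire("link");
	if (!link_)
		goto free_devnode;

	return 0;	/* caller now owns all three */

free_devnode:
	release("devnode", devnode);
free_entity:
	release("entity", entity);
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
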
diff --git a/sound/usb/media.h b/sound/usb/media.h
deleted file mode 100644
index 1dcdcdc5f7aa..000000000000
--- a/sound/usb/media.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * media.h - Media Controller specific ALSA driver code
- *
- * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- *
- * This file is released under the GPLv2.
- */
-
-/*
- * This file adds Media Controller support to ALSA driver
- * to use the Media Controller API to share tuner with DVB
- * and V4L2 drivers that control media device. Media device
- * is created based on existing quirks framework. Using this
- * approach, the media controller API usage can be added for
- * a specific device.
-*/
-#ifndef __MEDIA_H
-
-#ifdef CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER
-
-#include <media/media-device.h>
-#include <media/media-entity.h>
-#include <sound/asound.h>
-
-struct media_ctl {
-	struct media_device *media_dev;
-	struct media_entity media_entity;
-	struct media_intf_devnode *intf_devnode;
-	struct media_link *intf_link;
-	struct media_pad media_pad;
-	struct media_pipeline media_pipe;
-};
-
-/*
- * One source pad each for SNDRV_PCM_STREAM_CAPTURE and
- * SNDRV_PCM_STREAM_PLAYBACK. One for sink pad to link
- * to AUDIO Source
-*/
-#define MEDIA_MIXER_PAD_MAX    (SNDRV_PCM_STREAM_LAST + 2)
-
-struct media_mixer_ctl {
-	struct media_device *media_dev;
-	struct media_entity media_entity;
-	struct media_intf_devnode *intf_devnode;
-	struct media_link *intf_link;
-	struct media_pad media_pad[MEDIA_MIXER_PAD_MAX];
-	struct media_pipeline media_pipe;
-};
-
-int media_snd_device_create(struct snd_usb_audio *chip,
-			    struct usb_interface *iface);
-void media_snd_device_delete(struct snd_usb_audio *chip);
-int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
-			  int stream);
-void media_snd_stream_delete(struct snd_usb_substream *subs);
-int media_snd_start_pipeline(struct snd_usb_substream *subs);
-void media_snd_stop_pipeline(struct snd_usb_substream *subs);
-#else
-static inline int media_snd_device_create(struct snd_usb_audio *chip,
-					  struct usb_interface *iface)
-						{ return 0; }
-static inline void media_snd_device_delete(struct snd_usb_audio *chip) { }
-static inline int media_snd_stream_init(struct snd_usb_substream *subs,
-					struct snd_pcm *pcm, int stream)
-						{ return 0; }
-static inline void media_snd_stream_delete(struct snd_usb_substream *subs) { }
-static inline int media_snd_start_pipeline(struct snd_usb_substream *subs)
-						{ return 0; }
-static inline void media_snd_stop_pipeline(struct snd_usb_substream *subs) { }
-#endif
-#endif /* __MEDIA_H */
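
The deleted media.h used the standard idiom for optional kernel features: real prototypes when the Kconfig symbol is set, empty static inline stubs otherwise, so callers such as pcm.c and card.c never need #ifdef guards of their own. (Incidentally, the guard opened with #ifndef __MEDIA_H but never defined __MEDIA_H, so it did not actually prevent double inclusion.) A minimal sketch of the pattern follows; CONFIG_FEATURE_FOO and the foo_* names are invented for illustration.

/* foo.h -- hypothetical optional-feature header, same shape as media.h */
#ifndef FOO_H
#define FOO_H

#ifdef CONFIG_FEATURE_FOO
int foo_init(void);		/* real implementations live in foo.c */
void foo_exit(void);
#else
/* Stubs compile away entirely; callers need no #ifdef of their own. */
static inline int foo_init(void) { return 0; }
static inline void foo_exit(void) { }
#endif

#endif /* FOO_H */
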
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index f3789446ab9c..3417ef347e40 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -3,8 +3,6 @@
 
 #include <sound/info.h>
 
-struct media_mixer_ctl;
-
 struct usb_mixer_interface {
 	struct snd_usb_audio *chip;
 	struct usb_host_interface *hostif;
@@ -24,7 +22,6 @@ struct usb_mixer_interface {
 	struct urb *rc_urb;
 	struct usb_ctrlrequest *rc_setup_packet;
 	u8 rc_buffer[6];
-	struct media_mixer_ctl *media_mixer_ctl;
 };
 
 #define MAX_CHANNELS	16 /* max logical channels */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 0e4e0640c504..44d178ee9177 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -35,7 +35,6 @@
 #include "pcm.h"
 #include "clock.h"
 #include "power.h"
-#include "media.h"
 
 #define SUBSTREAM_FLAG_DATA_EP_STARTED	0
 #define SUBSTREAM_FLAG_SYNC_EP_STARTED	1
@@ -718,14 +717,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
 	struct audioformat *fmt;
 	int ret;
 
-	ret = media_snd_start_pipeline(subs);
-	if (ret)
-		return ret;
-
 	ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
 					       params_buffer_bytes(hw_params));
 	if (ret < 0)
-		goto err_ret;
+		return ret;
 
 	subs->pcm_format = params_format(hw_params);
 	subs->period_bytes = params_period_bytes(hw_params);
@@ -739,27 +734,22 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
 		dev_dbg(&subs->dev->dev,
 			"cannot set format: format = %#x, rate = %d, channels = %d\n",
 			subs->pcm_format, subs->cur_rate, subs->channels);
-		ret = -EINVAL;
-		goto err_ret;
+		return -EINVAL;
 	}
 
 	ret = snd_usb_lock_shutdown(subs->stream->chip);
 	if (ret < 0)
-		goto err_ret;
+		return ret;
 	ret = set_format(subs, fmt);
 	snd_usb_unlock_shutdown(subs->stream->chip);
 	if (ret < 0)
-		goto err_ret;
+		return ret;
 
 	subs->interface = fmt->iface;
 	subs->altset_idx = fmt->altset_idx;
 	subs->need_setup_ep = true;
 
 	return 0;
-
-err_ret:
-	media_snd_stop_pipeline(subs);
-	return ret;
 }
 
 /*
@@ -771,7 +761,6 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
 {
 	struct snd_usb_substream *subs = substream->runtime->private_data;
 
-	media_snd_stop_pipeline(subs);
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
@@ -1232,7 +1221,6 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
 	struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_usb_substream *subs = &as->substream[direction];
-	int ret;
 
 	subs->interface = -1;
 	subs->altset_idx = 0;
@@ -1246,12 +1234,7 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
 	subs->dsd_dop.channel = 0;
 	subs->dsd_dop.marker = 1;
 
-	ret = setup_hw_info(runtime, subs);
-	if (ret == 0)
-		ret = media_snd_stream_init(subs, as->pcm, direction);
-	if (ret)
-		snd_usb_autosuspend(subs->stream->chip);
-	return ret;
+	return setup_hw_info(runtime, subs);
 }
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
@@ -1260,7 +1243,6 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
 	struct snd_usb_substream *subs = &as->substream[direction];
 
 	stop_endpoints(subs, true);
-	media_snd_stop_pipeline(subs);
 
 	if (subs->interface >= 0 &&
 	    !snd_usb_lock_shutdown(subs->stream->chip)) {
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 9d087b19c70c..c60a776e815d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2886,7 +2886,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 		.product_name = pname, \
 		.ifnum = QUIRK_ANY_INTERFACE, \
 		.type = QUIRK_AUDIO_ALIGN_TRANSFER, \
-		.media_device = 1, \
 	} \
 }
 
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 6fe7f210bd4e..8e9548bc1f1a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -36,7 +36,6 @@
 #include "format.h"
 #include "clock.h"
 #include "stream.h"
-#include "media.h"
 
 /*
  * free a substream
@@ -53,7 +52,6 @@ static void free_substream(struct snd_usb_substream *subs)
 		kfree(fp);
 	}
 	kfree(subs->rate_list.list);
-	media_snd_stream_delete(subs);
 }
 
 
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index a161c7c1b126..b665d85555cb 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -30,9 +30,6 @@
  *
  */
 
-struct media_device;
-struct media_intf_devnode;
-
 struct snd_usb_audio {
 	int index;
 	struct usb_device *dev;
@@ -63,8 +60,6 @@ struct snd_usb_audio {
 	bool autoclock;			/* from the 'autoclock' module param */
 
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
-	struct media_device *media_dev;
-	struct media_intf_devnode *ctl_intf_media_devnode;
 };
 
 #define usb_audio_err(chip, fmt, args...) \
@@ -115,7 +110,6 @@ struct snd_usb_audio_quirk {
 	const char *product_name;
 	int16_t ifnum;
 	uint16_t type;
-	bool media_device;
 	const void *data;
 };
 
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index b9453b838162..150829dd7998 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1497,15 +1497,15 @@ TEST_F(TRACE_syscall, syscall_dropped)
 #define SECCOMP_SET_MODE_FILTER 1
 #endif
 
-#ifndef SECCOMP_FLAG_FILTER_TSYNC
-#define SECCOMP_FLAG_FILTER_TSYNC 1
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
 #endif
 
 #ifndef seccomp
-int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
+int seccomp(unsigned int op, unsigned int flags, void *args)
 {
 	errno = 0;
-	return syscall(__NR_seccomp, op, flags, filter);
+	return syscall(__NR_seccomp, op, flags, args);
 }
 #endif
 
@@ -1613,7 +1613,7 @@ TEST(TSYNC_first)
 		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &prog);
 	ASSERT_NE(ENOSYS, errno) {
 		TH_LOG("Kernel does not support seccomp syscall!");
@@ -1831,7 +1831,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
 		self->sibling_count++;
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Could install filter on all threads!");
@@ -1892,7 +1892,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
 		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_NE(ENOSYS, errno) {
 		TH_LOG("Kernel does not support seccomp syscall!");
@@ -1940,7 +1940,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
 		self->sibling_count++;
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(self->sibling[0].system_tid, ret) {
 		TH_LOG("Did not fail on diverged sibling.");
@@ -1992,7 +1992,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(ret, self->sibling[0].system_tid) {
 		TH_LOG("Did not fail on diverged sibling.");
@@ -2021,7 +2021,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 	/* Switch to the remaining sibling */
 	sib = !sib;
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Expected the remaining sibling to sync");
@@ -2044,7 +2044,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 	while (!kill(self->sibling[sib].system_tid, 0))
 		sleep(0.1);
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret);	/* just us chickens */
 }
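
The selftest fix above corrects a transposed macro name: the UAPI header spells the flag SECCOMP_FILTER_FLAG_TSYNC, so the old #ifndef SECCOMP_FLAG_FILTER_TSYNC fallback always fired, and the tests only worked because the fallback value (1) happens to match the real flag. The local seccomp() wrapper also now takes void *args, matching the real syscall, whose third argument varies by operation. A minimal sketch of the corrected call pattern follows, assuming a Linux 3.17+ kernel with seccomp(2); the allow-all filter is for illustration only.

#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int sys_seccomp(unsigned int op, unsigned int flags, void *args)
{
	return syscall(__NR_seccomp, op, flags, args);
}

int main(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),	/* allow everything */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required before installing a filter without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;

	/* TSYNC applies the filter to all threads atomically; returns 0 on
	 * success or the TID of a thread that could not be synchronized. */
	if (sys_seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
			&prog)) {
		perror("seccomp");
		return 1;
	}
	return 0;
}
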