aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl29
-rw-r--r--Documentation/devicetree/bindings/clock/qca,ath79-pll.txt6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt12
-rw-r--r--Documentation/filesystems/cramfs.txt2
-rw-r--r--Documentation/filesystems/tmpfs.txt2
-rw-r--r--Documentation/filesystems/vfs.txt4
-rw-r--r--Documentation/networking/switchdev.txt2
-rw-r--r--Documentation/power/runtime_pm.txt4
-rw-r--r--Documentation/x86/topology.txt208
-rw-r--r--MAINTAINERS44
-rw-r--r--Makefile2
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm/kernel/calls.S4
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kvm/arm.c52
-rw-r--r--arch/arm/mm/flush.c4
-rw-r--r--arch/arm/mm/proc-v7.S10
-rw-r--r--arch/arm64/configs/defconfig28
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/include/asm/kvm_host.h1
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm64/include/asm/kvm_perf_event.h68
-rw-r--r--arch/arm64/include/asm/opcodes.h4
-rw-r--r--arch/arm64/include/asm/perf_event.h47
-rw-r--r--arch/arm64/include/asm/sysreg.h3
-rw-r--r--arch/arm64/kernel/perf_event.c72
-rw-r--r--arch/arm64/kvm/hyp/s2-setup.c6
-rw-r--r--arch/mips/alchemy/common/dbdma.c4
-rw-r--r--arch/mips/alchemy/devboards/db1000.c18
-rw-r--r--arch/mips/alchemy/devboards/db1550.c4
-rw-r--r--arch/mips/ath79/clock.c44
-rw-r--r--arch/mips/bcm47xx/sprom.c4
-rw-r--r--arch/mips/boot/compressed/Makefile7
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c14
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c2
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/configs/ci20_defconfig14
-rw-r--r--arch/mips/dec/int-handler.S2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/include/asm/cpu-info.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-generic/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h2
-rw-r--r--arch/mips/include/asm/mips-cm.h2
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h2
-rw-r--r--arch/mips/include/asm/pci/bridge.h18
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h2
-rw-r--r--arch/mips/include/asm/sgiarcs.h4
-rw-r--r--arch/mips/include/asm/sn/ioc3.h2
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h2
-rw-r--r--arch/mips/include/asm/uaccess.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c12
-rw-r--r--arch/mips/kernel/module-rela.c19
-rw-r--r--arch/mips/kernel/module.c19
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/mips/kernel/pm-cps.c2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smp.c12
-rw-r--r--arch/mips/kernel/traps.c46
-rw-r--r--arch/mips/kernel/unaligned.c51
-rw-r--r--arch/mips/kvm/tlb.c2
-rw-r--r--arch/mips/kvm/trap_emul.c2
-rw-r--r--arch/mips/math-emu/ieee754dp.c6
-rw-r--r--arch/mips/math-emu/ieee754sp.c6
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c5
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/nios2/kernel/prom.c3
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/compat.h7
-rw-r--r--arch/parisc/include/asm/syscall.h13
-rw-r--r--arch/parisc/include/asm/uaccess.h11
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/cache.c2
-rw-r--r--arch/parisc/kernel/module.c8
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c10
-rw-r--r--arch/parisc/kernel/ptrace.c9
-rw-r--r--arch/parisc/kernel/signal32.c5
-rw-r--r--arch/parisc/kernel/syscall.S2
-rw-r--r--arch/parisc/kernel/traps.c3
-rw-r--r--arch/parisc/lib/fixup.S6
-rw-r--r--arch/parisc/mm/fault.c1
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c4
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/hypfs/inode.c4
-rw-r--r--arch/s390/include/asm/cache.h3
-rw-r--r--arch/s390/include/uapi/asm/unistd.h4
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c2
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/mm/gmap.c4
-rw-r--r--arch/s390/mm/gup.c8
-rw-r--r--arch/s390/mm/init.c10
-rw-r--r--arch/s390/pci/pci_clp.c3
-rw-r--r--arch/sparc/include/asm/compat_signal.h8
-rw-r--r--arch/sparc/include/asm/obio.h32
-rw-r--r--arch/sparc/include/asm/openprom.h10
-rw-r--r--arch/sparc/include/asm/pgtable_64.h2
-rw-r--r--arch/sparc/include/asm/processor_64.h2
-rw-r--r--arch/sparc/include/asm/sigcontext.h2
-rw-r--r--arch/sparc/include/asm/tsb.h2
-rw-r--r--arch/sparc/include/uapi/asm/stat.h4
-rw-r--r--arch/sparc/kernel/audit.c12
-rw-r--r--arch/sparc/kernel/compat_audit.c12
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/ioport.c6
-rw-r--r--arch/sparc/kernel/kernel.h12
-rw-r--r--arch/sparc/kernel/leon_kernel.c2
-rw-r--r--arch/sparc/kernel/process_64.c2
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/signal32.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c4
-rw-r--r--arch/sparc/kernel/sysfs.c2
-rw-r--r--arch/sparc/kernel/unaligned_64.c4
-rw-r--r--arch/sparc/mm/fault_32.c8
-rw-r--r--arch/sparc/net/bpf_jit_comp.c2
-rw-r--r--arch/tile/include/hv/drv_mpipe_intf.h26
-rw-r--r--arch/tile/kernel/kgdb.c16
-rw-r--r--arch/tile/kernel/pci_gx.c2
-rw-r--r--arch/x86/events/amd/core.c21
-rw-r--r--arch/x86/events/amd/ibs.c52
-rw-r--r--arch/x86/events/perf_event.h11
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/msr-index.h16
-rw-r--r--arch/x86/include/asm/pmem.h9
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/thread_info.h6
-rw-r--r--arch/x86/include/asm/tlbflush.h6
-rw-r--r--arch/x86/kernel/amd_nb.c6
-rw-r--r--arch/x86/kernel/cpu/amd.c12
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c3
-rw-r--r--arch/x86/kernel/cpu/powerflags.c2
-rw-r--r--arch/x86/kernel/setup.c37
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kvm/hyperv.c5
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/mmu.c12
-rw-r--r--arch/x86/kvm/x86.c20
-rw-r--r--arch/x86/mm/tlb.c14
-rw-r--r--arch/x86/ras/mce_amd_inj.c3
-rw-r--r--arch/x86/xen/apic.c12
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--block/bio.c12
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-settings.c12
-rw-r--r--block/blk-sysfs.c8
-rw-r--r--block/cfq-iosched.c2
-rw-r--r--block/compat_ioctl.c4
-rw-r--r--block/ioctl.c4
-rw-r--r--block/partition-generic.c8
-rw-r--r--crypto/asymmetric_keys/pkcs7_trust.c2
-rw-r--r--drivers/acpi/acpi_processor.c52
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/clk/mediatek/reset.c2
-rw-r--r--drivers/clk/mmp/reset.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c70
-rw-r--r--drivers/clk/qcom/reset.c2
-rw-r--r--drivers/clk/qcom/reset.h2
-rw-r--r--drivers/clk/rockchip/softrst.c2
-rw-r--r--drivers/clk/sirf/clk-atlas7.c2
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c2
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/clk/sunxi/clk-usb.c2
-rw-r--r--drivers/clk/tegra/clk.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c206
-rw-r--r--drivers/firmware/qemu_fw_cfg.c24
-rw-r--r--drivers/gpio/gpio-menz127.c9
-rw-r--r--drivers/gpio/gpio-pca953x.c3
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpio-xgene.c5
-rw-r--r--drivers/gpio/gpiolib.c133
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c69
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c16
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c14
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c27
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c13
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c123
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c17
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c79
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c79
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c8
-rw-r--r--drivers/hwmon/max1111.c6
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c7
-rw-r--r--drivers/i2c/i2c-core.c10
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c39
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/palm_bk3710.c2
-rw-r--r--drivers/idle/intel_idle.c97
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c6
-rw-r--r--drivers/iio/adc/max1363.c12
-rw-r--r--drivers/iio/gyro/bmg160_core.c6
-rw-r--r--drivers/iio/health/max30100.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig3
-rw-r--r--drivers/iio/industrialio-buffer.c1
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c39
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c55
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h2
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/iommu/rockchip-iommu.c8
-rw-r--r--drivers/isdn/hisax/isac.c15
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/md/bitmap.c21
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c52
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c63
-rw-r--r--drivers/media/usb/au0828/au0828.h9
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c25
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c22
-rw-r--r--drivers/mmc/host/sdhci.c39
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx.c85
-rw-r--r--drivers/net/dsa/mv88e6xxx.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h6
-rw-r--r--drivers/net/ethernet/cadence/macb.c69
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c196
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c165
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c40
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c91
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/nvdimm/btt.c2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/core.c41
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c42
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c35
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c14
-rw-r--r--drivers/pinctrl/sh-pfc/core.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/s390/block/dasd_alias.c226
-rw-r--r--drivers/s390/block/dasd_eckd.c38
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/cxlflash/main.c138
-rw-r--r--drivers/scsi/cxlflash/main.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c33
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/sd.c49
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-omap2-mcspi.c62
-rw-r--r--drivers/spi/spi-rockchip.c16
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h18
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c128
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c16
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c74
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c159
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h40
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c237
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h40
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c206
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c32
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c119
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h6
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h193
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c3
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h10
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h22
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h10
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h3
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c51
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c61
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c59
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c31
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c26
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c12
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c17
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c10
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c12
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c7
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c9
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c8
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c13
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c84
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c44
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c5
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c9
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c29
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c13
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/olpc_dcon/Kconfig35
-rw-r--r--drivers/staging/olpc_dcon/Makefile6
-rw-r--r--drivers/staging/olpc_dcon/TODO9
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c813
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h111
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c205
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c161
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig1
-rw-r--r--drivers/staging/unisys/include/visorbus.h1
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c5
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c205
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c53
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c28
-rw-r--r--drivers/target/target_core_fabric_configfs.c24
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/usb/core/config.c16
-rw-r--r--drivers/usb/dwc2/gadget.c23
-rw-r--r--drivers/usb/dwc3/core.c48
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/usb/gadget/composite.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_midi.c17
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c14
-rw-r--r--drivers/usb/gadget/udc/udc-core.c6
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c72
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c6
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/mct_u232.c9
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c11
-rw-r--r--drivers/xen/events/events_base.c28
-rw-r--r--fs/9p/vfs_addr.c18
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/9p/vfs_super.c2
-rw-r--r--fs/affs/file.c26
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/file.c4
-rw-r--r--fs/afs/mntpt.c6
-rw-r--r--fs/afs/super.c4
-rw-r--r--fs/afs/write.c26
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/btrfs/check-integrity.c64
-rw-r--r--fs/btrfs/compression.c84
-rw-r--r--fs/btrfs/ctree.c12
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c59
-rw-r--r--fs/btrfs/extent-tree.c25
-rw-r--r--fs/btrfs/extent_io.c266
-rw-r--r--fs/btrfs/extent_io.h6
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c51
-rw-r--r--fs/btrfs/free-space-cache.c30
-rw-r--r--fs/btrfs/inode-map.c10
-rw-r--r--fs/btrfs/inode.c104
-rw-r--r--fs/btrfs/ioctl.c86
-rw-r--r--fs/btrfs/lzo.c32
-rw-r--r--fs/btrfs/qgroup.c63
-rw-r--r--fs/btrfs/raid56.c28
-rw-r--r--fs/btrfs/reada.c30
-rw-r--r--fs/btrfs/relocation.c17
-rw-r--r--fs/btrfs/scrub.c24
-rw-r--r--fs/btrfs/send.c16
-rw-r--r--fs/btrfs/struct-funcs.c4
-rw-r--r--fs/btrfs/tests/extent-io-tests.c44
-rw-r--r--fs/btrfs/tests/free-space-tests.c2
-rw-r--r--fs/btrfs/tree-log.c137
-rw-r--r--fs/btrfs/volumes.c14
-rw-r--r--fs/btrfs/zlib.c38
-rw-r--r--fs/buffer.c100
-rw-r--r--fs/cachefiles/rdwr.c38
-rw-r--r--fs/ceph/addr.c114
-rw-r--r--fs/ceph/caps.c2
-rw-r--r--fs/ceph/dir.c4
-rw-r--r--fs/ceph/file.c32
-rw-r--r--fs/ceph/inode.c6
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/super.c8
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/cifssmb.c16
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c96
-rw-r--r--fs/cifs/inode.c10
-rw-r--r--fs/configfs/mount.c4
-rw-r--r--fs/cramfs/README26
-rw-r--r--fs/cramfs/inode.c32
-rw-r--r--fs/crypto/crypto.c8
-rw-r--r--fs/dax.c34
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/direct-io.c26
-rw-r--r--fs/dlm/config.c3
-rw-r--r--fs/dlm/lowcomms.c8
-rw-r--r--fs/ecryptfs/crypto.c22
-rw-r--r--fs/ecryptfs/inode.c8
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/main.c8
-rw-r--r--fs/ecryptfs/mmap.c44
-rw-r--r--fs/ecryptfs/read_write.c14
-rw-r--r--fs/efivarfs/super.c4
-rw-r--r--fs/exofs/dir.c30
-rw-r--r--fs/exofs/inode.c34
-rw-r--r--fs/exofs/namei.c4
-rw-r--r--fs/ext2/dir.c36
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext4/crypto.c57
-rw-r--r--fs/ext4/dir.c4
-rw-r--r--fs/ext4/ext4.h33
-rw-r--r--fs/ext4/file.c16
-rw-r--r--fs/ext4/inline.c18
-rw-r--r--fs/ext4/inode.c176
-rw-r--r--fs/ext4/mballoc.c40
-rw-r--r--fs/ext4/move_extent.c27
-rw-r--r--fs/ext4/page-io.c18
-rw-r--r--fs/ext4/readpage.c14
-rw-r--r--fs/ext4/super.c65
-rw-r--r--fs/ext4/symlink.c4
-rw-r--r--fs/ext4/xattr.c32
-rw-r--r--fs/f2fs/data.c52
-rw-r--r--fs/f2fs/debug.c6
-rw-r--r--fs/f2fs/dir.c4
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/file.c74
-rw-r--r--fs/f2fs/inline.c10
-rw-r--r--fs/f2fs/namei.c16
-rw-r--r--fs/f2fs/node.c10
-rw-r--r--fs/f2fs/recovery.c2
-rw-r--r--fs/f2fs/segment.c16
-rw-r--r--fs/f2fs/super.c108
-rw-r--r--fs/freevxfs/vxfs_immed.c4
-rw-r--r--fs/freevxfs/vxfs_lookup.c12
-rw-r--r--fs/freevxfs/vxfs_subr.c2
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fscache/page.c10
-rw-r--r--fs/fuse/dev.c26
-rw-r--r--fs/fuse/file.c72
-rw-r--r--fs/fuse/inode.c16
-rw-r--r--fs/gfs2/aops.c44
-rw-r--r--fs/gfs2/bmap.c12
-rw-r--r--fs/gfs2/file.c16
-rw-r--r--fs/gfs2/meta_io.c4
-rw-r--r--fs/gfs2/quota.c14
-rw-r--r--fs/gfs2/rgrp.c5
-rw-r--r--fs/hfs/bnode.c12
-rw-r--r--fs/hfs/btree.c20
-rw-r--r--fs/hfs/inode.c8
-rw-r--r--fs/hfsplus/bitmap.c2
-rw-r--r--fs/hfsplus/bnode.c90
-rw-r--r--fs/hfsplus/btree.c22
-rw-r--r--fs/hfsplus/inode.c8
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hfsplus/xattr.c6
-rw-r--r--fs/hostfs/hostfs_kern.c18
-rw-r--r--fs/hugetlbfs/inode.c10
-rw-r--r--fs/isofs/compress.c36
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/jffs2/debug.c8
-rw-r--r--fs/jffs2/file.c23
-rw-r--r--fs/jffs2/fs.c8
-rw-r--r--fs/jffs2/gc.c8
-rw-r--r--fs/jffs2/nodelist.c8
-rw-r--r--fs/jffs2/write.c7
-rw-r--r--fs/jfs/jfs_metapage.c42
-rw-r--r--fs/jfs/jfs_metapage.h4
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/kernfs/mount.c4
-rw-r--r--fs/libfs.c24
-rw-r--r--fs/logfs/dev_bdev.c2
-rw-r--r--fs/logfs/dev_mtd.c10
-rw-r--r--fs/logfs/dir.c12
-rw-r--r--fs/logfs/file.c26
-rw-r--r--fs/logfs/readwrite.c20
-rw-r--r--fs/logfs/segment.c28
-rw-r--r--fs/logfs/super.c16
-rw-r--r--fs/minix/dir.c18
-rw-r--r--fs/minix/namei.c4
-rw-r--r--fs/mpage.c22
-rw-r--r--fs/namei.c10
-rw-r--r--fs/ncpfs/dir.c10
-rw-r--r--fs/ncpfs/ncplib_kernel.h2
-rw-r--r--fs/nfs/blocklayout/blocklayout.c24
-rw-r--r--fs/nfs/blocklayout/blocklayout.h4
-rw-r--r--fs/nfs/client.c8
-rw-r--r--fs/nfs/dir.c10
-rw-r--r--fs/nfs/direct.c8
-rw-r--r--fs/nfs/file.c20
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h6
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/objlayout/objio_osd.c2
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs.c6
-rw-r--r--fs/nfs/read.c16
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nilfs2/bmap.c2
-rw-r--r--fs/nilfs2/btnode.c10
-rw-r--r--fs/nilfs2/dir.c32
-rw-r--r--fs/nilfs2/gcinode.c2
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/nilfs2/mdt.c14
-rw-r--r--fs/nilfs2/namei.c4
-rw-r--r--fs/nilfs2/page.c18
-rw-r--r--fs/nilfs2/recovery.c4
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/ntfs/aops.c50
-rw-r--r--fs/ntfs/aops.h4
-rw-r--r--fs/ntfs/attrib.c28
-rw-r--r--fs/ntfs/bitmap.c10
-rw-r--r--fs/ntfs/compress.c77
-rw-r--r--fs/ntfs/dir.c56
-rw-r--r--fs/ntfs/file.c56
-rw-r--r--fs/ntfs/index.c14
-rw-r--r--fs/ntfs/inode.c12
-rw-r--r--fs/ntfs/lcnalloc.c6
-rw-r--r--fs/ntfs/logfile.c16
-rw-r--r--fs/ntfs/mft.c34
-rw-r--r--fs/ntfs/ntfs.h2
-rw-r--r--fs/ntfs/super.c72
-rw-r--r--fs/ocfs2/alloc.c28
-rw-r--r--fs/ocfs2/aops.c50
-rw-r--r--fs/ocfs2/cluster/heartbeat.c10
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/file.c14
-rw-r--r--fs/ocfs2/mmap.c6
-rw-r--r--fs/ocfs2/ocfs2.h20
-rw-r--r--fs/ocfs2/quota_global.c11
-rw-r--r--fs/ocfs2/refcounttree.c24
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/orangefs/dir.c12
-rw-r--r--fs/orangefs/inode.c26
-rw-r--r--fs/orangefs/orangefs-bufmap.c4
-rw-r--r--fs/orangefs/orangefs-debugfs.c3
-rw-r--r--fs/orangefs/orangefs-utils.c8
-rw-r--r--fs/orangefs/protocol.h35
-rw-r--r--fs/orangefs/xattr.c19
-rw-r--r--fs/overlayfs/super.c33
-rw-r--r--fs/pipe.c6
-rw-r--r--fs/proc/task_mmu.c2
-rw-r--r--fs/proc/vmcore.c4
-rw-r--r--fs/pstore/inode.c4
-rw-r--r--fs/qnx6/dir.c16
-rw-r--r--fs/qnx6/inode.c4
-rw-r--r--fs/qnx6/qnx6.h2
-rw-r--r--fs/quota/dquot.c13
-rw-r--r--fs/ramfs/inode.c4
-rw-r--r--fs/reiserfs/file.c4
-rw-r--r--fs/reiserfs/inode.c44
-rw-r--r--fs/reiserfs/ioctl.c4
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/tail_conversion.c4
-rw-r--r--fs/reiserfs/xattr.c18
-rw-r--r--fs/splice.c32
-rw-r--r--fs/squashfs/block.c4
-rw-r--r--fs/squashfs/cache.c18
-rw-r--r--fs/squashfs/decompressor.c2
-rw-r--r--fs/squashfs/file.c24
-rw-r--r--fs/squashfs/file_direct.c22
-rw-r--r--fs/squashfs/lz4_wrapper.c8
-rw-r--r--fs/squashfs/lzo_wrapper.c8
-rw-r--r--fs/squashfs/page_actor.c4
-rw-r--r--fs/squashfs/page_actor.h2
-rw-r--r--fs/squashfs/super.c2
-rw-r--r--fs/squashfs/symlink.c6
-rw-r--r--fs/squashfs/xz_wrapper.c4
-rw-r--r--fs/squashfs/zlib_wrapper.c4
-rw-r--r--fs/sync.c4
-rw-r--r--fs/sysv/dir.c18
-rw-r--r--fs/sysv/namei.c4
-rw-r--r--fs/ubifs/file.c54
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/udf/file.c6
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/ufs/balloc.c6
-rw-r--r--fs/ufs/dir.c32
-rw-r--r--fs/ufs/inode.c4
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--fs/ufs/util.c4
-rw-r--r--fs/ufs/util.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c4
-rw-r--r--fs/xfs/xfs_aops.c22
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_mount.h4
-rw-r--r--fs/xfs/xfs_pnfs.c4
-rw-r--r--fs/xfs/xfs_super.c8
-rw-r--r--include/drm/ttm/ttm_bo_api.h2
-rw-r--r--include/linux/atomic.h34
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/brcmphy.h2
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/ceph/libceph.h4
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/configfs.h4
-rw-r--r--include/linux/dcache.h10
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/nfs_page.h6
-rw-r--r--include/linux/nilfs2_fs.h4
-rw-r--r--include/linux/pagemap.h32
-rw-r--r--include/linux/pmem.h22
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/net/act_api.h1
-rw-r--r--include/net/mac80211.h2
-rw-r--r--include/net/sctp/sctp.h6
-rw-r--r--include/scsi/scsi_device.h25
-rw-r--r--include/target/target_core_fabric.h2
-rw-r--r--include/trace/events/btrfs.h89
-rw-r--r--include/trace/events/page_isolation.h2
-rw-r--r--include/uapi/linux/bpf.h1
-rw-r--r--include/uapi/linux/stddef.h4
-rw-r--r--include/uapi/linux/usb/ch9.h2
-rw-r--r--include/uapi/linux/virtio_config.h2
-rw-r--r--include/video/imx-ipu-v3.h7
-rw-r--r--init/Kconfig3
-rw-r--r--ipc/mqueue.c4
-rw-r--r--kernel/bpf/syscall.c6
-rw-r--r--kernel/events/core.c15
-rw-r--r--kernel/events/uprobes.c8
-rw-r--r--kernel/locking/lockdep.c79
-rw-r--r--kernel/sched/core.c18
-rw-r--r--kernel/time/tick-sched.c61
-rw-r--r--kernel/time/tick-sched.h2
-rw-r--r--lib/test_bpf.c229
-rw-r--r--mm/fadvise.c8
-rw-r--r--mm/filemap.c126
-rw-r--r--mm/gup.c2
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/madvise.c6
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c55
-rw-r--r--mm/mincore.c8
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c6
-rw-r--r--mm/page-writeback.c12
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/page_isolation.c10
-rw-r--r--mm/readahead.c20
-rw-r--r--mm/rmap.c30
-rw-r--r--mm/shmem.c130
-rw-r--r--mm/swap.c14
-rw-r--r--mm/swap_state.c12
-rw-r--r--mm/swapfile.c12
-rw-r--r--mm/truncate.c40
-rw-r--r--mm/userfaultfd.c4
-rw-r--r--mm/zswap.c4
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/netfilter/ebtables.c4
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c20
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/pagelist.c4
-rw-r--r--net/ceph/pagevec.c30
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/filter.c38
-rw-r--r--net/core/rtnetlink.c1
-rw-r--r--net/core/sock.c9
-rw-r--r--net/ipv4/fou.c22
-rw-r--r--net/ipv4/gre_offload.c8
-rw-r--r--net/ipv4/ip_gre.c13
-rw-r--r--net/ipv4/ip_tunnel_core.c4
-rw-r--r--net/ipv4/netfilter/arp_tables.c43
-rw-r--r--net/ipv4/netfilter/ip_tables.c48
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c54
-rw-r--r--net/ipv6/ip6_output.c8
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c48
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/l2tp/l2tp_ip.c8
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/mac80211/chan.c4
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/sta_info.c14
-rw-r--r--net/mac80211/sta_info.h1
-rw-r--r--net/mac80211/tdls.c43
-rw-r--r--net/mac80211/tx.c13
-rw-r--r--net/mac80211/vht.c30
-rw-r--r--net/mpls/af_mpls.c3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h2
-rw-r--r--net/netfilter/ipset/ip_set_core.c33
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h2
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c7
-rw-r--r--net/openvswitch/Kconfig4
-rw-r--r--net/openvswitch/conntrack.c24
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/rds/ib_recv.c2
-rw-r--r--net/rds/page.c4
-rw-r--r--net/sctp/output.c9
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c4
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/socklib.c6
-rw-r--r--net/sunrpc/xdr.c50
-rw-r--r--net/switchdev/switchdev.c2
-rw-r--r--net/vmw_vsock/vmci_transport.c4
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--samples/bpf/Makefile12
-rw-r--r--samples/bpf/bpf_helpers.h26
-rw-r--r--samples/bpf/map_perf_test_user.c1
-rw-r--r--samples/bpf/spintest_kern.c2
-rw-r--r--samples/bpf/tracex2_kern.c4
-rw-r--r--samples/bpf/tracex4_kern.c2
-rw-r--r--sound/core/timer.c24
-rw-r--r--sound/core/timer_compat.c30
-rw-r--r--sound/firewire/dice/dice-stream.c14
-rw-r--r--sound/pci/hda/hda_intel.c4
-rw-r--r--sound/pci/hda/patch_realtek.c19
-rw-r--r--sound/usb/Kconfig4
-rw-r--r--sound/usb/Makefile2
-rw-r--r--sound/usb/card.c14
-rw-r--r--sound/usb/card.h3
-rw-r--r--sound/usb/media.c318
-rw-r--r--sound/usb/media.h72
-rw-r--r--sound/usb/mixer.h3
-rw-r--r--sound/usb/pcm.c28
-rw-r--r--sound/usb/quirks-table.h1
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/stream.c8
-rw-r--r--sound/usb/usbaudio.h6
-rwxr-xr-xtools/lib/lockdep/run_tests.sh12
-rw-r--r--tools/perf/MANIFEST1
-rw-r--r--tools/perf/arch/powerpc/util/header.c2
-rwxr-xr-xtools/perf/tests/perf-targz-src-pkg2
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/event.c23
-rw-r--r--tools/perf/util/genelf.h24
-rw-r--r--tools/perf/util/intel-bts.c1
-rw-r--r--tools/perf/util/intel-pt.c3
-rw-r--r--tools/perf/util/jitdump.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.c117
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c22
953 files changed, 10594 insertions, 7176 deletions
diff --git a/.mailmap b/.mailmap
index 7e6c5334c337..90c0aefc276d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -33,6 +33,7 @@ Björn Steinbrink <B.Steinbrink@gmx.de>
33Brian Avery <b.avery@hp.com> 33Brian Avery <b.avery@hp.com>
34Brian King <brking@us.ibm.com> 34Brian King <brking@us.ibm.com>
35Christoph Hellwig <hch@lst.de> 35Christoph Hellwig <hch@lst.de>
36Christophe Ricard <christophe.ricard@gmail.com>
36Corey Minyard <minyard@acm.org> 37Corey Minyard <minyard@acm.org>
37Damian Hobson-Garcia <dhobsong@igel.co.jp> 38Damian Hobson-Garcia <dhobsong@igel.co.jp>
38David Brownell <david-b@pacbell.net> 39David Brownell <david-b@pacbell.net>
diff --git a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
index 7ac7d7262bb7..3c3514815cd5 100644
--- a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
+++ b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
@@ -1,23 +1,18 @@
1What: /sys/devices/platform/<i2c-demux-name>/cur_master 1What: /sys/devices/platform/<i2c-demux-name>/available_masters
2Date: January 2016 2Date: January 2016
3KernelVersion: 4.6 3KernelVersion: 4.6
4Contact: Wolfram Sang <wsa@the-dreams.de> 4Contact: Wolfram Sang <wsa@the-dreams.de>
5Description: 5Description:
6 Reading the file will give you a list of masters which can be
7 selected for a demultiplexed bus. The format is
8 "<index>:<name>". Example from a Renesas Lager board:
6 9
7This file selects the active I2C master for a demultiplexed bus. 10 0:/i2c@e6500000 1:/i2c@e6508000
8 11
9Write 0 there for the first master, 1 for the second etc. Reading the file will 12What: /sys/devices/platform/<i2c-demux-name>/current_master
10give you a list with the active master marked. Example from a Renesas Lager 13Date: January 2016
11board: 14KernelVersion: 4.6
12 15Contact: Wolfram Sang <wsa@the-dreams.de>
13root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master 16Description:
14* 0 - /i2c@9 17 This file selects/shows the active I2C master for a demultiplexed
15 1 - /i2c@e6520000 18 bus. It uses the <index> value from the file 'available_masters'.
16 2 - /i2c@e6530000
17
18root@Lager:~# echo 2 > /sys/devices/platform/i2c@8/cur_master
19
20root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
21 0 - /i2c@9
22 1 - /i2c@e6520000
23* 2 - /i2c@e6530000
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
index e0fc2c11dd00..241fb0545b9e 100644
--- a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
+++ b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
3The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB. 3The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
4 4
5Required Properties: 5Required Properties:
6- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following 6- compatible: has to be "qca,<soctype>-pll" and one of the following
7 fallbacks: 7 fallbacks:
8 - "qca,ar7100-pll" 8 - "qca,ar7100-pll"
9 - "qca,ar7240-pll" 9 - "qca,ar7240-pll"
@@ -21,8 +21,8 @@ Optional properties:
21 21
22Example: 22Example:
23 23
24 memory-controller@18050000 { 24 pll-controller@18050000 {
25 compatible = "qca,ar9132-ppl", "qca,ar9130-pll"; 25 compatible = "qca,ar9132-pll", "qca,ar9130-pll";
26 reg = <0x18050000 0x20>; 26 reg = <0x18050000 0x20>;
27 27
28 clock-names = "ref"; 28 clock-names = "ref";
diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
index 08a4a32c8eb0..0326154c7925 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
@@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
134mfio81 dreq0, mips_trace_data, eth_debug 134mfio81 dreq0, mips_trace_data, eth_debug
135mfio82 dreq1, mips_trace_data, eth_debug 135mfio82 dreq1, mips_trace_data, eth_debug
136mfio83 mips_pll_lock, mips_trace_data, usb_debug 136mfio83 mips_pll_lock, mips_trace_data, usb_debug
137mfio84 sys_pll_lock, mips_trace_data, usb_debug 137mfio84 audio_pll_lock, mips_trace_data, usb_debug
138mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug 138mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
139mfio86 bt_pll_lock, mips_trace_data, sdhost_debug 139mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
140mfio87 rpu_v_pll_lock, dreq2, socif_debug 140mfio87 sys_pll_lock, dreq2, socif_debug
141mfio88 rpu_l_pll_lock, dreq3, socif_debug 141mfio88 wifi_pll_lock, dreq3, socif_debug
142mfio89 audio_pll_lock, dreq4, dreq5 142mfio89 bt_pll_lock, dreq4, dreq5
143tck 143tck
144trstn 144trstn
145tdi 145tdi
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0ab957..4006298f6707 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
38which the timestamp reverts to 1970, i.e. moves backwards in time. 38which the timestamp reverts to 1970, i.e. moves backwards in time.
39 39
40Currently, cramfs must be written and read with architectures of the 40Currently, cramfs must be written and read with architectures of the
41same endianness, and can be read only by kernels with PAGE_CACHE_SIZE 41same endianness, and can be read only by kernels with PAGE_SIZE
42== 4096. At least the latter of these is a bug, but it hasn't been 42== 4096. At least the latter of these is a bug, but it hasn't been
43decided what the best fix is. For the moment if you have larger pages 43decided what the best fix is. For the moment if you have larger pages
44you can just change the #define in mkcramfs.c, so long as you don't 44you can just change the #define in mkcramfs.c, so long as you don't
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e1505f17..d9c11d25bf02 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
60 default is half of your physical RAM without swap. If you 60 default is half of your physical RAM without swap. If you
61 oversize your tmpfs instances the machine will deadlock 61 oversize your tmpfs instances the machine will deadlock
62 since the OOM handler will not be able to free that memory. 62 since the OOM handler will not be able to free that memory.
63nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE. 63nr_blocks: The same as size, but in blocks of PAGE_SIZE.
64nr_inodes: The maximum number of inodes for this instance. The default 64nr_inodes: The maximum number of inodes for this instance. The default
65 is half of the number of your physical RAM pages, or (on a 65 is half of the number of your physical RAM pages, or (on a
66 machine with highmem) the number of lowmem RAM pages, 66 machine with highmem) the number of lowmem RAM pages,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b02a7d598258..4164bd6397a2 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -708,9 +708,9 @@ struct address_space_operations {
708 from the address space. This generally corresponds to either a 708 from the address space. This generally corresponds to either a
709 truncation, punch hole or a complete invalidation of the address 709 truncation, punch hole or a complete invalidation of the address
710 space (in the latter case 'offset' will always be 0 and 'length' 710 space (in the latter case 'offset' will always be 0 and 'length'
711 will be PAGE_CACHE_SIZE). Any private data associated with the page 711 will be PAGE_SIZE). Any private data associated with the page
712 should be updated to reflect this truncation. If offset is 0 and 712 should be updated to reflect this truncation. If offset is 0 and
713 length is PAGE_CACHE_SIZE, then the private data should be released, 713 length is PAGE_SIZE, then the private data should be released,
714 because the page must be able to be completely discarded. This may 714 because the page must be able to be completely discarded. This may
715 be done by calling the ->releasepage function, but in this case the 715 be done by calling the ->releasepage function, but in this case the
716 release MUST succeed. 716 release MUST succeed.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index fad63136ee3e..2f659129694b 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -386,7 +386,7 @@ used. First phase is to "prepare" anything needed, including various checks,
386memory allocation, etc. The goal is to handle the stuff that is not unlikely 386memory allocation, etc. The goal is to handle the stuff that is not unlikely
387to fail here. The second phase is to "commit" the actual changes. 387to fail here. The second phase is to "commit" the actual changes.
388 388
389Switchdev provides an inftrastructure for sharing items (for example memory 389Switchdev provides an infrastructure for sharing items (for example memory
390allocations) between the two phases. 390allocations) between the two phases.
391 391
392The object created by a driver in "prepare" phase and it is queued up by: 392The object created by a driver in "prepare" phase and it is queued up by:
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 7328cf85236c..1fd1fbe9ce95 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -586,6 +586,10 @@ drivers to make their ->remove() callbacks avoid races with runtime PM directly,
586but also it allows of more flexibility in the handling of devices during the 586but also it allows of more flexibility in the handling of devices during the
587removal of their drivers. 587removal of their drivers.
588 588
589Drivers in ->remove() callback should undo the runtime PM changes done
590in ->probe(). Usually this means calling pm_runtime_disable(),
591pm_runtime_dont_use_autosuspend() etc.
592
589The user space can effectively disallow the driver of the device to power manage 593The user space can effectively disallow the driver of the device to power manage
590it at run time by changing the value of its /sys/devices/.../power/control 594it at run time by changing the value of its /sys/devices/.../power/control
591attribute to "on", which causes pm_runtime_forbid() to be called. In principle, 595attribute to "on", which causes pm_runtime_forbid() to be called. In principle,
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
new file mode 100644
index 000000000000..06afac252f5b
--- /dev/null
+++ b/Documentation/x86/topology.txt
@@ -0,0 +1,208 @@
1x86 Topology
2============
3
4This documents and clarifies the main aspects of x86 topology modelling and
5representation in the kernel. Update/change when doing changes to the
6respective code.
7
8The architecture-agnostic topology definitions are in
9Documentation/cputopology.txt. This file holds x86-specific
10differences/specialities which must not necessarily apply to the generic
11definitions. Thus, the way to read up on Linux topology on x86 is to start
12with the generic one and look at this one in parallel for the x86 specifics.
13
14Needless to say, code should use the generic functions - this file is *only*
15here to *document* the inner workings of x86 topology.
16
17Started by Thomas Gleixner <tglx@linutronix.de> and Borislav Petkov <bp@alien8.de>.
18
19The main aim of the topology facilities is to present adequate interfaces to
20code which needs to know/query/use the structure of the running system wrt
21threads, cores, packages, etc.
22
23The kernel does not care about the concept of physical sockets because a
24socket has no relevance to software. It's an electromechanical component. In
25the past a socket always contained a single package (see below), but with the
26advent of Multi Chip Modules (MCM) a socket can hold more than one package. So
27there might be still references to sockets in the code, but they are of
28historical nature and should be cleaned up.
29
30The topology of a system is described in the units of:
31
32 - packages
33 - cores
34 - threads
35
36* Package:
37
38 Packages contain a number of cores plus shared resources, e.g. DRAM
39 controller, shared caches etc.
40
41 AMD nomenclature for package is 'Node'.
42
43 Package-related topology information in the kernel:
44
45 - cpuinfo_x86.x86_max_cores:
46
47 The number of cores in a package. This information is retrieved via CPUID.
48
49 - cpuinfo_x86.phys_proc_id:
50
51 The physical ID of the package. This information is retrieved via CPUID
52 and deduced from the APIC IDs of the cores in the package.
53
54 - cpuinfo_x86.logical_id:
55
56 The logical ID of the package. As we do not trust BIOSes to enumerate the
57 packages in a consistent way, we introduced the concept of logical package
58 ID so we can sanely calculate the number of maximum possible packages in
59 the system and have the packages enumerated linearly.
60
61 - topology_max_packages():
62
63 The maximum possible number of packages in the system. Helpful for per
64 package facilities to preallocate per package information.
65
66
67* Cores:
68
69 A core consists of 1 or more threads. It does not matter whether the threads
70 are SMT- or CMT-type threads.
71
72 AMDs nomenclature for a CMT core is "Compute Unit". The kernel always uses
73 "core".
74
75 Core-related topology information in the kernel:
76
77 - smp_num_siblings:
78
79 The number of threads in a core. The number of threads in a package can be
80 calculated by:
81
82 threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings
83
84
85* Threads:
86
87 A thread is a single scheduling unit. It's the equivalent to a logical Linux
88 CPU.
89
90 AMDs nomenclature for CMT threads is "Compute Unit Core". The kernel always
91 uses "thread".
92
93 Thread-related topology information in the kernel:
94
95 - topology_core_cpumask():
96
97 The cpumask contains all online threads in the package to which a thread
98 belongs.
99
100 The number of online threads is also printed in /proc/cpuinfo "siblings."
101
102 - topology_sibling_mask():
103
104 The cpumask contains all online threads in the core to which a thread
105 belongs.
106
107 - topology_logical_package_id():
108
109 The logical package ID to which a thread belongs.
110
111 - topology_physical_package_id():
112
113 The physical package ID to which a thread belongs.
114
115 - topology_core_id();
116
117 The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo
118 "core_id."
119
120
121
122System topology examples
123
124Note:
125
126The alternative Linux CPU enumeration depends on how the BIOS enumerates the
127threads. Many BIOSes enumerate all threads 0 first and then all threads 1.
128That has the "advantage" that the logical Linux CPU numbers of threads 0 stay
129the same whether threads are enabled or not. That's merely an implementation
130detail and has no practical impact.
131
1321) Single Package, Single Core
133
134 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
135
1362) Single Package, Dual Core
137
138 a) One thread per core
139
140 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
141 -> [core 1] -> [thread 0] -> Linux CPU 1
142
143 b) Two threads per core
144
145 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
146 -> [thread 1] -> Linux CPU 1
147 -> [core 1] -> [thread 0] -> Linux CPU 2
148 -> [thread 1] -> Linux CPU 3
149
150 Alternative enumeration:
151
152 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
153 -> [thread 1] -> Linux CPU 2
154 -> [core 1] -> [thread 0] -> Linux CPU 1
155 -> [thread 1] -> Linux CPU 3
156
157 AMD nomenclature for CMT systems:
158
159 [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
160 -> [Compute Unit Core 1] -> Linux CPU 1
161 -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
162 -> [Compute Unit Core 1] -> Linux CPU 3
163
1644) Dual Package, Dual Core
165
166 a) One thread per core
167
168 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
169 -> [core 1] -> [thread 0] -> Linux CPU 1
170
171 [package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
172 -> [core 1] -> [thread 0] -> Linux CPU 3
173
174 b) Two threads per core
175
176 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
177 -> [thread 1] -> Linux CPU 1
178 -> [core 1] -> [thread 0] -> Linux CPU 2
179 -> [thread 1] -> Linux CPU 3
180
181 [package 1] -> [core 0] -> [thread 0] -> Linux CPU 4
182 -> [thread 1] -> Linux CPU 5
183 -> [core 1] -> [thread 0] -> Linux CPU 6
184 -> [thread 1] -> Linux CPU 7
185
186 Alternative enumeration:
187
188 [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
189 -> [thread 1] -> Linux CPU 4
190 -> [core 1] -> [thread 0] -> Linux CPU 1
191 -> [thread 1] -> Linux CPU 5
192
193 [package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
194 -> [thread 1] -> Linux CPU 6
195 -> [core 1] -> [thread 0] -> Linux CPU 3
196 -> [thread 1] -> Linux CPU 7
197
198 AMD nomenclature for CMT systems:
199
200 [node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
201 -> [Compute Unit Core 1] -> Linux CPU 1
202 -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
203 -> [Compute Unit Core 1] -> Linux CPU 3
204
205 [node 1] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 4
206 -> [Compute Unit Core 1] -> Linux CPU 5
207 -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 6
208 -> [Compute Unit Core 1] -> Linux CPU 7
diff --git a/MAINTAINERS b/MAINTAINERS
index 1ace393e963c..c0df1e877153 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4302,7 +4302,7 @@ F: drivers/net/ethernet/agere/
4302 4302
4303ETHERNET BRIDGE 4303ETHERNET BRIDGE
4304M: Stephen Hemminger <stephen@networkplumber.org> 4304M: Stephen Hemminger <stephen@networkplumber.org>
4305L: bridge@lists.linux-foundation.org 4305L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
4306L: netdev@vger.kernel.org 4306L: netdev@vger.kernel.org
4307W: http://www.linuxfoundation.org/en/Net:Bridge 4307W: http://www.linuxfoundation.org/en/Net:Bridge
4308S: Maintained 4308S: Maintained
@@ -5042,6 +5042,7 @@ F: include/linux/hw_random.h
5042HARDWARE SPINLOCK CORE 5042HARDWARE SPINLOCK CORE
5043M: Ohad Ben-Cohen <ohad@wizery.com> 5043M: Ohad Ben-Cohen <ohad@wizery.com>
5044M: Bjorn Andersson <bjorn.andersson@linaro.org> 5044M: Bjorn Andersson <bjorn.andersson@linaro.org>
5045L: linux-remoteproc@vger.kernel.org
5045S: Maintained 5046S: Maintained
5046T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git 5047T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
5047F: Documentation/hwspinlock.txt 5048F: Documentation/hwspinlock.txt
@@ -5750,7 +5751,7 @@ R: Don Skidmore <donald.c.skidmore@intel.com>
5750R: Bruce Allan <bruce.w.allan@intel.com> 5751R: Bruce Allan <bruce.w.allan@intel.com>
5751R: John Ronciak <john.ronciak@intel.com> 5752R: John Ronciak <john.ronciak@intel.com>
5752R: Mitch Williams <mitch.a.williams@intel.com> 5753R: Mitch Williams <mitch.a.williams@intel.com>
5753L: intel-wired-lan@lists.osuosl.org 5754L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
5754W: http://www.intel.com/support/feedback.htm 5755W: http://www.intel.com/support/feedback.htm
5755W: http://e1000.sourceforge.net/ 5756W: http://e1000.sourceforge.net/
5756Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ 5757Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
@@ -6402,7 +6403,7 @@ KPROBES
6402M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> 6403M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
6403M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6404M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
6404M: "David S. Miller" <davem@davemloft.net> 6405M: "David S. Miller" <davem@davemloft.net>
6405M: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> 6406M: Masami Hiramatsu <mhiramat@kernel.org>
6406S: Maintained 6407S: Maintained
6407F: Documentation/kprobes.txt 6408F: Documentation/kprobes.txt
6408F: include/linux/kprobes.h 6409F: include/linux/kprobes.h
@@ -7575,7 +7576,7 @@ F: drivers/infiniband/hw/nes/
7575 7576
7576NETEM NETWORK EMULATOR 7577NETEM NETWORK EMULATOR
7577M: Stephen Hemminger <stephen@networkplumber.org> 7578M: Stephen Hemminger <stephen@networkplumber.org>
7578L: netem@lists.linux-foundation.org 7579L: netem@lists.linux-foundation.org (moderated for non-subscribers)
7579S: Maintained 7580S: Maintained
7580F: net/sched/sch_netem.c 7581F: net/sched/sch_netem.c
7581 7582
@@ -8253,7 +8254,7 @@ F: Documentation/filesystems/overlayfs.txt
8253 8254
8254ORANGEFS FILESYSTEM 8255ORANGEFS FILESYSTEM
8255M: Mike Marshall <hubcap@omnibond.com> 8256M: Mike Marshall <hubcap@omnibond.com>
8256L: pvfs2-developers@beowulf-underground.org 8257L: pvfs2-developers@beowulf-underground.org (subscribers-only)
8257T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git 8258T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
8258S: Supported 8259S: Supported
8259F: fs/orangefs/ 8260F: fs/orangefs/
@@ -8711,6 +8712,8 @@ F: drivers/pinctrl/sh-pfc/
8711 8712
8712PIN CONTROLLER - SAMSUNG 8713PIN CONTROLLER - SAMSUNG
8713M: Tomasz Figa <tomasz.figa@gmail.com> 8714M: Tomasz Figa <tomasz.figa@gmail.com>
8715M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
8716M: Sylwester Nawrocki <s.nawrocki@samsung.com>
8714L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8717L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8715L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 8718L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
8716S: Maintained 8719S: Maintained
@@ -9139,6 +9142,13 @@ T: git git://github.com/KrasnikovEugene/wcn36xx.git
9139S: Supported 9142S: Supported
9140F: drivers/net/wireless/ath/wcn36xx/ 9143F: drivers/net/wireless/ath/wcn36xx/
9141 9144
9145QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
9146M: Gabriel Somlo <somlo@cmu.edu>
9147M: "Michael S. Tsirkin" <mst@redhat.com>
9148L: qemu-devel@nongnu.org
9149S: Maintained
9150F: drivers/firmware/qemu_fw_cfg.c
9151
9142RADOS BLOCK DEVICE (RBD) 9152RADOS BLOCK DEVICE (RBD)
9143M: Ilya Dryomov <idryomov@gmail.com> 9153M: Ilya Dryomov <idryomov@gmail.com>
9144M: Sage Weil <sage@redhat.com> 9154M: Sage Weil <sage@redhat.com>
@@ -9314,6 +9324,7 @@ F: include/linux/regmap.h
9314REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM 9324REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
9315M: Ohad Ben-Cohen <ohad@wizery.com> 9325M: Ohad Ben-Cohen <ohad@wizery.com>
9316M: Bjorn Andersson <bjorn.andersson@linaro.org> 9326M: Bjorn Andersson <bjorn.andersson@linaro.org>
9327L: linux-remoteproc@vger.kernel.org
9317T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git 9328T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
9318S: Maintained 9329S: Maintained
9319F: drivers/remoteproc/ 9330F: drivers/remoteproc/
@@ -9323,6 +9334,7 @@ F: include/linux/remoteproc.h
9323REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM 9334REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
9324M: Ohad Ben-Cohen <ohad@wizery.com> 9335M: Ohad Ben-Cohen <ohad@wizery.com>
9325M: Bjorn Andersson <bjorn.andersson@linaro.org> 9336M: Bjorn Andersson <bjorn.andersson@linaro.org>
9337L: linux-remoteproc@vger.kernel.org
9326T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git 9338T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
9327S: Maintained 9339S: Maintained
9328F: drivers/rpmsg/ 9340F: drivers/rpmsg/
@@ -10583,6 +10595,14 @@ L: linux-tegra@vger.kernel.org
10583S: Maintained 10595S: Maintained
10584F: drivers/staging/nvec/ 10596F: drivers/staging/nvec/
10585 10597
10598STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
10599M: Jens Frederich <jfrederich@gmail.com>
10600M: Daniel Drake <dsd@laptop.org>
10601M: Jon Nettleton <jon.nettleton@gmail.com>
10602W: http://wiki.laptop.org/go/DCON
10603S: Maintained
10604F: drivers/staging/olpc_dcon/
10605
10586STAGING - REALTEK RTL8712U DRIVERS 10606STAGING - REALTEK RTL8712U DRIVERS
10587M: Larry Finger <Larry.Finger@lwfinger.net> 10607M: Larry Finger <Larry.Finger@lwfinger.net>
10588M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>. 10608M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
@@ -11137,8 +11157,8 @@ F: include/uapi/linux/tipc*.h
11137F: net/tipc/ 11157F: net/tipc/
11138 11158
11139TILE ARCHITECTURE 11159TILE ARCHITECTURE
11140M: Chris Metcalf <cmetcalf@ezchip.com> 11160M: Chris Metcalf <cmetcalf@mellanox.com>
11141W: http://www.ezchip.com/scm/ 11161W: http://www.mellanox.com/repository/solutions/tile-scm/
11142T: git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git 11162T: git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
11143S: Supported 11163S: Supported
11144F: arch/tile/ 11164F: arch/tile/
@@ -12202,9 +12222,9 @@ S: Maintained
12202F: drivers/media/tuners/tuner-xc2028.* 12222F: drivers/media/tuners/tuner-xc2028.*
12203 12223
12204XEN HYPERVISOR INTERFACE 12224XEN HYPERVISOR INTERFACE
12205M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
12206M: Boris Ostrovsky <boris.ostrovsky@oracle.com> 12225M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
12207M: David Vrabel <david.vrabel@citrix.com> 12226M: David Vrabel <david.vrabel@citrix.com>
12227M: Juergen Gross <jgross@suse.com>
12208L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12228L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12209T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git 12229T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
12210S: Supported 12230S: Supported
@@ -12216,16 +12236,16 @@ F: include/xen/
12216F: include/uapi/xen/ 12236F: include/uapi/xen/
12217 12237
12218XEN HYPERVISOR ARM 12238XEN HYPERVISOR ARM
12219M: Stefano Stabellini <stefano.stabellini@eu.citrix.com> 12239M: Stefano Stabellini <sstabellini@kernel.org>
12220L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12240L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12221S: Supported 12241S: Maintained
12222F: arch/arm/xen/ 12242F: arch/arm/xen/
12223F: arch/arm/include/asm/xen/ 12243F: arch/arm/include/asm/xen/
12224 12244
12225XEN HYPERVISOR ARM64 12245XEN HYPERVISOR ARM64
12226M: Stefano Stabellini <stefano.stabellini@eu.citrix.com> 12246M: Stefano Stabellini <sstabellini@kernel.org>
12227L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12247L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12228S: Supported 12248S: Maintained
12229F: arch/arm64/xen/ 12249F: arch/arm64/xen/
12230F: arch/arm64/include/asm/xen/ 12250F: arch/arm64/include/asm/xen/
12231 12251
diff --git a/Makefile b/Makefile
index 916b26e999d8..1d0aef03eae7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc3
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d7709e3930a3..9e5eddbb856f 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
628 628
629 /* kernel reading from page with U-mapping */ 629 /* kernel reading from page with U-mapping */
630 phys_addr_t paddr = (unsigned long)page_address(page); 630 phys_addr_t paddr = (unsigned long)page_address(page);
631 unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; 631 unsigned long vaddr = page->index << PAGE_SHIFT;
632 632
633 if (addr_not_cache_congruent(paddr, vaddr)) 633 if (addr_not_cache_congruent(paddr, vaddr))
634 __flush_dcache_page(paddr, vaddr); 634 __flush_dcache_page(paddr, vaddr);
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7b84657fba35..194b69923389 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
19 * This may need to be greater than __NR_last_syscall+1 in order to 19 * This may need to be greater than __NR_last_syscall+1 in order to
20 * account for the padding in the syscall table 20 * account for the padding in the syscall table
21 */ 21 */
22#define __NR_syscalls (392) 22#define __NR_syscalls (396)
23 23
24#define __ARCH_WANT_STAT64 24#define __ARCH_WANT_STAT64
25#define __ARCH_WANT_SYS_GETHOSTNAME 25#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 5dd2528e9e45..2cb9dc770e1d 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -418,6 +418,8 @@
418#define __NR_membarrier (__NR_SYSCALL_BASE+389) 418#define __NR_membarrier (__NR_SYSCALL_BASE+389)
419#define __NR_mlock2 (__NR_SYSCALL_BASE+390) 419#define __NR_mlock2 (__NR_SYSCALL_BASE+390)
420#define __NR_copy_file_range (__NR_SYSCALL_BASE+391) 420#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
421#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
422#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
421 423
422/* 424/*
423 * The following SWIs are ARM private. 425 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index dfc7cd6851ad..703fa0f3cd8f 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,8 +399,10 @@
399 CALL(sys_execveat) 399 CALL(sys_execveat)
400 CALL(sys_userfaultfd) 400 CALL(sys_userfaultfd)
401 CALL(sys_membarrier) 401 CALL(sys_membarrier)
402 CALL(sys_mlock2) 402/* 390 */ CALL(sys_mlock2)
403 CALL(sys_copy_file_range) 403 CALL(sys_copy_file_range)
404 CALL(sys_preadv2)
405 CALL(sys_pwritev2)
404#ifndef syscalls_counted 406#ifndef syscalls_counted
405.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 407.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
406#define syscalls_counted 408#define syscalls_counted
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 139791ed473d..a28fce0bdbbe 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
430 pr_info("CPU: div instructions available: patching division code\n"); 430 pr_info("CPU: div instructions available: patching division code\n");
431 431
432 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1; 432 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
433 asm ("" : "+g" (fn_addr));
433 ((u32 *)fn_addr)[0] = udiv_instruction(); 434 ((u32 *)fn_addr)[0] = udiv_instruction();
434 ((u32 *)fn_addr)[1] = bx_lr_instruction(); 435 ((u32 *)fn_addr)[1] = bx_lr_instruction();
435 flush_icache_range(fn_addr, fn_addr + 8); 436 flush_icache_range(fn_addr, fn_addr + 8);
436 437
437 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1; 438 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
439 asm ("" : "+g" (fn_addr));
438 ((u32 *)fn_addr)[0] = sdiv_instruction(); 440 ((u32 *)fn_addr)[0] = sdiv_instruction();
439 ((u32 *)fn_addr)[1] = bx_lr_instruction(); 441 ((u32 *)fn_addr)[1] = bx_lr_instruction();
440 flush_icache_range(fn_addr, fn_addr + 8); 442 flush_icache_range(fn_addr, fn_addr + 8);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 6accd66d26f0..b5384311dec4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
1061 kvm_arm_init_debug(); 1061 kvm_arm_init_debug();
1062} 1062}
1063 1063
1064static void cpu_hyp_reinit(void)
1065{
1066 if (is_kernel_in_hyp_mode()) {
1067 /*
1068 * cpu_init_stage2() is safe to call even if the PM
1069 * event was cancelled before the CPU was reset.
1070 */
1071 cpu_init_stage2(NULL);
1072 } else {
1073 if (__hyp_get_vectors() == hyp_default_vectors)
1074 cpu_init_hyp_mode(NULL);
1075 }
1076}
1077
1064static int hyp_init_cpu_notify(struct notifier_block *self, 1078static int hyp_init_cpu_notify(struct notifier_block *self,
1065 unsigned long action, void *cpu) 1079 unsigned long action, void *cpu)
1066{ 1080{
1067 switch (action) { 1081 switch (action) {
1068 case CPU_STARTING: 1082 case CPU_STARTING:
1069 case CPU_STARTING_FROZEN: 1083 case CPU_STARTING_FROZEN:
1070 if (__hyp_get_vectors() == hyp_default_vectors) 1084 cpu_hyp_reinit();
1071 cpu_init_hyp_mode(NULL);
1072 break;
1073 } 1085 }
1074 1086
1075 return NOTIFY_OK; 1087 return NOTIFY_OK;
@@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1084 unsigned long cmd, 1096 unsigned long cmd,
1085 void *v) 1097 void *v)
1086{ 1098{
1087 if (cmd == CPU_PM_EXIT && 1099 if (cmd == CPU_PM_EXIT) {
1088 __hyp_get_vectors() == hyp_default_vectors) { 1100 cpu_hyp_reinit();
1089 cpu_init_hyp_mode(NULL);
1090 return NOTIFY_OK; 1101 return NOTIFY_OK;
1091 } 1102 }
1092 1103
@@ -1128,6 +1139,22 @@ static int init_subsystems(void)
1128 int err; 1139 int err;
1129 1140
1130 /* 1141 /*
1142 * Register CPU Hotplug notifier
1143 */
1144 cpu_notifier_register_begin();
1145 err = __register_cpu_notifier(&hyp_init_cpu_nb);
1146 cpu_notifier_register_done();
1147 if (err) {
1148 kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
1149 return err;
1150 }
1151
1152 /*
1153 * Register CPU lower-power notifier
1154 */
1155 hyp_cpu_pm_init();
1156
1157 /*
1131 * Init HYP view of VGIC 1158 * Init HYP view of VGIC
1132 */ 1159 */
1133 err = kvm_vgic_hyp_init(); 1160 err = kvm_vgic_hyp_init();
@@ -1270,19 +1297,6 @@ static int init_hyp_mode(void)
1270 free_boot_hyp_pgd(); 1297 free_boot_hyp_pgd();
1271#endif 1298#endif
1272 1299
1273 cpu_notifier_register_begin();
1274
1275 err = __register_cpu_notifier(&hyp_init_cpu_nb);
1276
1277 cpu_notifier_register_done();
1278
1279 if (err) {
1280 kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
1281 goto out_err;
1282 }
1283
1284 hyp_cpu_pm_init();
1285
1286 /* set size of VMID supported by CPU */ 1300 /* set size of VMID supported by CPU */
1287 kvm_vmid_bits = kvm_get_vmid_bits(); 1301 kvm_vmid_bits = kvm_get_vmid_bits();
1288 kvm_info("%d-bit VMID\n", kvm_vmid_bits); 1302 kvm_info("%d-bit VMID\n", kvm_vmid_bits);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d0ba3551d49a..3cced8455727 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
235 */ 235 */
236 if (mapping && cache_is_vipt_aliasing()) 236 if (mapping && cache_is_vipt_aliasing())
237 flush_pfn_alias(page_to_pfn(page), 237 flush_pfn_alias(page_to_pfn(page),
238 page->index << PAGE_CACHE_SHIFT); 238 page->index << PAGE_SHIFT);
239} 239}
240 240
241static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) 241static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
250 * data in the current VM view associated with this page. 250 * data in the current VM view associated with this page.
251 * - aliasing VIPT: we only need to find one mapping of this page. 251 * - aliasing VIPT: we only need to find one mapping of this page.
252 */ 252 */
253 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 253 pgoff = page->index;
254 254
255 flush_dcache_mmap_lock(mapping); 255 flush_dcache_mmap_lock(mapping);
256 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { 256 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0f8963a7e7d9..6fcaac8e200f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -281,12 +281,12 @@ __v7_ca17mp_setup:
281 bl v7_invalidate_l1 281 bl v7_invalidate_l1
282 ldmia r12, {r1-r6, lr} 282 ldmia r12, {r1-r6, lr}
283#ifdef CONFIG_SMP 283#ifdef CONFIG_SMP
284 orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode
284 ALT_SMP(mrc p15, 0, r0, c1, c0, 1) 285 ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
285 ALT_UP(mov r0, #(1 << 6)) @ fake it for UP 286 ALT_UP(mov r0, r10) @ fake it for UP
286 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 287 orr r10, r10, r0 @ Set required bits
287 orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode 288 teq r10, r0 @ Were they already set?
288 orreq r0, r0, r10 @ Enable CPU-specific SMP bits 289 mcrne p15, 0, r10, c1, c0, 1 @ No, update register
289 mcreq p15, 0, r0, c1, c0, 1
290#endif 290#endif
291 b __v7_setup_cont 291 b __v7_setup_cont
292 292
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f70505186820..a44ef995d8ae 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -68,11 +68,13 @@ CONFIG_KSM=y
68CONFIG_TRANSPARENT_HUGEPAGE=y 68CONFIG_TRANSPARENT_HUGEPAGE=y
69CONFIG_CMA=y 69CONFIG_CMA=y
70CONFIG_XEN=y 70CONFIG_XEN=y
71CONFIG_CMDLINE="console=ttyAMA0"
72# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 71# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
73CONFIG_COMPAT=y 72CONFIG_COMPAT=y
74CONFIG_CPU_IDLE=y 73CONFIG_CPU_IDLE=y
75CONFIG_ARM_CPUIDLE=y 74CONFIG_ARM_CPUIDLE=y
75CONFIG_CPU_FREQ=y
76CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
77CONFIG_ARM_SCPI_CPUFREQ=y
76CONFIG_NET=y 78CONFIG_NET=y
77CONFIG_PACKET=y 79CONFIG_PACKET=y
78CONFIG_UNIX=y 80CONFIG_UNIX=y
@@ -80,7 +82,6 @@ CONFIG_INET=y
80CONFIG_IP_PNP=y 82CONFIG_IP_PNP=y
81CONFIG_IP_PNP_DHCP=y 83CONFIG_IP_PNP_DHCP=y
82CONFIG_IP_PNP_BOOTP=y 84CONFIG_IP_PNP_BOOTP=y
83# CONFIG_INET_LRO is not set
84# CONFIG_IPV6 is not set 85# CONFIG_IPV6 is not set
85CONFIG_BPF_JIT=y 86CONFIG_BPF_JIT=y
86# CONFIG_WIRELESS is not set 87# CONFIG_WIRELESS is not set
@@ -144,16 +145,18 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
144CONFIG_SERIAL_MVEBU_UART=y 145CONFIG_SERIAL_MVEBU_UART=y
145CONFIG_VIRTIO_CONSOLE=y 146CONFIG_VIRTIO_CONSOLE=y
146# CONFIG_HW_RANDOM is not set 147# CONFIG_HW_RANDOM is not set
147CONFIG_I2C=y
148CONFIG_I2C_CHARDEV=y 148CONFIG_I2C_CHARDEV=y
149CONFIG_I2C_DESIGNWARE_PLATFORM=y
149CONFIG_I2C_MV64XXX=y 150CONFIG_I2C_MV64XXX=y
150CONFIG_I2C_QUP=y 151CONFIG_I2C_QUP=y
152CONFIG_I2C_TEGRA=y
151CONFIG_I2C_UNIPHIER_F=y 153CONFIG_I2C_UNIPHIER_F=y
152CONFIG_I2C_RCAR=y 154CONFIG_I2C_RCAR=y
153CONFIG_SPI=y 155CONFIG_SPI=y
154CONFIG_SPI_PL022=y 156CONFIG_SPI_PL022=y
155CONFIG_SPI_QUP=y 157CONFIG_SPI_QUP=y
156CONFIG_SPMI=y 158CONFIG_SPMI=y
159CONFIG_PINCTRL_SINGLE=y
157CONFIG_PINCTRL_MSM8916=y 160CONFIG_PINCTRL_MSM8916=y
158CONFIG_PINCTRL_QCOM_SPMI_PMIC=y 161CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
159CONFIG_GPIO_SYSFS=y 162CONFIG_GPIO_SYSFS=y
@@ -196,6 +199,7 @@ CONFIG_USB_EHCI_HCD_PLATFORM=y
196CONFIG_USB_OHCI_HCD=y 199CONFIG_USB_OHCI_HCD=y
197CONFIG_USB_OHCI_HCD_PLATFORM=y 200CONFIG_USB_OHCI_HCD_PLATFORM=y
198CONFIG_USB_STORAGE=y 201CONFIG_USB_STORAGE=y
202CONFIG_USB_DWC2=y
199CONFIG_USB_CHIPIDEA=y 203CONFIG_USB_CHIPIDEA=y
200CONFIG_USB_CHIPIDEA_UDC=y 204CONFIG_USB_CHIPIDEA_UDC=y
201CONFIG_USB_CHIPIDEA_HOST=y 205CONFIG_USB_CHIPIDEA_HOST=y
@@ -205,19 +209,20 @@ CONFIG_USB_MSM_OTG=y
205CONFIG_USB_ULPI=y 209CONFIG_USB_ULPI=y
206CONFIG_USB_GADGET=y 210CONFIG_USB_GADGET=y
207CONFIG_MMC=y 211CONFIG_MMC=y
208CONFIG_MMC_BLOCK_MINORS=16 212CONFIG_MMC_BLOCK_MINORS=32
209CONFIG_MMC_ARMMMCI=y 213CONFIG_MMC_ARMMMCI=y
210CONFIG_MMC_SDHCI=y 214CONFIG_MMC_SDHCI=y
211CONFIG_MMC_SDHCI_PLTFM=y 215CONFIG_MMC_SDHCI_PLTFM=y
212CONFIG_MMC_SDHCI_TEGRA=y 216CONFIG_MMC_SDHCI_TEGRA=y
213CONFIG_MMC_SDHCI_MSM=y 217CONFIG_MMC_SDHCI_MSM=y
214CONFIG_MMC_SPI=y 218CONFIG_MMC_SPI=y
215CONFIG_MMC_SUNXI=y
216CONFIG_MMC_DW=y 219CONFIG_MMC_DW=y
217CONFIG_MMC_DW_EXYNOS=y 220CONFIG_MMC_DW_EXYNOS=y
218CONFIG_MMC_BLOCK_MINORS=16 221CONFIG_MMC_DW_K3=y
222CONFIG_MMC_SUNXI=y
219CONFIG_NEW_LEDS=y 223CONFIG_NEW_LEDS=y
220CONFIG_LEDS_CLASS=y 224CONFIG_LEDS_CLASS=y
225CONFIG_LEDS_GPIO=y
221CONFIG_LEDS_SYSCON=y 226CONFIG_LEDS_SYSCON=y
222CONFIG_LEDS_TRIGGERS=y 227CONFIG_LEDS_TRIGGERS=y
223CONFIG_LEDS_TRIGGER_HEARTBEAT=y 228CONFIG_LEDS_TRIGGER_HEARTBEAT=y
@@ -229,8 +234,8 @@ CONFIG_RTC_DRV_PL031=y
229CONFIG_RTC_DRV_SUN6I=y 234CONFIG_RTC_DRV_SUN6I=y
230CONFIG_RTC_DRV_XGENE=y 235CONFIG_RTC_DRV_XGENE=y
231CONFIG_DMADEVICES=y 236CONFIG_DMADEVICES=y
232CONFIG_QCOM_BAM_DMA=y
233CONFIG_TEGRA20_APB_DMA=y 237CONFIG_TEGRA20_APB_DMA=y
238CONFIG_QCOM_BAM_DMA=y
234CONFIG_RCAR_DMAC=y 239CONFIG_RCAR_DMAC=y
235CONFIG_VFIO=y 240CONFIG_VFIO=y
236CONFIG_VFIO_PCI=y 241CONFIG_VFIO_PCI=y
@@ -239,20 +244,26 @@ CONFIG_VIRTIO_BALLOON=y
239CONFIG_VIRTIO_MMIO=y 244CONFIG_VIRTIO_MMIO=y
240CONFIG_XEN_GNTDEV=y 245CONFIG_XEN_GNTDEV=y
241CONFIG_XEN_GRANT_DEV_ALLOC=y 246CONFIG_XEN_GRANT_DEV_ALLOC=y
247CONFIG_COMMON_CLK_SCPI=y
242CONFIG_COMMON_CLK_CS2000_CP=y 248CONFIG_COMMON_CLK_CS2000_CP=y
243CONFIG_COMMON_CLK_QCOM=y 249CONFIG_COMMON_CLK_QCOM=y
244CONFIG_MSM_GCC_8916=y 250CONFIG_MSM_GCC_8916=y
245CONFIG_HWSPINLOCK_QCOM=y 251CONFIG_HWSPINLOCK_QCOM=y
252CONFIG_MAILBOX=y
253CONFIG_ARM_MHU=y
254CONFIG_HI6220_MBOX=y
246CONFIG_ARM_SMMU=y 255CONFIG_ARM_SMMU=y
247CONFIG_QCOM_SMEM=y 256CONFIG_QCOM_SMEM=y
248CONFIG_QCOM_SMD=y 257CONFIG_QCOM_SMD=y
249CONFIG_QCOM_SMD_RPM=y 258CONFIG_QCOM_SMD_RPM=y
250CONFIG_ARCH_TEGRA_132_SOC=y 259CONFIG_ARCH_TEGRA_132_SOC=y
251CONFIG_ARCH_TEGRA_210_SOC=y 260CONFIG_ARCH_TEGRA_210_SOC=y
252CONFIG_HISILICON_IRQ_MBIGEN=y
253CONFIG_EXTCON_USB_GPIO=y 261CONFIG_EXTCON_USB_GPIO=y
262CONFIG_COMMON_RESET_HI6220=y
254CONFIG_PHY_RCAR_GEN3_USB2=y 263CONFIG_PHY_RCAR_GEN3_USB2=y
264CONFIG_PHY_HI6220_USB=y
255CONFIG_PHY_XGENE=y 265CONFIG_PHY_XGENE=y
266CONFIG_ARM_SCPI_PROTOCOL=y
256CONFIG_EXT2_FS=y 267CONFIG_EXT2_FS=y
257CONFIG_EXT3_FS=y 268CONFIG_EXT3_FS=y
258CONFIG_FANOTIFY=y 269CONFIG_FANOTIFY=y
@@ -264,6 +275,7 @@ CONFIG_CUSE=y
264CONFIG_VFAT_FS=y 275CONFIG_VFAT_FS=y
265CONFIG_TMPFS=y 276CONFIG_TMPFS=y
266CONFIG_HUGETLBFS=y 277CONFIG_HUGETLBFS=y
278CONFIG_CONFIGFS_FS=y
267CONFIG_EFIVAR_FS=y 279CONFIG_EFIVAR_FS=y
268CONFIG_SQUASHFS=y 280CONFIG_SQUASHFS=y
269CONFIG_NFS_FS=y 281CONFIG_NFS_FS=y
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0e391dbfc420..4150fd8bae01 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -124,7 +124,9 @@
124#define VTCR_EL2_SL0_LVL1 (1 << 6) 124#define VTCR_EL2_SL0_LVL1 (1 << 6)
125#define VTCR_EL2_T0SZ_MASK 0x3f 125#define VTCR_EL2_T0SZ_MASK 0x3f
126#define VTCR_EL2_T0SZ_40B 24 126#define VTCR_EL2_T0SZ_40B 24
127#define VTCR_EL2_VS 19 127#define VTCR_EL2_VS_SHIFT 19
128#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
129#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)
128 130
129/* 131/*
130 * We configure the Stage-2 page tables to always restrict the IPA space to be 132 * We configure the Stage-2 page tables to always restrict the IPA space to be
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 227ed475dbd3..b7e82a795ac9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -27,7 +27,6 @@
27#include <asm/kvm.h> 27#include <asm/kvm.h>
28#include <asm/kvm_asm.h> 28#include <asm/kvm_asm.h>
29#include <asm/kvm_mmio.h> 29#include <asm/kvm_mmio.h>
30#include <asm/kvm_perf_event.h>
31 30
32#define __KVM_HAVE_ARCH_INTC_INITIALIZED 31#define __KVM_HAVE_ARCH_INTC_INITIALIZED
33 32
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index a46b019ebcf5..44eaff70da6a 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -21,7 +21,6 @@
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/kvm_host.h> 22#include <linux/kvm_host.h>
23#include <asm/kvm_mmu.h> 23#include <asm/kvm_mmu.h>
24#include <asm/kvm_perf_event.h>
25#include <asm/sysreg.h> 24#include <asm/sysreg.h>
26 25
27#define __hyp_text __section(.hyp.text) notrace 26#define __hyp_text __section(.hyp.text) notrace
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
deleted file mode 100644
index c18fdebb8f66..000000000000
--- a/arch/arm64/include/asm/kvm_perf_event.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ASM_KVM_PERF_EVENT_H
18#define __ASM_KVM_PERF_EVENT_H
19
20#define ARMV8_PMU_MAX_COUNTERS 32
21#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
22
23/*
24 * Per-CPU PMCR: config reg
25 */
26#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
27#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
28#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
29#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
30#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
31#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
32/* Determines which bit of PMCCNTR_EL0 generates an overflow */
33#define ARMV8_PMU_PMCR_LC (1 << 6)
34#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
35#define ARMV8_PMU_PMCR_N_MASK 0x1f
36#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */
37
38/*
39 * PMOVSR: counters overflow flag status reg
40 */
41#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
42#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
43
44/*
45 * PMXEVTYPER: Event selection reg
46 */
47#define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
48#define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
49
50#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
51
52/*
53 * Event filters for PMUv3
54 */
55#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31)
56#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30)
57#define ARMV8_PMU_INCLUDE_EL2 (1 << 27)
58
59/*
60 * PMUSERENR: user enable reg
61 */
62#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
63#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
64#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
65#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
66#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
67
68#endif
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
index 4e603ea36ad3..123f45d92cd1 100644
--- a/arch/arm64/include/asm/opcodes.h
+++ b/arch/arm64/include/asm/opcodes.h
@@ -1 +1,5 @@
1#ifdef CONFIG_CPU_BIG_ENDIAN
2#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
3#endif
4
1#include <../../arm/include/asm/opcodes.h> 5#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 7bd3cdb533ea..2065f46fa740 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,53 @@
17#ifndef __ASM_PERF_EVENT_H 17#ifndef __ASM_PERF_EVENT_H
18#define __ASM_PERF_EVENT_H 18#define __ASM_PERF_EVENT_H
19 19
20#define ARMV8_PMU_MAX_COUNTERS 32
21#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
22
23/*
24 * Per-CPU PMCR: config reg
25 */
26#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
27#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
28#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
29#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
30#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
31#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
32#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
33#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
34#define ARMV8_PMU_PMCR_N_MASK 0x1f
35#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */
36
37/*
38 * PMOVSR: counters overflow flag status reg
39 */
40#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
41#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
42
43/*
44 * PMXEVTYPER: Event selection reg
45 */
46#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
47#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
48
49#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
50
51/*
52 * Event filters for PMUv3
53 */
54#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31)
55#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30)
56#define ARMV8_PMU_INCLUDE_EL2 (1 << 27)
57
58/*
59 * PMUSERENR: user enable reg
60 */
61#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
62#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
63#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
64#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
65#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
66
20#ifdef CONFIG_PERF_EVENTS 67#ifdef CONFIG_PERF_EVENTS
21struct pt_regs; 68struct pt_regs;
22extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 69extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1a78d6e2a78b..12874164b0ae 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -141,6 +141,9 @@
141#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 141#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
142#define ID_AA64MMFR1_HADBS_SHIFT 0 142#define ID_AA64MMFR1_HADBS_SHIFT 0
143 143
144#define ID_AA64MMFR1_VMIDBITS_8 0
145#define ID_AA64MMFR1_VMIDBITS_16 2
146
144/* id_aa64mmfr2 */ 147/* id_aa64mmfr2 */
145#define ID_AA64MMFR2_UAO_SHIFT 4 148#define ID_AA64MMFR2_UAO_SHIFT 4
146 149
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 767c4f6e1f5b..f419a7c075a4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <asm/irq_regs.h> 22#include <asm/irq_regs.h>
23#include <asm/perf_event.h>
23#include <asm/virt.h> 24#include <asm/virt.h>
24 25
25#include <linux/of.h> 26#include <linux/of.h>
@@ -384,9 +385,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
384#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ 385#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
385 (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) 386 (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
386 387
387#define ARMV8_MAX_COUNTERS 32
388#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
389
390/* 388/*
391 * ARMv8 low level PMU access 389 * ARMv8 low level PMU access
392 */ 390 */
@@ -395,40 +393,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
395 * Perf Event to low level counters mapping 393 * Perf Event to low level counters mapping
396 */ 394 */
397#define ARMV8_IDX_TO_COUNTER(x) \ 395#define ARMV8_IDX_TO_COUNTER(x) \
398 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) 396 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
399
400/*
401 * Per-CPU PMCR: config reg
402 */
403#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
404#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
405#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
406#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
407#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
408#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
409#define ARMV8_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
410#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
411#define ARMV8_PMCR_N_MASK 0x1f
412#define ARMV8_PMCR_MASK 0x7f /* Mask for writable bits */
413
414/*
415 * PMOVSR: counters overflow flag status reg
416 */
417#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
418#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
419
420/*
421 * PMXEVTYPER: Event selection reg
422 */
423#define ARMV8_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
424#define ARMV8_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
425
426/*
427 * Event filters for PMUv3
428 */
429#define ARMV8_EXCLUDE_EL1 (1 << 31)
430#define ARMV8_EXCLUDE_EL0 (1 << 30)
431#define ARMV8_INCLUDE_EL2 (1 << 27)
432 397
433static inline u32 armv8pmu_pmcr_read(void) 398static inline u32 armv8pmu_pmcr_read(void)
434{ 399{
@@ -439,14 +404,14 @@ static inline u32 armv8pmu_pmcr_read(void)
439 404
440static inline void armv8pmu_pmcr_write(u32 val) 405static inline void armv8pmu_pmcr_write(u32 val)
441{ 406{
442 val &= ARMV8_PMCR_MASK; 407 val &= ARMV8_PMU_PMCR_MASK;
443 isb(); 408 isb();
444 asm volatile("msr pmcr_el0, %0" :: "r" (val)); 409 asm volatile("msr pmcr_el0, %0" :: "r" (val));
445} 410}
446 411
447static inline int armv8pmu_has_overflowed(u32 pmovsr) 412static inline int armv8pmu_has_overflowed(u32 pmovsr)
448{ 413{
449 return pmovsr & ARMV8_OVERFLOWED_MASK; 414 return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
450} 415}
451 416
452static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) 417static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
@@ -512,7 +477,7 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
512static inline void armv8pmu_write_evtype(int idx, u32 val) 477static inline void armv8pmu_write_evtype(int idx, u32 val)
513{ 478{
514 if (armv8pmu_select_counter(idx) == idx) { 479 if (armv8pmu_select_counter(idx) == idx) {
515 val &= ARMV8_EVTYPE_MASK; 480 val &= ARMV8_PMU_EVTYPE_MASK;
516 asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); 481 asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
517 } 482 }
518} 483}
@@ -558,7 +523,7 @@ static inline u32 armv8pmu_getreset_flags(void)
558 asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); 523 asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
559 524
560 /* Write to clear flags */ 525 /* Write to clear flags */
561 value &= ARMV8_OVSR_MASK; 526 value &= ARMV8_PMU_OVSR_MASK;
562 asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); 527 asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
563 528
564 return value; 529 return value;
@@ -696,7 +661,7 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
696 661
697 raw_spin_lock_irqsave(&events->pmu_lock, flags); 662 raw_spin_lock_irqsave(&events->pmu_lock, flags);
698 /* Enable all counters */ 663 /* Enable all counters */
699 armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); 664 armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
700 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 665 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
701} 666}
702 667
@@ -707,7 +672,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
707 672
708 raw_spin_lock_irqsave(&events->pmu_lock, flags); 673 raw_spin_lock_irqsave(&events->pmu_lock, flags);
709 /* Disable all counters */ 674 /* Disable all counters */
710 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); 675 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
711 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 676 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
712} 677}
713 678
@@ -717,7 +682,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
717 int idx; 682 int idx;
718 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); 683 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
719 struct hw_perf_event *hwc = &event->hw; 684 struct hw_perf_event *hwc = &event->hw;
720 unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT; 685 unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
721 686
722 /* Always place a cycle counter into the cycle counter. */ 687 /* Always place a cycle counter into the cycle counter. */
723 if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { 688 if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
@@ -754,11 +719,11 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
754 attr->exclude_kernel != attr->exclude_hv) 719 attr->exclude_kernel != attr->exclude_hv)
755 return -EINVAL; 720 return -EINVAL;
756 if (attr->exclude_user) 721 if (attr->exclude_user)
757 config_base |= ARMV8_EXCLUDE_EL0; 722 config_base |= ARMV8_PMU_EXCLUDE_EL0;
758 if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) 723 if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
759 config_base |= ARMV8_EXCLUDE_EL1; 724 config_base |= ARMV8_PMU_EXCLUDE_EL1;
760 if (!attr->exclude_hv) 725 if (!attr->exclude_hv)
761 config_base |= ARMV8_INCLUDE_EL2; 726 config_base |= ARMV8_PMU_INCLUDE_EL2;
762 727
763 /* 728 /*
764 * Install the filter into config_base as this is used to 729 * Install the filter into config_base as this is used to
@@ -784,35 +749,36 @@ static void armv8pmu_reset(void *info)
784 * Initialize & Reset PMNC. Request overflow interrupt for 749 * Initialize & Reset PMNC. Request overflow interrupt for
785 * 64 bit cycle counter but cheat in armv8pmu_write_counter(). 750 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
786 */ 751 */
787 armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC); 752 armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
753 ARMV8_PMU_PMCR_LC);
788} 754}
789 755
790static int armv8_pmuv3_map_event(struct perf_event *event) 756static int armv8_pmuv3_map_event(struct perf_event *event)
791{ 757{
792 return armpmu_map_event(event, &armv8_pmuv3_perf_map, 758 return armpmu_map_event(event, &armv8_pmuv3_perf_map,
793 &armv8_pmuv3_perf_cache_map, 759 &armv8_pmuv3_perf_cache_map,
794 ARMV8_EVTYPE_EVENT); 760 ARMV8_PMU_EVTYPE_EVENT);
795} 761}
796 762
797static int armv8_a53_map_event(struct perf_event *event) 763static int armv8_a53_map_event(struct perf_event *event)
798{ 764{
799 return armpmu_map_event(event, &armv8_a53_perf_map, 765 return armpmu_map_event(event, &armv8_a53_perf_map,
800 &armv8_a53_perf_cache_map, 766 &armv8_a53_perf_cache_map,
801 ARMV8_EVTYPE_EVENT); 767 ARMV8_PMU_EVTYPE_EVENT);
802} 768}
803 769
804static int armv8_a57_map_event(struct perf_event *event) 770static int armv8_a57_map_event(struct perf_event *event)
805{ 771{
806 return armpmu_map_event(event, &armv8_a57_perf_map, 772 return armpmu_map_event(event, &armv8_a57_perf_map,
807 &armv8_a57_perf_cache_map, 773 &armv8_a57_perf_cache_map,
808 ARMV8_EVTYPE_EVENT); 774 ARMV8_PMU_EVTYPE_EVENT);
809} 775}
810 776
811static int armv8_thunder_map_event(struct perf_event *event) 777static int armv8_thunder_map_event(struct perf_event *event)
812{ 778{
813 return armpmu_map_event(event, &armv8_thunder_perf_map, 779 return armpmu_map_event(event, &armv8_thunder_perf_map,
814 &armv8_thunder_perf_cache_map, 780 &armv8_thunder_perf_cache_map,
815 ARMV8_EVTYPE_EVENT); 781 ARMV8_PMU_EVTYPE_EVENT);
816} 782}
817 783
818static void armv8pmu_read_num_pmnc_events(void *info) 784static void armv8pmu_read_num_pmnc_events(void *info)
@@ -820,7 +786,7 @@ static void armv8pmu_read_num_pmnc_events(void *info)
820 int *nb_cnt = info; 786 int *nb_cnt = info;
821 787
822 /* Read the nb of CNTx counters supported from PMNC */ 788 /* Read the nb of CNTx counters supported from PMNC */
823 *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; 789 *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
824 790
825 /* Add the CPU cycles counter */ 791 /* Add the CPU cycles counter */
826 *nb_cnt += 1; 792 *nb_cnt += 1;
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index bfc54fd82797..5a9f3bf542b0 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -36,8 +36,10 @@ void __hyp_text __init_stage2_translation(void)
36 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS 36 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
37 * bit in VTCR_EL2. 37 * bit in VTCR_EL2.
38 */ 38 */
39 tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf; 39 tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
40 val |= (tmp == 2) ? VTCR_EL2_VS : 0; 40 val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
41 VTCR_EL2_VS_16BIT :
42 VTCR_EL2_VS_8BIT;
41 43
42 write_sysreg(val, vtcr_el2); 44 write_sysreg(val, vtcr_el2);
43} 45}
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 745695db5ba0..f2f264b5aafe 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
261 au1x_dma_chan_t *cp; 261 au1x_dma_chan_t *cp;
262 262
263 /* 263 /*
264 * We do the intialization on the first channel allocation. 264 * We do the initialization on the first channel allocation.
265 * We have to wait because of the interrupt handler initialization 265 * We have to wait because of the interrupt handler initialization
266 * which can't be done successfully during board set up. 266 * which can't be done successfully during board set up.
267 */ 267 */
@@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
964 dp->dscr_source1 = dscr->dscr_source1; 964 dp->dscr_source1 = dscr->dscr_source1;
965 dp->dscr_cmd1 = dscr->dscr_cmd1; 965 dp->dscr_cmd1 = dscr->dscr_cmd1;
966 nbytes = dscr->dscr_cmd1; 966 nbytes = dscr->dscr_cmd1;
967 /* Allow the caller to specifiy if an interrupt is generated */ 967 /* Allow the caller to specify if an interrupt is generated */
968 dp->dscr_cmd0 &= ~DSCR_CMD0_IE; 968 dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
969 dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V; 969 dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
970 ctp->chan_ptr->ddma_dbell = 0; 970 ctp->chan_ptr->ddma_dbell = 0;
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index bdeed9d13c6f..433c4b9a9f0a 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
503 if (board == BCSR_WHOAMI_DB1500) { 503 if (board == BCSR_WHOAMI_DB1500) {
504 c0 = AU1500_GPIO2_INT; 504 c0 = AU1500_GPIO2_INT;
505 c1 = AU1500_GPIO5_INT; 505 c1 = AU1500_GPIO5_INT;
506 d0 = AU1500_GPIO0_INT; 506 d0 = 0; /* GPIO number, NOT irq! */
507 d1 = AU1500_GPIO3_INT; 507 d1 = 3; /* GPIO number, NOT irq! */
508 s0 = AU1500_GPIO1_INT; 508 s0 = AU1500_GPIO1_INT;
509 s1 = AU1500_GPIO4_INT; 509 s1 = AU1500_GPIO4_INT;
510 } else if (board == BCSR_WHOAMI_DB1100) { 510 } else if (board == BCSR_WHOAMI_DB1100) {
511 c0 = AU1100_GPIO2_INT; 511 c0 = AU1100_GPIO2_INT;
512 c1 = AU1100_GPIO5_INT; 512 c1 = AU1100_GPIO5_INT;
513 d0 = AU1100_GPIO0_INT; 513 d0 = 0; /* GPIO number, NOT irq! */
514 d1 = AU1100_GPIO3_INT; 514 d1 = 3; /* GPIO number, NOT irq! */
515 s0 = AU1100_GPIO1_INT; 515 s0 = AU1100_GPIO1_INT;
516 s1 = AU1100_GPIO4_INT; 516 s1 = AU1100_GPIO4_INT;
517 517
@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
545 } else if (board == BCSR_WHOAMI_DB1000) { 545 } else if (board == BCSR_WHOAMI_DB1000) {
546 c0 = AU1000_GPIO2_INT; 546 c0 = AU1000_GPIO2_INT;
547 c1 = AU1000_GPIO5_INT; 547 c1 = AU1000_GPIO5_INT;
548 d0 = AU1000_GPIO0_INT; 548 d0 = 0; /* GPIO number, NOT irq! */
549 d1 = AU1000_GPIO3_INT; 549 d1 = 3; /* GPIO number, NOT irq! */
550 s0 = AU1000_GPIO1_INT; 550 s0 = AU1000_GPIO1_INT;
551 s1 = AU1000_GPIO4_INT; 551 s1 = AU1000_GPIO4_INT;
552 platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs)); 552 platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
553 } else if ((board == BCSR_WHOAMI_PB1500) || 553 } else if ((board == BCSR_WHOAMI_PB1500) ||
554 (board == BCSR_WHOAMI_PB1500R2)) { 554 (board == BCSR_WHOAMI_PB1500R2)) {
555 c0 = AU1500_GPIO203_INT; 555 c0 = AU1500_GPIO203_INT;
556 d0 = AU1500_GPIO201_INT; 556 d0 = 1; /* GPIO number, NOT irq! */
557 s0 = AU1500_GPIO202_INT; 557 s0 = AU1500_GPIO202_INT;
558 twosocks = 0; 558 twosocks = 0;
559 flashsize = 64; 559 flashsize = 64;
@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
566 */ 566 */
567 } else if (board == BCSR_WHOAMI_PB1100) { 567 } else if (board == BCSR_WHOAMI_PB1100) {
568 c0 = AU1100_GPIO11_INT; 568 c0 = AU1100_GPIO11_INT;
569 d0 = AU1100_GPIO9_INT; 569 d0 = 9; /* GPIO number, NOT irq! */
570 s0 = AU1100_GPIO10_INT; 570 s0 = AU1100_GPIO10_INT;
571 twosocks = 0; 571 twosocks = 0;
572 flashsize = 64; 572 flashsize = 64;
@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
583 } else 583 } else
584 return 0; /* unknown board, no further dev setup to do */ 584 return 0; /* unknown board, no further dev setup to do */
585 585
586 irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
587 irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW); 586 irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
588 irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW); 587 irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
589 588
@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
597 c0, d0, /*s0*/0, 0, 0); 596 c0, d0, /*s0*/0, 0, 0);
598 597
599 if (twosocks) { 598 if (twosocks) {
600 irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
601 irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW); 599 irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
602 irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW); 600 irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
603 601
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index b518f029f5e7..1c01d6eadb08 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
514 AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 514 AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
515 AU1000_PCMCIA_IO_PHYS_ADDR, 515 AU1000_PCMCIA_IO_PHYS_ADDR,
516 AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 516 AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
517 AU1550_GPIO3_INT, AU1550_GPIO0_INT, 517 AU1550_GPIO3_INT, 0,
518 /*AU1550_GPIO21_INT*/0, 0, 0); 518 /*AU1550_GPIO21_INT*/0, 0, 0);
519 519
520 db1x_register_pcmcia_socket( 520 db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
524 AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, 524 AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
525 AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000, 525 AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
526 AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, 526 AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
527 AU1550_GPIO5_INT, AU1550_GPIO1_INT, 527 AU1550_GPIO5_INT, 1,
528 /*AU1550_GPIO22_INT*/0, 0, 1); 528 /*AU1550_GPIO22_INT*/0, 0, 1);
529 529
530 platform_device_register(&db1550_nand_dev); 530 platform_device_register(&db1550_nand_dev);
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index eb5117ced95a..618dfd735eed 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -26,8 +26,7 @@
26#include "common.h" 26#include "common.h"
27 27
28#define AR71XX_BASE_FREQ 40000000 28#define AR71XX_BASE_FREQ 40000000
29#define AR724X_BASE_FREQ 5000000 29#define AR724X_BASE_FREQ 40000000
30#define AR913X_BASE_FREQ 5000000
31 30
32static struct clk *clks[3]; 31static struct clk *clks[3];
33static struct clk_onecell_data clk_data = { 32static struct clk_onecell_data clk_data = {
@@ -103,8 +102,8 @@ static void __init ar724x_clocks_init(void)
103 div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK); 102 div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
104 freq = div * ref_rate; 103 freq = div * ref_rate;
105 104
106 div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK); 105 div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
107 freq *= div; 106 freq /= div;
108 107
109 cpu_rate = freq; 108 cpu_rate = freq;
110 109
@@ -123,39 +122,6 @@ static void __init ar724x_clocks_init(void)
123 clk_add_alias("uart", NULL, "ahb", NULL); 122 clk_add_alias("uart", NULL, "ahb", NULL);
124} 123}
125 124
126static void __init ar913x_clocks_init(void)
127{
128 unsigned long ref_rate;
129 unsigned long cpu_rate;
130 unsigned long ddr_rate;
131 unsigned long ahb_rate;
132 u32 pll;
133 u32 freq;
134 u32 div;
135
136 ref_rate = AR913X_BASE_FREQ;
137 pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
138
139 div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
140 freq = div * ref_rate;
141
142 cpu_rate = freq;
143
144 div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
145 ddr_rate = freq / div;
146
147 div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
148 ahb_rate = cpu_rate / div;
149
150 ath79_add_sys_clkdev("ref", ref_rate);
151 clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
152 clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
153 clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
154
155 clk_add_alias("wdt", NULL, "ahb", NULL);
156 clk_add_alias("uart", NULL, "ahb", NULL);
157}
158
159static void __init ar933x_clocks_init(void) 125static void __init ar933x_clocks_init(void)
160{ 126{
161 unsigned long ref_rate; 127 unsigned long ref_rate;
@@ -443,10 +409,8 @@ void __init ath79_clocks_init(void)
443{ 409{
444 if (soc_is_ar71xx()) 410 if (soc_is_ar71xx())
445 ar71xx_clocks_init(); 411 ar71xx_clocks_init();
446 else if (soc_is_ar724x()) 412 else if (soc_is_ar724x() || soc_is_ar913x())
447 ar724x_clocks_init(); 413 ar724x_clocks_init();
448 else if (soc_is_ar913x())
449 ar913x_clocks_init();
450 else if (soc_is_ar933x()) 414 else if (soc_is_ar933x())
451 ar933x_clocks_init(); 415 ar933x_clocks_init();
452 else if (soc_is_ar934x()) 416 else if (soc_is_ar934x())
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 959c145a0a2c..ca7ad131d057 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -714,11 +714,11 @@ void bcm47xx_sprom_register_fallbacks(void)
714{ 714{
715#if defined(CONFIG_BCM47XX_SSB) 715#if defined(CONFIG_BCM47XX_SSB)
716 if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb)) 716 if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
717 pr_warn("Failed to registered ssb SPROM handler\n"); 717 pr_warn("Failed to register ssb SPROM handler\n");
718#endif 718#endif
719 719
720#if defined(CONFIG_BCM47XX_BCMA) 720#if defined(CONFIG_BCM47XX_BCMA)
721 if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma)) 721 if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
722 pr_warn("Failed to registered bcma SPROM handler\n"); 722 pr_warn("Failed to register bcma SPROM handler\n");
723#endif 723#endif
724} 724}
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 4eff1ef02eff..309d2ad67e4d 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -39,10 +39,11 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
39vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o 39vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
40endif 40endif
41 41
42vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o 42vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
43 43
44$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib 44extra-y += ashldi3.c bswapsi.c
45$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c 45$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
46$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
46 $(call cmd,shipped) 47 $(call cmd,shipped)
47 48
48targets := $(notdir $(vmlinuzobjs-y)) 49targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index adb33e355043..56035e5b7008 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -82,7 +82,7 @@
82 }; 82 };
83 83
84 gisb-arb@400000 { 84 gisb-arb@400000 {
85 compatible = "brcm,bcm7400-gisb-arb"; 85 compatible = "brcm,bcm7435-gisb-arb";
86 reg = <0x400000 0xdc>; 86 reg = <0x400000 0xdc>;
87 native-endian; 87 native-endian;
88 interrupt-parent = <&sun_l2_intc>; 88 interrupt-parent = <&sun_l2_intc>;
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 3ad4ba9b12fd..3c2ed9ee5b2f 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -83,7 +83,7 @@
83 }; 83 };
84 84
85 pll: pll-controller@18050000 { 85 pll: pll-controller@18050000 {
86 compatible = "qca,ar9132-ppl", 86 compatible = "qca,ar9132-pll",
87 "qca,ar9130-pll"; 87 "qca,ar9130-pll";
88 reg = <0x18050000 0x20>; 88 reg = <0x18050000 0x20>;
89 89
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index e535ee3c26a4..4f1540e5f963 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -18,7 +18,7 @@
18 reg = <0x0 0x2000000>; 18 reg = <0x0 0x2000000>;
19 }; 19 };
20 20
21 extosc: oscillator { 21 extosc: ref {
22 compatible = "fixed-clock"; 22 compatible = "fixed-clock";
23 #clock-cells = <0>; 23 #clock-cells = <0>;
24 clock-frequency = <40000000>; 24 clock-frequency = <40000000>;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
index e59d1b79f24c..2f415d9d0f3c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
68 gmx_rx_int_en.s.pause_drp = 1; 68 gmx_rx_int_en.s.pause_drp = 1;
69 /* Skipping gmx_rx_int_en.s.reserved_16_18 */ 69 /* Skipping gmx_rx_int_en.s.reserved_16_18 */
70 /*gmx_rx_int_en.s.ifgerr = 1; */ 70 /*gmx_rx_int_en.s.ifgerr = 1; */
71 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 71 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
72 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 72 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
73 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 73 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
74 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 74 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -89,7 +89,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
89 /*gmx_rx_int_en.s.phy_spd = 1; */ 89 /*gmx_rx_int_en.s.phy_spd = 1; */
90 /*gmx_rx_int_en.s.phy_link = 1; */ 90 /*gmx_rx_int_en.s.phy_link = 1; */
91 /*gmx_rx_int_en.s.ifgerr = 1; */ 91 /*gmx_rx_int_en.s.ifgerr = 1; */
92 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 92 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
93 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 93 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
94 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 94 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
95 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 95 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
112 /*gmx_rx_int_en.s.phy_spd = 1; */ 112 /*gmx_rx_int_en.s.phy_spd = 1; */
113 /*gmx_rx_int_en.s.phy_link = 1; */ 113 /*gmx_rx_int_en.s.phy_link = 1; */
114 /*gmx_rx_int_en.s.ifgerr = 1; */ 114 /*gmx_rx_int_en.s.ifgerr = 1; */
115 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 115 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
116 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 116 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
117 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 117 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
118 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 118 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
134 /*gmx_rx_int_en.s.phy_spd = 1; */ 134 /*gmx_rx_int_en.s.phy_spd = 1; */
135 /*gmx_rx_int_en.s.phy_link = 1; */ 135 /*gmx_rx_int_en.s.phy_link = 1; */
136 /*gmx_rx_int_en.s.ifgerr = 1; */ 136 /*gmx_rx_int_en.s.ifgerr = 1; */
137 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 137 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
138 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 138 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
139 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 139 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
140 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 140 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
156 /*gmx_rx_int_en.s.phy_spd = 1; */ 156 /*gmx_rx_int_en.s.phy_spd = 1; */
157 /*gmx_rx_int_en.s.phy_link = 1; */ 157 /*gmx_rx_int_en.s.phy_link = 1; */
158 /*gmx_rx_int_en.s.ifgerr = 1; */ 158 /*gmx_rx_int_en.s.ifgerr = 1; */
159 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 159 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
160 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 160 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
161 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 161 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
162 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 162 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
179 /*gmx_rx_int_en.s.phy_spd = 1; */ 179 /*gmx_rx_int_en.s.phy_spd = 1; */
180 /*gmx_rx_int_en.s.phy_link = 1; */ 180 /*gmx_rx_int_en.s.phy_link = 1; */
181 /*gmx_rx_int_en.s.ifgerr = 1; */ 181 /*gmx_rx_int_en.s.ifgerr = 1; */
182 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 182 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
183 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 183 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
184 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 184 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
185 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 185 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
209 gmx_rx_int_en.s.pause_drp = 1; 209 gmx_rx_int_en.s.pause_drp = 1;
210 /* Skipping gmx_rx_int_en.s.reserved_16_18 */ 210 /* Skipping gmx_rx_int_en.s.reserved_16_18 */
211 /*gmx_rx_int_en.s.ifgerr = 1; */ 211 /*gmx_rx_int_en.s.ifgerr = 1; */
212 /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ 212 /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
213 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ 213 /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
214 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ 214 /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
215 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ 215 /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 87be167a7a6a..676fab50dd2b 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void)
189 /* 189 /*
190 * Set the size of the PKO command buffers to an odd number of 190 * Set the size of the PKO command buffers to an odd number of
191 * 64bit words. This allows the normal two word send to stay 191 * 64bit words. This allows the normal two word send to stay
192 * aligned and never span a comamnd word buffer. 192 * aligned and never span a command word buffer.
193 */ 193 */
194 config.u64 = 0; 194 config.u64 = 0;
195 config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL; 195 config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index b7fa9ae28c36..42412ba0f3bf 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -331,7 +331,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
331 } 331 }
332 332
333 if (!(avail_coremask & (1 << coreid))) { 333 if (!(avail_coremask & (1 << coreid))) {
334 /* core not available, assume, that catched by simple-executive */ 334 /* core not available, assume, that caught by simple-executive */
335 cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); 335 cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
336 cvmx_write_csr(CVMX_CIU_PP_RST, 0); 336 cvmx_write_csr(CVMX_CIU_PP_RST, 0);
337 } 337 }
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 4e36b6e1869c..43e0ba24470c 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y
17CONFIG_IKCONFIG_PROC=y 17CONFIG_IKCONFIG_PROC=y
18CONFIG_LOG_BUF_SHIFT=14 18CONFIG_LOG_BUF_SHIFT=14
19CONFIG_CGROUPS=y 19CONFIG_CGROUPS=y
20CONFIG_MEMCG=y
21CONFIG_CGROUP_SCHED=y
20CONFIG_CGROUP_FREEZER=y 22CONFIG_CGROUP_FREEZER=y
21CONFIG_CGROUP_DEVICE=y
22CONFIG_CPUSETS=y 23CONFIG_CPUSETS=y
24CONFIG_CGROUP_DEVICE=y
23CONFIG_CGROUP_CPUACCT=y 25CONFIG_CGROUP_CPUACCT=y
24CONFIG_MEMCG=y
25CONFIG_MEMCG_KMEM=y
26CONFIG_CGROUP_SCHED=y
27CONFIG_NAMESPACES=y 26CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 27CONFIG_USER_NS=y
29CONFIG_CC_OPTIMIZE_FOR_SIZE=y 28CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y
52# CONFIG_ALLOW_DEV_COREDUMP is not set 51# CONFIG_ALLOW_DEV_COREDUMP is not set
53CONFIG_DMA_CMA=y 52CONFIG_DMA_CMA=y
54CONFIG_CMA_SIZE_MBYTES=32 53CONFIG_CMA_SIZE_MBYTES=32
54CONFIG_MTD=y
55CONFIG_MTD_NAND=y
56CONFIG_MTD_NAND_JZ4780=y
57CONFIG_MTD_UBI=y
58CONFIG_MTD_UBI_FASTMAP=y
55CONFIG_NETDEVICES=y 59CONFIG_NETDEVICES=y
56# CONFIG_NET_VENDOR_ARC is not set 60# CONFIG_NET_VENDOR_ARC is not set
57# CONFIG_NET_CADENCE is not set 61# CONFIG_NET_CADENCE is not set
@@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y
103# CONFIG_PROC_PAGE_MONITOR is not set 107# CONFIG_PROC_PAGE_MONITOR is not set
104CONFIG_TMPFS=y 108CONFIG_TMPFS=y
105CONFIG_CONFIGFS_FS=y 109CONFIG_CONFIGFS_FS=y
106# CONFIG_MISC_FILESYSTEMS is not set 110CONFIG_UBIFS_FS=y
107# CONFIG_NETWORK_FILESYSTEMS is not set 111# CONFIG_NETWORK_FILESYSTEMS is not set
108CONFIG_NLS=y 112CONFIG_NLS=y
109CONFIG_NLS_CODEPAGE_437=y 113CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 8c6f508e59de..d7b99180c6e1 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -5,7 +5,7 @@
5 * Written by Ralf Baechle and Andreas Busse, modified for DECstation 5 * Written by Ralf Baechle and Andreas Busse, modified for DECstation
6 * support by Paul Antoine and Harald Koerfgen. 6 * support by Paul Antoine and Harald Koerfgen.
7 * 7 *
8 * completly rewritten: 8 * completely rewritten:
9 * Copyright (C) 1998 Harald Koerfgen 9 * Copyright (C) 1998 Harald Koerfgen
10 * 10 *
11 * Rewritten extensively for controller-driven IRQ support 11 * Rewritten extensively for controller-driven IRQ support
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 5537b94572b2..0d75b5a0bad4 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -9,7 +9,7 @@
9 * PROM library functions for acquiring/using memory descriptors given to us 9 * PROM library functions for acquiring/using memory descriptors given to us
10 * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set 10 * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
11 * because on some machines like SGI IP27 the ARC memory configuration data 11 * because on some machines like SGI IP27 the ARC memory configuration data
12 * completly bogus and alternate easier to use mechanisms are available. 12 * completely bogus and alternate easier to use mechanisms are available.
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index e7dc785a91ca..af12c1f9f1a8 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -102,7 +102,7 @@ extern void cpu_probe(void);
102extern void cpu_report(void); 102extern void cpu_report(void);
103 103
104extern const char *__cpu_name[]; 104extern const char *__cpu_name[];
105#define cpu_name_string() __cpu_name[smp_processor_id()] 105#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
106 106
107struct seq_file; 107struct seq_file;
108struct notifier_block; 108struct notifier_block;
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index cf92fe733995..c4873e8594ef 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -141,7 +141,7 @@ octeon_main_processor:
141.endm 141.endm
142 142
143/* 143/*
144 * Do SMP slave processor setup necessary before we can savely execute C code. 144 * Do SMP slave processor setup necessary before we can safely execute C code.
145 */ 145 */
146 .macro smp_slave_setup 146 .macro smp_slave_setup
147 .endm 147 .endm
diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
index 13b0751b010a..a229297c880b 100644
--- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
@@ -16,7 +16,7 @@
16 .endm 16 .endm
17 17
18/* 18/*
19 * Do SMP slave processor setup necessary before we can savely execute C code. 19 * Do SMP slave processor setup necessary before we can safely execute C code.
20 */ 20 */
21 .macro smp_slave_setup 21 .macro smp_slave_setup
22 .endm 22 .endm
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index cf4384bfa846..b0b7261ff3ad 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -11,7 +11,7 @@
11#define __ASM_MACH_IP27_IRQ_H 11#define __ASM_MACH_IP27_IRQ_H
12 12
13/* 13/*
14 * A hardwired interrupt number is completly stupid for this system - a 14 * A hardwired interrupt number is completely stupid for this system - a
15 * large configuration might have thousands if not tenthousands of 15 * large configuration might have thousands if not tenthousands of
16 * interrupts. 16 * interrupts.
17 */ 17 */
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index b087cb83da3a..f992c1db876b 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -81,7 +81,7 @@
81 .endm 81 .endm
82 82
83/* 83/*
84 * Do SMP slave processor setup necessary before we can savely execute C code. 84 * Do SMP slave processor setup necessary before we can safely execute C code.
85 */ 85 */
86 .macro smp_slave_setup 86 .macro smp_slave_setup
87 GET_NASID_ASM t1 87 GET_NASID_ASM t1
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index bf8c3e1860e7..7c7708a23baa 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -27,7 +27,7 @@ enum jz_gpio_function {
27 27
28/* 28/*
29 Usually a driver for a SoC component has to request several gpio pins and 29 Usually a driver for a SoC component has to request several gpio pins and
30 configure them as funcion pins. 30 configure them as function pins.
31 jz_gpio_bulk_request can be used to ease this process. 31 jz_gpio_bulk_request can be used to ease this process.
32 Usually one would do something like: 32 Usually one would do something like:
33 33
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b196825a1de9..d4635391c36a 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base;
28 * This function returns the physical base address of the Coherence Manager 28 * This function returns the physical base address of the Coherence Manager
29 * global control block, or 0 if no Coherence Manager is present. It provides 29 * global control block, or 0 if no Coherence Manager is present. It provides
30 * a default implementation which reads the CMGCRBase register where available, 30 * a default implementation which reads the CMGCRBase register where available,
31 * and may be overriden by platforms which determine this address in a 31 * and may be overridden by platforms which determine this address in a
32 * different way by defining a function with the same prototype except for the 32 * different way by defining a function with the same prototype except for the
33 * name mips_cm_phys_base (without underscores). 33 * name mips_cm_phys_base (without underscores).
34 */ 34 */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
index 1f6ea8352ca9..20621e1ca238 100644
--- a/arch/mips/include/asm/mips-r2-to-r6-emul.h
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -79,7 +79,7 @@ struct r2_decoder_table {
79}; 79};
80 80
81 81
82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
83 const char *str); 83 const char *str);
84 84
85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR 85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index f7dd17d0dc22..f4f1996e0fac 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -33,7 +33,7 @@
33/* Packet buffers */ 33/* Packet buffers */
34#define CVMX_FPA_PACKET_POOL (0) 34#define CVMX_FPA_PACKET_POOL (0)
35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE 35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
36/* Work queue entrys */ 36/* Work queue entries */
37#define CVMX_FPA_WQE_POOL (1) 37#define CVMX_FPA_WQE_POOL (1)
38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE 38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
39/* PKO queue command buffers */ 39/* PKO queue command buffers */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 19e139c9f337..3e982e0c397e 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -189,7 +189,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
189static inline void *cvmx_phys_to_ptr(uint64_t physical_address) 189static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
190{ 190{
191 if (sizeof(void *) == 8) { 191 if (sizeof(void *) == 8) {
192 /* Just set the top bit, avoiding any TLB uglyness */ 192 /* Just set the top bit, avoiding any TLB ugliness */
193 return CASTPTR(void, 193 return CASTPTR(void,
194 CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 194 CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
195 physical_address)); 195 physical_address));
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 8d7a63b52ac7..3206245d1ed6 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s {
269 union { 269 union {
270 u32 cmd_word; 270 u32 cmd_word;
271 struct { 271 struct {
272 u32 didn:4, /* Destination ID */ 272 u32 didn:4, /* Destination ID */
273 sidn:4, /* Source ID */ 273 sidn:4, /* Source ID */
274 pactyp:4, /* Packet type */ 274 pactyp:4, /* Packet type */
275 tnum:5, /* Trans Number */ 275 tnum:5, /* Trans Number */
276 coh:1, /* Coh Transacti */ 276 coh:1, /* Coh Transaction */
277 ds:2, /* Data size */ 277 ds:2, /* Data size */
278 gbr:1, /* GBR enable */ 278 gbr:1, /* GBR enable */
279 vbpm:1, /* VBPM message */ 279 vbpm:1, /* VBPM message */
280 error:1, /* Error occurred */ 280 error:1, /* Error occurred */
281 barr:1, /* Barrier op */ 281 barr:1, /* Barrier op */
282 rsvd:8; 282 rsvd:8;
283 } berr_st; 283 } berr_st;
284 } berr_un; 284 } berr_un;
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 59920b345942..4a9c99050c13 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -147,7 +147,7 @@ struct hpc3_ethregs {
147#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */ 147#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
148#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */ 148#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
149#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */ 149#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
150#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */ 150#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
151 151
152 u32 _unused2[0x1000/4 - 8]; /* padding */ 152 u32 _unused2[0x1000/4 - 8]; /* padding */
153 153
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 26ddfff28c8e..105a9479ac5f 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -144,7 +144,7 @@ struct linux_tinfo {
144struct linux_vdirent { 144struct linux_vdirent {
145 ULONG namelen; 145 ULONG namelen;
146 unsigned char attr; 146 unsigned char attr;
147 char fname[32]; /* XXX imperical, should be a define */ 147 char fname[32]; /* XXX empirical, should be a define */
148}; 148};
149 149
150/* Other stuff for files. */ 150/* Other stuff for files. */
@@ -179,7 +179,7 @@ struct linux_finfo {
179 enum linux_devtypes dtype; 179 enum linux_devtypes dtype;
180 unsigned long namelen; 180 unsigned long namelen;
181 unsigned char attr; 181 unsigned char attr;
182 char name[32]; /* XXX imperical, should be define */ 182 char name[32]; /* XXX empirical, should be define */
183}; 183};
184 184
185/* This describes the vector containing function pointers to the ARC 185/* This describes the vector containing function pointers to the ARC
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index e33f0363235b..feb385180f87 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -355,7 +355,7 @@ struct ioc3_etxd {
355#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */ 355#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
356#define SSCR_RESET 0x80000000 /* reset DMA channels */ 356#define SSCR_RESET 0x80000000 /* reset DMA channels */
357 357
358/* all producer/comsumer pointers are the same bitfield */ 358/* all producer/consumer pointers are the same bitfield */
359#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */ 359#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
360#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */ 360#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
361#define PROD_CONS_PTR_OFF 3 361#define PROD_CONS_PTR_OFF 3
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 5998b13e9764..57ece90f8cf1 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -628,7 +628,7 @@ typedef union h1_icrbb_u {
628/* 628/*
629 * Values for field imsgtype 629 * Values for field imsgtype
630 */ 630 */
631#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ 631#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
632#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ 632#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
633#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */ 633#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
634#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ 634#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 095ecafe6bd3..7f109d4f64a4 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -95,7 +95,7 @@ static inline bool eva_kernel_access(void)
95} 95}
96 96
97/* 97/*
98 * Is a address valid? This does a straighforward calculation rather 98 * Is a address valid? This does a straightforward calculation rather
99 * than tests. 99 * than tests.
100 * 100 *
101 * Address valid if: 101 * Address valid if:
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3129795de940..24ad815c7f38 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,16 +381,18 @@
381#define __NR_membarrier (__NR_Linux + 358) 381#define __NR_membarrier (__NR_Linux + 358)
382#define __NR_mlock2 (__NR_Linux + 359) 382#define __NR_mlock2 (__NR_Linux + 359)
383#define __NR_copy_file_range (__NR_Linux + 360) 383#define __NR_copy_file_range (__NR_Linux + 360)
384#define __NR_preadv2 (__NR_Linux + 361)
385#define __NR_pwritev2 (__NR_Linux + 362)
384 386
385/* 387/*
386 * Offset of the last Linux o32 flavoured syscall 388 * Offset of the last Linux o32 flavoured syscall
387 */ 389 */
388#define __NR_Linux_syscalls 360 390#define __NR_Linux_syscalls 362
389 391
390#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 392#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
391 393
392#define __NR_O32_Linux 4000 394#define __NR_O32_Linux 4000
393#define __NR_O32_Linux_syscalls 360 395#define __NR_O32_Linux_syscalls 362
394 396
395#if _MIPS_SIM == _MIPS_SIM_ABI64 397#if _MIPS_SIM == _MIPS_SIM_ABI64
396 398
@@ -719,16 +721,18 @@
719#define __NR_membarrier (__NR_Linux + 318) 721#define __NR_membarrier (__NR_Linux + 318)
720#define __NR_mlock2 (__NR_Linux + 319) 722#define __NR_mlock2 (__NR_Linux + 319)
721#define __NR_copy_file_range (__NR_Linux + 320) 723#define __NR_copy_file_range (__NR_Linux + 320)
724#define __NR_preadv2 (__NR_Linux + 321)
725#define __NR_pwritev2 (__NR_Linux + 322)
722 726
723/* 727/*
724 * Offset of the last Linux 64-bit flavoured syscall 728 * Offset of the last Linux 64-bit flavoured syscall
725 */ 729 */
726#define __NR_Linux_syscalls 320 730#define __NR_Linux_syscalls 322
727 731
728#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 732#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
729 733
730#define __NR_64_Linux 5000 734#define __NR_64_Linux 5000
731#define __NR_64_Linux_syscalls 320 735#define __NR_64_Linux_syscalls 322
732 736
733#if _MIPS_SIM == _MIPS_SIM_NABI32 737#if _MIPS_SIM == _MIPS_SIM_NABI32
734 738
@@ -1061,15 +1065,17 @@
1061#define __NR_membarrier (__NR_Linux + 322) 1065#define __NR_membarrier (__NR_Linux + 322)
1062#define __NR_mlock2 (__NR_Linux + 323) 1066#define __NR_mlock2 (__NR_Linux + 323)
1063#define __NR_copy_file_range (__NR_Linux + 324) 1067#define __NR_copy_file_range (__NR_Linux + 324)
1068#define __NR_preadv2 (__NR_Linux + 325)
1069#define __NR_pwritev2 (__NR_Linux + 326)
1064 1070
1065/* 1071/*
1066 * Offset of the last N32 flavoured syscall 1072 * Offset of the last N32 flavoured syscall
1067 */ 1073 */
1068#define __NR_Linux_syscalls 324 1074#define __NR_Linux_syscalls 326
1069 1075
1070#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1076#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1071 1077
1072#define __NR_N32_Linux 6000 1078#define __NR_N32_Linux 6000
1073#define __NR_N32_Linux_syscalls 324 1079#define __NR_N32_Linux_syscalls 326
1074 1080
1075#endif /* _UAPI_ASM_UNISTD_H */ 1081#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f43d4e..760217bbb2fa 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -24,7 +24,7 @@ static char *cm2_tr[8] = {
24 "0x04", "cpc", "0x06", "0x07" 24 "0x04", "cpc", "0x06", "0x07"
25}; 25};
26 26
27/* CM3 Tag ECC transation type */ 27/* CM3 Tag ECC transaction type */
28static char *cm3_tr[16] = { 28static char *cm3_tr[16] = {
29 [0x0] = "ReqNoData", 29 [0x0] = "ReqNoData",
30 [0x1] = "0x1", 30 [0x1] = "0x1",
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 1f5aac7f9ec3..3fff89ae760b 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -940,42 +940,42 @@ repeat:
940 switch (rt) { 940 switch (rt) {
941 case tgei_op: 941 case tgei_op:
942 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) 942 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
943 do_trap_or_bp(regs, 0, "TGEI"); 943 do_trap_or_bp(regs, 0, 0, "TGEI");
944 944
945 MIPS_R2_STATS(traps); 945 MIPS_R2_STATS(traps);
946 946
947 break; 947 break;
948 case tgeiu_op: 948 case tgeiu_op:
949 if (regs->regs[rs] >= MIPSInst_UIMM(inst)) 949 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
950 do_trap_or_bp(regs, 0, "TGEIU"); 950 do_trap_or_bp(regs, 0, 0, "TGEIU");
951 951
952 MIPS_R2_STATS(traps); 952 MIPS_R2_STATS(traps);
953 953
954 break; 954 break;
955 case tlti_op: 955 case tlti_op:
956 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) 956 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
957 do_trap_or_bp(regs, 0, "TLTI"); 957 do_trap_or_bp(regs, 0, 0, "TLTI");
958 958
959 MIPS_R2_STATS(traps); 959 MIPS_R2_STATS(traps);
960 960
961 break; 961 break;
962 case tltiu_op: 962 case tltiu_op:
963 if (regs->regs[rs] < MIPSInst_UIMM(inst)) 963 if (regs->regs[rs] < MIPSInst_UIMM(inst))
964 do_trap_or_bp(regs, 0, "TLTIU"); 964 do_trap_or_bp(regs, 0, 0, "TLTIU");
965 965
966 MIPS_R2_STATS(traps); 966 MIPS_R2_STATS(traps);
967 967
968 break; 968 break;
969 case teqi_op: 969 case teqi_op:
970 if (regs->regs[rs] == MIPSInst_SIMM(inst)) 970 if (regs->regs[rs] == MIPSInst_SIMM(inst))
971 do_trap_or_bp(regs, 0, "TEQI"); 971 do_trap_or_bp(regs, 0, 0, "TEQI");
972 972
973 MIPS_R2_STATS(traps); 973 MIPS_R2_STATS(traps);
974 974
975 break; 975 break;
976 case tnei_op: 976 case tnei_op:
977 if (regs->regs[rs] != MIPSInst_SIMM(inst)) 977 if (regs->regs[rs] != MIPSInst_SIMM(inst))
978 do_trap_or_bp(regs, 0, "TNEI"); 978 do_trap_or_bp(regs, 0, 0, "TNEI");
979 979
980 MIPS_R2_STATS(traps); 980 MIPS_R2_STATS(traps);
981 981
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 2b70723071c3..9083d63b765c 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -109,9 +109,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
109 struct module *me) 109 struct module *me)
110{ 110{
111 Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; 111 Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
112 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
112 Elf_Sym *sym; 113 Elf_Sym *sym;
113 u32 *location; 114 u32 *location;
114 unsigned int i; 115 unsigned int i, type;
115 Elf_Addr v; 116 Elf_Addr v;
116 int res; 117 int res;
117 118
@@ -134,9 +135,21 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
134 return -ENOENT; 135 return -ENOENT;
135 } 136 }
136 137
137 v = sym->st_value + rel[i].r_addend; 138 type = ELF_MIPS_R_TYPE(rel[i]);
139
140 if (type < ARRAY_SIZE(reloc_handlers_rela))
141 handler = reloc_handlers_rela[type];
142 else
143 handler = NULL;
138 144
139 res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); 145 if (!handler) {
146 pr_err("%s: Unknown relocation type %u\n",
147 me->name, type);
148 return -EINVAL;
149 }
150
151 v = sym->st_value + rel[i].r_addend;
152 res = handler(me, location, v);
140 if (res) 153 if (res)
141 return res; 154 return res;
142 } 155 }
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 1833f5171ccd..f9b2936d598d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -197,9 +197,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
197 struct module *me) 197 struct module *me)
198{ 198{
199 Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; 199 Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
200 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
200 Elf_Sym *sym; 201 Elf_Sym *sym;
201 u32 *location; 202 u32 *location;
202 unsigned int i; 203 unsigned int i, type;
203 Elf_Addr v; 204 Elf_Addr v;
204 int res; 205 int res;
205 206
@@ -223,9 +224,21 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
223 return -ENOENT; 224 return -ENOENT;
224 } 225 }
225 226
226 v = sym->st_value; 227 type = ELF_MIPS_R_TYPE(rel[i]);
228
229 if (type < ARRAY_SIZE(reloc_handlers_rel))
230 handler = reloc_handlers_rel[type];
231 else
232 handler = NULL;
227 233
228 res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); 234 if (!handler) {
235 pr_err("%s: Unknown relocation type %u\n",
236 me->name, type);
237 return -EINVAL;
238 }
239
240 v = sym->st_value;
241 res = handler(me, location, v);
229 if (res) 242 if (res)
230 return res; 243 return res;
231 } 244 }
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d7b8dd43147a..9bc1191b1ab0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -530,7 +530,7 @@ static void mipspmu_enable(struct pmu *pmu)
530 530
531/* 531/*
532 * MIPS performance counters can be per-TC. The control registers can 532 * MIPS performance counters can be per-TC. The control registers can
533 * not be directly accessed accross CPUs. Hence if we want to do global 533 * not be directly accessed across CPUs. Hence if we want to do global
534 * control, we need cross CPU calls. on_each_cpu() can help us, but we 534 * control, we need cross CPU calls. on_each_cpu() can help us, but we
535 * can not make sure this function is called with interrupts enabled. So 535 * can not make sure this function is called with interrupts enabled. So
536 * here we pause local counters and then grab a rwlock and leave the 536 * here we pause local counters and then grab a rwlock and leave the
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289977cc..fa3f9ebad8f4 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -472,7 +472,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
472 /* 472 /*
473 * Disable all but self interventions. The load from COHCTL is defined 473 * Disable all but self interventions. The load from COHCTL is defined
474 * by the interAptiv & proAptiv SUMs as ensuring that the operation 474 * by the interAptiv & proAptiv SUMs as ensuring that the operation
475 * resulting from the preceeding store is complete. 475 * resulting from the preceding store is complete.
476 */ 476 */
477 uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); 477 uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
478 uasm_i_sw(&p, t0, 0, r_pcohctl); 478 uasm_i_sw(&p, t0, 0, r_pcohctl);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eddd5fd6fdfa..92880cee449e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -615,7 +615,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
615 * allows us to only worry about whether an FP mode switch is in 615 * allows us to only worry about whether an FP mode switch is in
616 * progress when FP is first used in a tasks time slice. Pretty much all 616 * progress when FP is first used in a tasks time slice. Pretty much all
617 * of the mode switch overhead can thus be confined to cases where mode 617 * of the mode switch overhead can thus be confined to cases where mode
618 * switches are actually occuring. That is, to here. However for the 618 * switches are actually occurring. That is, to here. However for the
619 * thread performing the mode switch it may take a while... 619 * thread performing the mode switch it may take a while...
620 */ 620 */
621 if (num_online_cpus() > 1) { 621 if (num_online_cpus() > 1) {
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a56317444bda..d01fe53a6638 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -596,3 +596,5 @@ EXPORT(sys_call_table)
596 PTR sys_membarrier 596 PTR sys_membarrier
597 PTR sys_mlock2 597 PTR sys_mlock2
598 PTR sys_copy_file_range /* 4360 */ 598 PTR sys_copy_file_range /* 4360 */
599 PTR sys_preadv2
600 PTR sys_pwritev2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b2dc14610d0..6b73ecc02597 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -434,4 +434,6 @@ EXPORT(sys_call_table)
434 PTR sys_membarrier 434 PTR sys_membarrier
435 PTR sys_mlock2 435 PTR sys_mlock2
436 PTR sys_copy_file_range /* 5320 */ 436 PTR sys_copy_file_range /* 5320 */
437 PTR sys_preadv2
438 PTR sys_pwritev2
437 .size sys_call_table,.-sys_call_table 439 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2bf5c8593d91..71f99d5f7a06 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -424,4 +424,6 @@ EXPORT(sysn32_call_table)
424 PTR sys_membarrier 424 PTR sys_membarrier
425 PTR sys_mlock2 425 PTR sys_mlock2
426 PTR sys_copy_file_range 426 PTR sys_copy_file_range
427 PTR compat_sys_preadv2 /* 6325 */
428 PTR compat_sys_pwritev2
427 .size sysn32_call_table,.-sysn32_call_table 429 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index c5b759e584c7..91b43eea2d5a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -579,4 +579,6 @@ EXPORT(sys32_call_table)
579 PTR sys_membarrier 579 PTR sys_membarrier
580 PTR sys_mlock2 580 PTR sys_mlock2
581 PTR sys_copy_file_range /* 4360 */ 581 PTR sys_copy_file_range /* 4360 */
582 PTR compat_sys_preadv2
583 PTR compat_sys_pwritev2
582 .size sys32_call_table,.-sys32_call_table 584 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 37708d9af638..27cb638f0824 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -243,6 +243,18 @@ static int __init mips_smp_ipi_init(void)
243 struct irq_domain *ipidomain; 243 struct irq_domain *ipidomain;
244 struct device_node *node; 244 struct device_node *node;
245 245
246 /*
247 * In some cases like qemu-malta, it is desired to try SMP with
248 * a single core. Qemu-malta has no GIC, so an attempt to set any IPIs
249 * would cause a BUG_ON() to be triggered since there's no ipidomain.
250 *
251 * Since for a single core system IPIs aren't required really, skip the
252 * initialisation which should generally keep any such configurations
253 * happy and only fail hard when trying to truly run SMP.
254 */
255 if (cpumask_weight(cpu_possible_mask) == 1)
256 return 0;
257
246 node = of_irq_find_parent(of_root); 258 node = of_irq_find_parent(of_root);
247 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); 259 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
248 260
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bf14da9f3e33..ae0c89d23ad7 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -56,6 +56,7 @@
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/ptrace.h> 57#include <asm/ptrace.h>
58#include <asm/sections.h> 58#include <asm/sections.h>
59#include <asm/siginfo.h>
59#include <asm/tlbdebug.h> 60#include <asm/tlbdebug.h>
60#include <asm/traps.h> 61#include <asm/traps.h>
61#include <asm/uaccess.h> 62#include <asm/uaccess.h>
@@ -871,7 +872,7 @@ out:
871 exception_exit(prev_state); 872 exception_exit(prev_state);
872} 873}
873 874
874void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 875void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
875 const char *str) 876 const char *str)
876{ 877{
877 siginfo_t info = { 0 }; 878 siginfo_t info = { 0 };
@@ -928,7 +929,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
928 default: 929 default:
929 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 930 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
930 die_if_kernel(b, regs); 931 die_if_kernel(b, regs);
931 force_sig(SIGTRAP, current); 932 if (si_code) {
933 info.si_signo = SIGTRAP;
934 info.si_code = si_code;
935 force_sig_info(SIGTRAP, &info, current);
936 } else {
937 force_sig(SIGTRAP, current);
938 }
932 } 939 }
933} 940}
934 941
@@ -1012,7 +1019,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
1012 break; 1019 break;
1013 } 1020 }
1014 1021
1015 do_trap_or_bp(regs, bcode, "Break"); 1022 do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1016 1023
1017out: 1024out:
1018 set_fs(seg); 1025 set_fs(seg);
@@ -1054,7 +1061,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
1054 tcode = (opcode >> 6) & ((1 << 10) - 1); 1061 tcode = (opcode >> 6) & ((1 << 10) - 1);
1055 } 1062 }
1056 1063
1057 do_trap_or_bp(regs, tcode, "Trap"); 1064 do_trap_or_bp(regs, tcode, 0, "Trap");
1058 1065
1059out: 1066out:
1060 set_fs(seg); 1067 set_fs(seg);
@@ -1115,19 +1122,7 @@ no_r2_instr:
1115 if (unlikely(compute_return_epc(regs) < 0)) 1122 if (unlikely(compute_return_epc(regs) < 0))
1116 goto out; 1123 goto out;
1117 1124
1118 if (get_isa16_mode(regs->cp0_epc)) { 1125 if (!get_isa16_mode(regs->cp0_epc)) {
1119 unsigned short mmop[2] = { 0 };
1120
1121 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1122 status = SIGSEGV;
1123 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1124 status = SIGSEGV;
1125 opcode = mmop[0];
1126 opcode = (opcode << 16) | mmop[1];
1127
1128 if (status < 0)
1129 status = simulate_rdhwr_mm(regs, opcode);
1130 } else {
1131 if (unlikely(get_user(opcode, epc) < 0)) 1126 if (unlikely(get_user(opcode, epc) < 0))
1132 status = SIGSEGV; 1127 status = SIGSEGV;
1133 1128
@@ -1142,6 +1137,18 @@ no_r2_instr:
1142 1137
1143 if (status < 0) 1138 if (status < 0)
1144 status = simulate_fp(regs, opcode, old_epc, old31); 1139 status = simulate_fp(regs, opcode, old_epc, old31);
1140 } else if (cpu_has_mmips) {
1141 unsigned short mmop[2] = { 0 };
1142
1143 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1144 status = SIGSEGV;
1145 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1146 status = SIGSEGV;
1147 opcode = mmop[0];
1148 opcode = (opcode << 16) | mmop[1];
1149
1150 if (status < 0)
1151 status = simulate_rdhwr_mm(regs, opcode);
1145 } 1152 }
1146 1153
1147 if (status < 0) 1154 if (status < 0)
@@ -1492,6 +1499,7 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
1492 */ 1499 */
1493asmlinkage void do_watch(struct pt_regs *regs) 1500asmlinkage void do_watch(struct pt_regs *regs)
1494{ 1501{
1502 siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1495 enum ctx_state prev_state; 1503 enum ctx_state prev_state;
1496 u32 cause; 1504 u32 cause;
1497 1505
@@ -1512,7 +1520,7 @@ asmlinkage void do_watch(struct pt_regs *regs)
1512 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { 1520 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1513 mips_read_watch_registers(); 1521 mips_read_watch_registers();
1514 local_irq_enable(); 1522 local_irq_enable();
1515 force_sig(SIGTRAP, current); 1523 force_sig_info(SIGTRAP, &info, current);
1516 } else { 1524 } else {
1517 mips_clear_watch_registers(); 1525 mips_clear_watch_registers();
1518 local_irq_enable(); 1526 local_irq_enable();
@@ -2214,7 +2222,7 @@ void __init trap_init(void)
2214 2222
2215 /* 2223 /*
2216 * Copy the generic exception handlers to their final destination. 2224 * Copy the generic exception handlers to their final destination.
2217 * This will be overriden later as suitable for a particular 2225 * This will be overridden later as suitable for a particular
2218 * configuration. 2226 * configuration.
2219 */ 2227 */
2220 set_handler(0x180, &except_vec3_generic, 0x80); 2228 set_handler(0x180, &except_vec3_generic, 0x80);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 490cea569d57..5c62065cbf22 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
885{ 885{
886 union mips_instruction insn; 886 union mips_instruction insn;
887 unsigned long value; 887 unsigned long value;
888 unsigned int res; 888 unsigned int res, preempted;
889 unsigned long origpc; 889 unsigned long origpc;
890 unsigned long orig31; 890 unsigned long orig31;
891 void __user *fault_addr = NULL; 891 void __user *fault_addr = NULL;
@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1226 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) 1226 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
1227 goto sigbus; 1227 goto sigbus;
1228 1228
1229 /* 1229 do {
1230 * Disable preemption to avoid a race between copying 1230 /*
1231 * state from userland, migrating to another CPU and 1231 * If we have live MSA context keep track of
1232 * updating the hardware vector register below. 1232 * whether we get preempted in order to avoid
1233 */ 1233 * the register context we load being clobbered
1234 preempt_disable(); 1234 * by the live context as it's saved during
1235 1235 * preemption. If we don't have live context
1236 res = __copy_from_user_inatomic(fpr, addr, 1236 * then it can't be saved to clobber the value
1237 sizeof(*fpr)); 1237 * we load.
1238 if (res) 1238 */
1239 goto fault; 1239 preempted = test_thread_flag(TIF_USEDMSA);
1240 1240
1241 /* 1241 res = __copy_from_user_inatomic(fpr, addr,
1242 * Update the hardware register if it is in use by the 1242 sizeof(*fpr));
1243 * task in this quantum, in order to avoid having to 1243 if (res)
1244 * save & restore the whole vector context. 1244 goto fault;
1245 */
1246 if (test_thread_flag(TIF_USEDMSA))
1247 write_msa_wr(wd, fpr, df);
1248 1245
1249 preempt_enable(); 1246 /*
1247 * Update the hardware register if it is in use
1248 * by the task in this quantum, in order to
1249 * avoid having to save & restore the whole
1250 * vector context.
1251 */
1252 preempt_disable();
1253 if (test_thread_flag(TIF_USEDMSA)) {
1254 write_msa_wr(wd, fpr, df);
1255 preempted = 0;
1256 }
1257 preempt_enable();
1258 } while (preempted);
1250 break; 1259 break;
1251 1260
1252 case msa_st_op: 1261 case msa_st_op:
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a08c43946247..e0e1d0a611fc 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -632,7 +632,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
632 632
633 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); 633 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
634 634
635 /* Alocate new kernel and user ASIDs if needed */ 635 /* Allocate new kernel and user ASIDs if needed */
636 636
637 local_irq_save(flags); 637 local_irq_save(flags);
638 638
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index ad988000563f..c4038d2a724c 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -500,7 +500,7 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
500 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); 500 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
501 501
502 /* 502 /*
503 * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) 503 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
504 */ 504 */
505 kvm_write_c0_guest_intctl(cop0, 0xFC000000); 505 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
506 506
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index ad3c73436777..47d26c805eac 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -97,7 +97,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
97{ 97{
98 assert(xm); /* we don't gen exact zeros (probably should) */ 98 assert(xm); /* we don't gen exact zeros (probably should) */
99 99
100 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */ 100 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */
101 assert(xm & (DP_HIDDEN_BIT << 3)); 101 assert(xm & (DP_HIDDEN_BIT << 3));
102 102
103 if (xe < DP_EMIN) { 103 if (xe < DP_EMIN) {
@@ -165,7 +165,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
165 /* strip grs bits */ 165 /* strip grs bits */
166 xm >>= 3; 166 xm >>= 3;
167 167
168 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 168 assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
169 assert(xe >= DP_EMIN); 169 assert(xe >= DP_EMIN);
170 170
171 if (xe > DP_EMAX) { 171 if (xe > DP_EMAX) {
@@ -198,7 +198,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
198 ieee754_setcx(IEEE754_UNDERFLOW); 198 ieee754_setcx(IEEE754_UNDERFLOW);
199 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); 199 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
200 } else { 200 } else {
201 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 201 assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
202 assert(xm & DP_HIDDEN_BIT); 202 assert(xm & DP_HIDDEN_BIT);
203 203
204 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 204 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index def00ffc50fc..e0b2c450b963 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -97,7 +97,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
97{ 97{
98 assert(xm); /* we don't gen exact zeros (probably should) */ 98 assert(xm); /* we don't gen exact zeros (probably should) */
99 99
100 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */ 100 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */
101 assert(xm & (SP_HIDDEN_BIT << 3)); 101 assert(xm & (SP_HIDDEN_BIT << 3));
102 102
103 if (xe < SP_EMIN) { 103 if (xe < SP_EMIN) {
@@ -163,7 +163,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
163 /* strip grs bits */ 163 /* strip grs bits */
164 xm >>= 3; 164 xm >>= 3;
165 165
166 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 166 assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
167 assert(xe >= SP_EMIN); 167 assert(xe >= SP_EMIN);
168 168
169 if (xe > SP_EMAX) { 169 if (xe > SP_EMAX) {
@@ -196,7 +196,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
196 ieee754_setcx(IEEE754_UNDERFLOW); 196 ieee754_setcx(IEEE754_UNDERFLOW);
197 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); 197 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
198 } else { 198 } else {
199 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 199 assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
200 assert(xm & SP_HIDDEN_BIT); 200 assert(xm & SP_HIDDEN_BIT);
201 201
202 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); 202 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index dc7c5a5214a9..026cb59a914d 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void)
158 return 1; 158 return 1;
159} 159}
160 160
161/* XXX Check with wje if the Indy caches can differenciate between 161/* XXX Check with wje if the Indy caches can differentiate between
162 writeback + invalidate and just invalidate. */ 162 writeback + invalidate and just invalidate. */
163static struct bcache_ops indy_sc_ops = { 163static struct bcache_ops indy_sc_ops = {
164 .bc_enable = indy_sc_enable, 164 .bc_enable = indy_sc_enable,
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5037d5868cef..c17d7627f872 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -19,6 +19,7 @@
19#include <asm/cpu.h> 19#include <asm/cpu.h>
20#include <asm/cpu-type.h> 20#include <asm/cpu-type.h>
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#include <asm/hazards.h>
22#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
23#include <asm/pgtable.h> 24#include <asm/pgtable.h>
24#include <asm/tlb.h> 25#include <asm/tlb.h>
@@ -486,6 +487,10 @@ static void r4k_tlb_configure(void)
486 * be set to fixed-size pages. 487 * be set to fixed-size pages.
487 */ 488 */
488 write_c0_pagemask(PM_DEFAULT_MASK); 489 write_c0_pagemask(PM_DEFAULT_MASK);
490 back_to_back_c0_hazard();
491 if (read_c0_pagemask() != PM_DEFAULT_MASK)
492 panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
493
489 write_c0_wired(0); 494 write_c0_wired(0);
490 if (current_cpu_type() == CPU_R10000 || 495 if (current_cpu_type() == CPU_R10000 ||
491 current_cpu_type() == CPU_R12000 || 496 current_cpu_type() == CPU_R12000 ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5a04b6f5c6fb..84c6e3fda84a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -12,7 +12,7 @@
12 * Copyright (C) 2011 MIPS Technologies, Inc. 12 * Copyright (C) 2011 MIPS Technologies, Inc.
13 * 13 *
14 * ... and the days got worse and worse and now you see 14 * ... and the days got worse and worse and now you see
15 * I've gone completly out of my mind. 15 * I've gone completely out of my mind.
16 * 16 *
17 * They're coming to take me a away haha 17 * They're coming to take me a away haha
18 * they're coming to take me a away hoho hihi haha 18 * they're coming to take me a away hoho hihi haha
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 8d0eb2643248..f1f88291451e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2000 by Silicon Graphics, Inc. 7 * Copyright (C) 2000 by Silicon Graphics, Inc.
8 * Copyright (C) 2004 by Christoph Hellwig 8 * Copyright (C) 2004 by Christoph Hellwig
9 * 9 *
10 * On SGI IP27 the ARC memory configuration data is completly bogus but 10 * On SGI IP27 the ARC memory configuration data is completely bogus but
11 * alternate easier to use mechanisms are available. 11 * alternate easier to use mechanisms are available.
12 */ 12 */
13#include <linux/init.h> 13#include <linux/init.h>
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 718dd197909f..367c5426157b 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -97,8 +97,7 @@ static int __init early_init_dt_scan_serial(unsigned long node,
97 return 0; 97 return 0;
98#endif 98#endif
99 99
100 *addr64 = fdt_translate_address((const void *)initial_boot_params, 100 *addr64 = of_flat_dt_translate_address(node);
101 node);
102 101
103 return *addr64 == OF_BAD_ADDR ? 0 : 1; 102 return *addr64 == OF_BAD_ADDR ? 0 : 1;
104} 103}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 989fa14147a9..bd3c873951a1 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -30,6 +30,7 @@ config PARISC
30 select TTY # Needed for pdc_cons.c 30 select TTY # Needed for pdc_cons.c
31 select HAVE_DEBUG_STACKOVERFLOW 31 select HAVE_DEBUG_STACKOVERFLOW
32 select HAVE_ARCH_AUDITSYSCALL 32 select HAVE_ARCH_AUDITSYSCALL
33 select HAVE_ARCH_SECCOMP_FILTER
33 select ARCH_NO_COHERENT_DMA_MMAP 34 select ARCH_NO_COHERENT_DMA_MMAP
34 35
35 help 36 help
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 0448a2c8eafb..3387307cc33e 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -183,6 +183,13 @@ typedef struct compat_siginfo {
183 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ 183 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
184 int _fd; 184 int _fd;
185 } _sigpoll; 185 } _sigpoll;
186
187 /* SIGSYS */
188 struct {
189 compat_uptr_t _call_addr; /* calling user insn */
190 int _syscall; /* triggering system call number */
191 compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
192 } _sigsys;
186 } _sifields; 193 } _sifields;
187} compat_siginfo_t; 194} compat_siginfo_t;
188 195
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index a5eba95d87fe..637ce8d6f375 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
39 } 39 }
40} 40}
41 41
42static inline void syscall_set_return_value(struct task_struct *task,
43 struct pt_regs *regs,
44 int error, long val)
45{
46 regs->gr[28] = error ? error : val;
47}
48
49static inline void syscall_rollback(struct task_struct *task,
50 struct pt_regs *regs)
51{
52 /* do nothing */
53}
54
42static inline int syscall_get_arch(void) 55static inline int syscall_get_arch(void)
43{ 56{
44 int arch = AUDIT_ARCH_PARISC; 57 int arch = AUDIT_ARCH_PARISC;
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index d4dd6e58682c..7955e43f3f3f 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -44,20 +44,18 @@ static inline long access_ok(int type, const void __user * addr,
44#define LDD_USER(ptr) BUILD_BUG() 44#define LDD_USER(ptr) BUILD_BUG()
45#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr) 45#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr)
46#define STD_USER(x, ptr) __put_user_asm64(x, ptr) 46#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
47#define ASM_WORD_INSN ".word\t"
48#else 47#else
49#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr) 48#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr)
50#define LDD_USER(ptr) __get_user_asm("ldd", ptr) 49#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
51#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr) 50#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr)
52#define STD_USER(x, ptr) __put_user_asm("std", x, ptr) 51#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
53#define ASM_WORD_INSN ".dword\t"
54#endif 52#endif
55 53
56/* 54/*
57 * The exception table contains two values: the first is an address 55 * The exception table contains two values: the first is the relative offset to
58 * for an instruction that is allowed to fault, and the second is 56 * the address of the instruction that is allowed to fault, and the second is
59 * the address to the fixup routine. Even on a 64bit kernel we could 57 * the relative offset to the address of the fixup routine. Since relative
60 * use a 32bit (unsigned int) address here. 58 * addresses are used, 32bit values are sufficient even on 64bit kernel.
61 */ 59 */
62 60
63#define ARCH_HAS_RELATIVE_EXTABLE 61#define ARCH_HAS_RELATIVE_EXTABLE
@@ -77,6 +75,7 @@ struct exception_table_entry {
77 */ 75 */
78struct exception_data { 76struct exception_data {
79 unsigned long fault_ip; 77 unsigned long fault_ip;
78 unsigned long fault_gp;
80 unsigned long fault_space; 79 unsigned long fault_space;
81 unsigned long fault_addr; 80 unsigned long fault_addr;
82}; 81};
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index d2f62570a7b1..78d30d2ea2d8 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -299,6 +299,7 @@ int main(void)
299#endif 299#endif
300 BLANK(); 300 BLANK();
301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
302 DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
302 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 303 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
303 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); 304 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
304 BLANK(); 305 BLANK();
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 91c2a39cd5aa..67001277256c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
319 if (!mapping) 319 if (!mapping)
320 return; 320 return;
321 321
322 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 322 pgoff = page->index;
323 323
324 /* We have carefully arranged in arch_get_unmapped_area() that 324 /* We have carefully arranged in arch_get_unmapped_area() that
325 * *any* mappings of a file are always congruently mapped (whether 325 * *any* mappings of a file are always congruently mapped (whether
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index b9d75d9fa9ac..a0ecdb4abcc8 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -660,6 +660,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
660 } 660 }
661 *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val); 661 *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
662 break; 662 break;
663 case R_PARISC_PCREL32:
664 /* 32-bit PC relative address */
665 *loc = val - dot - 8 + addend;
666 break;
663 667
664 default: 668 default:
665 printk(KERN_ERR "module %s: Unknown relocation: %u\n", 669 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
@@ -788,6 +792,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
788 CHECK_RELOC(val, 22); 792 CHECK_RELOC(val, 22);
789 *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val); 793 *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
790 break; 794 break;
795 case R_PARISC_PCREL32:
796 /* 32-bit PC relative address */
797 *loc = val - dot - 8 + addend;
798 break;
791 case R_PARISC_DIR64: 799 case R_PARISC_DIR64:
792 /* 64-bit effective address */ 800 /* 64-bit effective address */
793 *loc64 = val + addend; 801 *loc64 = val + addend;
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 568b2c61ea02..3cad8aadc69e 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
47EXPORT_SYMBOL(lclear_user); 47EXPORT_SYMBOL(lclear_user);
48EXPORT_SYMBOL(lstrnlen_user); 48EXPORT_SYMBOL(lstrnlen_user);
49 49
50/* Global fixups */ 50/* Global fixups - defined as int to avoid creation of function pointers */
51extern void fixup_get_user_skip_1(void); 51extern int fixup_get_user_skip_1;
52extern void fixup_get_user_skip_2(void); 52extern int fixup_get_user_skip_2;
53extern void fixup_put_user_skip_1(void); 53extern int fixup_put_user_skip_1;
54extern void fixup_put_user_skip_2(void); 54extern int fixup_put_user_skip_2;
55EXPORT_SYMBOL(fixup_get_user_skip_1); 55EXPORT_SYMBOL(fixup_get_user_skip_1);
56EXPORT_SYMBOL(fixup_get_user_skip_2); 56EXPORT_SYMBOL(fixup_get_user_skip_2);
57EXPORT_SYMBOL(fixup_put_user_skip_1); 57EXPORT_SYMBOL(fixup_put_user_skip_1);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index ce0b2b4075c7..8fb81a391599 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
270long do_syscall_trace_enter(struct pt_regs *regs) 270long do_syscall_trace_enter(struct pt_regs *regs)
271{ 271{
272 /* Do the secure computing check first. */ 272 /* Do the secure computing check first. */
273 secure_computing_strict(regs->gr[20]); 273 if (secure_computing() == -1)
274 return -1;
274 275
275 if (test_thread_flag(TIF_SYSCALL_TRACE) && 276 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
276 tracehook_report_syscall_entry(regs)) { 277 tracehook_report_syscall_entry(regs)) {
@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
296 regs->gr[23] & 0xffffffff); 297 regs->gr[23] & 0xffffffff);
297 298
298out: 299out:
299 return regs->gr[20]; 300 /*
301 * Sign extend the syscall number to 64bit since it may have been
302 * modified by a compat ptrace call
303 */
304 return (int) ((u32) regs->gr[20]);
300} 305}
301 306
302void do_syscall_trace_exit(struct pt_regs *regs) 307void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 984abbee71ca..c342b2e17492 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
371 val = (compat_int_t)from->si_int; 371 val = (compat_int_t)from->si_int;
372 err |= __put_user(val, &to->si_int); 372 err |= __put_user(val, &to->si_int);
373 break; 373 break;
374 case __SI_SYS >> 16:
375 err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
376 err |= __put_user(from->si_syscall, &to->si_syscall);
377 err |= __put_user(from->si_arch, &to->si_arch);
378 break;
374 } 379 }
375 } 380 }
376 return err; 381 return err;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index fbafa0d0e2bf..c976ebfe2269 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -329,6 +329,7 @@ tracesys_next:
329 329
330 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 330 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
331 LDREG TI_TASK(%r1), %r1 331 LDREG TI_TASK(%r1), %r1
332 LDREG TASK_PT_GR28(%r1), %r28 /* Restore return value */
332 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ 333 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
333 LDREG TASK_PT_GR25(%r1), %r25 334 LDREG TASK_PT_GR25(%r1), %r25
334 LDREG TASK_PT_GR24(%r1), %r24 335 LDREG TASK_PT_GR24(%r1), %r24
@@ -342,6 +343,7 @@ tracesys_next:
342 stw %r21, -56(%r30) /* 6th argument */ 343 stw %r21, -56(%r30) /* 6th argument */
343#endif 344#endif
344 345
346 cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
345 comiclr,>>= __NR_Linux_syscalls, %r20, %r0 347 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
346 b,n .Ltracesys_nosys 348 b,n .Ltracesys_nosys
347 349
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 16e0735e2f46..97d6b208e129 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -795,6 +795,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
795 795
796 if (fault_space == 0 && !faulthandler_disabled()) 796 if (fault_space == 0 && !faulthandler_disabled())
797 { 797 {
798 /* Clean up and return if in exception table. */
799 if (fixup_exception(regs))
800 return;
798 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 801 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
799 parisc_terminate("Kernel Fault", regs, code, fault_address); 802 parisc_terminate("Kernel Fault", regs, code, fault_address);
800 } 803 }
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index 536ef66bb94b..1052b747e011 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -26,6 +26,7 @@
26 26
27#ifdef CONFIG_SMP 27#ifdef CONFIG_SMP
28 .macro get_fault_ip t1 t2 28 .macro get_fault_ip t1 t2
29 loadgp
29 addil LT%__per_cpu_offset,%r27 30 addil LT%__per_cpu_offset,%r27
30 LDREG RT%__per_cpu_offset(%r1),\t1 31 LDREG RT%__per_cpu_offset(%r1),\t1
31 /* t2 = smp_processor_id() */ 32 /* t2 = smp_processor_id() */
@@ -40,14 +41,19 @@
40 LDREG RT%exception_data(%r1),\t1 41 LDREG RT%exception_data(%r1),\t1
41 /* t1 = this_cpu_ptr(&exception_data) */ 42 /* t1 = this_cpu_ptr(&exception_data) */
42 add,l \t1,\t2,\t1 43 add,l \t1,\t2,\t1
44 /* %r27 = t1->fault_gp - restore gp */
45 LDREG EXCDATA_GP(\t1), %r27
43 /* t1 = t1->fault_ip */ 46 /* t1 = t1->fault_ip */
44 LDREG EXCDATA_IP(\t1), \t1 47 LDREG EXCDATA_IP(\t1), \t1
45 .endm 48 .endm
46#else 49#else
47 .macro get_fault_ip t1 t2 50 .macro get_fault_ip t1 t2
51 loadgp
48 /* t1 = this_cpu_ptr(&exception_data) */ 52 /* t1 = this_cpu_ptr(&exception_data) */
49 addil LT%exception_data,%r27 53 addil LT%exception_data,%r27
50 LDREG RT%exception_data(%r1),\t2 54 LDREG RT%exception_data(%r1),\t2
55 /* %r27 = t2->fault_gp - restore gp */
56 LDREG EXCDATA_GP(\t2), %r27
51 /* t1 = t2->fault_ip */ 57 /* t1 = t2->fault_ip */
52 LDREG EXCDATA_IP(\t2), \t1 58 LDREG EXCDATA_IP(\t2), \t1
53 .endm 59 .endm
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 26fac9c671c9..16dbe81c97c9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -145,6 +145,7 @@ int fixup_exception(struct pt_regs *regs)
145 struct exception_data *d; 145 struct exception_data *d;
146 d = this_cpu_ptr(&exception_data); 146 d = this_cpu_ptr(&exception_data);
147 d->fault_ip = regs->iaoq[0]; 147 d->fault_ip = regs->iaoq[0];
148 d->fault_gp = regs->gr[27];
148 d->fault_space = regs->isr; 149 d->fault_space = regs->isr;
149 d->fault_addr = regs->ior; 150 d->fault_addr = regs->ior;
150 151
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b96877..6b3e7c6ee096 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -22,7 +22,7 @@
22#include <linux/swap.h> 22#include <linux/swap.h>
23#include <linux/unistd.h> 23#include <linux/unistd.h>
24#include <linux/nodemask.h> /* for node_online_map */ 24#include <linux/nodemask.h> /* for node_online_map */
25#include <linux/pagemap.h> /* for release_pages and page_cache_release */ 25#include <linux/pagemap.h> /* for release_pages */
26#include <linux/compat.h> 26#include <linux/compat.h>
27 27
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8ab8a1a9610a..009fab130cd8 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -246,7 +246,7 @@ struct thread_struct {
246#endif /* CONFIG_ALTIVEC */ 246#endif /* CONFIG_ALTIVEC */
247#ifdef CONFIG_VSX 247#ifdef CONFIG_VSX
248 /* VSR status */ 248 /* VSR status */
249 int used_vsr; /* set if process has used altivec */ 249 int used_vsr; /* set if process has used VSX */
250#endif /* CONFIG_VSX */ 250#endif /* CONFIG_VSX */
251#ifdef CONFIG_SPE 251#ifdef CONFIG_SPE
252 unsigned long evr[32]; /* upper 32-bits of SPE regs */ 252 unsigned long evr[32]; /* upper 32-bits of SPE regs */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 612df305886b..b8500b4ac7fe 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -983,7 +983,7 @@ void restore_tm_state(struct pt_regs *regs)
983static inline void save_sprs(struct thread_struct *t) 983static inline void save_sprs(struct thread_struct *t)
984{ 984{
985#ifdef CONFIG_ALTIVEC 985#ifdef CONFIG_ALTIVEC
986 if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC))) 986 if (cpu_has_feature(CPU_FTR_ALTIVEC))
987 t->vrsave = mfspr(SPRN_VRSAVE); 987 t->vrsave = mfspr(SPRN_VRSAVE);
988#endif 988#endif
989#ifdef CONFIG_PPC_BOOK3S_64 989#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6dd272b6196f..d991b9e80dbb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -413,13 +413,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
413{ 413{
414 struct hugepd_freelist **batchp; 414 struct hugepd_freelist **batchp;
415 415
416 batchp = this_cpu_ptr(&hugepd_freelist_cur); 416 batchp = &get_cpu_var(hugepd_freelist_cur);
417 417
418 if (atomic_read(&tlb->mm->mm_users) < 2 || 418 if (atomic_read(&tlb->mm->mm_users) < 2 ||
419 cpumask_equal(mm_cpumask(tlb->mm), 419 cpumask_equal(mm_cpumask(tlb->mm),
420 cpumask_of(smp_processor_id()))) { 420 cpumask_of(smp_processor_id()))) {
421 kmem_cache_free(hugepte_cache, hugepte); 421 kmem_cache_free(hugepte_cache, hugepte);
422 put_cpu_var(hugepd_freelist_cur); 422 put_cpu_var(hugepd_freelist_cur);
423 return; 423 return;
424 } 424 }
425 425
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dfa863876778..6ca5f0525e57 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
732 return -ENOMEM; 732 return -ENOMEM;
733 733
734 sb->s_maxbytes = MAX_LFS_FILESIZE; 734 sb->s_maxbytes = MAX_LFS_FILESIZE;
735 sb->s_blocksize = PAGE_CACHE_SIZE; 735 sb->s_blocksize = PAGE_SIZE;
736 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 736 sb->s_blocksize_bits = PAGE_SHIFT;
737 sb->s_magic = SPUFS_MAGIC; 737 sb->s_magic = SPUFS_MAGIC;
738 sb->s_op = &s_ops; 738 sb->s_op = &s_ops;
739 sb->s_fs_info = info; 739 sb->s_fs_info = info;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b9df8d11d7a9..aad23e3dff2c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -59,6 +59,9 @@ config PCI_QUIRKS
59config ARCH_SUPPORTS_UPROBES 59config ARCH_SUPPORTS_UPROBES
60 def_bool y 60 def_bool y
61 61
62config DEBUG_RODATA
63 def_bool y
64
62config S390 65config S390
63 def_bool y 66 def_bool y
64 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 67 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index b8045b97f4fb..d750cc0dfe30 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = {
669static struct miscdevice prng_sha512_dev = { 669static struct miscdevice prng_sha512_dev = {
670 .name = "prandom", 670 .name = "prandom",
671 .minor = MISC_DYNAMIC_MINOR, 671 .minor = MISC_DYNAMIC_MINOR,
672 .mode = 0644,
672 .fops = &prng_sha512_fops, 673 .fops = &prng_sha512_fops,
673}; 674};
674static struct miscdevice prng_tdes_dev = { 675static struct miscdevice prng_tdes_dev = {
675 .name = "prandom", 676 .name = "prandom",
676 .minor = MISC_DYNAMIC_MINOR, 677 .minor = MISC_DYNAMIC_MINOR,
678 .mode = 0644,
677 .fops = &prng_tdes_fops, 679 .fops = &prng_tdes_fops,
678}; 680};
679 681
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 0f3da2cb2bd6..255c7eec4481 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
278 sbi->uid = current_uid(); 278 sbi->uid = current_uid();
279 sbi->gid = current_gid(); 279 sbi->gid = current_gid();
280 sb->s_fs_info = sbi; 280 sb->s_fs_info = sbi;
281 sb->s_blocksize = PAGE_CACHE_SIZE; 281 sb->s_blocksize = PAGE_SIZE;
282 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 282 sb->s_blocksize_bits = PAGE_SHIFT;
283 sb->s_magic = HYPFS_MAGIC; 283 sb->s_magic = HYPFS_MAGIC;
284 sb->s_op = &hypfs_s_ops; 284 sb->s_op = &hypfs_s_ops;
285 if (hypfs_parse_options(data, sb)) 285 if (hypfs_parse_options(data, sb))
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 4d7ccac5fd1d..22da3b34c655 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -15,4 +15,7 @@
15 15
16#define __read_mostly __attribute__((__section__(".data..read_mostly"))) 16#define __read_mostly __attribute__((__section__(".data..read_mostly")))
17 17
18/* Read-only memory is marked before mark_rodata_ro() is called. */
19#define __ro_after_init __read_mostly
20
18#endif 21#endif
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index ab3aa6875a59..4384bc797a54 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -311,7 +311,9 @@
311#define __NR_shutdown 373 311#define __NR_shutdown 373
312#define __NR_mlock2 374 312#define __NR_mlock2 374
313#define __NR_copy_file_range 375 313#define __NR_copy_file_range 375
314#define NR_syscalls 376 314#define __NR_preadv2 376
315#define __NR_pwritev2 377
316#define NR_syscalls 378
315 317
316/* 318/*
317 * There are some system calls that are not present on 64 bit, some 319 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 58bf4572d457..62f066b5259e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -670,6 +670,7 @@ static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
670 670
671 switch (action & ~CPU_TASKS_FROZEN) { 671 switch (action & ~CPU_TASKS_FROZEN) {
672 case CPU_ONLINE: 672 case CPU_ONLINE:
673 case CPU_DOWN_FAILED:
673 flags = PMC_INIT; 674 flags = PMC_INIT;
674 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); 675 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
675 break; 676 break;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 1a43474df541..eaab9a7cb3be 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1521,7 +1521,7 @@ static int cpumf_pmu_notifier(struct notifier_block *self,
1521 1521
1522 switch (action & ~CPU_TASKS_FROZEN) { 1522 switch (action & ~CPU_TASKS_FROZEN) {
1523 case CPU_ONLINE: 1523 case CPU_ONLINE:
1524 case CPU_ONLINE_FROZEN: 1524 case CPU_DOWN_FAILED:
1525 flags = PMC_INIT; 1525 flags = PMC_INIT;
1526 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); 1526 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
1527 break; 1527 break;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 293d8b98fd52..9b59e6212d8f 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -384,3 +384,5 @@ SYSCALL(sys_recvmsg,compat_sys_recvmsg)
384SYSCALL(sys_shutdown,sys_shutdown) 384SYSCALL(sys_shutdown,sys_shutdown)
385SYSCALL(sys_mlock2,compat_sys_mlock2) 385SYSCALL(sys_mlock2,compat_sys_mlock2)
386SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ 386SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
387SYSCALL(sys_preadv2,compat_sys_preadv2)
388SYSCALL(sys_pwritev2,compat_sys_pwritev2)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 69247b4dcc43..cace818d86eb 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -23,7 +23,7 @@
23/** 23/**
24 * gmap_alloc - allocate a guest address space 24 * gmap_alloc - allocate a guest address space
25 * @mm: pointer to the parent mm_struct 25 * @mm: pointer to the parent mm_struct
26 * @limit: maximum size of the gmap address space 26 * @limit: maximum address of the gmap address space
27 * 27 *
28 * Returns a guest address space structure. 28 * Returns a guest address space structure.
29 */ 29 */
@@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
292 if ((from | to | len) & (PMD_SIZE - 1)) 292 if ((from | to | len) & (PMD_SIZE - 1))
293 return -EINVAL; 293 return -EINVAL;
294 if (len == 0 || from + len < from || to + len < to || 294 if (len == 0 || from + len < from || to + len < to ||
295 from + len > TASK_MAX_SIZE || to + len > gmap->asce_end) 295 from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
296 return -EINVAL; 296 return -EINVAL;
297 297
298 flush = 0; 298 flush = 0;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 49a1c84ed266..a8a6765f1a51 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,9 +20,9 @@
20static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, 20static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
21 unsigned long end, int write, struct page **pages, int *nr) 21 unsigned long end, int write, struct page **pages, int *nr)
22{ 22{
23 struct page *head, *page;
23 unsigned long mask; 24 unsigned long mask;
24 pte_t *ptep, pte; 25 pte_t *ptep, pte;
25 struct page *page;
26 26
27 mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL; 27 mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
28 28
@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
37 return 0; 37 return 0;
38 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 38 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
39 page = pte_page(pte); 39 page = pte_page(pte);
40 if (!page_cache_get_speculative(page)) 40 head = compound_head(page);
41 if (!page_cache_get_speculative(head))
41 return 0; 42 return 0;
42 if (unlikely(pte_val(pte) != pte_val(*ptep))) { 43 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
43 put_page(page); 44 put_page(head);
44 return 0; 45 return 0;
45 } 46 }
47 VM_BUG_ON_PAGE(compound_head(page) != head, page);
46 pages[*nr] = page; 48 pages[*nr] = page;
47 (*nr)++; 49 (*nr)++;
48 50
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 73e290337092..c7b0451397d6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -108,6 +108,13 @@ void __init paging_init(void)
108 free_area_init_nodes(max_zone_pfns); 108 free_area_init_nodes(max_zone_pfns);
109} 109}
110 110
111void mark_rodata_ro(void)
112{
113 /* Text and rodata are already protected. Nothing to do here. */
114 pr_info("Write protecting the kernel read-only data: %luk\n",
115 ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
116}
117
111void __init mem_init(void) 118void __init mem_init(void)
112{ 119{
113 if (MACHINE_HAS_TLB_LC) 120 if (MACHINE_HAS_TLB_LC)
@@ -126,9 +133,6 @@ void __init mem_init(void)
126 setup_zero_pages(); /* Setup zeroed pages. */ 133 setup_zero_pages(); /* Setup zeroed pages. */
127 134
128 mem_init_print_info(NULL); 135 mem_init_print_info(NULL);
129 printk("Write protected kernel read-only data: %#lx - %#lx\n",
130 (unsigned long)&_stext,
131 PFN_ALIGN((unsigned long)&_eshared) - 1);
132} 136}
133 137
134void free_initmem(void) 138void free_initmem(void)
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 21591ddb4c1f..1a4512c8544a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -176,8 +176,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
176 rc = clp_store_query_pci_fn(zdev, &rrb->response); 176 rc = clp_store_query_pci_fn(zdev, &rrb->response);
177 if (rc) 177 if (rc)
178 goto out; 178 goto out;
179 if (rrb->response.pfgid) 179 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
180 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
181 } else { 180 } else {
182 zpci_err("Q PCI FN:\n"); 181 zpci_err("Q PCI FN:\n");
183 zpci_err_clp(rrb->response.hdr.rsp, rc); 182 zpci_err_clp(rrb->response.hdr.rsp, rc);
diff --git a/arch/sparc/include/asm/compat_signal.h b/arch/sparc/include/asm/compat_signal.h
index 9ed1f128b4d1..4b027b1044fa 100644
--- a/arch/sparc/include/asm/compat_signal.h
+++ b/arch/sparc/include/asm/compat_signal.h
@@ -6,17 +6,17 @@
6 6
7#ifdef CONFIG_COMPAT 7#ifdef CONFIG_COMPAT
8struct __new_sigaction32 { 8struct __new_sigaction32 {
9 unsigned sa_handler; 9 unsigned int sa_handler;
10 unsigned int sa_flags; 10 unsigned int sa_flags;
11 unsigned sa_restorer; /* not used by Linux/SPARC yet */ 11 unsigned int sa_restorer; /* not used by Linux/SPARC yet */
12 compat_sigset_t sa_mask; 12 compat_sigset_t sa_mask;
13}; 13};
14 14
15struct __old_sigaction32 { 15struct __old_sigaction32 {
16 unsigned sa_handler; 16 unsigned int sa_handler;
17 compat_old_sigset_t sa_mask; 17 compat_old_sigset_t sa_mask;
18 unsigned int sa_flags; 18 unsigned int sa_flags;
19 unsigned sa_restorer; /* not used by Linux/SPARC yet */ 19 unsigned int sa_restorer; /* not used by Linux/SPARC yet */
20}; 20};
21#endif 21#endif
22 22
diff --git a/arch/sparc/include/asm/obio.h b/arch/sparc/include/asm/obio.h
index 910c1d9af1f8..426ad75103fb 100644
--- a/arch/sparc/include/asm/obio.h
+++ b/arch/sparc/include/asm/obio.h
@@ -117,9 +117,9 @@ static inline void bw_clear_intr_mask(int sbus_level, int mask)
117 "i" (ASI_M_CTL)); 117 "i" (ASI_M_CTL));
118} 118}
119 119
120static inline unsigned bw_get_prof_limit(int cpu) 120static inline unsigned int bw_get_prof_limit(int cpu)
121{ 121{
122 unsigned limit; 122 unsigned int limit;
123 123
124 __asm__ __volatile__ ("lda [%1] %2, %0" : 124 __asm__ __volatile__ ("lda [%1] %2, %0" :
125 "=r" (limit) : 125 "=r" (limit) :
@@ -128,7 +128,7 @@ static inline unsigned bw_get_prof_limit(int cpu)
128 return limit; 128 return limit;
129} 129}
130 130
131static inline void bw_set_prof_limit(int cpu, unsigned limit) 131static inline void bw_set_prof_limit(int cpu, unsigned int limit)
132{ 132{
133 __asm__ __volatile__ ("sta %0, [%1] %2" : : 133 __asm__ __volatile__ ("sta %0, [%1] %2" : :
134 "r" (limit), 134 "r" (limit),
@@ -136,9 +136,9 @@ static inline void bw_set_prof_limit(int cpu, unsigned limit)
136 "i" (ASI_M_CTL)); 136 "i" (ASI_M_CTL));
137} 137}
138 138
139static inline unsigned bw_get_ctrl(int cpu) 139static inline unsigned int bw_get_ctrl(int cpu)
140{ 140{
141 unsigned ctrl; 141 unsigned int ctrl;
142 142
143 __asm__ __volatile__ ("lda [%1] %2, %0" : 143 __asm__ __volatile__ ("lda [%1] %2, %0" :
144 "=r" (ctrl) : 144 "=r" (ctrl) :
@@ -147,7 +147,7 @@ static inline unsigned bw_get_ctrl(int cpu)
147 return ctrl; 147 return ctrl;
148} 148}
149 149
150static inline void bw_set_ctrl(int cpu, unsigned ctrl) 150static inline void bw_set_ctrl(int cpu, unsigned int ctrl)
151{ 151{
152 __asm__ __volatile__ ("sta %0, [%1] %2" : : 152 __asm__ __volatile__ ("sta %0, [%1] %2" : :
153 "r" (ctrl), 153 "r" (ctrl),
@@ -155,9 +155,9 @@ static inline void bw_set_ctrl(int cpu, unsigned ctrl)
155 "i" (ASI_M_CTL)); 155 "i" (ASI_M_CTL));
156} 156}
157 157
158static inline unsigned cc_get_ipen(void) 158static inline unsigned int cc_get_ipen(void)
159{ 159{
160 unsigned pending; 160 unsigned int pending;
161 161
162 __asm__ __volatile__ ("lduha [%1] %2, %0" : 162 __asm__ __volatile__ ("lduha [%1] %2, %0" :
163 "=r" (pending) : 163 "=r" (pending) :
@@ -166,7 +166,7 @@ static inline unsigned cc_get_ipen(void)
166 return pending; 166 return pending;
167} 167}
168 168
169static inline void cc_set_iclr(unsigned clear) 169static inline void cc_set_iclr(unsigned int clear)
170{ 170{
171 __asm__ __volatile__ ("stha %0, [%1] %2" : : 171 __asm__ __volatile__ ("stha %0, [%1] %2" : :
172 "r" (clear), 172 "r" (clear),
@@ -174,9 +174,9 @@ static inline void cc_set_iclr(unsigned clear)
174 "i" (ASI_M_MXCC)); 174 "i" (ASI_M_MXCC));
175} 175}
176 176
177static inline unsigned cc_get_imsk(void) 177static inline unsigned int cc_get_imsk(void)
178{ 178{
179 unsigned mask; 179 unsigned int mask;
180 180
181 __asm__ __volatile__ ("lduha [%1] %2, %0" : 181 __asm__ __volatile__ ("lduha [%1] %2, %0" :
182 "=r" (mask) : 182 "=r" (mask) :
@@ -185,7 +185,7 @@ static inline unsigned cc_get_imsk(void)
185 return mask; 185 return mask;
186} 186}
187 187
188static inline void cc_set_imsk(unsigned mask) 188static inline void cc_set_imsk(unsigned int mask)
189{ 189{
190 __asm__ __volatile__ ("stha %0, [%1] %2" : : 190 __asm__ __volatile__ ("stha %0, [%1] %2" : :
191 "r" (mask), 191 "r" (mask),
@@ -193,9 +193,9 @@ static inline void cc_set_imsk(unsigned mask)
193 "i" (ASI_M_MXCC)); 193 "i" (ASI_M_MXCC));
194} 194}
195 195
196static inline unsigned cc_get_imsk_other(int cpuid) 196static inline unsigned int cc_get_imsk_other(int cpuid)
197{ 197{
198 unsigned mask; 198 unsigned int mask;
199 199
200 __asm__ __volatile__ ("lduha [%1] %2, %0" : 200 __asm__ __volatile__ ("lduha [%1] %2, %0" :
201 "=r" (mask) : 201 "=r" (mask) :
@@ -204,7 +204,7 @@ static inline unsigned cc_get_imsk_other(int cpuid)
204 return mask; 204 return mask;
205} 205}
206 206
207static inline void cc_set_imsk_other(int cpuid, unsigned mask) 207static inline void cc_set_imsk_other(int cpuid, unsigned int mask)
208{ 208{
209 __asm__ __volatile__ ("stha %0, [%1] %2" : : 209 __asm__ __volatile__ ("stha %0, [%1] %2" : :
210 "r" (mask), 210 "r" (mask),
@@ -212,7 +212,7 @@ static inline void cc_set_imsk_other(int cpuid, unsigned mask)
212 "i" (ASI_M_CTL)); 212 "i" (ASI_M_CTL));
213} 213}
214 214
215static inline void cc_set_igen(unsigned gen) 215static inline void cc_set_igen(unsigned int gen)
216{ 216{
217 __asm__ __volatile__ ("sta %0, [%1] %2" : : 217 __asm__ __volatile__ ("sta %0, [%1] %2" : :
218 "r" (gen), 218 "r" (gen),
diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h
index 47eaafad15ce..63374c4413a8 100644
--- a/arch/sparc/include/asm/openprom.h
+++ b/arch/sparc/include/asm/openprom.h
@@ -29,12 +29,12 @@ struct linux_dev_v0_funcs {
29/* V2 and later prom device operations. */ 29/* V2 and later prom device operations. */
30struct linux_dev_v2_funcs { 30struct linux_dev_v2_funcs {
31 phandle (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */ 31 phandle (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
32 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz); 32 char * (*v2_dumb_mem_alloc)(char *va, unsigned int sz);
33 void (*v2_dumb_mem_free)(char *va, unsigned sz); 33 void (*v2_dumb_mem_free)(char *va, unsigned int sz);
34 34
35 /* To map devices into virtual I/O space. */ 35 /* To map devices into virtual I/O space. */
36 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz); 36 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned int paddr, unsigned int sz);
37 void (*v2_dumb_munmap)(char *virta, unsigned size); 37 void (*v2_dumb_munmap)(char *virta, unsigned int size);
38 38
39 int (*v2_dev_open)(char *devpath); 39 int (*v2_dev_open)(char *devpath);
40 void (*v2_dev_close)(int d); 40 void (*v2_dev_close)(int d);
@@ -50,7 +50,7 @@ struct linux_dev_v2_funcs {
50struct linux_mlist_v0 { 50struct linux_mlist_v0 {
51 struct linux_mlist_v0 *theres_more; 51 struct linux_mlist_v0 *theres_more;
52 unsigned int start_adr; 52 unsigned int start_adr;
53 unsigned num_bytes; 53 unsigned int num_bytes;
54}; 54};
55 55
56struct linux_mem_v0 { 56struct linux_mem_v0 {
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7a38d6a576c5..f089cfa249f3 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -218,7 +218,7 @@ extern pgprot_t PAGE_KERNEL_LOCKED;
218extern pgprot_t PAGE_COPY; 218extern pgprot_t PAGE_COPY;
219extern pgprot_t PAGE_SHARED; 219extern pgprot_t PAGE_SHARED;
220 220
221/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */ 221/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
222extern unsigned long _PAGE_IE; 222extern unsigned long _PAGE_IE;
223extern unsigned long _PAGE_E; 223extern unsigned long _PAGE_E;
224extern unsigned long _PAGE_CACHE; 224extern unsigned long _PAGE_CACHE;
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6924bdefe148..ce2595c89471 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -201,7 +201,7 @@ unsigned long get_wchan(struct task_struct *task);
201#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) 201#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
202 202
203/* Please see the commentary in asm/backoff.h for a description of 203/* Please see the commentary in asm/backoff.h for a description of
204 * what these instructions are doing and how they have been choosen. 204 * what these instructions are doing and how they have been chosen.
205 * To make a long story short, we are trying to yield the current cpu 205 * To make a long story short, we are trying to yield the current cpu
206 * strand during busy loops. 206 * strand during busy loops.
207 */ 207 */
diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h
index fc2df1e892cb..f4eb630a58ed 100644
--- a/arch/sparc/include/asm/sigcontext.h
+++ b/arch/sparc/include/asm/sigcontext.h
@@ -25,7 +25,7 @@ struct sigcontext32 {
25 int sigc_oswins; /* outstanding windows */ 25 int sigc_oswins; /* outstanding windows */
26 26
27 /* stack ptrs for each regwin buf */ 27 /* stack ptrs for each regwin buf */
28 unsigned sigc_spbuf[__SUNOS_MAXWIN]; 28 unsigned int sigc_spbuf[__SUNOS_MAXWIN];
29 29
30 /* Windows to restore after signal */ 30 /* Windows to restore after signal */
31 struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN]; 31 struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN];
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index ecb49cfa3be9..c6a155c3904e 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -149,7 +149,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
149 * page size in question. So for PMD mappings (which fall on 149 * page size in question. So for PMD mappings (which fall on
150 * bit 23, for 8MB per PMD) we must propagate bit 22 for a 150 * bit 23, for 8MB per PMD) we must propagate bit 22 for a
151 * 4MB huge page. For huge PUDs (which fall on bit 33, for 151 * 4MB huge page. For huge PUDs (which fall on bit 33, for
152 * 8GB per PUD), we have to accomodate 256MB and 2GB huge 152 * 8GB per PUD), we have to accommodate 256MB and 2GB huge
153 * pages. So for those we propagate bits 32 to 28. 153 * pages. So for those we propagate bits 32 to 28.
154 */ 154 */
155#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ 155#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
diff --git a/arch/sparc/include/uapi/asm/stat.h b/arch/sparc/include/uapi/asm/stat.h
index a232e9e1f4e5..2f0583a2c689 100644
--- a/arch/sparc/include/uapi/asm/stat.h
+++ b/arch/sparc/include/uapi/asm/stat.h
@@ -6,13 +6,13 @@
6#if defined(__sparc__) && defined(__arch64__) 6#if defined(__sparc__) && defined(__arch64__)
7/* 64 bit sparc */ 7/* 64 bit sparc */
8struct stat { 8struct stat {
9 unsigned st_dev; 9 unsigned int st_dev;
10 ino_t st_ino; 10 ino_t st_ino;
11 mode_t st_mode; 11 mode_t st_mode;
12 short st_nlink; 12 short st_nlink;
13 uid_t st_uid; 13 uid_t st_uid;
14 gid_t st_gid; 14 gid_t st_gid;
15 unsigned st_rdev; 15 unsigned int st_rdev;
16 off_t st_size; 16 off_t st_size;
17 time_t st_atime; 17 time_t st_atime;
18 time_t st_mtime; 18 time_t st_mtime;
diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c
index 24361b494a93..2585c1e14bcc 100644
--- a/arch/sparc/kernel/audit.c
+++ b/arch/sparc/kernel/audit.c
@@ -5,27 +5,27 @@
5 5
6#include "kernel.h" 6#include "kernel.h"
7 7
8static unsigned dir_class[] = { 8static unsigned int dir_class[] = {
9#include <asm-generic/audit_dir_write.h> 9#include <asm-generic/audit_dir_write.h>
10~0U 10~0U
11}; 11};
12 12
13static unsigned read_class[] = { 13static unsigned int read_class[] = {
14#include <asm-generic/audit_read.h> 14#include <asm-generic/audit_read.h>
15~0U 15~0U
16}; 16};
17 17
18static unsigned write_class[] = { 18static unsigned int write_class[] = {
19#include <asm-generic/audit_write.h> 19#include <asm-generic/audit_write.h>
20~0U 20~0U
21}; 21};
22 22
23static unsigned chattr_class[] = { 23static unsigned int chattr_class[] = {
24#include <asm-generic/audit_change_attr.h> 24#include <asm-generic/audit_change_attr.h>
25~0U 25~0U
26}; 26};
27 27
28static unsigned signal_class[] = { 28static unsigned int signal_class[] = {
29#include <asm-generic/audit_signal.h> 29#include <asm-generic/audit_signal.h>
30~0U 30~0U
31}; 31};
@@ -39,7 +39,7 @@ int audit_classify_arch(int arch)
39 return 0; 39 return 0;
40} 40}
41 41
42int audit_classify_syscall(int abi, unsigned syscall) 42int audit_classify_syscall(int abi, unsigned int syscall)
43{ 43{
44#ifdef CONFIG_COMPAT 44#ifdef CONFIG_COMPAT
45 if (abi == AUDIT_ARCH_SPARC) 45 if (abi == AUDIT_ARCH_SPARC)
diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c
index 7062263d09c1..e5611cd428f1 100644
--- a/arch/sparc/kernel/compat_audit.c
+++ b/arch/sparc/kernel/compat_audit.c
@@ -2,32 +2,32 @@
2#include <asm/unistd.h> 2#include <asm/unistd.h>
3#include "kernel.h" 3#include "kernel.h"
4 4
5unsigned sparc32_dir_class[] = { 5unsigned int sparc32_dir_class[] = {
6#include <asm-generic/audit_dir_write.h> 6#include <asm-generic/audit_dir_write.h>
7~0U 7~0U
8}; 8};
9 9
10unsigned sparc32_chattr_class[] = { 10unsigned int sparc32_chattr_class[] = {
11#include <asm-generic/audit_change_attr.h> 11#include <asm-generic/audit_change_attr.h>
12~0U 12~0U
13}; 13};
14 14
15unsigned sparc32_write_class[] = { 15unsigned int sparc32_write_class[] = {
16#include <asm-generic/audit_write.h> 16#include <asm-generic/audit_write.h>
17~0U 17~0U
18}; 18};
19 19
20unsigned sparc32_read_class[] = { 20unsigned int sparc32_read_class[] = {
21#include <asm-generic/audit_read.h> 21#include <asm-generic/audit_read.h>
22~0U 22~0U
23}; 23};
24 24
25unsigned sparc32_signal_class[] = { 25unsigned int sparc32_signal_class[] = {
26#include <asm-generic/audit_signal.h> 26#include <asm-generic/audit_signal.h>
27~0U 27~0U
28}; 28};
29 29
30int sparc32_classify_syscall(unsigned syscall) 30int sparc32_classify_syscall(unsigned int syscall)
31{ 31{
32 switch(syscall) { 32 switch(syscall) {
33 case __NR_open: 33 case __NR_open:
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index a83707c83be8..51aa6e86a5f8 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1255,7 +1255,7 @@ flush_patch_exception:
1255kuw_patch1_7win: sll %o3, 6, %o3 1255kuw_patch1_7win: sll %o3, 6, %o3
1256 1256
1257 /* No matter how much overhead this routine has in the worst 1257 /* No matter how much overhead this routine has in the worst
1258 * case scenerio, it is several times better than taking the 1258 * case scenario, it is several times better than taking the
1259 * traps with the old method of just doing flush_user_windows(). 1259 * traps with the old method of just doing flush_user_windows().
1260 */ 1260 */
1261kill_user_windows: 1261kill_user_windows:
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 28fed53b13a0..ffd5ff4678cf 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -131,7 +131,7 @@ void __iomem *ioremap(unsigned long offset, unsigned long size)
131EXPORT_SYMBOL(ioremap); 131EXPORT_SYMBOL(ioremap);
132 132
133/* 133/*
134 * Comlimentary to ioremap(). 134 * Complementary to ioremap().
135 */ 135 */
136void iounmap(volatile void __iomem *virtual) 136void iounmap(volatile void __iomem *virtual)
137{ 137{
@@ -233,7 +233,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
233} 233}
234 234
235/* 235/*
236 * Comlimentary to _sparc_ioremap(). 236 * Complementary to _sparc_ioremap().
237 */ 237 */
238static void _sparc_free_io(struct resource *res) 238static void _sparc_free_io(struct resource *res)
239{ 239{
@@ -532,7 +532,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
532} 532}
533 533
534/* Map a set of buffers described by scatterlist in streaming 534/* Map a set of buffers described by scatterlist in streaming
535 * mode for DMA. This is the scather-gather version of the 535 * mode for DMA. This is the scatter-gather version of the
536 * above pci_map_single interface. Here the scatter gather list 536 * above pci_map_single interface. Here the scatter gather list
537 * elements are each tagged with the appropriate dma address 537 * elements are each tagged with the appropriate dma address
538 * and length. They are obtained via sg_dma_{address,length}(SG). 538 * and length. They are obtained via sg_dma_{address,length}(SG).
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index e7f652be9e61..5057ec2e4af6 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -54,12 +54,12 @@ void do_signal32(struct pt_regs * regs);
54asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp); 54asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
55 55
56/* compat_audit.c */ 56/* compat_audit.c */
57extern unsigned sparc32_dir_class[]; 57extern unsigned int sparc32_dir_class[];
58extern unsigned sparc32_chattr_class[]; 58extern unsigned int sparc32_chattr_class[];
59extern unsigned sparc32_write_class[]; 59extern unsigned int sparc32_write_class[];
60extern unsigned sparc32_read_class[]; 60extern unsigned int sparc32_read_class[];
61extern unsigned sparc32_signal_class[]; 61extern unsigned int sparc32_signal_class[];
62int sparc32_classify_syscall(unsigned syscall); 62int sparc32_classify_syscall(unsigned int syscall);
63#endif 63#endif
64 64
65#ifdef CONFIG_SPARC32 65#ifdef CONFIG_SPARC32
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 42efcf85f721..33cd171d933e 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -203,7 +203,7 @@ static struct irq_chip leon_irq = {
203 203
204/* 204/*
205 * Build a LEON IRQ for the edge triggered LEON IRQ controller: 205 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
206 * Edge (normal) IRQ - handle_simple_irq, ack=DONT-CARE, never ack 206 * Edge (normal) IRQ - handle_simple_irq, ack=DON'T-CARE, never ack
207 * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR 207 * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR
208 * Per-CPU Edge - handle_percpu_irq, ack=0 208 * Per-CPU Edge - handle_percpu_irq, ack=0
209 */ 209 */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 46a59643bb1c..c16ef1af1843 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -103,7 +103,7 @@ static void show_regwindow32(struct pt_regs *regs)
103 mm_segment_t old_fs; 103 mm_segment_t old_fs;
104 104
105 __asm__ __volatile__ ("flushw"); 105 __asm__ __volatile__ ("flushw");
106 rw = compat_ptr((unsigned)regs->u_regs[14]); 106 rw = compat_ptr((unsigned int)regs->u_regs[14]);
107 old_fs = get_fs(); 107 old_fs = get_fs();
108 set_fs (USER_DS); 108 set_fs (USER_DS);
109 if (copy_from_user (&r_w, rw, sizeof(r_w))) { 109 if (copy_from_user (&r_w, rw, sizeof(r_w))) {
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index baef495c06bd..69d75ff1c25c 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -109,7 +109,7 @@ unsigned long cmdline_memory_size __initdata = 0;
109unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */ 109unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
110 110
111static void 111static void
112prom_console_write(struct console *con, const char *s, unsigned n) 112prom_console_write(struct console *con, const char *s, unsigned int n)
113{ 113{
114 prom_write(s, n); 114 prom_write(s, n);
115} 115}
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f3185e2b028b..26db95b54ee9 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -77,7 +77,7 @@ struct screen_info screen_info = {
77}; 77};
78 78
79static void 79static void
80prom_console_write(struct console *con, const char *s, unsigned n) 80prom_console_write(struct console *con, const char *s, unsigned int n)
81{ 81{
82 prom_write(s, n); 82 prom_write(s, n);
83} 83}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 4eed773a7735..3c25241fa5cb 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -144,7 +144,7 @@ void do_sigreturn32(struct pt_regs *regs)
144 compat_uptr_t fpu_save; 144 compat_uptr_t fpu_save;
145 compat_uptr_t rwin_save; 145 compat_uptr_t rwin_save;
146 unsigned int psr; 146 unsigned int psr;
147 unsigned pc, npc; 147 unsigned int pc, npc;
148 sigset_t set; 148 sigset_t set;
149 compat_sigset_t seta; 149 compat_sigset_t seta;
150 int err, i; 150 int err, i;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index b489e9759518..fe8b8ee8e660 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -337,10 +337,10 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
337 switch (call) { 337 switch (call) {
338 case SEMOP: 338 case SEMOP:
339 err = sys_semtimedop(first, ptr, 339 err = sys_semtimedop(first, ptr,
340 (unsigned)second, NULL); 340 (unsigned int)second, NULL);
341 goto out; 341 goto out;
342 case SEMTIMEDOP: 342 case SEMTIMEDOP:
343 err = sys_semtimedop(first, ptr, (unsigned)second, 343 err = sys_semtimedop(first, ptr, (unsigned int)second,
344 (const struct timespec __user *) 344 (const struct timespec __user *)
345 (unsigned long) fifth); 345 (unsigned long) fifth);
346 goto out; 346 goto out;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 7f41d40b7e6e..fa8e21abb5e0 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -1,4 +1,4 @@
1/* sysfs.c: Toplogy sysfs support code for sparc64. 1/* sysfs.c: Topology sysfs support code for sparc64.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
4 */ 4 */
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index d89e97b374cf..9aacb9159262 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -209,8 +209,8 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
209 if (size == 16) { 209 if (size == 16) {
210 size = 8; 210 size = 8;
211 zero = (((long)(reg_num ? 211 zero = (((long)(reg_num ?
212 (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | 212 (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
213 (unsigned)fetch_reg(reg_num + 1, regs); 213 (unsigned int)fetch_reg(reg_num + 1, regs);
214 } else if (reg_num) { 214 } else if (reg_num) {
215 src_val_p = fetch_reg_addr(reg_num, regs); 215 src_val_p = fetch_reg_addr(reg_num, regs);
216 } 216 }
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index c399e7b3b035..b6c559cbd64d 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -303,10 +303,10 @@ no_context:
303 fixup = search_extables_range(regs->pc, &g2); 303 fixup = search_extables_range(regs->pc, &g2);
304 /* Values below 10 are reserved for other things */ 304 /* Values below 10 are reserved for other things */
305 if (fixup > 10) { 305 if (fixup > 10) {
306 extern const unsigned __memset_start[]; 306 extern const unsigned int __memset_start[];
307 extern const unsigned __memset_end[]; 307 extern const unsigned int __memset_end[];
308 extern const unsigned __csum_partial_copy_start[]; 308 extern const unsigned int __csum_partial_copy_start[];
309 extern const unsigned __csum_partial_copy_end[]; 309 extern const unsigned int __csum_partial_copy_end[];
310 310
311#ifdef DEBUG_EXCEPTIONS 311#ifdef DEBUG_EXCEPTIONS
312 printk("Exception: PC<%08lx> faddr<%08lx>\n", 312 printk("Exception: PC<%08lx> faddr<%08lx>\n",
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 3e6e05a7c4c2..a6d9204a6a0b 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -351,7 +351,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \
351 * 351 *
352 * Sometimes we need to emit a branch earlier in the code 352 * Sometimes we need to emit a branch earlier in the code
353 * sequence. And in these situations we adjust "destination" 353 * sequence. And in these situations we adjust "destination"
354 * to accomodate this difference. For example, if we needed 354 * to accommodate this difference. For example, if we needed
355 * to emit a branch (and it's delay slot) right before the 355 * to emit a branch (and it's delay slot) right before the
356 * final instruction emitted for a BPF opcode, we'd use 356 * final instruction emitted for a BPF opcode, we'd use
357 * "destination + 4" instead of just plain "destination" above. 357 * "destination + 4" instead of just plain "destination" above.
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
index c97e416dd963..ff7f50f970a5 100644
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -211,7 +211,7 @@ _gxio_mpipe_link_mac_t;
211 * request shared data permission on the same link. 211 * request shared data permission on the same link.
212 * 212 *
213 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, 213 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
214 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() 214 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
215 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. 215 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
216 */ 216 */
217#define GXIO_MPIPE_LINK_DATA 0x00000001UL 217#define GXIO_MPIPE_LINK_DATA 0x00000001UL
@@ -219,7 +219,7 @@ _gxio_mpipe_link_mac_t;
219/** Do not request data permission on the specified link. 219/** Do not request data permission on the specified link.
220 * 220 *
221 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, 221 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
222 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() 222 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
223 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. 223 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
224 */ 224 */
225#define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL 225#define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL
@@ -230,7 +230,7 @@ _gxio_mpipe_link_mac_t;
230 * data permission on it, this open will fail. 230 * data permission on it, this open will fail.
231 * 231 *
232 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, 232 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
233 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open() 233 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
234 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. 234 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
235 */ 235 */
236#define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL 236#define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL
@@ -241,7 +241,7 @@ _gxio_mpipe_link_mac_t;
241 * permission on the same link. 241 * permission on the same link.
242 * 242 *
243 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, 243 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
244 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() 244 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
245 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. 245 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
246 */ 246 */
247#define GXIO_MPIPE_LINK_STATS 0x00000008UL 247#define GXIO_MPIPE_LINK_STATS 0x00000008UL
@@ -249,7 +249,7 @@ _gxio_mpipe_link_mac_t;
249/** Do not request stats permission on the specified link. 249/** Do not request stats permission on the specified link.
250 * 250 *
251 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, 251 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
252 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() 252 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
253 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. 253 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
254 */ 254 */
255#define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL 255#define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL
@@ -267,7 +267,7 @@ _gxio_mpipe_link_mac_t;
267 * reset by other statistics programs. 267 * reset by other statistics programs.
268 * 268 *
269 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, 269 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
270 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open() 270 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
271 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. 271 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
272 */ 272 */
273#define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL 273#define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL
@@ -278,7 +278,7 @@ _gxio_mpipe_link_mac_t;
278 * permission on the same link. 278 * permission on the same link.
279 * 279 *
280 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, 280 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
281 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() 281 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
282 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. 282 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
283 */ 283 */
284#define GXIO_MPIPE_LINK_CTL 0x00000040UL 284#define GXIO_MPIPE_LINK_CTL 0x00000040UL
@@ -286,7 +286,7 @@ _gxio_mpipe_link_mac_t;
286/** Do not request control permission on the specified link. 286/** Do not request control permission on the specified link.
287 * 287 *
288 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, 288 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
289 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() 289 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
290 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. 290 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
291 */ 291 */
292#define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL 292#define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL
@@ -301,7 +301,7 @@ _gxio_mpipe_link_mac_t;
301 * it prevents programs like mpipe-link from configuring the link. 301 * it prevents programs like mpipe-link from configuring the link.
302 * 302 *
303 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, 303 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
304 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open() 304 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
305 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. 305 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
306 */ 306 */
307#define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL 307#define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL
@@ -311,7 +311,7 @@ _gxio_mpipe_link_mac_t;
311 * change the desired state of the link when it is closed or the process 311 * change the desired state of the link when it is closed or the process
312 * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, 312 * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
313 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or 313 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
314 * ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open() 314 * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
315 * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. 315 * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
316 */ 316 */
317#define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL 317#define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL
@@ -322,7 +322,7 @@ _gxio_mpipe_link_mac_t;
322 * open, set the desired state of the link to down. No more than one of 322 * open, set the desired state of the link to down. No more than one of
323 * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, 323 * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
324 * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be 324 * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
325 * specifed in a gxio_mpipe_link_open() call. If none are specified, 325 * specified in a gxio_mpipe_link_open() call. If none are specified,
326 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. 326 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
327 */ 327 */
328#define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL 328#define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL
@@ -332,7 +332,7 @@ _gxio_mpipe_link_mac_t;
332 * process has the link open, set the desired state of the link to down. 332 * process has the link open, set the desired state of the link to down.
333 * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, 333 * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
334 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or 334 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
335 * ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open() 335 * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
336 * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. 336 * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
337 */ 337 */
338#define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL 338#define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL
@@ -342,7 +342,7 @@ _gxio_mpipe_link_mac_t;
342 * closed or the process exits. No more than one of 342 * closed or the process exits. No more than one of
343 * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, 343 * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
344 * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be 344 * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
345 * specifed in a gxio_mpipe_link_open() call. If none are specified, 345 * specified in a gxio_mpipe_link_open() call. If none are specified,
346 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. 346 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
347 */ 347 */
348#define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL 348#define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
index a506c2c28943..9247d6b562f4 100644
--- a/arch/tile/kernel/kgdb.c
+++ b/arch/tile/kernel/kgdb.c
@@ -126,15 +126,15 @@ void
126sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) 126sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
127{ 127{
128 struct pt_regs *thread_regs; 128 struct pt_regs *thread_regs;
129 const int NGPRS = TREG_LAST_GPR + 1;
129 130
130 if (task == NULL) 131 if (task == NULL)
131 return; 132 return;
132 133
133 /* Initialize to zero. */
134 memset(gdb_regs, 0, NUMREGBYTES);
135
136 thread_regs = task_pt_regs(task); 134 thread_regs = task_pt_regs(task);
137 memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long)); 135 memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
136 memset(&gdb_regs[NGPRS], 0,
137 (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
138 gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc; 138 gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
139 gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum; 139 gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
140} 140}
@@ -433,9 +433,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
433struct kgdb_arch arch_kgdb_ops; 433struct kgdb_arch arch_kgdb_ops;
434 434
435/* 435/*
436 * kgdb_arch_init - Perform any architecture specific initalization. 436 * kgdb_arch_init - Perform any architecture specific initialization.
437 * 437 *
438 * This function will handle the initalization of any architecture 438 * This function will handle the initialization of any architecture
439 * specific callbacks. 439 * specific callbacks.
440 */ 440 */
441int kgdb_arch_init(void) 441int kgdb_arch_init(void)
@@ -447,9 +447,9 @@ int kgdb_arch_init(void)
447} 447}
448 448
449/* 449/*
450 * kgdb_arch_exit - Perform any architecture specific uninitalization. 450 * kgdb_arch_exit - Perform any architecture specific uninitialization.
451 * 451 *
452 * This function will handle the uninitalization of any architecture 452 * This function will handle the uninitialization of any architecture
453 * specific callbacks, for dynamic registration and unregistration. 453 * specific callbacks, for dynamic registration and unregistration.
454 */ 454 */
455void kgdb_arch_exit(void) 455void kgdb_arch_exit(void)
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 4c017d0d2de8..aa2b44cd8fd3 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1326,7 +1326,7 @@ invalid_device:
1326 1326
1327 1327
1328/* 1328/*
1329 * See tile_cfg_read() for relevent comments. 1329 * See tile_cfg_read() for relevant comments.
1330 * Note that "val" is the value to write, not a pointer to that value. 1330 * Note that "val" is the value to write, not a pointer to that value.
1331 */ 1331 */
1332static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset, 1332static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 049ada8d4e9c..86a9bec18dab 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -369,7 +369,7 @@ static int amd_pmu_cpu_prepare(int cpu)
369 369
370 WARN_ON_ONCE(cpuc->amd_nb); 370 WARN_ON_ONCE(cpuc->amd_nb);
371 371
372 if (boot_cpu_data.x86_max_cores < 2) 372 if (!x86_pmu.amd_nb_constraints)
373 return NOTIFY_OK; 373 return NOTIFY_OK;
374 374
375 cpuc->amd_nb = amd_alloc_nb(cpu); 375 cpuc->amd_nb = amd_alloc_nb(cpu);
@@ -388,7 +388,7 @@ static void amd_pmu_cpu_starting(int cpu)
388 388
389 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; 389 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
390 390
391 if (boot_cpu_data.x86_max_cores < 2) 391 if (!x86_pmu.amd_nb_constraints)
392 return; 392 return;
393 393
394 nb_id = amd_get_nb_id(cpu); 394 nb_id = amd_get_nb_id(cpu);
@@ -414,7 +414,7 @@ static void amd_pmu_cpu_dead(int cpu)
414{ 414{
415 struct cpu_hw_events *cpuhw; 415 struct cpu_hw_events *cpuhw;
416 416
417 if (boot_cpu_data.x86_max_cores < 2) 417 if (!x86_pmu.amd_nb_constraints)
418 return; 418 return;
419 419
420 cpuhw = &per_cpu(cpu_hw_events, cpu); 420 cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -648,6 +648,8 @@ static __initconst const struct x86_pmu amd_pmu = {
648 .cpu_prepare = amd_pmu_cpu_prepare, 648 .cpu_prepare = amd_pmu_cpu_prepare,
649 .cpu_starting = amd_pmu_cpu_starting, 649 .cpu_starting = amd_pmu_cpu_starting,
650 .cpu_dead = amd_pmu_cpu_dead, 650 .cpu_dead = amd_pmu_cpu_dead,
651
652 .amd_nb_constraints = 1,
651}; 653};
652 654
653static int __init amd_core_pmu_init(void) 655static int __init amd_core_pmu_init(void)
@@ -674,6 +676,11 @@ static int __init amd_core_pmu_init(void)
674 x86_pmu.eventsel = MSR_F15H_PERF_CTL; 676 x86_pmu.eventsel = MSR_F15H_PERF_CTL;
675 x86_pmu.perfctr = MSR_F15H_PERF_CTR; 677 x86_pmu.perfctr = MSR_F15H_PERF_CTR;
676 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; 678 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
679 /*
680 * AMD Core perfctr has separate MSRs for the NB events, see
681 * the amd/uncore.c driver.
682 */
683 x86_pmu.amd_nb_constraints = 0;
677 684
678 pr_cont("core perfctr, "); 685 pr_cont("core perfctr, ");
679 return 0; 686 return 0;
@@ -693,6 +700,14 @@ __init int amd_pmu_init(void)
693 if (ret) 700 if (ret)
694 return ret; 701 return ret;
695 702
703 if (num_possible_cpus() == 1) {
704 /*
705 * No point in allocating data structures to serialize
706 * against other CPUs, when there is only the one CPU.
707 */
708 x86_pmu.amd_nb_constraints = 0;
709 }
710
696 /* Events are common for all AMDs */ 711 /* Events are common for all AMDs */
697 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 712 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
698 sizeof(hw_cache_event_ids)); 713 sizeof(hw_cache_event_ids));
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 3ea25c3917c0..feb90f6730e8 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -28,10 +28,46 @@ static u32 ibs_caps;
28#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT) 28#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
29#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT 29#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
30 30
31
32/*
33 * IBS states:
34 *
35 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
36 * and any further add()s must fail.
37 *
38 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
39 * complicated by the fact that the IBS hardware can send late NMIs (ie. after
40 * we've cleared the EN bit).
41 *
42 * In order to consume these late NMIs we have the STOPPED state, any NMI that
43 * happens after we've cleared the EN state will clear this bit and report the
44 * NMI handled (this is fundamentally racy in the face or multiple NMI sources,
45 * someone else can consume our BIT and our NMI will go unhandled).
46 *
47 * And since we cannot set/clear this separate bit together with the EN bit,
48 * there are races; if we cleared STARTED early, an NMI could land in
49 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
50 * could happen if the period is small enough), and consume our STOPPED bit
51 * and trigger streams of unhandled NMIs.
52 *
53 * If, however, we clear STARTED late, an NMI can hit between clearing the
54 * EN bit and clearing STARTED, still see STARTED set and process the event.
55 * If this event will have the VALID bit clear, we bail properly, but this
56 * is not a given. With VALID set we can end up calling pmu::stop() again
57 * (the throttle logic) and trigger the WARNs in there.
58 *
59 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
60 * nesting, and clear STARTED late, so that we have a well defined state over
61 * the clearing of the EN bit.
62 *
63 * XXX: we could probably be using !atomic bitops for all this.
64 */
65
31enum ibs_states { 66enum ibs_states {
32 IBS_ENABLED = 0, 67 IBS_ENABLED = 0,
33 IBS_STARTED = 1, 68 IBS_STARTED = 1,
34 IBS_STOPPING = 2, 69 IBS_STOPPING = 2,
70 IBS_STOPPED = 3,
35 71
36 IBS_MAX_STATES, 72 IBS_MAX_STATES,
37}; 73};
@@ -377,11 +413,10 @@ static void perf_ibs_start(struct perf_event *event, int flags)
377 413
378 perf_ibs_set_period(perf_ibs, hwc, &period); 414 perf_ibs_set_period(perf_ibs, hwc, &period);
379 /* 415 /*
380 * Set STARTED before enabling the hardware, such that 416 * Set STARTED before enabling the hardware, such that a subsequent NMI
381 * a subsequent NMI must observe it. Then clear STOPPING 417 * must observe it.
382 * such that we don't consume NMIs by accident.
383 */ 418 */
384 set_bit(IBS_STARTED, pcpu->state); 419 set_bit(IBS_STARTED, pcpu->state);
385 clear_bit(IBS_STOPPING, pcpu->state); 420 clear_bit(IBS_STOPPING, pcpu->state);
386 perf_ibs_enable_event(perf_ibs, hwc, period >> 4); 421 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
387 422
@@ -396,6 +431,9 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
396 u64 config; 431 u64 config;
397 int stopping; 432 int stopping;
398 433
434 if (test_and_set_bit(IBS_STOPPING, pcpu->state))
435 return;
436
399 stopping = test_bit(IBS_STARTED, pcpu->state); 437 stopping = test_bit(IBS_STARTED, pcpu->state);
400 438
401 if (!stopping && (hwc->state & PERF_HES_UPTODATE)) 439 if (!stopping && (hwc->state & PERF_HES_UPTODATE))
@@ -405,12 +443,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
405 443
406 if (stopping) { 444 if (stopping) {
407 /* 445 /*
408 * Set STOPPING before disabling the hardware, such that it 446 * Set STOPPED before disabling the hardware, such that it
409 * must be visible to NMIs the moment we clear the EN bit, 447 * must be visible to NMIs the moment we clear the EN bit,
410 * at which point we can generate an !VALID sample which 448 * at which point we can generate an !VALID sample which
411 * we need to consume. 449 * we need to consume.
412 */ 450 */
413 set_bit(IBS_STOPPING, pcpu->state); 451 set_bit(IBS_STOPPED, pcpu->state);
414 perf_ibs_disable_event(perf_ibs, hwc, config); 452 perf_ibs_disable_event(perf_ibs, hwc, config);
415 /* 453 /*
416 * Clear STARTED after disabling the hardware; if it were 454 * Clear STARTED after disabling the hardware; if it were
@@ -556,7 +594,7 @@ fail:
556 * with samples that even have the valid bit cleared. 594 * with samples that even have the valid bit cleared.
557 * Mark all this NMIs as handled. 595 * Mark all this NMIs as handled.
558 */ 596 */
559 if (test_and_clear_bit(IBS_STOPPING, pcpu->state)) 597 if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
560 return 1; 598 return 1;
561 599
562 return 0; 600 return 0;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba6ef18528c9..ad4dc7ffffb5 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -608,6 +608,11 @@ struct x86_pmu {
608 atomic_t lbr_exclusive[x86_lbr_exclusive_max]; 608 atomic_t lbr_exclusive[x86_lbr_exclusive_max];
609 609
610 /* 610 /*
611 * AMD bits
612 */
613 unsigned int amd_nb_constraints : 1;
614
615 /*
611 * Extra registers for events 616 * Extra registers for events
612 */ 617 */
613 struct extra_reg *extra_regs; 618 struct extra_reg *extra_regs;
@@ -795,6 +800,9 @@ ssize_t intel_event_sysfs_show(char *page, u64 config);
795 800
796struct attribute **merge_attr(struct attribute **a, struct attribute **b); 801struct attribute **merge_attr(struct attribute **a, struct attribute **b);
797 802
803ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
804 char *page);
805
798#ifdef CONFIG_CPU_SUP_AMD 806#ifdef CONFIG_CPU_SUP_AMD
799 807
800int amd_pmu_init(void); 808int amd_pmu_init(void);
@@ -925,9 +933,6 @@ int p6_pmu_init(void);
925 933
926int knc_pmu_init(void); 934int knc_pmu_init(void);
927 935
928ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
929 char *page);
930
931static inline int is_ht_workaround_enabled(void) 936static inline int is_ht_workaround_enabled(void)
932{ 937{
933 return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); 938 return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f62a9f37f79f..b7e394485a5f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,7 +43,7 @@
43 43
44#define KVM_PIO_PAGE_OFFSET 1 44#define KVM_PIO_PAGE_OFFSET 1
45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2 45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
46#define KVM_HALT_POLL_NS_DEFAULT 500000 46#define KVM_HALT_POLL_NS_DEFAULT 400000
47 47
48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
49 49
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 2da46ac16e37..5b3c9a55f51c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -167,6 +167,14 @@
167#define MSR_PKG_C9_RESIDENCY 0x00000631 167#define MSR_PKG_C9_RESIDENCY 0x00000631
168#define MSR_PKG_C10_RESIDENCY 0x00000632 168#define MSR_PKG_C10_RESIDENCY 0x00000632
169 169
170/* Interrupt Response Limit */
171#define MSR_PKGC3_IRTL 0x0000060a
172#define MSR_PKGC6_IRTL 0x0000060b
173#define MSR_PKGC7_IRTL 0x0000060c
174#define MSR_PKGC8_IRTL 0x00000633
175#define MSR_PKGC9_IRTL 0x00000634
176#define MSR_PKGC10_IRTL 0x00000635
177
170/* Run Time Average Power Limiting (RAPL) Interface */ 178/* Run Time Average Power Limiting (RAPL) Interface */
171 179
172#define MSR_RAPL_POWER_UNIT 0x00000606 180#define MSR_RAPL_POWER_UNIT 0x00000606
@@ -190,6 +198,7 @@
190#define MSR_PP1_ENERGY_STATUS 0x00000641 198#define MSR_PP1_ENERGY_STATUS 0x00000641
191#define MSR_PP1_POLICY 0x00000642 199#define MSR_PP1_POLICY 0x00000642
192 200
201/* Config TDP MSRs */
193#define MSR_CONFIG_TDP_NOMINAL 0x00000648 202#define MSR_CONFIG_TDP_NOMINAL 0x00000648
194#define MSR_CONFIG_TDP_LEVEL_1 0x00000649 203#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
195#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A 204#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
@@ -210,13 +219,6 @@
210#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 219#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
211#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 220#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
212 221
213/* Config TDP MSRs */
214#define MSR_CONFIG_TDP_NOMINAL 0x00000648
215#define MSR_CONFIG_TDP_LEVEL1 0x00000649
216#define MSR_CONFIG_TDP_LEVEL2 0x0000064A
217#define MSR_CONFIG_TDP_CONTROL 0x0000064B
218#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
219
220/* Hardware P state interface */ 222/* Hardware P state interface */
221#define MSR_PPERF 0x0000064e 223#define MSR_PPERF 0x0000064e
222#define MSR_PERF_LIMIT_REASONS 0x0000064f 224#define MSR_PERF_LIMIT_REASONS 0x0000064f
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index bf8b35d2035a..fbc5e92e1ecc 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -47,6 +47,15 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
47 BUG(); 47 BUG();
48} 48}
49 49
50static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
51 size_t n)
52{
53 if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
54 return memcpy_mcsafe(dst, (void __force *) src, n);
55 memcpy(dst, (void __force *) src, n);
56 return 0;
57}
58
50/** 59/**
51 * arch_wmb_pmem - synchronize writes to persistent memory 60 * arch_wmb_pmem - synchronize writes to persistent memory
52 * 61 *
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 983738ac014c..9264476f3d57 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,8 +132,6 @@ struct cpuinfo_x86 {
132 u16 logical_proc_id; 132 u16 logical_proc_id;
133 /* Core id: */ 133 /* Core id: */
134 u16 cpu_core_id; 134 u16 cpu_core_id;
135 /* Compute unit id */
136 u8 compute_unit_id;
137 /* Index into per_cpu list: */ 135 /* Index into per_cpu list: */
138 u16 cpu_index; 136 u16 cpu_index;
139 u32 microcode; 137 u32 microcode;
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 20a3de5cb3b0..66b057306f40 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -155,6 +155,7 @@ static inline int wbinvd_on_all_cpus(void)
155 wbinvd(); 155 wbinvd();
156 return 0; 156 return 0;
157} 157}
158#define smp_num_siblings 1
158#endif /* CONFIG_SMP */ 159#endif /* CONFIG_SMP */
159 160
160extern unsigned disabled_cpus; 161extern unsigned disabled_cpus;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 82866697fcf1..ffae84df8a93 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -276,11 +276,9 @@ static inline bool is_ia32_task(void)
276 */ 276 */
277#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME) 277#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
278 278
279#endif /* !__ASSEMBLY__ */
280
281#ifndef __ASSEMBLY__
282extern void arch_task_cache_init(void); 279extern void arch_task_cache_init(void);
283extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 280extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
284extern void arch_release_task_struct(struct task_struct *tsk); 281extern void arch_release_task_struct(struct task_struct *tsk);
285#endif 282#endif /* !__ASSEMBLY__ */
283
286#endif /* _ASM_X86_THREAD_INFO_H */ 284#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c24b4224d439..1fde8d580a5b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
319 319
320#endif /* SMP */ 320#endif /* SMP */
321 321
322/* Not inlined due to inc_irq_stat not being defined yet */
323#define flush_tlb_local() { \
324 inc_irq_stat(irq_tlb_count); \
325 local_flush_tlb(); \
326}
327
328#ifndef CONFIG_PARAVIRT 322#ifndef CONFIG_PARAVIRT
329#define flush_tlb_others(mask, mm, start, end) \ 323#define flush_tlb_others(mask, mm, start, end) \
330 native_flush_tlb_others(mask, mm, start, end) 324 native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 29fa475ec518..a147e676fc7b 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -170,15 +170,13 @@ int amd_get_subcaches(int cpu)
170{ 170{
171 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; 171 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
172 unsigned int mask; 172 unsigned int mask;
173 int cuid;
174 173
175 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) 174 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
176 return 0; 175 return 0;
177 176
178 pci_read_config_dword(link, 0x1d4, &mask); 177 pci_read_config_dword(link, 0x1d4, &mask);
179 178
180 cuid = cpu_data(cpu).compute_unit_id; 179 return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
181 return (mask >> (4 * cuid)) & 0xf;
182} 180}
183 181
184int amd_set_subcaches(int cpu, unsigned long mask) 182int amd_set_subcaches(int cpu, unsigned long mask)
@@ -204,7 +202,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
204 pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); 202 pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
205 } 203 }
206 204
207 cuid = cpu_data(cpu).compute_unit_id; 205 cuid = cpu_data(cpu).cpu_core_id;
208 mask <<= 4 * cuid; 206 mask <<= 4 * cuid;
209 mask |= (0xf ^ (1 << cuid)) << 26; 207 mask |= (0xf ^ (1 << cuid)) << 26;
210 208
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 6e47e3a916f1..7b76eb67a9b3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -300,7 +300,6 @@ static int nearby_node(int apicid)
300#ifdef CONFIG_SMP 300#ifdef CONFIG_SMP
301static void amd_get_topology(struct cpuinfo_x86 *c) 301static void amd_get_topology(struct cpuinfo_x86 *c)
302{ 302{
303 u32 cores_per_cu = 1;
304 u8 node_id; 303 u8 node_id;
305 int cpu = smp_processor_id(); 304 int cpu = smp_processor_id();
306 305
@@ -313,8 +312,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
313 312
314 /* get compute unit information */ 313 /* get compute unit information */
315 smp_num_siblings = ((ebx >> 8) & 3) + 1; 314 smp_num_siblings = ((ebx >> 8) & 3) + 1;
316 c->compute_unit_id = ebx & 0xff; 315 c->x86_max_cores /= smp_num_siblings;
317 cores_per_cu += ((ebx >> 8) & 3); 316 c->cpu_core_id = ebx & 0xff;
318 } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { 317 } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
319 u64 value; 318 u64 value;
320 319
@@ -325,19 +324,16 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
325 324
326 /* fixup multi-node processor information */ 325 /* fixup multi-node processor information */
327 if (nodes_per_socket > 1) { 326 if (nodes_per_socket > 1) {
328 u32 cores_per_node;
329 u32 cus_per_node; 327 u32 cus_per_node;
330 328
331 set_cpu_cap(c, X86_FEATURE_AMD_DCM); 329 set_cpu_cap(c, X86_FEATURE_AMD_DCM);
332 cores_per_node = c->x86_max_cores / nodes_per_socket; 330 cus_per_node = c->x86_max_cores / nodes_per_socket;
333 cus_per_node = cores_per_node / cores_per_cu;
334 331
335 /* store NodeID, use llc_shared_map to store sibling info */ 332 /* store NodeID, use llc_shared_map to store sibling info */
336 per_cpu(cpu_llc_id, cpu) = node_id; 333 per_cpu(cpu_llc_id, cpu) = node_id;
337 334
338 /* core id has to be in the [0 .. cores_per_node - 1] range */ 335 /* core id has to be in the [0 .. cores_per_node - 1] range */
339 c->cpu_core_id %= cores_per_node; 336 c->cpu_core_id %= cus_per_node;
340 c->compute_unit_id %= cus_per_node;
341 } 337 }
342} 338}
343#endif 339#endif
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 0b445c2ff735..ac780cad3b86 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -384,6 +384,9 @@ static void intel_thermal_interrupt(void)
384{ 384{
385 __u64 msr_val; 385 __u64 msr_val;
386 386
387 if (static_cpu_has(X86_FEATURE_HWP))
388 wrmsrl_safe(MSR_HWP_STATUS, 0);
389
387 rdmsrl(MSR_IA32_THERM_STATUS, msr_val); 390 rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
388 391
389 /* Check for violation of core thermal thresholds*/ 392 /* Check for violation of core thermal thresholds*/
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c
index 31f0f335ed22..1dd8294fd730 100644
--- a/arch/x86/kernel/cpu/powerflags.c
+++ b/arch/x86/kernel/cpu/powerflags.c
@@ -18,4 +18,6 @@ const char *const x86_power_flags[32] = {
18 "", /* tsc invariant mapped to constant_tsc */ 18 "", /* tsc invariant mapped to constant_tsc */
19 "cpb", /* core performance boost */ 19 "cpb", /* core performance boost */
20 "eff_freq_ro", /* Readonly aperf/mperf */ 20 "eff_freq_ro", /* Readonly aperf/mperf */
21 "proc_feedback", /* processor feedback interface */
22 "acc_power", /* accumulated power mechanism */
21}; 23};
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2367ae07eb76..319b08a5b6ed 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -146,31 +146,6 @@ int default_check_phys_apicid_present(int phys_apicid)
146 146
147struct boot_params boot_params; 147struct boot_params boot_params;
148 148
149/*
150 * Machine setup..
151 */
152static struct resource data_resource = {
153 .name = "Kernel data",
154 .start = 0,
155 .end = 0,
156 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
157};
158
159static struct resource code_resource = {
160 .name = "Kernel code",
161 .start = 0,
162 .end = 0,
163 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
164};
165
166static struct resource bss_resource = {
167 .name = "Kernel bss",
168 .start = 0,
169 .end = 0,
170 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
171};
172
173
174#ifdef CONFIG_X86_32 149#ifdef CONFIG_X86_32
175/* cpu data as detected by the assembly code in head.S */ 150/* cpu data as detected by the assembly code in head.S */
176struct cpuinfo_x86 new_cpu_data = { 151struct cpuinfo_x86 new_cpu_data = {
@@ -949,13 +924,6 @@ void __init setup_arch(char **cmdline_p)
949 924
950 mpx_mm_init(&init_mm); 925 mpx_mm_init(&init_mm);
951 926
952 code_resource.start = __pa_symbol(_text);
953 code_resource.end = __pa_symbol(_etext)-1;
954 data_resource.start = __pa_symbol(_etext);
955 data_resource.end = __pa_symbol(_edata)-1;
956 bss_resource.start = __pa_symbol(__bss_start);
957 bss_resource.end = __pa_symbol(__bss_stop)-1;
958
959#ifdef CONFIG_CMDLINE_BOOL 927#ifdef CONFIG_CMDLINE_BOOL
960#ifdef CONFIG_CMDLINE_OVERRIDE 928#ifdef CONFIG_CMDLINE_OVERRIDE
961 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 929 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
@@ -1019,11 +987,6 @@ void __init setup_arch(char **cmdline_p)
1019 987
1020 x86_init.resources.probe_roms(); 988 x86_init.resources.probe_roms();
1021 989
1022 /* after parse_early_param, so could debug it */
1023 insert_resource(&iomem_resource, &code_resource);
1024 insert_resource(&iomem_resource, &data_resource);
1025 insert_resource(&iomem_resource, &bss_resource);
1026
1027 e820_add_kernel_range(); 990 e820_add_kernel_range();
1028 trim_bios_range(); 991 trim_bios_range();
1029#ifdef CONFIG_X86_32 992#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index b2c99f811c3f..a2065d3b3b39 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -422,7 +422,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
422 422
423 if (c->phys_proc_id == o->phys_proc_id && 423 if (c->phys_proc_id == o->phys_proc_id &&
424 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && 424 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
425 c->compute_unit_id == o->compute_unit_id) 425 c->cpu_core_id == o->cpu_core_id)
426 return topology_sane(c, o, "smt"); 426 return topology_sane(c, o, "smt");
427 427
428 } else if (c->phys_proc_id == o->phys_proc_id && 428 } else if (c->phys_proc_id == o->phys_proc_id &&
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5ff3485acb60..01bd7b7a6866 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1116 break; 1116 break;
1117 case HVCALL_POST_MESSAGE: 1117 case HVCALL_POST_MESSAGE:
1118 case HVCALL_SIGNAL_EVENT: 1118 case HVCALL_SIGNAL_EVENT:
1119 /* don't bother userspace if it has no way to handle it */
1120 if (!vcpu_to_synic(vcpu)->active) {
1121 res = HV_STATUS_INVALID_HYPERCALL_CODE;
1122 break;
1123 }
1119 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 1124 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1120 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; 1125 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1121 vcpu->run->hyperv.u.hcall.input = param; 1126 vcpu->run->hyperv.u.hcall.input = param;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 443d2a57ad3d..1a2da0e5a373 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1369 1369
1370 hrtimer_start(&apic->lapic_timer.timer, 1370 hrtimer_start(&apic->lapic_timer.timer,
1371 ktime_add_ns(now, apic->lapic_timer.period), 1371 ktime_add_ns(now, apic->lapic_timer.period),
1372 HRTIMER_MODE_ABS); 1372 HRTIMER_MODE_ABS_PINNED);
1373 1373
1374 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" 1374 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
1375 PRIx64 ", " 1375 PRIx64 ", "
@@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1402 expire = ktime_add_ns(now, ns); 1402 expire = ktime_add_ns(now, ns);
1403 expire = ktime_sub_ns(expire, lapic_timer_advance_ns); 1403 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1404 hrtimer_start(&apic->lapic_timer.timer, 1404 hrtimer_start(&apic->lapic_timer.timer,
1405 expire, HRTIMER_MODE_ABS); 1405 expire, HRTIMER_MODE_ABS_PINNED);
1406 } else 1406 } else
1407 apic_timer_expired(apic); 1407 apic_timer_expired(apic);
1408 1408
@@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
1868 apic->vcpu = vcpu; 1868 apic->vcpu = vcpu;
1869 1869
1870 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, 1870 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
1871 HRTIMER_MODE_ABS); 1871 HRTIMER_MODE_ABS_PINNED);
1872 apic->lapic_timer.timer.function = apic_timer_fn; 1872 apic->lapic_timer.timer.function = apic_timer_fn;
1873 1873
1874 /* 1874 /*
@@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2003 2003
2004 timer = &vcpu->arch.apic->lapic_timer.timer; 2004 timer = &vcpu->arch.apic->lapic_timer.timer;
2005 if (hrtimer_cancel(timer)) 2005 if (hrtimer_cancel(timer))
2006 hrtimer_start_expires(timer, HRTIMER_MODE_ABS); 2006 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
2007} 2007}
2008 2008
2009/* 2009/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d097ef1..1ff4dbb73fb7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
557 !is_writable_pte(new_spte)) 557 !is_writable_pte(new_spte))
558 ret = true; 558 ret = true;
559 559
560 if (!shadow_accessed_mask) 560 if (!shadow_accessed_mask) {
561 /*
562 * We don't set page dirty when dropping non-writable spte.
563 * So do it now if the new spte is becoming non-writable.
564 */
565 if (ret)
566 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
561 return ret; 567 return ret;
568 }
562 569
563 /* 570 /*
564 * Flush TLB when accessed/dirty bits are changed in the page tables, 571 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
605 612
606 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 613 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
607 kvm_set_pfn_accessed(pfn); 614 kvm_set_pfn_accessed(pfn);
608 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) 615 if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
616 PT_WRITABLE_MASK))
609 kvm_set_pfn_dirty(pfn); 617 kvm_set_pfn_dirty(pfn);
610 return 1; 618 return 1;
611} 619}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 742d0f7d3556..0a2c70e43bc8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
6095 } 6095 }
6096 6096
6097 /* try to inject new event if pending */ 6097 /* try to inject new event if pending */
6098 if (vcpu->arch.nmi_pending) { 6098 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
6099 if (kvm_x86_ops->nmi_allowed(vcpu)) { 6099 --vcpu->arch.nmi_pending;
6100 --vcpu->arch.nmi_pending; 6100 vcpu->arch.nmi_injected = true;
6101 vcpu->arch.nmi_injected = true; 6101 kvm_x86_ops->set_nmi(vcpu);
6102 kvm_x86_ops->set_nmi(vcpu);
6103 }
6104 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 6102 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
6105 /* 6103 /*
6106 * Because interrupts can be injected asynchronously, we are 6104 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6569 if (inject_pending_event(vcpu, req_int_win) != 0) 6567 if (inject_pending_event(vcpu, req_int_win) != 0)
6570 req_immediate_exit = true; 6568 req_immediate_exit = true;
6571 /* enable NMI/IRQ window open exits if needed */ 6569 /* enable NMI/IRQ window open exits if needed */
6572 else if (vcpu->arch.nmi_pending) 6570 else {
6573 kvm_x86_ops->enable_nmi_window(vcpu); 6571 if (vcpu->arch.nmi_pending)
6574 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) 6572 kvm_x86_ops->enable_nmi_window(vcpu);
6575 kvm_x86_ops->enable_irq_window(vcpu); 6573 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6574 kvm_x86_ops->enable_irq_window(vcpu);
6575 }
6576 6576
6577 if (kvm_lapic_enabled(vcpu)) { 6577 if (kvm_lapic_enabled(vcpu)) {
6578 update_cr8_intercept(vcpu); 6578 update_cr8_intercept(vcpu);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8f4cc3dfac32..fe9b9f776361 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -104,10 +104,8 @@ static void flush_tlb_func(void *info)
104 104
105 inc_irq_stat(irq_tlb_count); 105 inc_irq_stat(irq_tlb_count);
106 106
107 if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) 107 if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
108 return; 108 return;
109 if (!f->flush_end)
110 f->flush_end = f->flush_start + PAGE_SIZE;
111 109
112 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); 110 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
113 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { 111 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
135 unsigned long end) 133 unsigned long end)
136{ 134{
137 struct flush_tlb_info info; 135 struct flush_tlb_info info;
136
137 if (end == 0)
138 end = start + PAGE_SIZE;
138 info.flush_mm = mm; 139 info.flush_mm = mm;
139 info.flush_start = start; 140 info.flush_start = start;
140 info.flush_end = end; 141 info.flush_end = end;
141 142
142 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); 143 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
143 trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); 144 if (end == TLB_FLUSH_ALL)
145 trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
146 else
147 trace_tlb_flush(TLB_REMOTE_SEND_IPI,
148 (end - start) >> PAGE_SHIFT);
149
144 if (is_uv_system()) { 150 if (is_uv_system()) {
145 unsigned int cpu; 151 unsigned int cpu;
146 152
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index 55d38cfa46c2..9e02dcaef683 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -20,6 +20,7 @@
20#include <linux/pci.h> 20#include <linux/pci.h>
21 21
22#include <asm/mce.h> 22#include <asm/mce.h>
23#include <asm/smp.h>
23#include <asm/amd_nb.h> 24#include <asm/amd_nb.h>
24#include <asm/irq_vectors.h> 25#include <asm/irq_vectors.h>
25 26
@@ -206,7 +207,7 @@ static u32 get_nbc_for_node(int node_id)
206 struct cpuinfo_x86 *c = &boot_cpu_data; 207 struct cpuinfo_x86 *c = &boot_cpu_data;
207 u32 cores_per_node; 208 u32 cores_per_node;
208 209
209 cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket(); 210 cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
210 211
211 return cores_per_node * node_id; 212 return cores_per_node * node_id;
212} 213}
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index abf4901c917b..db52a7fafcc2 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -66,7 +66,7 @@ static u32 xen_apic_read(u32 reg)
66 66
67 ret = HYPERVISOR_platform_op(&op); 67 ret = HYPERVISOR_platform_op(&op);
68 if (ret) 68 if (ret)
69 return 0; 69 op.u.pcpu_info.apic_id = BAD_APICID;
70 70
71 return op.u.pcpu_info.apic_id << 24; 71 return op.u.pcpu_info.apic_id << 24;
72} 72}
@@ -142,6 +142,14 @@ static void xen_silent_inquire(int apicid)
142{ 142{
143} 143}
144 144
145static int xen_cpu_present_to_apicid(int cpu)
146{
147 if (cpu_present(cpu))
148 return xen_get_apic_id(xen_apic_read(APIC_ID));
149 else
150 return BAD_APICID;
151}
152
145static struct apic xen_pv_apic = { 153static struct apic xen_pv_apic = {
146 .name = "Xen PV", 154 .name = "Xen PV",
147 .probe = xen_apic_probe_pv, 155 .probe = xen_apic_probe_pv,
@@ -162,7 +170,7 @@ static struct apic xen_pv_apic = {
162 170
163 .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */ 171 .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
164 .setup_apic_routing = NULL, 172 .setup_apic_routing = NULL,
165 .cpu_present_to_apicid = default_cpu_present_to_apicid, 173 .cpu_present_to_apicid = xen_cpu_present_to_apicid,
166 .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */ 174 .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */
167 .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */ 175 .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */
168 .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */ 176 .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3c6d17fd423a..719cf291dcdf 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -545,6 +545,8 @@ static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
545 * data back is to call: 545 * data back is to call:
546 */ 546 */
547 tick_nohz_idle_enter(); 547 tick_nohz_idle_enter();
548
549 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
548} 550}
549 551
550#else /* !CONFIG_HOTPLUG_CPU */ 552#else /* !CONFIG_HOTPLUG_CPU */
diff --git a/block/bio.c b/block/bio.c
index f124a0a624fc..807d25e466ec 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1339 * release the pages we didn't map into the bio, if any 1339 * release the pages we didn't map into the bio, if any
1340 */ 1340 */
1341 while (j < page_limit) 1341 while (j < page_limit)
1342 page_cache_release(pages[j++]); 1342 put_page(pages[j++]);
1343 } 1343 }
1344 1344
1345 kfree(pages); 1345 kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1365 for (j = 0; j < nr_pages; j++) { 1365 for (j = 0; j < nr_pages; j++) {
1366 if (!pages[j]) 1366 if (!pages[j])
1367 break; 1367 break;
1368 page_cache_release(pages[j]); 1368 put_page(pages[j]);
1369 } 1369 }
1370 out: 1370 out:
1371 kfree(pages); 1371 kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
1385 if (bio_data_dir(bio) == READ) 1385 if (bio_data_dir(bio) == READ)
1386 set_page_dirty_lock(bvec->bv_page); 1386 set_page_dirty_lock(bvec->bv_page);
1387 1387
1388 page_cache_release(bvec->bv_page); 1388 put_page(bvec->bv_page);
1389 } 1389 }
1390 1390
1391 bio_put(bio); 1391 bio_put(bio);
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
1615 * the BIO and the offending pages and re-dirty the pages in process context. 1615 * the BIO and the offending pages and re-dirty the pages in process context.
1616 * 1616 *
1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1618 * here on. It will run one page_cache_release() against each page and will 1618 * here on. It will run one put_page() against each page and will run one
1619 * run one bio_put() against the BIO. 1619 * bio_put() against the BIO.
1620 */ 1620 */
1621 1621
1622static void bio_dirty_fn(struct work_struct *work); 1622static void bio_dirty_fn(struct work_struct *work);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
1658 struct page *page = bvec->bv_page; 1658 struct page *page = bvec->bv_page;
1659 1659
1660 if (PageDirty(page) || PageCompound(page)) { 1660 if (PageDirty(page) || PageCompound(page)) {
1661 page_cache_release(page); 1661 put_page(page);
1662 bvec->bv_page = NULL; 1662 bvec->bv_page = NULL;
1663 } else { 1663 } else {
1664 nr_clean_pages++; 1664 nr_clean_pages++;
diff --git a/block/blk-core.c b/block/blk-core.c
index 827f8badd143..b60537b2c35b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
706 goto fail_id; 706 goto fail_id;
707 707
708 q->backing_dev_info.ra_pages = 708 q->backing_dev_info.ra_pages =
709 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 709 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
710 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; 710 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
711 q->backing_dev_info.name = "block"; 711 q->backing_dev_info.name = "block";
712 q->node = node_id; 712 q->node = node_id;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666aafd1..331e4eee0dda 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
239 struct queue_limits *limits = &q->limits; 239 struct queue_limits *limits = &q->limits;
240 unsigned int max_sectors; 240 unsigned int max_sectors;
241 241
242 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { 242 if ((max_hw_sectors << 9) < PAGE_SIZE) {
243 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 243 max_hw_sectors = 1 << (PAGE_SHIFT - 9);
244 printk(KERN_INFO "%s: set to minimum %d\n", 244 printk(KERN_INFO "%s: set to minimum %d\n",
245 __func__, max_hw_sectors); 245 __func__, max_hw_sectors);
246 } 246 }
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
329 **/ 329 **/
330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) 330void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
331{ 331{
332 if (max_size < PAGE_CACHE_SIZE) { 332 if (max_size < PAGE_SIZE) {
333 max_size = PAGE_CACHE_SIZE; 333 max_size = PAGE_SIZE;
334 printk(KERN_INFO "%s: set to minimum %d\n", 334 printk(KERN_INFO "%s: set to minimum %d\n",
335 __func__, max_size); 335 __func__, max_size);
336 } 336 }
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
760 **/ 760 **/
761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) 761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
762{ 762{
763 if (mask < PAGE_CACHE_SIZE - 1) { 763 if (mask < PAGE_SIZE - 1) {
764 mask = PAGE_CACHE_SIZE - 1; 764 mask = PAGE_SIZE - 1;
765 printk(KERN_INFO "%s: set to minimum %lx\n", 765 printk(KERN_INFO "%s: set to minimum %lx\n",
766 __func__, mask); 766 __func__, mask);
767 } 767 }
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index dd93763057ce..995b58d46ed1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
76static ssize_t queue_ra_show(struct request_queue *q, char *page) 76static ssize_t queue_ra_show(struct request_queue *q, char *page)
77{ 77{
78 unsigned long ra_kb = q->backing_dev_info.ra_pages << 78 unsigned long ra_kb = q->backing_dev_info.ra_pages <<
79 (PAGE_CACHE_SHIFT - 10); 79 (PAGE_SHIFT - 10);
80 80
81 return queue_var_show(ra_kb, (page)); 81 return queue_var_show(ra_kb, (page));
82} 82}
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
90 if (ret < 0) 90 if (ret < 0)
91 return ret; 91 return ret;
92 92
93 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); 93 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
94 94
95 return ret; 95 return ret;
96} 96}
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
117 if (blk_queue_cluster(q)) 117 if (blk_queue_cluster(q))
118 return queue_var_show(queue_max_segment_size(q), (page)); 118 return queue_var_show(queue_max_segment_size(q), (page));
119 119
120 return queue_var_show(PAGE_CACHE_SIZE, (page)); 120 return queue_var_show(PAGE_SIZE, (page));
121} 121}
122 122
123static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 123static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
198{ 198{
199 unsigned long max_sectors_kb, 199 unsigned long max_sectors_kb,
200 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, 200 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
201 page_kb = 1 << (PAGE_CACHE_SHIFT - 10); 201 page_kb = 1 << (PAGE_SHIFT - 10);
202 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); 202 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
203 203
204 if (ret < 0) 204 if (ret < 0)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e3c591dd8f19..4a349787bc62 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4075 * idle timer unplug to continue working. 4075 * idle timer unplug to continue working.
4076 */ 4076 */
4077 if (cfq_cfqq_wait_request(cfqq)) { 4077 if (cfq_cfqq_wait_request(cfqq)) {
4078 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 4078 if (blk_rq_bytes(rq) > PAGE_SIZE ||
4079 cfqd->busy_queues > 1) { 4079 cfqd->busy_queues > 1) {
4080 cfq_del_timer(cfqd, cfqq); 4080 cfq_del_timer(cfqd, cfqq);
4081 cfq_clear_cfqq_wait_request(cfqq); 4081 cfq_clear_cfqq_wait_request(cfqq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f678c733df40..556826ac7cb4 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
710 return -EINVAL; 710 return -EINVAL;
711 bdi = blk_get_backing_dev_info(bdev); 711 bdi = blk_get_backing_dev_info(bdev);
712 return compat_put_long(arg, 712 return compat_put_long(arg,
713 (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 713 (bdi->ra_pages * PAGE_SIZE) / 512);
714 case BLKROGET: /* compatible */ 714 case BLKROGET: /* compatible */
715 return compat_put_int(arg, bdev_read_only(bdev) != 0); 715 return compat_put_int(arg, bdev_read_only(bdev) != 0);
716 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ 716 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
729 if (!capable(CAP_SYS_ADMIN)) 729 if (!capable(CAP_SYS_ADMIN))
730 return -EACCES; 730 return -EACCES;
731 bdi = blk_get_backing_dev_info(bdev); 731 bdi = blk_get_backing_dev_info(bdev);
732 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 732 bdi->ra_pages = (arg * 512) / PAGE_SIZE;
733 return 0; 733 return 0;
734 case BLKGETSIZE: 734 case BLKGETSIZE:
735 size = i_size_read(bdev->bd_inode); 735 size = i_size_read(bdev->bd_inode);
diff --git a/block/ioctl.c b/block/ioctl.c
index d8996bbd7f12..4ff1f92f89ca 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
550 if (!arg) 550 if (!arg)
551 return -EINVAL; 551 return -EINVAL;
552 bdi = blk_get_backing_dev_info(bdev); 552 bdi = blk_get_backing_dev_info(bdev);
553 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 553 return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
554 case BLKROGET: 554 case BLKROGET:
555 return put_int(arg, bdev_read_only(bdev) != 0); 555 return put_int(arg, bdev_read_only(bdev) != 0);
556 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ 556 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
578 if(!capable(CAP_SYS_ADMIN)) 578 if(!capable(CAP_SYS_ADMIN))
579 return -EACCES; 579 return -EACCES;
580 bdi = blk_get_backing_dev_info(bdev); 580 bdi = blk_get_backing_dev_info(bdev);
581 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 581 bdi->ra_pages = (arg * 512) / PAGE_SIZE;
582 return 0; 582 return 0;
583 case BLKBSZSET: 583 case BLKBSZSET:
584 return blkdev_bszset(bdev, mode, argp); 584 return blkdev_bszset(bdev, mode, argp);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5d8701941054..2c6ae2aed2c4 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
566{ 566{
567 struct address_space *mapping = bdev->bd_inode->i_mapping; 567 struct address_space *mapping = bdev->bd_inode->i_mapping;
568 568
569 return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), 569 return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
570 NULL); 570 NULL);
571} 571}
572 572
573unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) 573unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
584 if (PageError(page)) 584 if (PageError(page))
585 goto fail; 585 goto fail;
586 p->v = page; 586 p->v = page;
587 return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9); 587 return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
588fail: 588fail:
589 page_cache_release(page); 589 put_page(page);
590 } 590 }
591 p->v = NULL; 591 p->v = NULL;
592 return NULL; 592 return NULL;
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 3bbdcc79a3d3..7d7a39b47c62 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
178 int cached_ret = -ENOKEY; 178 int cached_ret = -ENOKEY;
179 int ret; 179 int ret;
180 180
181 *_trusted = false;
182
181 for (p = pkcs7->certs; p; p = p->next) 183 for (p = pkcs7->certs; p; p = p->next)
182 p->seen = false; 184 p->seen = false;
183 185
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b5e54f2da53d..0d92d0f915e9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
491} 491}
492#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 492#endif /* CONFIG_ACPI_HOTPLUG_CPU */
493 493
494#ifdef CONFIG_X86
495static bool acpi_hwp_native_thermal_lvt_set;
496static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
497 u32 lvl,
498 void *context,
499 void **rv)
500{
501 u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
502 u32 capbuf[2];
503 struct acpi_osc_context osc_context = {
504 .uuid_str = sb_uuid_str,
505 .rev = 1,
506 .cap.length = 8,
507 .cap.pointer = capbuf,
508 };
509
510 if (acpi_hwp_native_thermal_lvt_set)
511 return AE_CTRL_TERMINATE;
512
513 capbuf[0] = 0x0000;
514 capbuf[1] = 0x1000; /* set bit 12 */
515
516 if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
517 if (osc_context.ret.pointer && osc_context.ret.length > 1) {
518 u32 *capbuf_ret = osc_context.ret.pointer;
519
520 if (capbuf_ret[1] & 0x1000) {
521 acpi_handle_info(handle,
522 "_OSC native thermal LVT Acked\n");
523 acpi_hwp_native_thermal_lvt_set = true;
524 }
525 }
526 kfree(osc_context.ret.pointer);
527 }
528
529 return AE_OK;
530}
531
532void __init acpi_early_processor_osc(void)
533{
534 if (boot_cpu_has(X86_FEATURE_HWP)) {
535 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
536 ACPI_UINT32_MAX,
537 acpi_hwp_native_thermal_lvt_osc,
538 NULL, NULL, NULL);
539 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
540 acpi_hwp_native_thermal_lvt_osc,
541 NULL, NULL);
542 }
543}
544#endif
545
494/* 546/*
495 * The following ACPI IDs are known to be suitable for representing as 547 * The following ACPI IDs are known to be suitable for representing as
496 * processor devices. 548 * processor devices.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0e8567846f1a..c068c829b453 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1019,6 +1019,9 @@ static int __init acpi_bus_init(void)
1019 goto error1; 1019 goto error1;
1020 } 1020 }
1021 1021
1022 /* Set capability bits for _OSC under processor scope */
1023 acpi_early_processor_osc();
1024
1022 /* 1025 /*
1023 * _OSC method may exist in module level code, 1026 * _OSC method may exist in module level code,
1024 * so it must be run after ACPI_FULL_INITIALIZATION 1027 * so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a37508ef66c1..7c188472d9c2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -145,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
145static inline void acpi_early_processor_set_pdc(void) {} 145static inline void acpi_early_processor_set_pdc(void) {}
146#endif 146#endif
147 147
148#ifdef CONFIG_X86
149void acpi_early_processor_osc(void);
150#else
151static inline void acpi_early_processor_osc(void) {}
152#endif
153
148/* -------------------------------------------------------------------------- 154/* --------------------------------------------------------------------------
149 Embedded Controller 155 Embedded Controller
150 -------------------------------------------------------------------------- */ 156 -------------------------------------------------------------------------- */
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9ab847a..5fb7718f256c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
246 return -EEXIST; 246 return -EEXIST;
247 } 247 }
248 dev->power.wakeup = ws; 248 dev->power.wakeup = ws;
249 if (dev->power.wakeirq)
250 device_wakeup_attach_irq(dev, dev->power.wakeirq);
249 spin_unlock_irq(&dev->power.lock); 251 spin_unlock_irq(&dev->power.lock);
250 return 0; 252 return 0;
251} 253}
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
397 WARN_ON(d->flags & DEVFL_UP); 397 WARN_ON(d->flags & DEVFL_UP);
398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); 398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
399 q->backing_dev_info.name = "aoe"; 399 q->backing_dev_info.name = "aoe";
400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; 400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
401 d->bufpool = mp; 401 d->bufpool = mp;
402 d->blkq = gd->queue = q; 402 d->blkq = gd->queue = q;
403 q->queuedata = d; 403 q->queuedata = d;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f7ecc287d733..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
374 struct page *page, int rw) 374 struct page *page, int rw)
375{ 375{
376 struct brd_device *brd = bdev->bd_disk->private_data; 376 struct brd_device *brd = bdev->bd_disk->private_data;
377 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); 377 int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
378 page_endio(page, rw & WRITE, err); 378 page_endio(page, rw & WRITE, err);
379 return err; 379 return err;
380} 380}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4cad75..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@ struct bm_extent {
1327#endif 1327#endif
1328#endif 1328#endif
1329 1329
1330/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 1330/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
1331 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 1331 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
1332 * Since we may live in a mixed-platform cluster, 1332 * Since we may live in a mixed-platform cluster,
1333 * we limit us to a platform agnostic constant here for now. 1333 * we limit us to a platform agnostic constant here for now.
1334 * A followup commit may allow even bigger BIO sizes, 1334 * A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 226eb0c9f0fb..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
1178 blk_queue_max_hw_sectors(q, max_hw_sectors); 1178 blk_queue_max_hw_sectors(q, max_hw_sectors);
1179 /* This is the workaround for "bio would need to, but cannot, be split" */ 1179 /* This is the workaround for "bio would need to, but cannot, be split" */
1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1181 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 1181 blk_queue_segment_boundary(q, PAGE_SIZE-1);
1182 1182
1183 if (b) { 1183 if (b) {
1184 struct drbd_connection *connection = first_peer_device(device)->connection; 1184 struct drbd_connection *connection = first_peer_device(device)->connection;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c6234428607..94a1843b0426 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1953,7 +1953,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
1953 1953
1954 osdc = &rbd_dev->rbd_client->client->osdc; 1954 osdc = &rbd_dev->rbd_client->client->osdc;
1955 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, 1955 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1956 GFP_ATOMIC); 1956 GFP_NOIO);
1957 if (!osd_req) 1957 if (!osd_req)
1958 return NULL; /* ENOMEM */ 1958 return NULL; /* ENOMEM */
1959 1959
@@ -2002,7 +2002,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
2002 rbd_dev = img_request->rbd_dev; 2002 rbd_dev = img_request->rbd_dev;
2003 osdc = &rbd_dev->rbd_client->client->osdc; 2003 osdc = &rbd_dev->rbd_client->client->osdc;
2004 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops, 2004 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2005 false, GFP_ATOMIC); 2005 false, GFP_NOIO);
2006 if (!osd_req) 2006 if (!osd_req)
2007 return NULL; /* ENOMEM */ 2007 return NULL; /* ENOMEM */
2008 2008
@@ -2504,7 +2504,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2504 bio_chain_clone_range(&bio_list, 2504 bio_chain_clone_range(&bio_list,
2505 &bio_offset, 2505 &bio_offset,
2506 clone_size, 2506 clone_size,
2507 GFP_ATOMIC); 2507 GFP_NOIO);
2508 if (!obj_request->bio_list) 2508 if (!obj_request->bio_list)
2509 goto out_unwind; 2509 goto out_unwind;
2510 } else if (type == OBJ_REQUEST_PAGES) { 2510 } else if (type == OBJ_REQUEST_PAGES) {
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b19ac4..309049d41f1b 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
57 return mtk_reset_deassert(rcdev, id); 57 return mtk_reset_deassert(rcdev, id);
58} 58}
59 59
60static struct reset_control_ops mtk_reset_ops = { 60static const struct reset_control_ops mtk_reset_ops = {
61 .assert = mtk_reset_assert, 61 .assert = mtk_reset_assert,
62 .deassert = mtk_reset_deassert, 62 .deassert = mtk_reset_deassert,
63 .reset = mtk_reset, 63 .reset = mtk_reset,
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1fe73f0..b4e4d6aa2631 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
74 return 0; 74 return 0;
75} 75}
76 76
77static struct reset_control_ops mmp_clk_reset_ops = { 77static const struct reset_control_ops mmp_clk_reset_ops = {
78 .assert = mmp_clk_reset_assert, 78 .assert = mmp_clk_reset_assert,
79 .deassert = mmp_clk_reset_deassert, 79 .deassert = mmp_clk_reset_deassert,
80}; 80};
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 5428efb9fbf5..3cd1af0af0d9 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -129,20 +129,10 @@ static const char * const gcc_xo_ddr_500_200[] = {
129}; 129};
130 130
131#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } 131#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
132#define P_XO 0
133#define FE_PLL_200 1
134#define FE_PLL_500 2
135#define DDRC_PLL_666 3
136
137#define DDRC_PLL_666_SDCC 1
138#define FE_PLL_125_DLY 1
139
140#define FE_PLL_WCSS2G 1
141#define FE_PLL_WCSS5G 1
142 132
143static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = { 133static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
144 F(48000000, P_XO, 1, 0, 0), 134 F(48000000, P_XO, 1, 0, 0),
145 F(200000000, FE_PLL_200, 1, 0, 0), 135 F(200000000, P_FEPLL200, 1, 0, 0),
146 { } 136 { }
147}; 137};
148 138
@@ -334,15 +324,15 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
334}; 324};
335 325
336static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = { 326static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
337 F(1843200, FE_PLL_200, 1, 144, 15625), 327 F(1843200, P_FEPLL200, 1, 144, 15625),
338 F(3686400, FE_PLL_200, 1, 288, 15625), 328 F(3686400, P_FEPLL200, 1, 288, 15625),
339 F(7372800, FE_PLL_200, 1, 576, 15625), 329 F(7372800, P_FEPLL200, 1, 576, 15625),
340 F(14745600, FE_PLL_200, 1, 1152, 15625), 330 F(14745600, P_FEPLL200, 1, 1152, 15625),
341 F(16000000, FE_PLL_200, 1, 2, 25), 331 F(16000000, P_FEPLL200, 1, 2, 25),
342 F(24000000, P_XO, 1, 1, 2), 332 F(24000000, P_XO, 1, 1, 2),
343 F(32000000, FE_PLL_200, 1, 4, 25), 333 F(32000000, P_FEPLL200, 1, 4, 25),
344 F(40000000, FE_PLL_200, 1, 1, 5), 334 F(40000000, P_FEPLL200, 1, 1, 5),
345 F(46400000, FE_PLL_200, 1, 29, 125), 335 F(46400000, P_FEPLL200, 1, 29, 125),
346 F(48000000, P_XO, 1, 0, 0), 336 F(48000000, P_XO, 1, 0, 0),
347 { } 337 { }
348}; 338};
@@ -410,9 +400,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
410}; 400};
411 401
412static const struct freq_tbl ftbl_gcc_gp_clk[] = { 402static const struct freq_tbl ftbl_gcc_gp_clk[] = {
413 F(1250000, FE_PLL_200, 1, 16, 0), 403 F(1250000, P_FEPLL200, 1, 16, 0),
414 F(2500000, FE_PLL_200, 1, 8, 0), 404 F(2500000, P_FEPLL200, 1, 8, 0),
415 F(5000000, FE_PLL_200, 1, 4, 0), 405 F(5000000, P_FEPLL200, 1, 4, 0),
416 { } 406 { }
417}; 407};
418 408
@@ -512,11 +502,11 @@ static struct clk_branch gcc_gp3_clk = {
512static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { 502static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
513 F(144000, P_XO, 1, 3, 240), 503 F(144000, P_XO, 1, 3, 240),
514 F(400000, P_XO, 1, 1, 0), 504 F(400000, P_XO, 1, 1, 0),
515 F(20000000, FE_PLL_500, 1, 1, 25), 505 F(20000000, P_FEPLL500, 1, 1, 25),
516 F(25000000, FE_PLL_500, 1, 1, 20), 506 F(25000000, P_FEPLL500, 1, 1, 20),
517 F(50000000, FE_PLL_500, 1, 1, 10), 507 F(50000000, P_FEPLL500, 1, 1, 10),
518 F(100000000, FE_PLL_500, 1, 1, 5), 508 F(100000000, P_FEPLL500, 1, 1, 5),
519 F(193000000, DDRC_PLL_666_SDCC, 1, 0, 0), 509 F(193000000, P_DDRPLL, 1, 0, 0),
520 { } 510 { }
521}; 511};
522 512
@@ -536,9 +526,9 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
536 526
537static const struct freq_tbl ftbl_gcc_apps_clk[] = { 527static const struct freq_tbl ftbl_gcc_apps_clk[] = {
538 F(48000000, P_XO, 1, 0, 0), 528 F(48000000, P_XO, 1, 0, 0),
539 F(200000000, FE_PLL_200, 1, 0, 0), 529 F(200000000, P_FEPLL200, 1, 0, 0),
540 F(500000000, FE_PLL_500, 1, 0, 0), 530 F(500000000, P_FEPLL500, 1, 0, 0),
541 F(626000000, DDRC_PLL_666, 1, 0, 0), 531 F(626000000, P_DDRPLLAPSS, 1, 0, 0),
542 { } 532 { }
543}; 533};
544 534
@@ -557,7 +547,7 @@ static struct clk_rcg2 apps_clk_src = {
557 547
558static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = { 548static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
559 F(48000000, P_XO, 1, 0, 0), 549 F(48000000, P_XO, 1, 0, 0),
560 F(100000000, FE_PLL_200, 2, 0, 0), 550 F(100000000, P_FEPLL200, 2, 0, 0),
561 { } 551 { }
562}; 552};
563 553
@@ -940,7 +930,7 @@ static struct clk_branch gcc_usb2_mock_utmi_clk = {
940}; 930};
941 931
942static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = { 932static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
943 F(2000000, FE_PLL_200, 10, 0, 0), 933 F(2000000, P_FEPLL200, 10, 0, 0),
944 { } 934 { }
945}; 935};
946 936
@@ -1007,7 +997,7 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
1007}; 997};
1008 998
1009static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = { 999static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
1010 F(125000000, FE_PLL_125_DLY, 1, 0, 0), 1000 F(125000000, P_FEPLL125DLY, 1, 0, 0),
1011 { } 1001 { }
1012}; 1002};
1013 1003
@@ -1027,7 +1017,7 @@ static struct clk_rcg2 fephy_125m_dly_clk_src = {
1027 1017
1028static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = { 1018static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
1029 F(48000000, P_XO, 1, 0, 0), 1019 F(48000000, P_XO, 1, 0, 0),
1030 F(250000000, FE_PLL_WCSS2G, 1, 0, 0), 1020 F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
1031 { } 1021 { }
1032}; 1022};
1033 1023
@@ -1097,7 +1087,7 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
1097 1087
1098static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = { 1088static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
1099 F(48000000, P_XO, 1, 0, 0), 1089 F(48000000, P_XO, 1, 0, 0),
1100 F(250000000, FE_PLL_WCSS5G, 1, 0, 0), 1090 F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
1101 { } 1091 { }
1102}; 1092};
1103 1093
@@ -1325,6 +1315,16 @@ MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
1325 1315
1326static int gcc_ipq4019_probe(struct platform_device *pdev) 1316static int gcc_ipq4019_probe(struct platform_device *pdev)
1327{ 1317{
1318 struct device *dev = &pdev->dev;
1319
1320 clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
1321 clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
1322 clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
1323 clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
1324 clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
1325 clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
1326 clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
1327
1328 return qcom_cc_probe(pdev, &gcc_ipq4019_desc); 1328 return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
1329} 1329}
1330 1330
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3a8590..0324d8daab9b 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
55 return regmap_update_bits(rst->regmap, map->reg, mask, 0); 55 return regmap_update_bits(rst->regmap, map->reg, mask, 0);
56} 56}
57 57
58struct reset_control_ops qcom_reset_ops = { 58const struct reset_control_ops qcom_reset_ops = {
59 .reset = qcom_reset, 59 .reset = qcom_reset,
60 .assert = qcom_reset_assert, 60 .assert = qcom_reset_assert,
61 .deassert = qcom_reset_deassert, 61 .deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e2130f97..cda877927d43 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
32#define to_qcom_reset_controller(r) \ 32#define to_qcom_reset_controller(r) \
33 container_of(r, struct qcom_reset_controller, rcdev); 33 container_of(r, struct qcom_reset_controller, rcdev);
34 34
35extern struct reset_control_ops qcom_reset_ops; 35extern const struct reset_control_ops qcom_reset_ops;
36 36
37#endif 37#endif
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb15bc5..21218987bbc3 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
81 return 0; 81 return 0;
82} 82}
83 83
84static struct reset_control_ops rockchip_softrst_ops = { 84static const struct reset_control_ops rockchip_softrst_ops = {
85 .assert = rockchip_softrst_assert, 85 .assert = rockchip_softrst_assert,
86 .deassert = rockchip_softrst_deassert, 86 .deassert = rockchip_softrst_deassert,
87}; 87};
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae63e7cc..d0c6c9a2d06a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
1423 return 0; 1423 return 0;
1424} 1424}
1425 1425
1426static struct reset_control_ops atlas7_rst_ops = { 1426static const struct reset_control_ops atlas7_rst_ops = {
1427 .reset = atlas7_reset_module, 1427 .reset = atlas7_reset_module,
1428}; 1428};
1429 1429
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c1717b762..d9ea22ec4e25 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
85 return 0; 85 return 0;
86} 86}
87 87
88static struct reset_control_ops sunxi_ve_reset_ops = { 88static const struct reset_control_ops sunxi_ve_reset_ops = {
89 .assert = sunxi_ve_reset_assert, 89 .assert = sunxi_ve_reset_assert,
90 .deassert = sunxi_ve_reset_deassert, 90 .deassert = sunxi_ve_reset_deassert,
91}; 91};
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b176139aca..028dd832a39f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
83 return 0; 83 return 0;
84} 84}
85 85
86static struct reset_control_ops sun9i_mmc_reset_ops = { 86static const struct reset_control_ops sun9i_mmc_reset_ops = {
87 .assert = sun9i_mmc_reset_assert, 87 .assert = sun9i_mmc_reset_assert,
88 .deassert = sun9i_mmc_reset_deassert, 88 .deassert = sun9i_mmc_reset_deassert,
89}; 89};
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 5432b1c198a4..fe0c3d169377 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
76 return 0; 76 return 0;
77} 77}
78 78
79static struct reset_control_ops sunxi_usb_reset_ops = { 79static const struct reset_control_ops sunxi_usb_reset_ops = {
80 .assert = sunxi_usb_reset_assert, 80 .assert = sunxi_usb_reset_assert,
81 .deassert = sunxi_usb_reset_deassert, 81 .deassert = sunxi_usb_reset_deassert,
82}; 82};
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe803d6..f60fe2e344ca 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
271 } 271 }
272} 272}
273 273
274static struct reset_control_ops rst_ops = { 274static const struct reset_control_ops rst_ops = {
275 .assert = tegra_clk_rst_assert, 275 .assert = tegra_clk_rst_assert,
276 .deassert = tegra_clk_rst_deassert, 276 .deassert = tegra_clk_rst_deassert,
277}; 277};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..5f8dbe640a20 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
4 * Copyright (C) 2014 Linaro. 4 * Copyright (C) 2014 Linaro.
5 * Viresh Kumar <viresh.kumar@linaro.org> 5 * Viresh Kumar <viresh.kumar@linaro.org>
6 * 6 *
7 * The OPP code in function set_target() is reused from
8 * drivers/cpufreq/omap-cpufreq.c
9 *
10 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b644526fd59..8b5a415ee14a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
64 return ret; 64 return ret;
65} 65}
66 66
67/**
68 * struct sample - Store performance sample
69 * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual
70 * performance during last sample period
71 * @busy_scaled: Scaled busy value which is used to calculate next
72 * P state. This can be different than core_pct_busy
73 * to account for cpu idle period
74 * @aperf: Difference of actual performance frequency clock count
75 * read from APERF MSR between last and current sample
76 * @mperf: Difference of maximum performance frequency clock count
77 * read from MPERF MSR between last and current sample
78 * @tsc: Difference of time stamp counter between last and
79 * current sample
80 * @freq: Effective frequency calculated from APERF/MPERF
81 * @time: Current time from scheduler
82 *
83 * This structure is used in the cpudata structure to store performance sample
84 * data for choosing next P State.
85 */
67struct sample { 86struct sample {
68 int32_t core_pct_busy; 87 int32_t core_pct_busy;
69 int32_t busy_scaled; 88 int32_t busy_scaled;
@@ -74,6 +93,20 @@ struct sample {
74 u64 time; 93 u64 time;
75}; 94};
76 95
96/**
97 * struct pstate_data - Store P state data
98 * @current_pstate: Current requested P state
99 * @min_pstate: Min P state possible for this platform
100 * @max_pstate: Max P state possible for this platform
101 * @max_pstate_physical:This is physical Max P state for a processor
102 * This can be higher than the max_pstate which can
103 * be limited by platform thermal design power limits
104 * @scaling: Scaling factor to convert frequency to cpufreq
105 * frequency units
106 * @turbo_pstate: Max Turbo P state possible for this platform
107 *
108 * Stores the per cpu model P state limits and current P state.
109 */
77struct pstate_data { 110struct pstate_data {
78 int current_pstate; 111 int current_pstate;
79 int min_pstate; 112 int min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
83 int turbo_pstate; 116 int turbo_pstate;
84}; 117};
85 118
119/**
120 * struct vid_data - Stores voltage information data
121 * @min: VID data for this platform corresponding to
122 * the lowest P state
123 * @max: VID data corresponding to the highest P State.
124 * @turbo: VID data for turbo P state
125 * @ratio: Ratio of (vid max - vid min) /
126 * (max P state - Min P State)
127 *
128 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
129 * This data is used in Atom platforms, where in addition to target P state,
130 * the voltage data needs to be specified to select next P State.
131 */
86struct vid_data { 132struct vid_data {
87 int min; 133 int min;
88 int max; 134 int max;
@@ -90,6 +136,18 @@ struct vid_data {
90 int32_t ratio; 136 int32_t ratio;
91}; 137};
92 138
139/**
140 * struct _pid - Stores PID data
141 * @setpoint: Target set point for busyness or performance
142 * @integral: Storage for accumulated error values
143 * @p_gain: PID proportional gain
144 * @i_gain: PID integral gain
145 * @d_gain: PID derivative gain
146 * @deadband: PID deadband
147 * @last_err: Last error storage for integral part of PID calculation
148 *
149 * Stores PID coefficients and last error for PID controller.
150 */
93struct _pid { 151struct _pid {
94 int setpoint; 152 int setpoint;
95 int32_t integral; 153 int32_t integral;
@@ -100,6 +158,23 @@ struct _pid {
100 int32_t last_err; 158 int32_t last_err;
101}; 159};
102 160
161/**
162 * struct cpudata - Per CPU instance data storage
163 * @cpu: CPU number for this instance data
164 * @update_util: CPUFreq utility callback information
165 * @pstate: Stores P state limits for this CPU
166 * @vid: Stores VID limits for this CPU
167 * @pid: Stores PID parameters for this CPU
168 * @last_sample_time: Last Sample time
169 * @prev_aperf: Last APERF value read from APERF MSR
170 * @prev_mperf: Last MPERF value read from MPERF MSR
171 * @prev_tsc: Last timestamp counter (TSC) value
172 * @prev_cummulative_iowait: IO Wait time difference from last and
173 * current sample
174 * @sample: Storage for storing last Sample data
175 *
176 * This structure stores per CPU instance data for all CPUs.
177 */
103struct cpudata { 178struct cpudata {
104 int cpu; 179 int cpu;
105 180
@@ -118,6 +193,19 @@ struct cpudata {
118}; 193};
119 194
120static struct cpudata **all_cpu_data; 195static struct cpudata **all_cpu_data;
196
197/**
198 * struct pid_adjust_policy - Stores static PID configuration data
199 * @sample_rate_ms: PID calculation sample rate in ms
200 * @sample_rate_ns: Sample rate calculation in ns
201 * @deadband: PID deadband
202 * @setpoint: PID Setpoint
203 * @p_gain_pct: PID proportional gain
204 * @i_gain_pct: PID integral gain
205 * @d_gain_pct: PID derivative gain
206 *
207 * Stores per CPU model static PID configuration data.
208 */
121struct pstate_adjust_policy { 209struct pstate_adjust_policy {
122 int sample_rate_ms; 210 int sample_rate_ms;
123 s64 sample_rate_ns; 211 s64 sample_rate_ns;
@@ -128,6 +216,20 @@ struct pstate_adjust_policy {
128 int i_gain_pct; 216 int i_gain_pct;
129}; 217};
130 218
219/**
220 * struct pstate_funcs - Per CPU model specific callbacks
221 * @get_max: Callback to get maximum non turbo effective P state
222 * @get_max_physical: Callback to get maximum non turbo physical P state
223 * @get_min: Callback to get minimum P state
224 * @get_turbo: Callback to get turbo P state
225 * @get_scaling: Callback to get frequency scaling factor
226 * @get_val: Callback to convert P state to actual MSR write value
227 * @get_vid: Callback to get VID data for Atom platforms
228 * @get_target_pstate: Callback to a function to calculate next P state to use
229 *
230 * Core and Atom CPU models have different way to get P State limits. This
231 * structure is used to store those callbacks.
232 */
131struct pstate_funcs { 233struct pstate_funcs {
132 int (*get_max)(void); 234 int (*get_max)(void);
133 int (*get_max_physical)(void); 235 int (*get_max_physical)(void);
@@ -139,6 +241,11 @@ struct pstate_funcs {
139 int32_t (*get_target_pstate)(struct cpudata *); 241 int32_t (*get_target_pstate)(struct cpudata *);
140}; 242};
141 243
244/**
245 * struct cpu_defaults- Per CPU model default config data
246 * @pid_policy: PID config data
247 * @funcs: Callback function data
248 */
142struct cpu_defaults { 249struct cpu_defaults {
143 struct pstate_adjust_policy pid_policy; 250 struct pstate_adjust_policy pid_policy;
144 struct pstate_funcs funcs; 251 struct pstate_funcs funcs;
@@ -151,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
151static struct pstate_funcs pstate_funcs; 258static struct pstate_funcs pstate_funcs;
152static int hwp_active; 259static int hwp_active;
153 260
261
262/**
263 * struct perf_limits - Store user and policy limits
264 * @no_turbo: User requested turbo state from intel_pstate sysfs
265 * @turbo_disabled: Platform turbo status either from msr
266 * MSR_IA32_MISC_ENABLE or when maximum available pstate
267 * matches the maximum turbo pstate
268 * @max_perf_pct: Effective maximum performance limit in percentage, this
269 * is minimum of either limits enforced by cpufreq policy
270 * or limits from user set limits via intel_pstate sysfs
271 * @min_perf_pct: Effective minimum performance limit in percentage, this
272 * is maximum of either limits enforced by cpufreq policy
273 * or limits from user set limits via intel_pstate sysfs
274 * @max_perf: This is a scaled value between 0 to 255 for max_perf_pct
275 * This value is used to limit max pstate
276 * @min_perf: This is a scaled value between 0 to 255 for min_perf_pct
277 * This value is used to limit min pstate
278 * @max_policy_pct: The maximum performance in percentage enforced by
279 * cpufreq setpolicy interface
280 * @max_sysfs_pct: The maximum performance in percentage enforced by
281 * intel pstate sysfs interface
282 * @min_policy_pct: The minimum performance in percentage enforced by
283 * cpufreq setpolicy interface
284 * @min_sysfs_pct: The minimum performance in percentage enforced by
285 * intel pstate sysfs interface
286 *
287 * Storage for user and policy defined limits.
288 */
154struct perf_limits { 289struct perf_limits {
155 int no_turbo; 290 int no_turbo;
156 int turbo_disabled; 291 int turbo_disabled;
@@ -910,7 +1045,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
910 cpu->prev_aperf = aperf; 1045 cpu->prev_aperf = aperf;
911 cpu->prev_mperf = mperf; 1046 cpu->prev_mperf = mperf;
912 cpu->prev_tsc = tsc; 1047 cpu->prev_tsc = tsc;
913 return true; 1048 /*
1049 * First time this function is invoked in a given cycle, all of the
1050 * previous sample data fields are equal to zero or stale and they must
1051 * be populated with meaningful numbers for things to work, so assume
1052 * that sample.time will always be reset before setting the utilization
1053 * update hook and make the caller skip the sample then.
1054 */
1055 return !!cpu->last_sample_time;
914} 1056}
915 1057
916static inline int32_t get_avg_frequency(struct cpudata *cpu) 1058static inline int32_t get_avg_frequency(struct cpudata *cpu)
@@ -984,8 +1126,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
984 * enough period of time to adjust our busyness. 1126 * enough period of time to adjust our busyness.
985 */ 1127 */
986 duration_ns = cpu->sample.time - cpu->last_sample_time; 1128 duration_ns = cpu->sample.time - cpu->last_sample_time;
987 if ((s64)duration_ns > pid_params.sample_rate_ns * 3 1129 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
988 && cpu->last_sample_time > 0) {
989 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1130 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
990 int_tofp(duration_ns)); 1131 int_tofp(duration_ns));
991 core_busy = mul_fp(core_busy, sample_ratio); 1132 core_busy = mul_fp(core_busy, sample_ratio);
@@ -1100,10 +1241,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1100 intel_pstate_get_cpu_pstates(cpu); 1241 intel_pstate_get_cpu_pstates(cpu);
1101 1242
1102 intel_pstate_busy_pid_reset(cpu); 1243 intel_pstate_busy_pid_reset(cpu);
1103 intel_pstate_sample(cpu, 0);
1104 1244
1105 cpu->update_util.func = intel_pstate_update_util; 1245 cpu->update_util.func = intel_pstate_update_util;
1106 cpufreq_set_update_util_data(cpunum, &cpu->update_util);
1107 1246
1108 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); 1247 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
1109 1248
@@ -1122,22 +1261,54 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
1122 return get_avg_frequency(cpu); 1261 return get_avg_frequency(cpu);
1123} 1262}
1124 1263
1264static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
1265{
1266 struct cpudata *cpu = all_cpu_data[cpu_num];
1267
1268 /* Prevent intel_pstate_update_util() from using stale data. */
1269 cpu->sample.time = 0;
1270 cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
1271}
1272
1273static void intel_pstate_clear_update_util_hook(unsigned int cpu)
1274{
1275 cpufreq_set_update_util_data(cpu, NULL);
1276 synchronize_sched();
1277}
1278
1279static void intel_pstate_set_performance_limits(struct perf_limits *limits)
1280{
1281 limits->no_turbo = 0;
1282 limits->turbo_disabled = 0;
1283 limits->max_perf_pct = 100;
1284 limits->max_perf = int_tofp(1);
1285 limits->min_perf_pct = 100;
1286 limits->min_perf = int_tofp(1);
1287 limits->max_policy_pct = 100;
1288 limits->max_sysfs_pct = 100;
1289 limits->min_policy_pct = 0;
1290 limits->min_sysfs_pct = 0;
1291}
1292
1125static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1293static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1126{ 1294{
1127 if (!policy->cpuinfo.max_freq) 1295 if (!policy->cpuinfo.max_freq)
1128 return -ENODEV; 1296 return -ENODEV;
1129 1297
1130 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && 1298 intel_pstate_clear_update_util_hook(policy->cpu);
1131 policy->max >= policy->cpuinfo.max_freq) { 1299
1132 pr_debug("intel_pstate: set performance\n"); 1300 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
1133 limits = &performance_limits; 1301 limits = &performance_limits;
1134 if (hwp_active) 1302 if (policy->max >= policy->cpuinfo.max_freq) {
1135 intel_pstate_hwp_set(policy->cpus); 1303 pr_debug("intel_pstate: set performance\n");
1136 return 0; 1304 intel_pstate_set_performance_limits(limits);
1305 goto out;
1306 }
1307 } else {
1308 pr_debug("intel_pstate: set powersave\n");
1309 limits = &powersave_limits;
1137 } 1310 }
1138 1311
1139 pr_debug("intel_pstate: set powersave\n");
1140 limits = &powersave_limits;
1141 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1312 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1142 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); 1313 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
1143 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, 1314 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1163,6 +1334,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1163 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1334 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1164 int_tofp(100)); 1335 int_tofp(100));
1165 1336
1337 out:
1338 intel_pstate_set_update_util_hook(policy->cpu);
1339
1166 if (hwp_active) 1340 if (hwp_active)
1167 intel_pstate_hwp_set(policy->cpus); 1341 intel_pstate_hwp_set(policy->cpus);
1168 1342
@@ -1187,8 +1361,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
1187 1361
1188 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); 1362 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
1189 1363
1190 cpufreq_set_update_util_data(cpu_num, NULL); 1364 intel_pstate_clear_update_util_hook(cpu_num);
1191 synchronize_sched();
1192 1365
1193 if (hwp_active) 1366 if (hwp_active)
1194 return; 1367 return;
@@ -1455,8 +1628,7 @@ out:
1455 get_online_cpus(); 1628 get_online_cpus();
1456 for_each_online_cpu(cpu) { 1629 for_each_online_cpu(cpu) {
1457 if (all_cpu_data[cpu]) { 1630 if (all_cpu_data[cpu]) {
1458 cpufreq_set_update_util_data(cpu, NULL); 1631 intel_pstate_clear_update_util_hook(cpu);
1459 synchronize_sched();
1460 kfree(all_cpu_data[cpu]); 1632 kfree(all_cpu_data[cpu]);
1461 } 1633 }
1462 } 1634 }
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index fedbff55a7f3..815c4a5cae54 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,12 +77,28 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
77static inline void fw_cfg_read_blob(u16 key, 77static inline void fw_cfg_read_blob(u16 key,
78 void *buf, loff_t pos, size_t count) 78 void *buf, loff_t pos, size_t count)
79{ 79{
80 u32 glk;
81 acpi_status status;
82
83 /* If we have ACPI, ensure mutual exclusion against any potential
84 * device access by the firmware, e.g. via AML methods:
85 */
86 status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
87 if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
88 /* Should never get here */
89 WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
90 memset(buf, 0, count);
91 return;
92 }
93
80 mutex_lock(&fw_cfg_dev_lock); 94 mutex_lock(&fw_cfg_dev_lock);
81 iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl); 95 iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
82 while (pos-- > 0) 96 while (pos-- > 0)
83 ioread8(fw_cfg_reg_data); 97 ioread8(fw_cfg_reg_data);
84 ioread8_rep(fw_cfg_reg_data, buf, count); 98 ioread8_rep(fw_cfg_reg_data, buf, count);
85 mutex_unlock(&fw_cfg_dev_lock); 99 mutex_unlock(&fw_cfg_dev_lock);
100
101 acpi_release_global_lock(glk);
86} 102}
87 103
88/* clean up fw_cfg device i/o */ 104/* clean up fw_cfg device i/o */
@@ -727,12 +743,18 @@ device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
727 743
728static int __init fw_cfg_sysfs_init(void) 744static int __init fw_cfg_sysfs_init(void)
729{ 745{
746 int ret;
747
730 /* create /sys/firmware/qemu_fw_cfg/ top level directory */ 748 /* create /sys/firmware/qemu_fw_cfg/ top level directory */
731 fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj); 749 fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
732 if (!fw_cfg_top_ko) 750 if (!fw_cfg_top_ko)
733 return -ENOMEM; 751 return -ENOMEM;
734 752
735 return platform_driver_register(&fw_cfg_sysfs_driver); 753 ret = platform_driver_register(&fw_cfg_sysfs_driver);
754 if (ret)
755 fw_cfg_kobj_cleanup(fw_cfg_top_ko);
756
757 return ret;
736} 758}
737 759
738static void __exit fw_cfg_sysfs_exit(void) 760static void __exit fw_cfg_sysfs_exit(void)
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a68e199d579d..c5c9599a3a71 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -37,7 +37,6 @@ struct men_z127_gpio {
37 void __iomem *reg_base; 37 void __iomem *reg_base;
38 struct mcb_device *mdev; 38 struct mcb_device *mdev;
39 struct resource *mem; 39 struct resource *mem;
40 spinlock_t lock;
41}; 40};
42 41
43static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, 42static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
@@ -69,7 +68,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
69 debounce /= 50; 68 debounce /= 50;
70 } 69 }
71 70
72 spin_lock(&priv->lock); 71 spin_lock(&gc->bgpio_lock);
73 72
74 db_en = readl(priv->reg_base + MEN_Z127_DBER); 73 db_en = readl(priv->reg_base + MEN_Z127_DBER);
75 74
@@ -84,7 +83,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
84 writel(db_en, priv->reg_base + MEN_Z127_DBER); 83 writel(db_en, priv->reg_base + MEN_Z127_DBER);
85 writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio)); 84 writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
86 85
87 spin_unlock(&priv->lock); 86 spin_unlock(&gc->bgpio_lock);
88 87
89 return 0; 88 return 0;
90} 89}
@@ -97,7 +96,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
97 if (gpio_pin >= gc->ngpio) 96 if (gpio_pin >= gc->ngpio)
98 return -EINVAL; 97 return -EINVAL;
99 98
100 spin_lock(&priv->lock); 99 spin_lock(&gc->bgpio_lock);
101 od_en = readl(priv->reg_base + MEN_Z127_ODER); 100 od_en = readl(priv->reg_base + MEN_Z127_ODER);
102 101
103 if (gpiochip_line_is_open_drain(gc, gpio_pin)) 102 if (gpiochip_line_is_open_drain(gc, gpio_pin))
@@ -106,7 +105,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
106 od_en &= ~BIT(gpio_pin); 105 od_en &= ~BIT(gpio_pin);
107 106
108 writel(od_en, priv->reg_base + MEN_Z127_ODER); 107 writel(od_en, priv->reg_base + MEN_Z127_ODER);
109 spin_unlock(&priv->lock); 108 spin_unlock(&gc->bgpio_lock);
110 109
111 return 0; 110 return 0;
112} 111}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d0d3065a7557..e66084c295fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/platform_data/pca953x.h> 19#include <linux/platform_data/pca953x.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <asm/unaligned.h>
21#include <linux/of_platform.h> 22#include <linux/of_platform.h>
22#include <linux/acpi.h> 23#include <linux/acpi.h>
23 24
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
159 switch (chip->chip_type) { 160 switch (chip->chip_type) {
160 case PCA953X_TYPE: 161 case PCA953X_TYPE:
161 ret = i2c_smbus_write_word_data(chip->client, 162 ret = i2c_smbus_write_word_data(chip->client,
162 reg << 1, (u16) *val); 163 reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
163 break; 164 break;
164 case PCA957X_TYPE: 165 case PCA957X_TYPE:
165 ret = i2c_smbus_write_byte_data(chip->client, reg << 1, 166 ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78664b8..76ac906b4d78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
283 writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET)); 283 writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
284 284
285 ret = pinctrl_gpio_direction_output(chip->base + offset); 285 ret = pinctrl_gpio_direction_output(chip->base + offset);
286 if (!ret) 286 if (ret)
287 return 0; 287 return ret;
288 288
289 spin_lock_irqsave(&gpio_lock, flags); 289 spin_lock_irqsave(&gpio_lock, flags);
290 290
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index c0aa387664bf..0dc916191689 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
173 } 173 }
174 174
175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 if (!res) {
177 err = -EINVAL;
178 goto err;
179 }
180
176 gpio->base = devm_ioremap_nocache(&pdev->dev, res->start, 181 gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
177 resource_size(res)); 182 resource_size(res));
178 if (!gpio->base) { 183 if (!gpio->base) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 72065532c1c7..b747c76fd2b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -68,6 +68,7 @@ LIST_HEAD(gpio_devices);
68static void gpiochip_free_hogs(struct gpio_chip *chip); 68static void gpiochip_free_hogs(struct gpio_chip *chip);
69static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 69static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
70 70
71static bool gpiolib_initialized;
71 72
72static inline void desc_set_label(struct gpio_desc *d, const char *label) 73static inline void desc_set_label(struct gpio_desc *d, const char *label)
73{ 74{
@@ -440,9 +441,63 @@ static void gpiodevice_release(struct device *dev)
440 cdev_del(&gdev->chrdev); 441 cdev_del(&gdev->chrdev);
441 list_del(&gdev->list); 442 list_del(&gdev->list);
442 ida_simple_remove(&gpio_ida, gdev->id); 443 ida_simple_remove(&gpio_ida, gdev->id);
444 kfree(gdev->label);
445 kfree(gdev->descs);
443 kfree(gdev); 446 kfree(gdev);
444} 447}
445 448
449static int gpiochip_setup_dev(struct gpio_device *gdev)
450{
451 int status;
452
453 cdev_init(&gdev->chrdev, &gpio_fileops);
454 gdev->chrdev.owner = THIS_MODULE;
455 gdev->chrdev.kobj.parent = &gdev->dev.kobj;
456 gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
457 status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
458 if (status < 0)
459 chip_warn(gdev->chip, "failed to add char device %d:%d\n",
460 MAJOR(gpio_devt), gdev->id);
461 else
462 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
463 MAJOR(gpio_devt), gdev->id);
464 status = device_add(&gdev->dev);
465 if (status)
466 goto err_remove_chardev;
467
468 status = gpiochip_sysfs_register(gdev);
469 if (status)
470 goto err_remove_device;
471
472 /* From this point, the .release() function cleans up gpio_device */
473 gdev->dev.release = gpiodevice_release;
474 get_device(&gdev->dev);
475 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
476 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
477 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
478
479 return 0;
480
481err_remove_device:
482 device_del(&gdev->dev);
483err_remove_chardev:
484 cdev_del(&gdev->chrdev);
485 return status;
486}
487
488static void gpiochip_setup_devs(void)
489{
490 struct gpio_device *gdev;
491 int err;
492
493 list_for_each_entry(gdev, &gpio_devices, list) {
494 err = gpiochip_setup_dev(gdev);
495 if (err)
496 pr_err("%s: Failed to initialize gpio device (%d)\n",
497 dev_name(&gdev->dev), err);
498 }
499}
500
446/** 501/**
447 * gpiochip_add_data() - register a gpio_chip 502 * gpiochip_add_data() - register a gpio_chip
448 * @chip: the chip to register, with chip->base initialized 503 * @chip: the chip to register, with chip->base initialized
@@ -457,6 +512,9 @@ static void gpiodevice_release(struct device *dev)
457 * the gpio framework's arch_initcall(). Otherwise sysfs initialization 512 * the gpio framework's arch_initcall(). Otherwise sysfs initialization
458 * for GPIOs will fail rudely. 513 * for GPIOs will fail rudely.
459 * 514 *
515 * gpiochip_add_data() must only be called after gpiolib initialization,
516 * ie after core_initcall().
517 *
460 * If chip->base is negative, this requests dynamic assignment of 518 * If chip->base is negative, this requests dynamic assignment of
461 * a range of valid GPIOs. 519 * a range of valid GPIOs.
462 */ 520 */
@@ -504,8 +562,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
504 else 562 else
505 gdev->owner = THIS_MODULE; 563 gdev->owner = THIS_MODULE;
506 564
507 gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio, 565 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
508 sizeof(gdev->descs[0]), GFP_KERNEL);
509 if (!gdev->descs) { 566 if (!gdev->descs) {
510 status = -ENOMEM; 567 status = -ENOMEM;
511 goto err_free_gdev; 568 goto err_free_gdev;
@@ -514,16 +571,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
514 if (chip->ngpio == 0) { 571 if (chip->ngpio == 0) {
515 chip_err(chip, "tried to insert a GPIO chip with zero lines\n"); 572 chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
516 status = -EINVAL; 573 status = -EINVAL;
517 goto err_free_gdev; 574 goto err_free_descs;
518 } 575 }
519 576
520 if (chip->label) 577 if (chip->label)
521 gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL); 578 gdev->label = kstrdup(chip->label, GFP_KERNEL);
522 else 579 else
523 gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL); 580 gdev->label = kstrdup("unknown", GFP_KERNEL);
524 if (!gdev->label) { 581 if (!gdev->label) {
525 status = -ENOMEM; 582 status = -ENOMEM;
526 goto err_free_gdev; 583 goto err_free_descs;
527 } 584 }
528 585
529 gdev->ngpio = chip->ngpio; 586 gdev->ngpio = chip->ngpio;
@@ -543,7 +600,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
543 if (base < 0) { 600 if (base < 0) {
544 status = base; 601 status = base;
545 spin_unlock_irqrestore(&gpio_lock, flags); 602 spin_unlock_irqrestore(&gpio_lock, flags);
546 goto err_free_gdev; 603 goto err_free_label;
547 } 604 }
548 /* 605 /*
549 * TODO: it should not be necessary to reflect the assigned 606 * TODO: it should not be necessary to reflect the assigned
@@ -558,7 +615,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
558 status = gpiodev_add_to_list(gdev); 615 status = gpiodev_add_to_list(gdev);
559 if (status) { 616 if (status) {
560 spin_unlock_irqrestore(&gpio_lock, flags); 617 spin_unlock_irqrestore(&gpio_lock, flags);
561 goto err_free_gdev; 618 goto err_free_label;
562 } 619 }
563 620
564 for (i = 0; i < chip->ngpio; i++) { 621 for (i = 0; i < chip->ngpio; i++) {
@@ -596,39 +653,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
596 * we get a device node entry in sysfs under 653 * we get a device node entry in sysfs under
597 * /sys/bus/gpio/devices/gpiochipN/dev that can be used for 654 * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
598 * coldplug of device nodes and other udev business. 655 * coldplug of device nodes and other udev business.
656 * We can do this only if gpiolib has been initialized.
657 * Otherwise, defer until later.
599 */ 658 */
600 cdev_init(&gdev->chrdev, &gpio_fileops); 659 if (gpiolib_initialized) {
601 gdev->chrdev.owner = THIS_MODULE; 660 status = gpiochip_setup_dev(gdev);
602 gdev->chrdev.kobj.parent = &gdev->dev.kobj; 661 if (status)
603 gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id); 662 goto err_remove_chip;
604 status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1); 663 }
605 if (status < 0)
606 chip_warn(chip, "failed to add char device %d:%d\n",
607 MAJOR(gpio_devt), gdev->id);
608 else
609 chip_dbg(chip, "added GPIO chardev (%d:%d)\n",
610 MAJOR(gpio_devt), gdev->id);
611 status = device_add(&gdev->dev);
612 if (status)
613 goto err_remove_chardev;
614
615 status = gpiochip_sysfs_register(gdev);
616 if (status)
617 goto err_remove_device;
618
619 /* From this point, the .release() function cleans up gpio_device */
620 gdev->dev.release = gpiodevice_release;
621 get_device(&gdev->dev);
622 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
623 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
624 dev_name(&gdev->dev), chip->label ? : "generic");
625
626 return 0; 664 return 0;
627 665
628err_remove_device:
629 device_del(&gdev->dev);
630err_remove_chardev:
631 cdev_del(&gdev->chrdev);
632err_remove_chip: 666err_remove_chip:
633 acpi_gpiochip_remove(chip); 667 acpi_gpiochip_remove(chip);
634 gpiochip_free_hogs(chip); 668 gpiochip_free_hogs(chip);
@@ -637,6 +671,10 @@ err_remove_from_list:
637 spin_lock_irqsave(&gpio_lock, flags); 671 spin_lock_irqsave(&gpio_lock, flags);
638 list_del(&gdev->list); 672 list_del(&gdev->list);
639 spin_unlock_irqrestore(&gpio_lock, flags); 673 spin_unlock_irqrestore(&gpio_lock, flags);
674err_free_label:
675 kfree(gdev->label);
676err_free_descs:
677 kfree(gdev->descs);
640err_free_gdev: 678err_free_gdev:
641 ida_simple_remove(&gpio_ida, gdev->id); 679 ida_simple_remove(&gpio_ida, gdev->id);
642 /* failures here can mean systems won't boot... */ 680 /* failures here can mean systems won't boot... */
@@ -2231,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2231 return desc; 2269 return desc;
2232} 2270}
2233 2271
2234static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, 2272static struct gpio_desc *acpi_find_gpio(struct device *dev,
2273 const char *con_id,
2235 unsigned int idx, 2274 unsigned int idx,
2236 enum gpio_lookup_flags *flags) 2275 enum gpiod_flags flags,
2276 enum gpio_lookup_flags *lookupflags)
2237{ 2277{
2238 struct acpi_device *adev = ACPI_COMPANION(dev); 2278 struct acpi_device *adev = ACPI_COMPANION(dev);
2239 struct acpi_gpio_info info; 2279 struct acpi_gpio_info info;
@@ -2264,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
2264 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); 2304 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
2265 if (IS_ERR(desc)) 2305 if (IS_ERR(desc))
2266 return desc; 2306 return desc;
2307
2308 if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
2309 info.gpioint) {
2310 dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
2311 return ERR_PTR(-ENOENT);
2312 }
2267 } 2313 }
2268 2314
2269 if (info.polarity == GPIO_ACTIVE_LOW) 2315 if (info.polarity == GPIO_ACTIVE_LOW)
2270 *flags |= GPIO_ACTIVE_LOW; 2316 *lookupflags |= GPIO_ACTIVE_LOW;
2271 2317
2272 return desc; 2318 return desc;
2273} 2319}
@@ -2530,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2530 desc = of_find_gpio(dev, con_id, idx, &lookupflags); 2576 desc = of_find_gpio(dev, con_id, idx, &lookupflags);
2531 } else if (ACPI_COMPANION(dev)) { 2577 } else if (ACPI_COMPANION(dev)) {
2532 dev_dbg(dev, "using ACPI for GPIO lookup\n"); 2578 dev_dbg(dev, "using ACPI for GPIO lookup\n");
2533 desc = acpi_find_gpio(dev, con_id, idx, &lookupflags); 2579 desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
2534 } 2580 }
2535 } 2581 }
2536 2582
@@ -2829,6 +2875,9 @@ static int __init gpiolib_dev_init(void)
2829 if (ret < 0) { 2875 if (ret < 0) {
2830 pr_err("gpiolib: failed to allocate char dev region\n"); 2876 pr_err("gpiolib: failed to allocate char dev region\n");
2831 bus_unregister(&gpio_bus_type); 2877 bus_unregister(&gpio_bus_type);
2878 } else {
2879 gpiolib_initialized = true;
2880 gpiochip_setup_devs();
2832 } 2881 }
2833 return ret; 2882 return ret;
2834} 2883}
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0f734ee05274..ca77ec10147c 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,10 +1,14 @@
1menu "ACP Configuration" 1menu "ACP (Audio CoProcessor) Configuration"
2 2
3config DRM_AMD_ACP 3config DRM_AMD_ACP
4 bool "Enable ACP IP support" 4 bool "Enable AMD Audio CoProcessor IP support"
5 select MFD_CORE 5 select MFD_CORE
6 select PM_GENERIC_DOMAINS if PM 6 select PM_GENERIC_DOMAINS if PM
7 help 7 help
8 Choose this option to enable ACP IP support for AMD SOCs. 8 Choose this option to enable ACP IP support for AMD SOCs.
9 This adds the ACP (Audio CoProcessor) IP driver and wires
10 it up into the amdgpu driver. The ACP block provides the DMA
11 engine for the i2s-based ALSA driver. It is required for audio
12 on APUs which utilize an i2s codec.
9 13
10endmenu 14endmenu
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c4a21c6428f5..62a778012fe0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
1591 struct amdgpu_bo *vcpu_bo; 1591 struct amdgpu_bo *vcpu_bo;
1592 void *cpu_addr; 1592 void *cpu_addr;
1593 uint64_t gpu_addr; 1593 uint64_t gpu_addr;
1594 void *saved_bo;
1594 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1595 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1595 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 1596 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1596 struct delayed_work idle_work; 1597 struct delayed_work idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101e10c6..6043dc7c3a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
816 struct drm_device *ddev = adev->ddev; 816 struct drm_device *ddev = adev->ddev;
817 struct drm_crtc *crtc; 817 struct drm_crtc *crtc;
818 uint32_t line_time_us, vblank_lines; 818 uint32_t line_time_us, vblank_lines;
819 struct cgs_mode_info *mode_info;
819 820
820 if (info == NULL) 821 if (info == NULL)
821 return -EINVAL; 822 return -EINVAL;
822 823
824 mode_info = info->mode_info;
825
823 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 826 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
824 list_for_each_entry(crtc, 827 list_for_each_entry(crtc,
825 &ddev->mode_config.crtc_list, head) { 828 &ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
828 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); 831 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
829 info->display_count++; 832 info->display_count++;
830 } 833 }
831 if (info->mode_info != NULL && 834 if (mode_info != NULL &&
832 crtc->enabled && amdgpu_crtc->enabled && 835 crtc->enabled && amdgpu_crtc->enabled &&
833 amdgpu_crtc->hw_mode.clock) { 836 amdgpu_crtc->hw_mode.clock) {
834 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / 837 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
836 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - 839 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
837 amdgpu_crtc->hw_mode.crtc_vdisplay + 840 amdgpu_crtc->hw_mode.crtc_vdisplay +
838 (amdgpu_crtc->v_border * 2); 841 (amdgpu_crtc->v_border * 2);
839 info->mode_info->vblank_time_us = vblank_lines * line_time_us; 842 mode_info->vblank_time_us = vblank_lines * line_time_us;
840 info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 843 mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
841 info->mode_info->ref_clock = adev->clock.spll.reference_freq; 844 mode_info->ref_clock = adev->clock.spll.reference_freq;
842 info->mode_info++; 845 mode_info = NULL;
843 } 846 }
844 } 847 }
845 } 848 }
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
847 return 0; 850 return 0;
848} 851}
849 852
853
854static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
855{
856 CGS_FUNC_ADEV;
857
858 adev->pm.dpm_enabled = enabled;
859
860 return 0;
861}
862
850/** \brief evaluate acpi namespace object, handle or pathname must be valid 863/** \brief evaluate acpi namespace object, handle or pathname must be valid
851 * \param cgs_device 864 * \param cgs_device
852 * \param info input/output arguments for the control method 865 * \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
1097 amdgpu_cgs_set_powergating_state, 1110 amdgpu_cgs_set_powergating_state,
1098 amdgpu_cgs_set_clockgating_state, 1111 amdgpu_cgs_set_clockgating_state,
1099 amdgpu_cgs_get_active_displays_info, 1112 amdgpu_cgs_get_active_displays_info,
1113 amdgpu_cgs_notify_dpm_enabled,
1100 amdgpu_cgs_call_acpi_method, 1114 amdgpu_cgs_call_acpi_method,
1101 amdgpu_cgs_query_system_info, 1115 amdgpu_cgs_query_system_info,
1102}; 1116};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f0ed974bd4e0..3fb405b3a614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
57 if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) 57 if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
58 return true; 58 return true;
59 59
60 fence_put(*f); 60 fence_put(fence);
61 return false; 61 return false;
62} 62}
63 63
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4303b447efe8..d81f1f4883a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
121{ 121{
122 struct amdgpu_device *adev = ring->adev; 122 struct amdgpu_device *adev = ring->adev;
123 struct amdgpu_fence *fence; 123 struct amdgpu_fence *fence;
124 struct fence **ptr; 124 struct fence *old, **ptr;
125 uint32_t seq; 125 uint32_t seq;
126 126
127 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); 127 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
141 /* This function can't be called concurrently anyway, otherwise 141 /* This function can't be called concurrently anyway, otherwise
142 * emitting the fence would mess up the hardware ring buffer. 142 * emitting the fence would mess up the hardware ring buffer.
143 */ 143 */
144 BUG_ON(rcu_dereference_protected(*ptr, 1)); 144 old = rcu_dereference_protected(*ptr, 1);
145 if (old && !fence_is_signaled(old)) {
146 DRM_INFO("rcu slot is busy\n");
147 fence_wait(old, false);
148 }
145 149
146 rcu_assign_pointer(*ptr, fence_get(&fence->base)); 150 rcu_assign_pointer(*ptr, fence_get(&fence->base));
147 151
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfaa97e5..762cfdb85147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
219 if (r) { 219 if (r) {
220 return r; 220 return r;
221 } 221 }
222 adev->ddev->vblank_disable_allowed = true;
223
222 /* enable msi */ 224 /* enable msi */
223 adev->irq.msi_enabled = false; 225 adev->irq.msi_enabled = false;
224 226
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7805a8706af7..598eb0cd5aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -382,6 +382,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
382 struct drm_amdgpu_info_vram_gtt vram_gtt; 382 struct drm_amdgpu_info_vram_gtt vram_gtt;
383 383
384 vram_gtt.vram_size = adev->mc.real_vram_size; 384 vram_gtt.vram_size = adev->mc.real_vram_size;
385 vram_gtt.vram_size -= adev->vram_pin_size;
385 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; 386 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
386 vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size; 387 vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
387 vram_gtt.gtt_size = adev->mc.gtt_size; 388 vram_gtt.gtt_size = adev->mc.gtt_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 151a2d42c639..5b6639faa731 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -476,6 +476,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
476 return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); 476 return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
477} 477}
478 478
479static const char *amdgpu_vram_names[] = {
480 "UNKNOWN",
481 "GDDR1",
482 "DDR2",
483 "GDDR3",
484 "GDDR4",
485 "GDDR5",
486 "HBM",
487 "DDR3"
488};
489
479int amdgpu_bo_init(struct amdgpu_device *adev) 490int amdgpu_bo_init(struct amdgpu_device *adev)
480{ 491{
481 /* Add an MTRR for the VRAM */ 492 /* Add an MTRR for the VRAM */
@@ -484,8 +495,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
484 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 495 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
485 adev->mc.mc_vram_size >> 20, 496 adev->mc.mc_vram_size >> 20,
486 (unsigned long long)adev->mc.aper_size >> 20); 497 (unsigned long long)adev->mc.aper_size >> 20);
487 DRM_INFO("RAM width %dbits DDR\n", 498 DRM_INFO("RAM width %dbits %s\n",
488 adev->mc.vram_width); 499 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
489 return amdgpu_ttm_init(adev); 500 return amdgpu_ttm_init(adev);
490} 501}
491 502
@@ -608,6 +619,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
608 if ((offset + size) <= adev->mc.visible_vram_size) 619 if ((offset + size) <= adev->mc.visible_vram_size)
609 return 0; 620 return 0;
610 621
622 /* Can't move a pinned BO to visible VRAM */
623 if (abo->pin_count > 0)
624 return -EINVAL;
625
611 /* hurrah the memory is not visible ! */ 626 /* hurrah the memory is not visible ! */
612 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); 627 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
613 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 628 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c413c7..e9c6ae6ed2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
143 adev->powerplay.pp_handle); 143 adev->powerplay.pp_handle);
144 144
145#ifdef CONFIG_DRM_AMD_POWERPLAY 145#ifdef CONFIG_DRM_AMD_POWERPLAY
146 if (adev->pp_enabled) { 146 if (adev->pp_enabled && adev->pm.dpm_enabled) {
147 amdgpu_pm_sysfs_init(adev); 147 amdgpu_pm_sysfs_init(adev);
148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); 148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
149 } 149 }
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
161 adev->powerplay.pp_handle); 161 adev->powerplay.pp_handle);
162 162
163#ifdef CONFIG_DRM_AMD_POWERPLAY 163#ifdef CONFIG_DRM_AMD_POWERPLAY
164 if (adev->pp_enabled) { 164 if (adev->pp_enabled)
165 if (amdgpu_dpm == 0) 165 adev->pm.dpm_enabled = true;
166 adev->pm.dpm_enabled = false;
167 else
168 adev->pm.dpm_enabled = true;
169 }
170#endif 166#endif
171 167
172 return ret; 168 return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab34190859a8..6f3369de232f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -384,9 +384,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
384 struct ttm_mem_reg *new_mem) 384 struct ttm_mem_reg *new_mem)
385{ 385{
386 struct amdgpu_device *adev; 386 struct amdgpu_device *adev;
387 struct amdgpu_bo *abo;
387 struct ttm_mem_reg *old_mem = &bo->mem; 388 struct ttm_mem_reg *old_mem = &bo->mem;
388 int r; 389 int r;
389 390
391 /* Can't move a pinned BO */
392 abo = container_of(bo, struct amdgpu_bo, tbo);
393 if (WARN_ON_ONCE(abo->pin_count > 0))
394 return -EINVAL;
395
390 adev = amdgpu_get_adev(bo->bdev); 396 adev = amdgpu_get_adev(bo->bdev);
391 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 397 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
392 amdgpu_move_null(bo, new_mem); 398 amdgpu_move_null(bo, new_mem);
@@ -616,7 +622,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
616 set_page_dirty(page); 622 set_page_dirty(page);
617 623
618 mark_page_accessed(page); 624 mark_page_accessed(page);
619 page_cache_release(page); 625 put_page(page);
620 } 626 }
621 627
622 sg_free_table(ttm->sg); 628 sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c1a581044417..338da80006b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -241,32 +241,28 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
241 241
242int amdgpu_uvd_suspend(struct amdgpu_device *adev) 242int amdgpu_uvd_suspend(struct amdgpu_device *adev)
243{ 243{
244 struct amdgpu_ring *ring = &adev->uvd.ring; 244 unsigned size;
245 int i, r; 245 void *ptr;
246 int i;
246 247
247 if (adev->uvd.vcpu_bo == NULL) 248 if (adev->uvd.vcpu_bo == NULL)
248 return 0; 249 return 0;
249 250
250 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 251 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
251 uint32_t handle = atomic_read(&adev->uvd.handles[i]); 252 if (atomic_read(&adev->uvd.handles[i]))
252 if (handle != 0) { 253 break;
253 struct fence *fence;
254 254
255 amdgpu_uvd_note_usage(adev); 255 if (i == AMDGPU_MAX_UVD_HANDLES)
256 return 0;
256 257
257 r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); 258 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
258 if (r) { 259 ptr = adev->uvd.cpu_addr;
259 DRM_ERROR("Error destroying UVD (%d)!\n", r);
260 continue;
261 }
262 260
263 fence_wait(fence, false); 261 adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
264 fence_put(fence); 262 if (!adev->uvd.saved_bo)
263 return -ENOMEM;
265 264
266 adev->uvd.filp[i] = NULL; 265 memcpy(adev->uvd.saved_bo, ptr, size);
267 atomic_set(&adev->uvd.handles[i], 0);
268 }
269 }
270 266
271 return 0; 267 return 0;
272} 268}
@@ -275,23 +271,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
275{ 271{
276 unsigned size; 272 unsigned size;
277 void *ptr; 273 void *ptr;
278 const struct common_firmware_header *hdr;
279 unsigned offset;
280 274
281 if (adev->uvd.vcpu_bo == NULL) 275 if (adev->uvd.vcpu_bo == NULL)
282 return -EINVAL; 276 return -EINVAL;
283 277
284 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
285 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
286 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
287 (adev->uvd.fw->size) - offset);
288
289 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 278 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
290 size -= le32_to_cpu(hdr->ucode_size_bytes);
291 ptr = adev->uvd.cpu_addr; 279 ptr = adev->uvd.cpu_addr;
292 ptr += le32_to_cpu(hdr->ucode_size_bytes);
293 280
294 memset(ptr, 0, size); 281 if (adev->uvd.saved_bo != NULL) {
282 memcpy(ptr, adev->uvd.saved_bo, size);
283 kfree(adev->uvd.saved_bo);
284 adev->uvd.saved_bo = NULL;
285 } else {
286 const struct common_firmware_header *hdr;
287 unsigned offset;
288
289 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
290 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
291 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
292 (adev->uvd.fw->size) - offset);
293 size -= le32_to_cpu(hdr->ucode_size_bytes);
294 ptr += le32_to_cpu(hdr->ucode_size_bytes);
295 memset(ptr, 0, size);
296 }
295 297
296 return 0; 298 return 0;
297} 299}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 82ce7d943884..05b0353d3880 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
903 gmc_v7_0_set_gart_funcs(adev); 903 gmc_v7_0_set_gart_funcs(adev);
904 gmc_v7_0_set_irq_funcs(adev); 904 gmc_v7_0_set_irq_funcs(adev);
905 905
906 if (adev->flags & AMD_IS_APU) {
907 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
908 } else {
909 u32 tmp = RREG32(mmMC_SEQ_MISC0);
910 tmp &= MC_SEQ_MISC0__MT__MASK;
911 adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
912 }
913
914 return 0; 906 return 0;
915} 907}
916 908
@@ -927,6 +919,14 @@ static int gmc_v7_0_sw_init(void *handle)
927 int dma_bits; 919 int dma_bits;
928 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 920 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
929 921
922 if (adev->flags & AMD_IS_APU) {
923 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
924 } else {
925 u32 tmp = RREG32(mmMC_SEQ_MISC0);
926 tmp &= MC_SEQ_MISC0__MT__MASK;
927 adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
928 }
929
930 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); 930 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
931 if (r) 931 if (r)
932 return r; 932 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 29bd7b57dc91..02deb3229405 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
863 gmc_v8_0_set_gart_funcs(adev); 863 gmc_v8_0_set_gart_funcs(adev);
864 gmc_v8_0_set_irq_funcs(adev); 864 gmc_v8_0_set_irq_funcs(adev);
865 865
866 if (adev->flags & AMD_IS_APU) {
867 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
868 } else {
869 u32 tmp = RREG32(mmMC_SEQ_MISC0);
870 tmp &= MC_SEQ_MISC0__MT__MASK;
871 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
872 }
873
874 return 0; 866 return 0;
875} 867}
876 868
@@ -881,12 +873,27 @@ static int gmc_v8_0_late_init(void *handle)
881 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 873 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
882} 874}
883 875
876#define mmMC_SEQ_MISC0_FIJI 0xA71
877
884static int gmc_v8_0_sw_init(void *handle) 878static int gmc_v8_0_sw_init(void *handle)
885{ 879{
886 int r; 880 int r;
887 int dma_bits; 881 int dma_bits;
888 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 882 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
889 883
884 if (adev->flags & AMD_IS_APU) {
885 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
886 } else {
887 u32 tmp;
888
889 if (adev->asic_type == CHIP_FIJI)
890 tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
891 else
892 tmp = RREG32(mmMC_SEQ_MISC0);
893 tmp &= MC_SEQ_MISC0__MT__MASK;
894 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
895 }
896
890 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); 897 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
891 if (r) 898 if (r)
892 return r; 899 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c606ccb38d8b..cb463753115b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
224 int r; 224 int r;
225 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
226 226
227 r = amdgpu_uvd_suspend(adev); 227 r = uvd_v4_2_hw_fini(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
231 r = uvd_v4_2_hw_fini(adev); 231 r = amdgpu_uvd_suspend(adev);
232 if (r) 232 if (r)
233 return r; 233 return r;
234 234
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3c852d9d79a..16476d80f475 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
220 int r; 220 int r;
221 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
222 222
223 r = amdgpu_uvd_suspend(adev); 223 r = uvd_v5_0_hw_fini(adev);
224 if (r) 224 if (r)
225 return r; 225 return r;
226 226
227 r = uvd_v5_0_hw_fini(adev); 227 r = amdgpu_uvd_suspend(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3375e614ac67..d49379145ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
214 int r; 214 int r;
215 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 215 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
216 216
217 r = uvd_v6_0_hw_fini(adev);
218 if (r)
219 return r;
220
217 /* Skip this for APU for now */ 221 /* Skip this for APU for now */
218 if (!(adev->flags & AMD_IS_APU)) { 222 if (!(adev->flags & AMD_IS_APU)) {
219 r = amdgpu_uvd_suspend(adev); 223 r = amdgpu_uvd_suspend(adev);
220 if (r) 224 if (r)
221 return r; 225 return r;
222 } 226 }
223 r = uvd_v6_0_hw_fini(adev);
224 if (r)
225 return r;
226 227
227 return r; 228 return r;
228} 229}
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc3834f..ab84d4947247 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
589 void *cgs_device, 589 void *cgs_device,
590 struct cgs_display_info *info); 590 struct cgs_display_info *info);
591 591
592typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
593
592typedef int (*cgs_call_acpi_method)(void *cgs_device, 594typedef int (*cgs_call_acpi_method)(void *cgs_device,
593 uint32_t acpi_method, 595 uint32_t acpi_method,
594 uint32_t acpi_function, 596 uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
644 cgs_set_clockgating_state set_clockgating_state; 646 cgs_set_clockgating_state set_clockgating_state;
645 /* display manager */ 647 /* display manager */
646 cgs_get_active_displays_info get_active_displays_info; 648 cgs_get_active_displays_info get_active_displays_info;
649 /* notify dpm enabled */
650 cgs_notify_dpm_enabled notify_dpm_enabled;
647 /* ACPI */ 651 /* ACPI */
648 cgs_call_acpi_method call_acpi_method; 652 cgs_call_acpi_method call_acpi_method;
649 /* get system info */ 653 /* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
734 CGS_CALL(set_powergating_state, dev, block_type, state) 738 CGS_CALL(set_powergating_state, dev, block_type, state)
735#define cgs_set_clockgating_state(dev, block_type, state) \ 739#define cgs_set_clockgating_state(dev, block_type, state) \
736 CGS_CALL(set_clockgating_state, dev, block_type, state) 740 CGS_CALL(set_clockgating_state, dev, block_type, state)
741#define cgs_notify_dpm_enabled(dev, enabled) \
742 CGS_CALL(notify_dpm_enabled, dev, enabled)
743
737#define cgs_get_active_displays_info(dev, info) \ 744#define cgs_get_active_displays_info(dev, info) \
738 CGS_CALL(get_active_displays_info, dev, info) 745 CGS_CALL(get_active_displays_info, dev, info)
746
739#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \ 747#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
740 CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) 748 CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
741#define cgs_query_system_info(dev, sys_info) \ 749#define cgs_query_system_info(dev, sys_info) \
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78cb404..56856a2864d1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
137 reset_display_configCounter_tasks, 137 reset_display_configCounter_tasks,
138 update_dal_configuration_tasks, 138 update_dal_configuration_tasks,
139 vari_bright_resume_tasks, 139 vari_bright_resume_tasks,
140 block_adjust_power_state_tasks,
141 setup_asic_tasks, 140 setup_asic_tasks,
142 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ 141 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
143 enable_dynamic_state_management_tasks, 142 enable_dynamic_state_management_tasks,
144 enable_clock_power_gatings_tasks, 143 enable_clock_power_gatings_tasks,
145 enable_disable_bapm_tasks, 144 enable_disable_bapm_tasks,
146 initialize_thermal_controller_tasks, 145 initialize_thermal_controller_tasks,
147 reset_boot_state_tasks, 146 get_2d_performance_state_tasks,
147 set_performance_state_tasks,
148 adjust_power_state_tasks, 148 adjust_power_state_tasks,
149 enable_disable_fps_tasks, 149 enable_disable_fps_tasks,
150 notify_hw_power_source_tasks, 150 notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 51dedf84623c..89f31bc5b68b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
2389 2389
2390 for(count = 0; count < table->VceLevelCount; count++) { 2390 for(count = 0; count < table->VceLevelCount; count++) {
2391 table->VceLevel[count].Frequency = mm_table->entries[count].eclk; 2391 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
2392 table->VceLevel[count].MinVoltage = 0;
2392 table->VceLevel[count].MinVoltage |= 2393 table->VceLevel[count].MinVoltage |=
2393 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; 2394 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
2394 table->VceLevel[count].MinVoltage |= 2395 table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
2465 2466
2466 for (count = 0; count < table->SamuLevelCount; count++) { 2467 for (count = 0; count < table->SamuLevelCount; count++) {
2467 /* not sure whether we need evclk or not */ 2468 /* not sure whether we need evclk or not */
2469 table->SamuLevel[count].MinVoltage = 0;
2468 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; 2470 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
2469 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * 2471 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2470 VOLTAGE_SCALE) << VDDC_SHIFT; 2472 VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
2562 table->UvdBootLevel = 0; 2564 table->UvdBootLevel = 0;
2563 2565
2564 for (count = 0; count < table->UvdLevelCount; count++) { 2566 for (count = 0; count < table->UvdLevelCount; count++) {
2567 table->UvdLevel[count].MinVoltage = 0;
2565 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; 2568 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
2566 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; 2569 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
2567 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * 2570 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2900 if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control) 2903 if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
2901 fiji_populate_smc_voltage_tables(hwmgr, table); 2904 fiji_populate_smc_voltage_tables(hwmgr, table);
2902 2905
2906 table->SystemFlags = 0;
2907
2903 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2908 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2904 PHM_PlatformCaps_AutomaticDCTransition)) 2909 PHM_PlatformCaps_AutomaticDCTransition))
2905 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 2910 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2997 table->MemoryThermThrottleEnable = 1; 3002 table->MemoryThermThrottleEnable = 1;
2998 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ 3003 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
2999 table->PCIeGenInterval = 1; 3004 table->PCIeGenInterval = 1;
3005 table->VRConfig = 0;
3000 3006
3001 result = fiji_populate_vr_config(hwmgr, table); 3007 result = fiji_populate_vr_config(hwmgr, table);
3002 PP_ASSERT_WITH_CODE(0 == result, 3008 PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
5195 return size; 5201 return size;
5196} 5202}
5197 5203
5204static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
5205 const struct fiji_performance_level *pl2)
5206{
5207 return ((pl1->memory_clock == pl2->memory_clock) &&
5208 (pl1->engine_clock == pl2->engine_clock) &&
5209 (pl1->pcie_gen == pl2->pcie_gen) &&
5210 (pl1->pcie_lane == pl2->pcie_lane));
5211}
5212
5213int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
5214{
5215 const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
5216 const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
5217 int i;
5218
5219 if (equal == NULL || psa == NULL || psb == NULL)
5220 return -EINVAL;
5221
5222 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5223 if (psa->performance_level_count != psb->performance_level_count) {
5224 *equal = false;
5225 return 0;
5226 }
5227
5228 for (i = 0; i < psa->performance_level_count; i++) {
5229 if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5230 /* If we have found even one performance level pair that is different the states are different. */
5231 *equal = false;
5232 return 0;
5233 }
5234 }
5235
5236 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5237 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
5238 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
5239 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5240 *equal &= (psa->acp_clk == psb->acp_clk);
5241
5242 return 0;
5243}
5244
5245bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5246{
5247 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5248 bool is_update_required = false;
5249 struct cgs_display_info info = {0,0,NULL};
5250
5251 cgs_get_active_displays_info(hwmgr->device, &info);
5252
5253 if (data->display_timing.num_existing_displays != info.display_count)
5254 is_update_required = true;
5255/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
5256 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5257 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
5258 if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
5259 is_update_required = true;
5260*/
5261 return is_update_required;
5262}
5263
5264
5198static const struct pp_hwmgr_func fiji_hwmgr_funcs = { 5265static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5199 .backend_init = &fiji_hwmgr_backend_init, 5266 .backend_init = &fiji_hwmgr_backend_init,
5200 .backend_fini = &tonga_hwmgr_backend_fini, 5267 .backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5230 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, 5297 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
5231 .set_fan_control_mode = fiji_set_fan_control_mode, 5298 .set_fan_control_mode = fiji_set_fan_control_mode,
5232 .get_fan_control_mode = fiji_get_fan_control_mode, 5299 .get_fan_control_mode = fiji_get_fan_control_mode,
5300 .check_states_equal = fiji_check_states_equal,
5301 .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
5233 .get_pp_table = fiji_get_pp_table, 5302 .get_pp_table = fiji_get_pp_table,
5234 .set_pp_table = fiji_set_pp_table, 5303 .set_pp_table = fiji_set_pp_table,
5235 .force_clock_level = fiji_force_clock_level, 5304 .force_clock_level = fiji_force_clock_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index be31bed2538a..fa208ada6892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
58 58
59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); 59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
60 60
61 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
62 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
63
61 if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && 64 if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
62 acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) 65 acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
63 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); 66 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
130 133
131int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) 134int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
132{ 135{
136 int ret = 1;
137 bool enabled;
133 PHM_FUNC_CHECK(hwmgr); 138 PHM_FUNC_CHECK(hwmgr);
134 139
135 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 140 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
136 PHM_PlatformCaps_TablelessHardwareInterface)) { 141 PHM_PlatformCaps_TablelessHardwareInterface)) {
137 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) 142 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
138 return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); 143 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
139 } else { 144 } else {
140 return phm_dispatch_table(hwmgr, 145 ret = phm_dispatch_table(hwmgr,
141 &(hwmgr->enable_dynamic_state_management), 146 &(hwmgr->enable_dynamic_state_management),
142 NULL, NULL); 147 NULL, NULL);
143 } 148 }
144 return 0; 149
150 enabled = ret == 0 ? true : false;
151
152 cgs_notify_dpm_enabled(hwmgr->device, enabled);
153
154 return ret;
145} 155}
146 156
147int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) 157int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 56b829f97699..3ac1ae4d8caf 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
57 DRM_ERROR("failed to map control registers area\n"); 57 DRM_ERROR("failed to map control registers area\n");
58 ret = PTR_ERR(hdlcd->mmio); 58 ret = PTR_ERR(hdlcd->mmio);
59 hdlcd->mmio = NULL; 59 hdlcd->mmio = NULL;
60 goto fail; 60 return ret;
61 } 61 }
62 62
63 version = hdlcd_read(hdlcd, HDLCD_REG_VERSION); 63 version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
64 if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) { 64 if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
65 DRM_ERROR("unknown product id: 0x%x\n", version); 65 DRM_ERROR("unknown product id: 0x%x\n", version);
66 ret = -EINVAL; 66 return -EINVAL;
67 goto fail;
68 } 67 }
69 DRM_INFO("found ARM HDLCD version r%dp%d\n", 68 DRM_INFO("found ARM HDLCD version r%dp%d\n",
70 (version & HDLCD_VERSION_MAJOR_MASK) >> 8, 69 (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
73 /* Get the optional framebuffer memory resource */ 72 /* Get the optional framebuffer memory resource */
74 ret = of_reserved_mem_device_init(drm->dev); 73 ret = of_reserved_mem_device_init(drm->dev);
75 if (ret && ret != -ENODEV) 74 if (ret && ret != -ENODEV)
76 goto fail; 75 return ret;
77 76
78 ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); 77 ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
79 if (ret) 78 if (ret)
@@ -101,8 +100,6 @@ irq_fail:
101 drm_crtc_cleanup(&hdlcd->crtc); 100 drm_crtc_cleanup(&hdlcd->crtc);
102setup_fail: 101setup_fail:
103 of_reserved_mem_device_release(drm->dev); 102 of_reserved_mem_device_release(drm->dev);
104fail:
105 devm_clk_put(drm->dev, hdlcd->clk);
106 103
107 return ret; 104 return ret;
108} 105}
@@ -412,7 +409,6 @@ err_unload:
412 pm_runtime_put_sync(drm->dev); 409 pm_runtime_put_sync(drm->dev);
413 pm_runtime_disable(drm->dev); 410 pm_runtime_disable(drm->dev);
414 of_reserved_mem_device_release(drm->dev); 411 of_reserved_mem_device_release(drm->dev);
415 devm_clk_put(dev, hdlcd->clk);
416err_free: 412err_free:
417 drm_dev_unref(drm); 413 drm_dev_unref(drm);
418 414
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
436 pm_runtime_put_sync(drm->dev); 432 pm_runtime_put_sync(drm->dev);
437 pm_runtime_disable(drm->dev); 433 pm_runtime_disable(drm->dev);
438 of_reserved_mem_device_release(drm->dev); 434 of_reserved_mem_device_release(drm->dev);
439 if (!IS_ERR(hdlcd->clk)) {
440 devm_clk_put(drm->dev, hdlcd->clk);
441 hdlcd->clk = NULL;
442 }
443 drm_mode_config_cleanup(drm); 435 drm_mode_config_cleanup(drm);
444 drm_dev_unregister(drm); 436 drm_dev_unregister(drm);
445 drm_dev_unref(drm); 437 drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
481 481
482 release: 482 release:
483 for_each_sg(sgt->sgl, sg, num, i) 483 for_each_sg(sgt->sgl, sg, num, i)
484 page_cache_release(sg_page(sg)); 484 put_page(sg_page(sg));
485 free_table: 485 free_table:
486 sg_free_table(sgt); 486 sg_free_table(sgt);
487 free_sgt: 487 free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
502 if (dobj->obj.filp) { 502 if (dobj->obj.filp) {
503 struct scatterlist *sg; 503 struct scatterlist *sg;
504 for_each_sg(sgt->sgl, sg, sgt->nents, i) 504 for_each_sg(sgt->sgl, sg, sgt->nents, i)
505 page_cache_release(sg_page(sg)); 505 put_page(sg_page(sg));
506 } 506 }
507 507
508 sg_free_table(sgt); 508 sg_free_table(sgt);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7d58f594cffe..df64ed1c0139 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
179{ 179{
180 struct drm_dp_aux_msg msg; 180 struct drm_dp_aux_msg msg;
181 unsigned int retry; 181 unsigned int retry;
182 int err; 182 int err = 0;
183 183
184 memset(&msg, 0, sizeof(msg)); 184 memset(&msg, 0, sizeof(msg));
185 msg.address = offset; 185 msg.address = offset;
@@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
187 msg.buffer = buffer; 187 msg.buffer = buffer;
188 msg.size = size; 188 msg.size = size;
189 189
190 mutex_lock(&aux->hw_mutex);
191
190 /* 192 /*
191 * The specification doesn't give any recommendation on how often to 193 * The specification doesn't give any recommendation on how often to
192 * retry native transactions. We used to retry 7 times like for 194 * retry native transactions. We used to retry 7 times like for
@@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
195 */ 197 */
196 for (retry = 0; retry < 32; retry++) { 198 for (retry = 0; retry < 32; retry++) {
197 199
198 mutex_lock(&aux->hw_mutex);
199 err = aux->transfer(aux, &msg); 200 err = aux->transfer(aux, &msg);
200 mutex_unlock(&aux->hw_mutex);
201 if (err < 0) { 201 if (err < 0) {
202 if (err == -EBUSY) 202 if (err == -EBUSY)
203 continue; 203 continue;
204 204
205 return err; 205 goto unlock;
206 } 206 }
207 207
208 208
209 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { 209 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
210 case DP_AUX_NATIVE_REPLY_ACK: 210 case DP_AUX_NATIVE_REPLY_ACK:
211 if (err < size) 211 if (err < size)
212 return -EPROTO; 212 err = -EPROTO;
213 return err; 213 goto unlock;
214 214
215 case DP_AUX_NATIVE_REPLY_NACK: 215 case DP_AUX_NATIVE_REPLY_NACK:
216 return -EIO; 216 err = -EIO;
217 goto unlock;
217 218
218 case DP_AUX_NATIVE_REPLY_DEFER: 219 case DP_AUX_NATIVE_REPLY_DEFER:
219 usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); 220 usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
222 } 223 }
223 224
224 DRM_DEBUG_KMS("too many retries, giving up\n"); 225 DRM_DEBUG_KMS("too many retries, giving up\n");
225 return -EIO; 226 err = -EIO;
227
228unlock:
229 mutex_unlock(&aux->hw_mutex);
230 return err;
226} 231}
227 232
228/** 233/**
@@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
544 int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); 549 int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
545 550
546 for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { 551 for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
547 mutex_lock(&aux->hw_mutex);
548 ret = aux->transfer(aux, msg); 552 ret = aux->transfer(aux, msg);
549 mutex_unlock(&aux->hw_mutex);
550 if (ret < 0) { 553 if (ret < 0) {
551 if (ret == -EBUSY) 554 if (ret == -EBUSY)
552 continue; 555 continue;
@@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
685 688
686 memset(&msg, 0, sizeof(msg)); 689 memset(&msg, 0, sizeof(msg));
687 690
691 mutex_lock(&aux->hw_mutex);
692
688 for (i = 0; i < num; i++) { 693 for (i = 0; i < num; i++) {
689 msg.address = msgs[i].addr; 694 msg.address = msgs[i].addr;
690 drm_dp_i2c_msg_set_request(&msg, &msgs[i]); 695 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
739 msg.size = 0; 744 msg.size = 0;
740 (void)drm_dp_i2c_do_msg(aux, &msg); 745 (void)drm_dp_i2c_do_msg(aux, &msg);
741 746
747 mutex_unlock(&aux->hw_mutex);
748
742 return err; 749 return err;
743} 750}
744 751
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
534 534
535fail: 535fail:
536 while (i--) 536 while (i--)
537 page_cache_release(pages[i]); 537 put_page(pages[i]);
538 538
539 drm_free_large(pages); 539 drm_free_large(pages);
540 return ERR_CAST(p); 540 return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
569 mark_page_accessed(pages[i]); 569 mark_page_accessed(pages[i]);
570 570
571 /* Undo the reference we took when populating the table */ 571 /* Undo the reference we took when populating the table */
572 page_cache_release(pages[i]); 572 put_page(pages[i]);
573 } 573 }
574 574
575 drm_free_large(pages); 575 drm_free_large(pages);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
220 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to 220 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
221 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon. 221 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
222 */ 222 */
223static int __deprecated 223static int
224i2c_dp_aux_add_bus(struct i2c_adapter *adapter) 224i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
225{ 225{
226 int error; 226 int error;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3ac589e..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 drm_clflush_virt_range(vaddr, PAGE_SIZE); 177 drm_clflush_virt_range(vaddr, PAGE_SIZE);
178 kunmap_atomic(src); 178 kunmap_atomic(src);
179 179
180 page_cache_release(page); 180 put_page(page);
181 vaddr += PAGE_SIZE; 181 vaddr += PAGE_SIZE;
182 } 182 }
183 183
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
243 set_page_dirty(page); 243 set_page_dirty(page);
244 if (obj->madv == I915_MADV_WILLNEED) 244 if (obj->madv == I915_MADV_WILLNEED)
245 mark_page_accessed(page); 245 mark_page_accessed(page);
246 page_cache_release(page); 246 put_page(page);
247 vaddr += PAGE_SIZE; 247 vaddr += PAGE_SIZE;
248 } 248 }
249 obj->dirty = 0; 249 obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2206 if (obj->madv == I915_MADV_WILLNEED) 2206 if (obj->madv == I915_MADV_WILLNEED)
2207 mark_page_accessed(page); 2207 mark_page_accessed(page);
2208 2208
2209 page_cache_release(page); 2209 put_page(page);
2210 } 2210 }
2211 obj->dirty = 0; 2211 obj->dirty = 0;
2212 2212
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2346err_pages: 2346err_pages:
2347 sg_mark_end(sg); 2347 sg_mark_end(sg);
2348 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2348 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2349 page_cache_release(sg_page_iter_page(&sg_iter)); 2349 put_page(sg_page_iter_page(&sg_iter));
2350 sg_free_table(st); 2350 sg_free_table(st);
2351 kfree(st); 2351 kfree(st);
2352 2352
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6be40f3ba2c7..18ba8139e922 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
683 set_page_dirty(page); 683 set_page_dirty(page);
684 684
685 mark_page_accessed(page); 685 mark_page_accessed(page);
686 page_cache_release(page); 686 put_page(page);
687 } 687 }
688 obj->dirty = 0; 688 obj->dirty = 0;
689 689
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 2a95d10e9d92..a24631fdf4ad 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
225 if (!iores) 225 if (!iores)
226 return -ENXIO; 226 return -ENXIO;
227 227
228 platform_set_drvdata(pdev, hdmi);
229
230 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); 228 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
231 /* 229 /*
232 * If we failed to find the CRTC(s) which this encoder is 230 * If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
245 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, 243 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS, NULL); 244 DRM_MODE_ENCODER_TMDS, NULL);
247 245
248 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); 246 ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
247
248 /*
249 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
250 * which would have called the encoder cleanup. Do it manually.
251 */
252 if (ret)
253 drm_encoder_cleanup(encoder);
254
255 return ret;
249} 256}
250 257
251static void dw_hdmi_imx_unbind(struct device *dev, struct device *master, 258static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9876e0f0c3e1..e26dcdec2aba 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
326{ 326{
327 struct imx_drm_device *imxdrm = drm->dev_private; 327 struct imx_drm_device *imxdrm = drm->dev_private;
328 struct imx_drm_crtc *imx_drm_crtc; 328 struct imx_drm_crtc *imx_drm_crtc;
329 int ret;
330 329
331 /* 330 /*
332 * The vblank arrays are dimensioned by MAX_CRTC - we can't 331 * The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
351 350
352 *new_crtc = imx_drm_crtc; 351 *new_crtc = imx_drm_crtc;
353 352
354 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
355 if (ret)
356 goto err_register;
357
358 drm_crtc_helper_add(crtc, 353 drm_crtc_helper_add(crtc,
359 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 354 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
360 355
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
362 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL); 357 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
363 358
364 return 0; 359 return 0;
365
366err_register:
367 imxdrm->crtc[--imxdrm->pipes] = NULL;
368 kfree(imx_drm_crtc);
369 return ret;
370} 360}
371EXPORT_SYMBOL_GPL(imx_drm_add_crtc); 361EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
372 362
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 588827844f30..681ec6eb77d9 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
72int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, 72int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
73 int x, int y) 73 int x, int y)
74{ 74{
75 struct drm_gem_cma_object *cma_obj; 75 struct drm_gem_cma_object *cma_obj[3];
76 unsigned long eba; 76 unsigned long eba, ubo, vbo;
77 int active; 77 int active, i;
78 78
79 cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 79 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
80 if (!cma_obj) { 80 cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
81 DRM_DEBUG_KMS("entry is null.\n"); 81 if (!cma_obj[i]) {
82 return -EFAULT; 82 DRM_DEBUG_KMS("plane %d entry is null.\n", i);
83 return -EFAULT;
84 }
83 } 85 }
84 86
85 dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", 87 eba = cma_obj[0]->paddr + fb->offsets[0] +
86 &cma_obj->paddr, x, y);
87
88 eba = cma_obj->paddr + fb->offsets[0] +
89 fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; 88 fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
90 89
90 if (eba & 0x7) {
91 DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
92 return -EINVAL;
93 }
94
95 if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
96 DRM_DEBUG_KMS("pitches out of range.\n");
97 return -EINVAL;
98 }
99
100 if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
101 DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
102 return -EINVAL;
103 }
104
105 ipu_plane->stride[0] = fb->pitches[0];
106
107 switch (fb->pixel_format) {
108 case DRM_FORMAT_YUV420:
109 case DRM_FORMAT_YVU420:
110 /*
111 * Multiplanar formats have to meet the following restrictions:
112 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
113 * - EBA, UBO and VBO are a multiple of 8
114 * - UBO and VBO are unsigned and not larger than 0xfffff8
115 * - Only EBA may be changed while scanout is active
116 * - The strides of U and V planes must be identical.
117 */
118 ubo = cma_obj[1]->paddr + fb->offsets[1] +
119 fb->pitches[1] * y / 2 + x / 2 - eba;
120 vbo = cma_obj[2]->paddr + fb->offsets[2] +
121 fb->pitches[2] * y / 2 + x / 2 - eba;
122
123 if ((ubo & 0x7) || (vbo & 0x7)) {
124 DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
125 return -EINVAL;
126 }
127
128 if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
129 DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
130 return -EINVAL;
131 }
132
133 if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
134 (ipu_plane->v_offset != vbo))) {
135 DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
136 return -EINVAL;
137 }
138
139 if (fb->pitches[1] != fb->pitches[2]) {
140 DRM_DEBUG_KMS("U/V pitches must be identical.\n");
141 return -EINVAL;
142 }
143
144 if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
145 DRM_DEBUG_KMS("U/V pitches out of range.\n");
146 return -EINVAL;
147 }
148
149 if (ipu_plane->enabled &&
150 (ipu_plane->stride[1] != fb->pitches[1])) {
151 DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
152 return -EINVAL;
153 }
154
155 ipu_plane->u_offset = ubo;
156 ipu_plane->v_offset = vbo;
157 ipu_plane->stride[1] = fb->pitches[1];
158
159 dev_dbg(ipu_plane->base.dev->dev,
160 "phys = %pad %pad %pad, x = %d, y = %d",
161 &cma_obj[0]->paddr, &cma_obj[1]->paddr,
162 &cma_obj[2]->paddr, x, y);
163 break;
164 default:
165 dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
166 &cma_obj[0]->paddr, x, y);
167 break;
168 }
169
91 if (ipu_plane->enabled) { 170 if (ipu_plane->enabled) {
92 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); 171 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
93 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); 172 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
201 } 280 }
202 } 281 }
203 282
204 ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
205 if (ret) {
206 dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
207 return ret;
208 }
209
210 ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc, 283 ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
211 calc_bandwidth(crtc_w, crtc_h, 284 calc_bandwidth(crtc_w, crtc_h,
212 calc_vref(mode)), 64); 285 calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
215 return ret; 288 return ret;
216 } 289 }
217 290
291 ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
292
218 ipu_cpmem_zero(ipu_plane->ipu_ch); 293 ipu_cpmem_zero(ipu_plane->ipu_ch);
219 ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h); 294 ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
220 ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format); 295 ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
233 if (interlaced) 308 if (interlaced)
234 ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]); 309 ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
235 310
311 if (fb->pixel_format == DRM_FORMAT_YUV420) {
312 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
313 ipu_plane->stride[1],
314 ipu_plane->u_offset,
315 ipu_plane->v_offset);
316 } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
317 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
318 ipu_plane->stride[1],
319 ipu_plane->v_offset,
320 ipu_plane->u_offset);
321 }
322
236 ipu_plane->w = src_w; 323 ipu_plane->w = src_w;
237 ipu_plane->h = src_h; 324 ipu_plane->h = src_h;
238 325
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b413c60..4448fd4ad4eb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@ struct ipu_plane {
29 int w; 29 int w;
30 int h; 30 int h;
31 31
32 unsigned int u_offset;
33 unsigned int v_offset;
34 unsigned int stride[2];
35
32 bool enabled; 36 bool enabled;
33}; 37};
34 38
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b04a64664673..65428cf233ce 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
196int msm_hdmi_pll_8960_init(struct platform_device *pdev); 196int msm_hdmi_pll_8960_init(struct platform_device *pdev);
197int msm_hdmi_pll_8996_init(struct platform_device *pdev); 197int msm_hdmi_pll_8996_init(struct platform_device *pdev);
198#else 198#else
199static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev); 199static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
200{ 200{
201 return -ENODEV; 201 return -ENODEV;
202} 202}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d52910e2c26c..c03b96709179 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
467 struct msm_file_private *ctx = file->driver_priv; 467 struct msm_file_private *ctx = file->driver_priv;
468 struct msm_kms *kms = priv->kms; 468 struct msm_kms *kms = priv->kms;
469 469
470 if (kms)
471 kms->funcs->preclose(kms, file);
472
473 mutex_lock(&dev->struct_mutex); 470 mutex_lock(&dev->struct_mutex);
474 if (ctx == priv->lastctx) 471 if (ctx == priv->lastctx)
475 priv->lastctx = NULL; 472 priv->lastctx = NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..e32222c3d44f 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -55,7 +55,6 @@ struct msm_kms_funcs {
55 struct drm_encoder *slave_encoder, 55 struct drm_encoder *slave_encoder,
56 bool is_cmd_mode); 56 bool is_cmd_mode);
57 /* cleanup: */ 57 /* cleanup: */
58 void (*preclose)(struct msm_kms *kms, struct drm_file *file);
59 void (*destroy)(struct msm_kms *kms); 58 void (*destroy)(struct msm_kms *kms);
60}; 59};
61 60
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 16641cec18a2..b5370cb56e3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
11 11
12 struct reset_control *rst; 12 struct reset_control *rst;
13 struct clk *clk; 13 struct clk *clk;
14 struct clk *clk_ref;
14 struct clk *clk_pwr; 15 struct clk *clk_pwr;
15 16
16 struct regulator *vdd; 17 struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
36 * bypassed). A value of 0 means an IOMMU is never used. 37 * bypassed). A value of 0 means an IOMMU is never used.
37 */ 38 */
38 u8 iommu_bit; 39 u8 iommu_bit;
40 /*
41 * Whether the chip requires a reference clock
42 */
43 bool require_ref_clk;
39}; 44};
40 45
41int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *, 46int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 2dfe58af12e4..4c4cc2260257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
55 .iommu_bit = 34, 55 .iommu_bit = 34,
56}; 56};
57 57
58static const struct nvkm_device_tegra_func gm20b_platform_data = {
59 .iommu_bit = 34,
60 .require_ref_clk = true,
61};
62
58static const struct of_device_id nouveau_platform_match[] = { 63static const struct of_device_id nouveau_platform_match[] = {
59 { 64 {
60 .compatible = "nvidia,gk20a", 65 .compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
62 }, 67 },
63 { 68 {
64 .compatible = "nvidia,gm20b", 69 .compatible = "nvidia,gm20b",
65 .data = &gk20a_platform_data, 70 .data = &gm20b_platform_data,
66 }, 71 },
67 { } 72 { }
68}; 73};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9afa5f3e3c1c..ec12efb4689a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
35 ret = clk_prepare_enable(tdev->clk); 35 ret = clk_prepare_enable(tdev->clk);
36 if (ret) 36 if (ret)
37 goto err_clk; 37 goto err_clk;
38 if (tdev->clk_ref) {
39 ret = clk_prepare_enable(tdev->clk_ref);
40 if (ret)
41 goto err_clk_ref;
42 }
38 ret = clk_prepare_enable(tdev->clk_pwr); 43 ret = clk_prepare_enable(tdev->clk_pwr);
39 if (ret) 44 if (ret)
40 goto err_clk_pwr; 45 goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
57err_clamp: 62err_clamp:
58 clk_disable_unprepare(tdev->clk_pwr); 63 clk_disable_unprepare(tdev->clk_pwr);
59err_clk_pwr: 64err_clk_pwr:
65 if (tdev->clk_ref)
66 clk_disable_unprepare(tdev->clk_ref);
67err_clk_ref:
60 clk_disable_unprepare(tdev->clk); 68 clk_disable_unprepare(tdev->clk);
61err_clk: 69err_clk:
62 regulator_disable(tdev->vdd); 70 regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
71 udelay(10); 79 udelay(10);
72 80
73 clk_disable_unprepare(tdev->clk_pwr); 81 clk_disable_unprepare(tdev->clk_pwr);
82 if (tdev->clk_ref)
83 clk_disable_unprepare(tdev->clk_ref);
74 clk_disable_unprepare(tdev->clk); 84 clk_disable_unprepare(tdev->clk);
75 udelay(10); 85 udelay(10);
76 86
@@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
274 goto free; 284 goto free;
275 } 285 }
276 286
287 if (func->require_ref_clk)
288 tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
289 if (IS_ERR(tdev->clk_ref)) {
290 ret = PTR_ERR(tdev->clk_ref);
291 goto free;
292 }
293
277 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 294 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
278 if (IS_ERR(tdev->clk_pwr)) { 295 if (IS_ERR(tdev->clk_pwr)) {
279 ret = PTR_ERR(tdev->clk_pwr); 296 ret = PTR_ERR(tdev->clk_pwr);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cf61e0856f4a..b80b08f71cb4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
275 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 275 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
277 atombios_blank_crtc(crtc, ATOM_DISABLE); 277 atombios_blank_crtc(crtc, ATOM_DISABLE);
278 drm_vblank_on(dev, radeon_crtc->crtc_id); 278 if (dev->num_crtcs > radeon_crtc->crtc_id)
279 drm_vblank_on(dev, radeon_crtc->crtc_id);
279 radeon_crtc_load_lut(crtc); 280 radeon_crtc_load_lut(crtc);
280 break; 281 break;
281 case DRM_MODE_DPMS_STANDBY: 282 case DRM_MODE_DPMS_STANDBY:
282 case DRM_MODE_DPMS_SUSPEND: 283 case DRM_MODE_DPMS_SUSPEND:
283 case DRM_MODE_DPMS_OFF: 284 case DRM_MODE_DPMS_OFF:
284 drm_vblank_off(dev, radeon_crtc->crtc_id); 285 if (dev->num_crtcs > radeon_crtc->crtc_id)
286 drm_vblank_off(dev, radeon_crtc->crtc_id);
285 if (radeon_crtc->enabled) 287 if (radeon_crtc->enabled)
286 atombios_blank_crtc(crtc, ATOM_ENABLE); 288 atombios_blank_crtc(crtc, ATOM_ENABLE);
287 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf65f2c..1e9304d1c88f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
291 if (r) { 291 if (r) {
292 return r; 292 return r;
293 } 293 }
294 rdev->ddev->vblank_disable_allowed = true;
295
294 /* enable msi */ 296 /* enable msi */
295 rdev->msi_enabled = 0; 297 rdev->msi_enabled = 0;
296 298
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 24152dfef199..478d4099b0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
331 RADEON_CRTC_DISP_REQ_EN_B)); 331 RADEON_CRTC_DISP_REQ_EN_B));
332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
333 } 333 }
334 drm_vblank_on(dev, radeon_crtc->crtc_id); 334 if (dev->num_crtcs > radeon_crtc->crtc_id)
335 drm_vblank_on(dev, radeon_crtc->crtc_id);
335 radeon_crtc_load_lut(crtc); 336 radeon_crtc_load_lut(crtc);
336 break; 337 break;
337 case DRM_MODE_DPMS_STANDBY: 338 case DRM_MODE_DPMS_STANDBY:
338 case DRM_MODE_DPMS_SUSPEND: 339 case DRM_MODE_DPMS_SUSPEND:
339 case DRM_MODE_DPMS_OFF: 340 case DRM_MODE_DPMS_OFF:
340 drm_vblank_off(dev, radeon_crtc->crtc_id); 341 if (dev->num_crtcs > radeon_crtc->crtc_id)
342 drm_vblank_off(dev, radeon_crtc->crtc_id);
341 if (radeon_crtc->crtc_id) 343 if (radeon_crtc->crtc_id)
342 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); 344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
343 else { 345 else {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index dd46c38676db..2d901bf28a94 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
799 if ((offset + size) <= rdev->mc.visible_vram_size) 799 if ((offset + size) <= rdev->mc.visible_vram_size)
800 return 0; 800 return 0;
801 801
802 /* Can't move a pinned BO to visible VRAM */
803 if (rbo->pin_count > 0)
804 return -EINVAL;
805
802 /* hurrah the memory is not visible ! */ 806 /* hurrah the memory is not visible ! */
803 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 807 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
804 lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 808 lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6d8c32377c6f..7dddfdce85e6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -397,9 +397,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
397 struct ttm_mem_reg *new_mem) 397 struct ttm_mem_reg *new_mem)
398{ 398{
399 struct radeon_device *rdev; 399 struct radeon_device *rdev;
400 struct radeon_bo *rbo;
400 struct ttm_mem_reg *old_mem = &bo->mem; 401 struct ttm_mem_reg *old_mem = &bo->mem;
401 int r; 402 int r;
402 403
404 /* Can't move a pinned BO */
405 rbo = container_of(bo, struct radeon_bo, tbo);
406 if (WARN_ON_ONCE(rbo->pin_count > 0))
407 return -EINVAL;
408
403 rdev = radeon_get_rdev(bo->bdev); 409 rdev = radeon_get_rdev(bo->bdev);
404 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 410 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
405 radeon_move_null(bo, new_mem); 411 radeon_move_null(bo, new_mem);
@@ -609,7 +615,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
609 set_page_dirty(page); 615 set_page_dirty(page);
610 616
611 mark_page_accessed(page); 617 mark_page_accessed(page);
612 page_cache_release(page); 618 put_page(page);
613 } 619 }
614 620
615 sg_free_table(ttm->sg); 621 sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cb75ab72098a..af4df81c4e0c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 2930 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2930 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2932 { 0, 0, 0, 0 }, 2934 { 0, 0, 0, 0 },
2933}; 2935};
2934 2936
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3008 } 3010 }
3009 ++p; 3011 ++p;
3010 } 3012 }
3013 /* limit mclk on all R7 370 parts for stability */
3014 if (rdev->pdev->device == 0x6811 &&
3015 rdev->pdev->revision == 0x81)
3016 max_mclk = 120000;
3011 3017
3012 if (rps->vce_active) { 3018 if (rps->vce_active) {
3013 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 3019 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 3d3cf2f8891e..d5cfef75fc80 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
271 if (!iores) 271 if (!iores)
272 return -ENXIO; 272 return -ENXIO;
273 273
274 platform_set_drvdata(pdev, hdmi);
275
276 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); 274 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
277 /* 275 /*
278 * If we failed to find the CRTC(s) which this encoder is 276 * If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
293 drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, 291 drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
294 DRM_MODE_ENCODER_TMDS, NULL); 292 DRM_MODE_ENCODER_TMDS, NULL);
295 293
296 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); 294 ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
295
296 /*
297 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
298 * which would have called the encoder cleanup. Do it manually.
299 */
300 if (ret)
301 drm_encoder_cleanup(encoder);
302
303 return ret;
297} 304}
298 305
299static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, 306static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 896da09e49ee..f556a8f4fde6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
251 return 0; 251 return 0;
252} 252}
253 253
254static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
255 struct drm_file *file_priv)
256{
257 struct rockchip_drm_private *priv = crtc->dev->dev_private;
258 int pipe = drm_crtc_index(crtc);
259
260 if (pipe < ROCKCHIP_MAX_CRTC &&
261 priv->crtc_funcs[pipe] &&
262 priv->crtc_funcs[pipe]->cancel_pending_vblank)
263 priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
264}
265
266static void rockchip_drm_preclose(struct drm_device *dev,
267 struct drm_file *file_priv)
268{
269 struct drm_crtc *crtc;
270
271 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
272 rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
273}
274
254void rockchip_drm_lastclose(struct drm_device *dev) 275void rockchip_drm_lastclose(struct drm_device *dev)
255{ 276{
256 struct rockchip_drm_private *priv = dev->dev_private; 277 struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = {
281 DRIVER_PRIME | DRIVER_ATOMIC, 302 DRIVER_PRIME | DRIVER_ATOMIC,
282 .load = rockchip_drm_load, 303 .load = rockchip_drm_load,
283 .unload = rockchip_drm_unload, 304 .unload = rockchip_drm_unload,
305 .preclose = rockchip_drm_preclose,
284 .lastclose = rockchip_drm_lastclose, 306 .lastclose = rockchip_drm_lastclose,
285 .get_vblank_counter = drm_vblank_no_hw_counter, 307 .get_vblank_counter = drm_vblank_no_hw_counter,
286 .enable_vblank = rockchip_drm_crtc_enable_vblank, 308 .enable_vblank = rockchip_drm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3529f692edb8..00d17d71aa4c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs {
40 int (*enable_vblank)(struct drm_crtc *crtc); 40 int (*enable_vblank)(struct drm_crtc *crtc);
41 void (*disable_vblank)(struct drm_crtc *crtc); 41 void (*disable_vblank)(struct drm_crtc *crtc);
42 void (*wait_for_update)(struct drm_crtc *crtc); 42 void (*wait_for_update)(struct drm_crtc *crtc);
43 void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
43}; 44};
44 45
45struct rockchip_atomic_commit { 46struct rockchip_atomic_commit {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fd370548d7d7..a619f120f801 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -499,10 +499,25 @@ err_disable_hclk:
499static void vop_crtc_disable(struct drm_crtc *crtc) 499static void vop_crtc_disable(struct drm_crtc *crtc)
500{ 500{
501 struct vop *vop = to_vop(crtc); 501 struct vop *vop = to_vop(crtc);
502 int i;
502 503
503 if (!vop->is_enabled) 504 if (!vop->is_enabled)
504 return; 505 return;
505 506
507 /*
508 * We need to make sure that all windows are disabled before we
509 * disable that crtc. Otherwise we might try to scan from a destroyed
510 * buffer later.
511 */
512 for (i = 0; i < vop->data->win_size; i++) {
513 struct vop_win *vop_win = &vop->win[i];
514 const struct vop_win_data *win = vop_win->data;
515
516 spin_lock(&vop->reg_lock);
517 VOP_WIN_SET(vop, win, enable, 0);
518 spin_unlock(&vop->reg_lock);
519 }
520
506 drm_crtc_vblank_off(crtc); 521 drm_crtc_vblank_off(crtc);
507 522
508 /* 523 /*
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
549 struct drm_plane_state *state) 564 struct drm_plane_state *state)
550{ 565{
551 struct drm_crtc *crtc = state->crtc; 566 struct drm_crtc *crtc = state->crtc;
567 struct drm_crtc_state *crtc_state;
552 struct drm_framebuffer *fb = state->fb; 568 struct drm_framebuffer *fb = state->fb;
553 struct vop_win *vop_win = to_vop_win(plane); 569 struct vop_win *vop_win = to_vop_win(plane);
554 struct vop_plane_state *vop_plane_state = to_vop_plane_state(state); 570 struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
563 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 579 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
564 DRM_PLANE_HELPER_NO_SCALING; 580 DRM_PLANE_HELPER_NO_SCALING;
565 581
566 crtc = crtc ? crtc : plane->state->crtc;
567 /*
568 * Both crtc or plane->state->crtc can be null.
569 */
570 if (!crtc || !fb) 582 if (!crtc || !fb)
571 goto out_disable; 583 goto out_disable;
584
585 crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
586 if (WARN_ON(!crtc_state))
587 return -EINVAL;
588
572 src->x1 = state->src_x; 589 src->x1 = state->src_x;
573 src->y1 = state->src_y; 590 src->y1 = state->src_y;
574 src->x2 = state->src_x + state->src_w; 591 src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
580 597
581 clip.x1 = 0; 598 clip.x1 = 0;
582 clip.y1 = 0; 599 clip.y1 = 0;
583 clip.x2 = crtc->mode.hdisplay; 600 clip.x2 = crtc_state->adjusted_mode.hdisplay;
584 clip.y2 = crtc->mode.vdisplay; 601 clip.y2 = crtc_state->adjusted_mode.vdisplay;
585 602
586 ret = drm_plane_helper_check_update(plane, crtc, state->fb, 603 ret = drm_plane_helper_check_update(plane, crtc, state->fb,
587 src, dest, &clip, 604 src, dest, &clip,
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
873 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); 890 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
874} 891}
875 892
893static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
894 struct drm_file *file_priv)
895{
896 struct drm_device *drm = crtc->dev;
897 struct vop *vop = to_vop(crtc);
898 struct drm_pending_vblank_event *e;
899 unsigned long flags;
900
901 spin_lock_irqsave(&drm->event_lock, flags);
902 e = vop->event;
903 if (e && e->base.file_priv == file_priv) {
904 vop->event = NULL;
905
906 e->base.destroy(&e->base);
907 file_priv->event_space += sizeof(e->event);
908 }
909 spin_unlock_irqrestore(&drm->event_lock, flags);
910}
911
876static const struct rockchip_crtc_funcs private_crtc_funcs = { 912static const struct rockchip_crtc_funcs private_crtc_funcs = {
877 .enable_vblank = vop_crtc_enable_vblank, 913 .enable_vblank = vop_crtc_enable_vblank,
878 .disable_vblank = vop_crtc_disable_vblank, 914 .disable_vblank = vop_crtc_disable_vblank,
879 .wait_for_update = vop_crtc_wait_for_update, 915 .wait_for_update = vop_crtc_wait_for_update,
916 .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
880}; 917};
881 918
882static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 919static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
885{ 922{
886 struct vop *vop = to_vop(crtc); 923 struct vop *vop = to_vop(crtc);
887 924
888 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
889 return false;
890
891 adjusted_mode->clock = 925 adjusted_mode->clock =
892 clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; 926 clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
893 927
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop)
1108 const struct vop_data *vop_data = vop->data; 1142 const struct vop_data *vop_data = vop->data;
1109 struct device *dev = vop->dev; 1143 struct device *dev = vop->dev;
1110 struct drm_device *drm_dev = vop->drm_dev; 1144 struct drm_device *drm_dev = vop->drm_dev;
1111 struct drm_plane *primary = NULL, *cursor = NULL, *plane; 1145 struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
1112 struct drm_crtc *crtc = &vop->crtc; 1146 struct drm_crtc *crtc = &vop->crtc;
1113 struct device_node *port; 1147 struct device_node *port;
1114 int ret; 1148 int ret;
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop)
1148 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1182 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1149 &vop_crtc_funcs, NULL); 1183 &vop_crtc_funcs, NULL);
1150 if (ret) 1184 if (ret)
1151 return ret; 1185 goto err_cleanup_planes;
1152 1186
1153 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); 1187 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
1154 1188
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop)
1181 if (!port) { 1215 if (!port) {
1182 DRM_ERROR("no port node found in %s\n", 1216 DRM_ERROR("no port node found in %s\n",
1183 dev->of_node->full_name); 1217 dev->of_node->full_name);
1218 ret = -ENOENT;
1184 goto err_cleanup_crtc; 1219 goto err_cleanup_crtc;
1185 } 1220 }
1186 1221
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop)
1194err_cleanup_crtc: 1229err_cleanup_crtc:
1195 drm_crtc_cleanup(crtc); 1230 drm_crtc_cleanup(crtc);
1196err_cleanup_planes: 1231err_cleanup_planes:
1197 list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head) 1232 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
1233 head)
1198 drm_plane_cleanup(plane); 1234 drm_plane_cleanup(plane);
1199 return ret; 1235 return ret;
1200} 1236}
@@ -1202,9 +1238,28 @@ err_cleanup_planes:
1202static void vop_destroy_crtc(struct vop *vop) 1238static void vop_destroy_crtc(struct vop *vop)
1203{ 1239{
1204 struct drm_crtc *crtc = &vop->crtc; 1240 struct drm_crtc *crtc = &vop->crtc;
1241 struct drm_device *drm_dev = vop->drm_dev;
1242 struct drm_plane *plane, *tmp;
1205 1243
1206 rockchip_unregister_crtc_funcs(crtc); 1244 rockchip_unregister_crtc_funcs(crtc);
1207 of_node_put(crtc->port); 1245 of_node_put(crtc->port);
1246
1247 /*
1248 * We need to cleanup the planes now. Why?
1249 *
1250 * The planes are "&vop->win[i].base". That means the memory is
1251 * all part of the big "struct vop" chunk of memory. That memory
1252 * was devm allocated and associated with this component. We need to
1253 * free it ourselves before vop_unbind() finishes.
1254 */
1255 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
1256 head)
1257 vop_plane_destroy(plane);
1258
1259 /*
1260 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
1261 * references the CRTC.
1262 */
1208 drm_crtc_cleanup(crtc); 1263 drm_crtc_cleanup(crtc);
1209} 1264}
1210 1265
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
311 goto out_err; 311 goto out_err;
312 312
313 copy_highpage(to_page, from_page); 313 copy_highpage(to_page, from_page);
314 page_cache_release(from_page); 314 put_page(from_page);
315 } 315 }
316 316
317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) 317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
361 copy_highpage(to_page, from_page); 361 copy_highpage(to_page, from_page);
362 set_page_dirty(to_page); 362 set_page_dirty(to_page);
363 mark_page_accessed(to_page); 363 mark_page_accessed(to_page);
364 page_cache_release(to_page); 364 put_page(to_page);
365 } 365 }
366 366
367 ttm_tt_unpopulate(ttm); 367 ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 33239a2b264a..fd1eb9d03f0b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
536out_destroy_fbi: 536out_destroy_fbi:
537 drm_fb_helper_release_fbi(helper); 537 drm_fb_helper_release_fbi(helper);
538out_gfree: 538out_gfree:
539 drm_gem_object_unreference(&ufbdev->ufb.obj->base); 539 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
540out: 540out:
541 return ret; 541 return ret;
542} 542}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
52 return ret; 52 return ret;
53 } 53 }
54 54
55 drm_gem_object_unreference(&obj->base); 55 drm_gem_object_unreference_unlocked(&obj->base);
56 *handle_p = handle; 56 *handle_p = handle;
57 return 0; 57 return 0;
58} 58}
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index e797dfc07ae3..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
188 if (NULL != (page = vsg->pages[i])) { 188 if (NULL != (page = vsg->pages[i])) {
189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
190 SetPageDirty(page); 190 SetPageDirty(page);
191 page_cache_release(page); 191 put_page(page);
192 } 192 }
193 } 193 }
194 case dr_via_pages_alloc: 194 case dr_via_pages_alloc:
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314cd83a..6494a4d28171 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
395EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved); 395EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
396 396
397void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, 397void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
398 u32 pixel_format, int stride, 398 unsigned int uv_stride,
399 int u_offset, int v_offset) 399 unsigned int u_offset, unsigned int v_offset)
400{ 400{
401 switch (pixel_format) { 401 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
402 case V4L2_PIX_FMT_YUV420: 402 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
403 case V4L2_PIX_FMT_YUV422P: 403 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
404 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
405 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
406 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
407 break;
408 case V4L2_PIX_FMT_YVU420:
409 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
410 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
411 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
412 break;
413 case V4L2_PIX_FMT_NV12:
414 case V4L2_PIX_FMT_NV16:
415 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
416 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
417 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
418 break;
419 }
420} 404}
421EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full); 405EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
422 406
423void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, 407void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
424 u32 pixel_format, int stride, int height) 408 u32 pixel_format, int stride, int height)
425{ 409{
426 int u_offset, v_offset; 410 int fourcc, u_offset, v_offset;
427 int uv_stride = 0; 411 int uv_stride = 0;
428 412
429 switch (pixel_format) { 413 fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
430 case V4L2_PIX_FMT_YUV420: 414 switch (fourcc) {
431 case V4L2_PIX_FMT_YVU420: 415 case DRM_FORMAT_YUV420:
432 uv_stride = stride / 2; 416 uv_stride = stride / 2;
433 u_offset = stride * height; 417 u_offset = stride * height;
434 v_offset = u_offset + (uv_stride * height / 2); 418 v_offset = u_offset + (uv_stride * height / 2);
435 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
436 u_offset, v_offset);
437 break; 419 break;
438 case V4L2_PIX_FMT_YUV422P: 420 case DRM_FORMAT_YVU420:
421 uv_stride = stride / 2;
422 v_offset = stride * height;
423 u_offset = v_offset + (uv_stride * height / 2);
424 break;
425 case DRM_FORMAT_YUV422:
439 uv_stride = stride / 2; 426 uv_stride = stride / 2;
440 u_offset = stride * height; 427 u_offset = stride * height;
441 v_offset = u_offset + (uv_stride * height); 428 v_offset = u_offset + (uv_stride * height);
442 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
443 u_offset, v_offset);
444 break; 429 break;
445 case V4L2_PIX_FMT_NV12: 430 case DRM_FORMAT_NV12:
446 case V4L2_PIX_FMT_NV16: 431 case DRM_FORMAT_NV16:
432 uv_stride = stride;
447 u_offset = stride * height; 433 u_offset = stride * height;
448 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, 434 v_offset = 0;
449 u_offset, 0);
450 break; 435 break;
436 default:
437 return;
451 } 438 }
439 ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
452} 440}
453EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar); 441EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
454 442
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
684 672
685 switch (pix->pixelformat) { 673 switch (pix->pixelformat) {
686 case V4L2_PIX_FMT_YUV420: 674 case V4L2_PIX_FMT_YUV420:
687 case V4L2_PIX_FMT_YVU420:
688 offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 675 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
689 u_offset = U_OFFSET(pix, image->rect.left, 676 u_offset = U_OFFSET(pix, image->rect.left,
690 image->rect.top) - offset; 677 image->rect.top) - offset;
691 v_offset = V_OFFSET(pix, image->rect.left, 678 v_offset = V_OFFSET(pix, image->rect.left,
692 image->rect.top) - offset; 679 image->rect.top) - offset;
693 680
694 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 681 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
695 pix->bytesperline,
696 u_offset, v_offset); 682 u_offset, v_offset);
697 break; 683 break;
684 case V4L2_PIX_FMT_YVU420:
685 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
686 u_offset = U_OFFSET(pix, image->rect.left,
687 image->rect.top) - offset;
688 v_offset = V_OFFSET(pix, image->rect.left,
689 image->rect.top) - offset;
690
691 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
692 v_offset, u_offset);
693 break;
698 case V4L2_PIX_FMT_YUV422P: 694 case V4L2_PIX_FMT_YUV422P:
699 offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 695 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
700 u_offset = U2_OFFSET(pix, image->rect.left, 696 u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
702 v_offset = V2_OFFSET(pix, image->rect.left, 698 v_offset = V2_OFFSET(pix, image->rect.left,
703 image->rect.top) - offset; 699 image->rect.top) - offset;
704 700
705 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 701 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
706 pix->bytesperline,
707 u_offset, v_offset); 702 u_offset, v_offset);
708 break; 703 break;
709 case V4L2_PIX_FMT_NV12: 704 case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
712 image->rect.top) - offset; 707 image->rect.top) - offset;
713 v_offset = 0; 708 v_offset = 0;
714 709
715 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 710 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
716 pix->bytesperline,
717 u_offset, v_offset); 711 u_offset, v_offset);
718 break; 712 break;
719 case V4L2_PIX_FMT_NV16: 713 case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
722 image->rect.top) - offset; 716 image->rect.top) - offset;
723 v_offset = 0; 717 v_offset = 0;
724 718
725 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 719 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
726 pix->bytesperline,
727 u_offset, v_offset); 720 u_offset, v_offset);
728 break; 721 break;
729 case V4L2_PIX_FMT_UYVY: 722 case V4L2_PIX_FMT_UYVY:
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c3958e2a0..837b1ec22800 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@ out:
350} 350}
351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth); 351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
352 352
353int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width) 353void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
354{ 354{
355 struct ipu_dmfc_priv *priv = dmfc->priv; 355 struct ipu_dmfc_priv *priv = dmfc->priv;
356 u32 dmfc_gen1; 356 u32 dmfc_gen1;
357 357
358 mutex_lock(&priv->mutex);
359
358 dmfc_gen1 = readl(priv->base + DMFC_GENERAL1); 360 dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
359 361
360 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines) 362 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
364 366
365 writel(dmfc_gen1, priv->base + DMFC_GENERAL1); 367 writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
366 368
367 return 0; 369 mutex_unlock(&priv->mutex);
368} 370}
369EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel); 371EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
370 372
371struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel) 373struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
372{ 374{
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
85 85
86int max1111_read_channel(int channel) 86int max1111_read_channel(int channel)
87{ 87{
88 if (!the_max1111 || !the_max1111->spi)
89 return -ENODEV;
90
88 return max1111_read(&the_max1111->spi->dev, channel); 91 return max1111_read(&the_max1111->spi->dev, channel);
89} 92}
90EXPORT_SYMBOL(max1111_read_channel); 93EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
258{ 261{
259 struct max1111_data *data = spi_get_drvdata(spi); 262 struct max1111_data *data = spi_get_drvdata(spi);
260 263
264#ifdef CONFIG_SHARPSL_PM
265 the_max1111 = NULL;
266#endif
261 hwmon_device_unregister(data->hwmon_dev); 267 hwmon_device_unregister(data->hwmon_dev);
262 sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); 268 sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
263 sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); 269 sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..ba14a863b451 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", 771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
772 &clk_freq); 772 &clk_freq);
773 if (ret) { 773 if (ret) {
774 dev_err(&pdev->dev, "clock-frequency not specified in DT"); 774 dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
775 goto err; 775 goto err;
776 } 776 }
777 777
778 i2c->speed = clk_freq / 1000; 778 i2c->speed = clk_freq / 1000;
779 if (i2c->speed == 0) {
780 ret = -EINVAL;
781 dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
782 goto err;
783 }
779 jz4780_i2c_set_speed(i2c); 784 jz4780_i2c_set_speed(i2c);
780 785
781 dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed); 786 dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0f2f8484e8ec..e584d88ee337 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -525,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
525 return 0; 525 return 0;
526} 526}
527 527
528
529/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
530static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env) 528static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
531{ 529{
532 struct i2c_client *client = to_i2c_client(dev); 530 struct i2c_client *client = to_i2c_client(dev);
533 int rc; 531 int rc;
534 532
535 rc = acpi_device_uevent_modalias(dev, env); 533 rc = acpi_device_uevent_modalias(dev, env);
536 if (rc != -ENODEV) 534 if (rc != -ENODEV)
537 return rc; 535 return rc;
538 536
539 if (add_uevent_var(env, "MODALIAS=%s%s", 537 return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
540 I2C_MODULE_PREFIX, client->name))
541 return -ENOMEM;
542 dev_dbg(dev, "uevent\n");
543 return 0;
544} 538}
545 539
546/* i2c bus recovery routines */ 540/* i2c bus recovery routines */
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 7748a0a5ddb9..8de073aed001 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -140,22 +140,34 @@ static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_
140 return i2c_demux_activate_master(priv, new_chan); 140 return i2c_demux_activate_master(priv, new_chan);
141} 141}
142 142
143static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr, 143static ssize_t available_masters_show(struct device *dev,
144 char *buf) 144 struct device_attribute *attr,
145 char *buf)
145{ 146{
146 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); 147 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
147 int count = 0, i; 148 int count = 0, i;
148 149
149 for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++) 150 for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
150 count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n", 151 count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
151 i == priv->cur_chan ? '*' : ' ', i, 152 i, priv->chan[i].parent_np->full_name,
152 priv->chan[i].parent_np->full_name); 153 i == priv->num_chan - 1 ? '\n' : ' ');
153 154
154 return count; 155 return count;
155} 156}
157static DEVICE_ATTR_RO(available_masters);
156 158
157static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr, 159static ssize_t current_master_show(struct device *dev,
158 const char *buf, size_t count) 160 struct device_attribute *attr,
161 char *buf)
162{
163 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
164
165 return sprintf(buf, "%d\n", priv->cur_chan);
166}
167
168static ssize_t current_master_store(struct device *dev,
169 struct device_attribute *attr,
170 const char *buf, size_t count)
159{ 171{
160 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); 172 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
161 unsigned int val; 173 unsigned int val;
@@ -172,7 +184,7 @@ static ssize_t cur_master_store(struct device *dev, struct device_attribute *att
172 184
173 return ret < 0 ? ret : count; 185 return ret < 0 ? ret : count;
174} 186}
175static DEVICE_ATTR_RW(cur_master); 187static DEVICE_ATTR_RW(current_master);
176 188
177static int i2c_demux_pinctrl_probe(struct platform_device *pdev) 189static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
178{ 190{
@@ -218,12 +230,18 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
218 /* switch to first parent as active master */ 230 /* switch to first parent as active master */
219 i2c_demux_activate_master(priv, 0); 231 i2c_demux_activate_master(priv, 0);
220 232
221 err = device_create_file(&pdev->dev, &dev_attr_cur_master); 233 err = device_create_file(&pdev->dev, &dev_attr_available_masters);
222 if (err) 234 if (err)
223 goto err_rollback; 235 goto err_rollback;
224 236
237 err = device_create_file(&pdev->dev, &dev_attr_current_master);
238 if (err)
239 goto err_rollback_available;
240
225 return 0; 241 return 0;
226 242
243err_rollback_available:
244 device_remove_file(&pdev->dev, &dev_attr_available_masters);
227err_rollback: 245err_rollback:
228 for (j = 0; j < i; j++) { 246 for (j = 0; j < i; j++) {
229 of_node_put(priv->chan[j].parent_np); 247 of_node_put(priv->chan[j].parent_np);
@@ -238,7 +256,8 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
238 struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev); 256 struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
239 int i; 257 int i;
240 258
241 device_remove_file(&pdev->dev, &dev_attr_cur_master); 259 device_remove_file(&pdev->dev, &dev_attr_current_master);
260 device_remove_file(&pdev->dev, &dev_attr_available_masters);
242 261
243 i2c_demux_deactivate_master(priv); 262 i2c_demux_deactivate_master(priv);
244 263
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 9f0a48e39b8a..80e933b296f6 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -451,7 +451,7 @@ err_free:
451 return ret; 451 return ret;
452} 452}
453 453
454static const struct ide_port_info icside_v6_port_info __initconst = { 454static const struct ide_port_info icside_v6_port_info = {
455 .init_dma = icside_dma_off_init, 455 .init_dma = icside_dma_off_init,
456 .port_ops = &icside_v6_no_dma_port_ops, 456 .port_ops = &icside_v6_no_dma_port_ops,
457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 8012e43bf8f6..46427ea01753 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
325 325
326 clk_enable(clk); 326 clk_enable(clk);
327 rate = clk_get_rate(clk); 327 rate = clk_get_rate(clk);
328 if (!rate)
329 return -EINVAL;
328 330
329 /* NOTE: round *down* to meet minimum timings; we count in clocks */ 331 /* NOTE: round *down* to meet minimum timings; we count in clocks */
330 ideclk_period = 1000000000UL / rate; 332 ideclk_period = 1000000000UL / rate;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ba947df5a8c7..c6935de425fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
660 .enter = NULL } 660 .enter = NULL }
661}; 661};
662 662
663static struct cpuidle_state skx_cstates[] = {
664 {
665 .name = "C1-SKX",
666 .desc = "MWAIT 0x00",
667 .flags = MWAIT2flg(0x00),
668 .exit_latency = 2,
669 .target_residency = 2,
670 .enter = &intel_idle,
671 .enter_freeze = intel_idle_freeze, },
672 {
673 .name = "C1E-SKX",
674 .desc = "MWAIT 0x01",
675 .flags = MWAIT2flg(0x01),
676 .exit_latency = 10,
677 .target_residency = 20,
678 .enter = &intel_idle,
679 .enter_freeze = intel_idle_freeze, },
680 {
681 .name = "C6-SKX",
682 .desc = "MWAIT 0x20",
683 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
684 .exit_latency = 133,
685 .target_residency = 600,
686 .enter = &intel_idle,
687 .enter_freeze = intel_idle_freeze, },
688 {
689 .enter = NULL }
690};
691
663static struct cpuidle_state atom_cstates[] = { 692static struct cpuidle_state atom_cstates[] = {
664 { 693 {
665 .name = "C1E-ATM", 694 .name = "C1E-ATM",
@@ -818,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
818 * driver in this case 847 * driver in this case
819 */ 848 */
820 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); 849 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
821 if (!dev->registered) 850 if (dev->registered)
822 intel_idle_cpu_init(hotcpu); 851 break;
852
853 if (intel_idle_cpu_init(hotcpu))
854 return NOTIFY_BAD;
823 855
824 break; 856 break;
825 } 857 }
@@ -904,6 +936,10 @@ static const struct idle_cpu idle_cpu_skl = {
904 .disable_promotion_to_c1e = true, 936 .disable_promotion_to_c1e = true,
905}; 937};
906 938
939static const struct idle_cpu idle_cpu_skx = {
940 .state_table = skx_cstates,
941 .disable_promotion_to_c1e = true,
942};
907 943
908static const struct idle_cpu idle_cpu_avn = { 944static const struct idle_cpu idle_cpu_avn = {
909 .state_table = avn_cstates, 945 .state_table = avn_cstates,
@@ -945,6 +981,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
945 ICPU(0x56, idle_cpu_bdw), 981 ICPU(0x56, idle_cpu_bdw),
946 ICPU(0x4e, idle_cpu_skl), 982 ICPU(0x4e, idle_cpu_skl),
947 ICPU(0x5e, idle_cpu_skl), 983 ICPU(0x5e, idle_cpu_skl),
984 ICPU(0x8e, idle_cpu_skl),
985 ICPU(0x9e, idle_cpu_skl),
986 ICPU(0x55, idle_cpu_skx),
948 ICPU(0x57, idle_cpu_knl), 987 ICPU(0x57, idle_cpu_knl),
949 {} 988 {}
950}; 989};
@@ -987,22 +1026,15 @@ static int __init intel_idle_probe(void)
987 icpu = (const struct idle_cpu *)id->driver_data; 1026 icpu = (const struct idle_cpu *)id->driver_data;
988 cpuidle_state_table = icpu->state_table; 1027 cpuidle_state_table = icpu->state_table;
989 1028
990 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
991 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
992 else
993 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
994
995 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 1029 pr_debug(PREFIX "v" INTEL_IDLE_VERSION
996 " model 0x%X\n", boot_cpu_data.x86_model); 1030 " model 0x%X\n", boot_cpu_data.x86_model);
997 1031
998 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
999 lapic_timer_reliable_states);
1000 return 0; 1032 return 0;
1001} 1033}
1002 1034
1003/* 1035/*
1004 * intel_idle_cpuidle_devices_uninit() 1036 * intel_idle_cpuidle_devices_uninit()
1005 * unregister, free cpuidle_devices 1037 * Unregisters the cpuidle devices.
1006 */ 1038 */
1007static void intel_idle_cpuidle_devices_uninit(void) 1039static void intel_idle_cpuidle_devices_uninit(void)
1008{ 1040{
@@ -1013,9 +1045,6 @@ static void intel_idle_cpuidle_devices_uninit(void)
1013 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); 1045 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1014 cpuidle_unregister_device(dev); 1046 cpuidle_unregister_device(dev);
1015 } 1047 }
1016
1017 free_percpu(intel_idle_cpuidle_devices);
1018 return;
1019} 1048}
1020 1049
1021/* 1050/*
@@ -1111,7 +1140,7 @@ static void intel_idle_state_table_update(void)
1111 * intel_idle_cpuidle_driver_init() 1140 * intel_idle_cpuidle_driver_init()
1112 * allocate, initialize cpuidle_states 1141 * allocate, initialize cpuidle_states
1113 */ 1142 */
1114static int __init intel_idle_cpuidle_driver_init(void) 1143static void __init intel_idle_cpuidle_driver_init(void)
1115{ 1144{
1116 int cstate; 1145 int cstate;
1117 struct cpuidle_driver *drv = &intel_idle_driver; 1146 struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1163,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
1163 drv->state_count += 1; 1192 drv->state_count += 1;
1164 } 1193 }
1165 1194
1166 if (icpu->auto_demotion_disable_flags)
1167 on_each_cpu(auto_demotion_disable, NULL, 1);
1168
1169 if (icpu->byt_auto_demotion_disable_flag) { 1195 if (icpu->byt_auto_demotion_disable_flag) {
1170 wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); 1196 wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
1171 wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); 1197 wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
1172 } 1198 }
1173
1174 if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
1175 on_each_cpu(c1e_promotion_disable, NULL, 1);
1176
1177 return 0;
1178} 1199}
1179 1200
1180 1201
@@ -1193,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
1193 1214
1194 if (cpuidle_register_device(dev)) { 1215 if (cpuidle_register_device(dev)) {
1195 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); 1216 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
1196 intel_idle_cpuidle_devices_uninit();
1197 return -EIO; 1217 return -EIO;
1198 } 1218 }
1199 1219
@@ -1218,40 +1238,51 @@ static int __init intel_idle_init(void)
1218 if (retval) 1238 if (retval)
1219 return retval; 1239 return retval;
1220 1240
1241 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1242 if (intel_idle_cpuidle_devices == NULL)
1243 return -ENOMEM;
1244
1221 intel_idle_cpuidle_driver_init(); 1245 intel_idle_cpuidle_driver_init();
1222 retval = cpuidle_register_driver(&intel_idle_driver); 1246 retval = cpuidle_register_driver(&intel_idle_driver);
1223 if (retval) { 1247 if (retval) {
1224 struct cpuidle_driver *drv = cpuidle_get_driver(); 1248 struct cpuidle_driver *drv = cpuidle_get_driver();
1225 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", 1249 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
1226 drv ? drv->name : "none"); 1250 drv ? drv->name : "none");
1251 free_percpu(intel_idle_cpuidle_devices);
1227 return retval; 1252 return retval;
1228 } 1253 }
1229 1254
1230 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1231 if (intel_idle_cpuidle_devices == NULL)
1232 return -ENOMEM;
1233
1234 cpu_notifier_register_begin(); 1255 cpu_notifier_register_begin();
1235 1256
1236 for_each_online_cpu(i) { 1257 for_each_online_cpu(i) {
1237 retval = intel_idle_cpu_init(i); 1258 retval = intel_idle_cpu_init(i);
1238 if (retval) { 1259 if (retval) {
1260 intel_idle_cpuidle_devices_uninit();
1239 cpu_notifier_register_done(); 1261 cpu_notifier_register_done();
1240 cpuidle_unregister_driver(&intel_idle_driver); 1262 cpuidle_unregister_driver(&intel_idle_driver);
1263 free_percpu(intel_idle_cpuidle_devices);
1241 return retval; 1264 return retval;
1242 } 1265 }
1243 } 1266 }
1244 __register_cpu_notifier(&cpu_hotplug_notifier); 1267 __register_cpu_notifier(&cpu_hotplug_notifier);
1245 1268
1269 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1270 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1271 else
1272 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1273
1246 cpu_notifier_register_done(); 1274 cpu_notifier_register_done();
1247 1275
1276 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
1277 lapic_timer_reliable_states);
1278
1248 return 0; 1279 return 0;
1249} 1280}
1250 1281
1251static void __exit intel_idle_exit(void) 1282static void __exit intel_idle_exit(void)
1252{ 1283{
1253 intel_idle_cpuidle_devices_uninit(); 1284 struct cpuidle_device *dev;
1254 cpuidle_unregister_driver(&intel_idle_driver); 1285 int i;
1255 1286
1256 cpu_notifier_register_begin(); 1287 cpu_notifier_register_begin();
1257 1288
@@ -1259,9 +1290,15 @@ static void __exit intel_idle_exit(void)
1259 on_each_cpu(__setup_broadcast_timer, (void *)false, 1); 1290 on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1260 __unregister_cpu_notifier(&cpu_hotplug_notifier); 1291 __unregister_cpu_notifier(&cpu_hotplug_notifier);
1261 1292
1293 for_each_possible_cpu(i) {
1294 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1295 cpuidle_unregister_device(dev);
1296 }
1297
1262 cpu_notifier_register_done(); 1298 cpu_notifier_register_done();
1263 1299
1264 return; 1300 cpuidle_unregister_driver(&intel_idle_driver);
1301 free_percpu(intel_idle_cpuidle_devices);
1265} 1302}
1266 1303
1267module_init(intel_idle_init); 1304module_init(intel_idle_init);
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 91a52c275d0e..197e693e7e7b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -553,7 +553,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
553 struct device *dev = regmap_get_device(data->regmap); 553 struct device *dev = regmap_get_device(data->regmap);
554 int ret; 554 int ret;
555 int axis = chan->scan_index; 555 int axis = chan->scan_index;
556 unsigned int raw_val; 556 __le16 raw_val;
557 557
558 mutex_lock(&data->mutex); 558 mutex_lock(&data->mutex);
559 ret = bmc150_accel_set_power_state(data, true); 559 ret = bmc150_accel_set_power_state(data, true);
@@ -563,14 +563,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
563 } 563 }
564 564
565 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis), 565 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
566 &raw_val, 2); 566 &raw_val, sizeof(raw_val));
567 if (ret < 0) { 567 if (ret < 0) {
568 dev_err(dev, "Error reading axis %d\n", axis); 568 dev_err(dev, "Error reading axis %d\n", axis);
569 bmc150_accel_set_power_state(data, false); 569 bmc150_accel_set_power_state(data, false);
570 mutex_unlock(&data->mutex); 570 mutex_unlock(&data->mutex);
571 return ret; 571 return ret;
572 } 572 }
573 *val = sign_extend32(raw_val >> chan->scan_type.shift, 573 *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
574 chan->scan_type.realbits - 1); 574 chan->scan_type.realbits - 1);
575 ret = bmc150_accel_set_power_state(data, false); 575 ret = bmc150_accel_set_power_state(data, false);
576 mutex_unlock(&data->mutex); 576 mutex_unlock(&data->mutex);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e5266c..998dc3caad4c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1386 }, 1386 },
1387 [max11644] = { 1387 [max11644] = {
1388 .bits = 12, 1388 .bits = 12,
1389 .int_vref_mv = 2048, 1389 .int_vref_mv = 4096,
1390 .mode_list = max11644_mode_list, 1390 .mode_list = max11644_mode_list,
1391 .num_modes = ARRAY_SIZE(max11644_mode_list), 1391 .num_modes = ARRAY_SIZE(max11644_mode_list),
1392 .default_mode = s0to1, 1392 .default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1396 }, 1396 },
1397 [max11645] = { 1397 [max11645] = {
1398 .bits = 12, 1398 .bits = 12,
1399 .int_vref_mv = 4096, 1399 .int_vref_mv = 2048,
1400 .mode_list = max11644_mode_list, 1400 .mode_list = max11644_mode_list,
1401 .num_modes = ARRAY_SIZE(max11644_mode_list), 1401 .num_modes = ARRAY_SIZE(max11644_mode_list),
1402 .default_mode = s0to1, 1402 .default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1406 }, 1406 },
1407 [max11646] = { 1407 [max11646] = {
1408 .bits = 10, 1408 .bits = 10,
1409 .int_vref_mv = 2048, 1409 .int_vref_mv = 4096,
1410 .mode_list = max11644_mode_list, 1410 .mode_list = max11644_mode_list,
1411 .num_modes = ARRAY_SIZE(max11644_mode_list), 1411 .num_modes = ARRAY_SIZE(max11644_mode_list),
1412 .default_mode = s0to1, 1412 .default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1416 }, 1416 },
1417 [max11647] = { 1417 [max11647] = {
1418 .bits = 10, 1418 .bits = 10,
1419 .int_vref_mv = 4096, 1419 .int_vref_mv = 2048,
1420 .mode_list = max11644_mode_list, 1420 .mode_list = max11644_mode_list,
1421 .num_modes = ARRAY_SIZE(max11644_mode_list), 1421 .num_modes = ARRAY_SIZE(max11644_mode_list),
1422 .default_mode = s0to1, 1422 .default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
1680 { "max11615", max11615 }, 1680 { "max11615", max11615 },
1681 { "max11616", max11616 }, 1681 { "max11616", max11616 },
1682 { "max11617", max11617 }, 1682 { "max11617", max11617 },
1683 { "max11644", max11644 },
1684 { "max11645", max11645 },
1685 { "max11646", max11646 },
1686 { "max11647", max11647 },
1683 {} 1687 {}
1684}; 1688};
1685 1689
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index b2b1071c1892..7ccc044063f6 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -453,7 +453,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
453{ 453{
454 struct device *dev = regmap_get_device(data->regmap); 454 struct device *dev = regmap_get_device(data->regmap);
455 int ret; 455 int ret;
456 unsigned int raw_val; 456 __le16 raw_val;
457 457
458 mutex_lock(&data->mutex); 458 mutex_lock(&data->mutex);
459 ret = bmg160_set_power_state(data, true); 459 ret = bmg160_set_power_state(data, true);
@@ -463,7 +463,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
463 } 463 }
464 464
465 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val, 465 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
466 2); 466 sizeof(raw_val));
467 if (ret < 0) { 467 if (ret < 0) {
468 dev_err(dev, "Error reading axis %d\n", axis); 468 dev_err(dev, "Error reading axis %d\n", axis);
469 bmg160_set_power_state(data, false); 469 bmg160_set_power_state(data, false);
@@ -471,7 +471,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
471 return ret; 471 return ret;
472 } 472 }
473 473
474 *val = sign_extend32(raw_val, 15); 474 *val = sign_extend32(le16_to_cpu(raw_val), 15);
475 ret = bmg160_set_power_state(data, false); 475 ret = bmg160_set_power_state(data, false);
476 mutex_unlock(&data->mutex); 476 mutex_unlock(&data->mutex);
477 if (ret < 0) 477 if (ret < 0)
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 09db89359544..90ab8a2d2846 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
238 238
239 mutex_lock(&data->lock); 239 mutex_lock(&data->lock);
240 240
241 while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) { 241 while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
242 ret = max30100_read_measurement(data); 242 ret = max30100_read_measurement(data);
243 if (ret) 243 if (ret)
244 break; 244 break;
245 245
246 iio_push_to_buffers(data->indio_dev, data->buffer); 246 iio_push_to_buffers(data->indio_dev, data->buffer);
247 cnt--;
247 } 248 }
248 249
249 mutex_unlock(&data->lock); 250 mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index c05d474587d2..f756feecfa4c 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -9,9 +9,8 @@ config INV_MPU6050_IIO
9 9
10config INV_MPU6050_I2C 10config INV_MPU6050_I2C
11 tristate "Invensense MPU6050 devices (I2C)" 11 tristate "Invensense MPU6050 devices (I2C)"
12 depends on I2C 12 depends on I2C_MUX
13 select INV_MPU6050_IIO 13 select INV_MPU6050_IIO
14 select I2C_MUX
15 select REGMAP_I2C 14 select REGMAP_I2C
16 help 15 help
17 This driver supports the Invensense MPU6050/6500/9150 motion tracking 16 This driver supports the Invensense MPU6050/6500/9150 motion tracking
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b976332d45d3..90462fcf5436 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -653,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
653 unsigned int modes; 653 unsigned int modes;
654 654
655 memset(config, 0, sizeof(*config)); 655 memset(config, 0, sizeof(*config));
656 config->watermark = ~0;
656 657
657 /* 658 /*
658 * If there is just one buffer and we are removing it there is nothing 659 * If there is just one buffer and we are removing it there is nothing
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 35928fb1b66a..b4dbb3912977 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -774,7 +774,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
774 mutex_lock(&data->lock); 774 mutex_lock(&data->lock);
775 data->gesture_mode_running = 1; 775 data->gesture_mode_running = 1;
776 776
777 while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) { 777 while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
778 ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE, 778 ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
779 &data->buffer, 4); 779 &data->buffer, 4);
780 780
@@ -782,6 +782,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
782 goto err_read; 782 goto err_read;
783 783
784 iio_push_to_buffers(data->indio_dev, data->buffer); 784 iio_push_to_buffers(data->indio_dev, data->buffer);
785 cnt--;
785 } 786 }
786 787
787err_read: 788err_read:
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
44static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev) 44static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
45{ 45{
46} 46}
47#define ST_MAGN_TRIGGER_SET_STATE NULL
47#endif /* CONFIG_IIO_BUFFER */ 48#endif /* CONFIG_IIO_BUFFER */
48 49
49#endif /* ST_MAGN_H */ 50#endif /* ST_MAGN_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 92745d755272..38f917a6c778 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1992/** 1992/**
1993 * i40iw_get_dst_ipv6 1993 * i40iw_get_dst_ipv6
1994 */ 1994 */
1995#if IS_ENABLED(CONFIG_IPV6)
1996static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, 1995static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
1997 struct sockaddr_in6 *dst_addr) 1996 struct sockaddr_in6 *dst_addr)
1998{ 1997{
@@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2008 dst = ip6_route_output(&init_net, NULL, &fl6); 2007 dst = ip6_route_output(&init_net, NULL, &fl6);
2009 return dst; 2008 return dst;
2010} 2009}
2011#endif
2012 2010
2013/** 2011/**
2014 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address 2012 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2016 * @dst_ip: remote ip address 2014 * @dst_ip: remote ip address
2017 * @arpindex: if there is an arp entry 2015 * @arpindex: if there is an arp entry
2018 */ 2016 */
2019#if IS_ENABLED(CONFIG_IPV6)
2020static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, 2017static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2021 u32 *src, 2018 u32 *src,
2022 u32 *dest, 2019 u32 *dest,
@@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2089 dst_release(dst); 2086 dst_release(dst);
2090 return rc; 2087 return rc;
2091} 2088}
2092#endif
2093 2089
2094/** 2090/**
2095 * i40iw_ipv4_is_loopback - check if loopback 2091 * i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
2190 cm_info->loc_addr[0], 2186 cm_info->loc_addr[0],
2191 cm_info->rem_addr[0], 2187 cm_info->rem_addr[0],
2192 oldarpindex); 2188 oldarpindex);
2193#if IS_ENABLED(CONFIG_IPV6) 2189 else if (IS_ENABLED(CONFIG_IPV6))
2194 else
2195 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, 2190 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2196 cm_info->loc_addr, 2191 cm_info->loc_addr,
2197 cm_info->rem_addr, 2192 cm_info->rem_addr,
2198 oldarpindex); 2193 oldarpindex);
2199#endif 2194 else
2195 arpindex = -EINVAL;
2200 } 2196 }
2201 if (arpindex < 0) { 2197 if (arpindex < 0) {
2202 i40iw_pr_err("cm_node arpindex\n"); 2198 i40iw_pr_err("cm_node arpindex\n");
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f16c818ad2e6..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); 776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, 777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
778 unsigned long end); 778 unsigned long end);
779int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
780 u8 port, struct ifla_vf_info *info);
781int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
782 u8 port, int state);
783int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
784 u8 port, struct ifla_vf_stats *stats);
785int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
786 u64 guid, int type);
787
788#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 779#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
789static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) 780static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
790{ 781{
@@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
801 792
802#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 793#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
803 794
795int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
796 u8 port, struct ifla_vf_info *info);
797int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
798 u8 port, int state);
799int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
800 u8 port, struct ifla_vf_stats *stats);
801int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
802 u64 guid, int type);
803
804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, 804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
805 int index); 805 int index);
806 806
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 60b30d338a81..411e4464ca23 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -63,7 +63,6 @@ isert_rdma_accept(struct isert_conn *isert_conn);
63struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 63struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
64 64
65static void isert_release_work(struct work_struct *work); 65static void isert_release_work(struct work_struct *work);
66static void isert_wait4flush(struct isert_conn *isert_conn);
67static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 66static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
68static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 67static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
69static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 68static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -141,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
141 attr.qp_context = isert_conn; 140 attr.qp_context = isert_conn;
142 attr.send_cq = comp->cq; 141 attr.send_cq = comp->cq;
143 attr.recv_cq = comp->cq; 142 attr.recv_cq = comp->cq;
144 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 143 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
145 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 144 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
146 attr.cap.max_send_sge = device->ib_device->attrs.max_sge; 145 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
147 isert_conn->max_sge = min(device->ib_device->attrs.max_sge, 146 isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -887,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
887 break; 886 break;
888 case ISER_CONN_UP: 887 case ISER_CONN_UP:
889 isert_conn_terminate(isert_conn); 888 isert_conn_terminate(isert_conn);
890 isert_wait4flush(isert_conn); 889 ib_drain_qp(isert_conn->qp);
891 isert_handle_unbound_conn(isert_conn); 890 isert_handle_unbound_conn(isert_conn);
892 break; 891 break;
893 case ISER_CONN_BOUND: 892 case ISER_CONN_BOUND:
@@ -3213,36 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
3213 } 3212 }
3214} 3213}
3215 3214
3216static void
3217isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
3218{
3219 struct isert_conn *isert_conn = wc->qp->qp_context;
3220
3221 isert_print_wc(wc, "beacon");
3222
3223 isert_info("conn %p completing wait_comp_err\n", isert_conn);
3224 complete(&isert_conn->wait_comp_err);
3225}
3226
3227static void
3228isert_wait4flush(struct isert_conn *isert_conn)
3229{
3230 struct ib_recv_wr *bad_wr;
3231 static struct ib_cqe cqe = { .done = isert_beacon_done };
3232
3233 isert_info("conn %p\n", isert_conn);
3234
3235 init_completion(&isert_conn->wait_comp_err);
3236 isert_conn->beacon.wr_cqe = &cqe;
3237 /* post an indication that all flush errors were consumed */
3238 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
3239 isert_err("conn %p failed to post beacon", isert_conn);
3240 return;
3241 }
3242
3243 wait_for_completion(&isert_conn->wait_comp_err);
3244}
3245
3246/** 3215/**
3247 * isert_put_unsol_pending_cmds() - Drop commands waiting for 3216 * isert_put_unsol_pending_cmds() - Drop commands waiting for
3248 * unsolicitate dataout 3217 * unsolicitate dataout
@@ -3288,7 +3257,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3288 isert_conn_terminate(isert_conn); 3257 isert_conn_terminate(isert_conn);
3289 mutex_unlock(&isert_conn->mutex); 3258 mutex_unlock(&isert_conn->mutex);
3290 3259
3291 isert_wait4flush(isert_conn); 3260 ib_drain_qp(isert_conn->qp);
3292 isert_put_unsol_pending_cmds(conn); 3261 isert_put_unsol_pending_cmds(conn);
3293 isert_wait4cmds(conn); 3262 isert_wait4cmds(conn);
3294 isert_wait4logout(isert_conn); 3263 isert_wait4logout(isert_conn);
@@ -3300,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
3300{ 3269{
3301 struct isert_conn *isert_conn = conn->context; 3270 struct isert_conn *isert_conn = conn->context;
3302 3271
3303 isert_wait4flush(isert_conn); 3272 ib_drain_qp(isert_conn->qp);
3304 isert_put_conn(isert_conn); 3273 isert_put_conn(isert_conn);
3305} 3274}
3306 3275
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 192788a4820c..147900cbb578 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -209,14 +209,12 @@ struct isert_conn {
209 struct ib_qp *qp; 209 struct ib_qp *qp;
210 struct isert_device *device; 210 struct isert_device *device;
211 struct mutex mutex; 211 struct mutex mutex;
212 struct completion wait_comp_err;
213 struct kref kref; 212 struct kref kref;
214 struct list_head fr_pool; 213 struct list_head fr_pool;
215 int fr_pool_size; 214 int fr_pool_size;
216 /* lock to protect fastreg pool */ 215 /* lock to protect fastreg pool */
217 spinlock_t pool_lock; 216 spinlock_t pool_lock;
218 struct work_struct release_work; 217 struct work_struct release_work;
219 struct ib_recv_wr beacon;
220 bool logout_posted; 218 bool logout_posted;
221 bool snd_w_inv; 219 bool snd_w_inv;
222}; 220};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0bd3cb2f3c67..8b42401d4795 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,26 +1264,40 @@ free_mem:
1264 */ 1264 */
1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) 1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1266{ 1266{
1267 struct se_session *se_sess;
1268 struct srpt_send_ioctx *ioctx; 1267 struct srpt_send_ioctx *ioctx;
1269 int tag; 1268 unsigned long flags;
1270 1269
1271 BUG_ON(!ch); 1270 BUG_ON(!ch);
1272 se_sess = ch->sess;
1273 1271
1274 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 1272 ioctx = NULL;
1275 if (tag < 0) { 1273 spin_lock_irqsave(&ch->spinlock, flags);
1276 pr_err("Unable to obtain tag for srpt_send_ioctx\n"); 1274 if (!list_empty(&ch->free_list)) {
1277 return NULL; 1275 ioctx = list_first_entry(&ch->free_list,
1276 struct srpt_send_ioctx, free_list);
1277 list_del(&ioctx->free_list);
1278 } 1278 }
1279 ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag]; 1279 spin_unlock_irqrestore(&ch->spinlock, flags);
1280 memset(ioctx, 0, sizeof(struct srpt_send_ioctx)); 1280
1281 ioctx->ch = ch; 1281 if (!ioctx)
1282 return ioctx;
1283
1284 BUG_ON(ioctx->ch != ch);
1282 spin_lock_init(&ioctx->spinlock); 1285 spin_lock_init(&ioctx->spinlock);
1283 ioctx->state = SRPT_STATE_NEW; 1286 ioctx->state = SRPT_STATE_NEW;
1287 ioctx->n_rbuf = 0;
1288 ioctx->rbufs = NULL;
1289 ioctx->n_rdma = 0;
1290 ioctx->n_rdma_wrs = 0;
1291 ioctx->rdma_wrs = NULL;
1292 ioctx->mapped_sg_count = 0;
1284 init_completion(&ioctx->tx_done); 1293 init_completion(&ioctx->tx_done);
1285 1294 ioctx->queue_status_only = false;
1286 ioctx->cmd.map_tag = tag; 1295 /*
1296 * transport_init_se_cmd() does not initialize all fields, so do it
1297 * here.
1298 */
1299 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1300 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1287 1301
1288 return ioctx; 1302 return ioctx;
1289} 1303}
@@ -2021,7 +2035,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2021 struct ib_cm_rep_param *rep_param; 2035 struct ib_cm_rep_param *rep_param;
2022 struct srpt_rdma_ch *ch, *tmp_ch; 2036 struct srpt_rdma_ch *ch, *tmp_ch;
2023 u32 it_iu_len; 2037 u32 it_iu_len;
2024 int ret = 0; 2038 int i, ret = 0;
2025 unsigned char *p; 2039 unsigned char *p;
2026 2040
2027 WARN_ON_ONCE(irqs_disabled()); 2041 WARN_ON_ONCE(irqs_disabled());
@@ -2143,6 +2157,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2143 if (!ch->ioctx_ring) 2157 if (!ch->ioctx_ring)
2144 goto free_ch; 2158 goto free_ch;
2145 2159
2160 INIT_LIST_HEAD(&ch->free_list);
2161 for (i = 0; i < ch->rq_size; i++) {
2162 ch->ioctx_ring[i]->ch = ch;
2163 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2164 }
2165
2146 ret = srpt_create_ch_ib(ch); 2166 ret = srpt_create_ch_ib(ch);
2147 if (ret) { 2167 if (ret) {
2148 rej->reason = cpu_to_be32( 2168 rej->reason = cpu_to_be32(
@@ -2173,8 +2193,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2173 p = &ch->sess_name[0]; 2193 p = &ch->sess_name[0];
2174 2194
2175try_again: 2195try_again:
2176 ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size, 2196 ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
2177 sizeof(struct srpt_send_ioctx),
2178 TARGET_PROT_NORMAL, p, ch, NULL); 2197 TARGET_PROT_NORMAL, p, ch, NULL);
2179 if (IS_ERR(ch->sess)) { 2198 if (IS_ERR(ch->sess)) {
2180 pr_info("Rejected login because no ACL has been" 2199 pr_info("Rejected login because no ACL has been"
@@ -2881,7 +2900,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2881 struct srpt_send_ioctx *ioctx = container_of(se_cmd, 2900 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
2882 struct srpt_send_ioctx, cmd); 2901 struct srpt_send_ioctx, cmd);
2883 struct srpt_rdma_ch *ch = ioctx->ch; 2902 struct srpt_rdma_ch *ch = ioctx->ch;
2884 struct se_session *se_sess = ch->sess; 2903 unsigned long flags;
2885 2904
2886 WARN_ON(ioctx->state != SRPT_STATE_DONE); 2905 WARN_ON(ioctx->state != SRPT_STATE_DONE);
2887 WARN_ON(ioctx->mapped_sg_count != 0); 2906 WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2892,7 +2911,9 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2892 ioctx->n_rbuf = 0; 2911 ioctx->n_rbuf = 0;
2893 } 2912 }
2894 2913
2895 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 2914 spin_lock_irqsave(&ch->spinlock, flags);
2915 list_add(&ioctx->free_list, &ch->free_list);
2916 spin_unlock_irqrestore(&ch->spinlock, flags);
2896} 2917}
2897 2918
2898/** 2919/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ca288f019315..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,6 +179,7 @@ struct srpt_recv_ioctx {
179 * struct srpt_send_ioctx - SRPT send I/O context. 179 * struct srpt_send_ioctx - SRPT send I/O context.
180 * @ioctx: See above. 180 * @ioctx: See above.
181 * @ch: Channel pointer. 181 * @ch: Channel pointer.
182 * @free_list: Node in srpt_rdma_ch.free_list.
182 * @n_rbuf: Number of data buffers in the received SRP command. 183 * @n_rbuf: Number of data buffers in the received SRP command.
183 * @rbufs: Pointer to SRP data buffer array. 184 * @rbufs: Pointer to SRP data buffer array.
184 * @single_rbuf: SRP data buffer if the command has only a single buffer. 185 * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -201,6 +202,7 @@ struct srpt_send_ioctx {
201 struct srp_direct_buf *rbufs; 202 struct srp_direct_buf *rbufs;
202 struct srp_direct_buf single_rbuf; 203 struct srp_direct_buf single_rbuf;
203 struct scatterlist *sg; 204 struct scatterlist *sg;
205 struct list_head free_list;
204 spinlock_t spinlock; 206 spinlock_t spinlock;
205 enum srpt_command_state state; 207 enum srpt_command_state state;
206 struct se_cmd cmd; 208 struct se_cmd cmd;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
403 unsigned int s_length = sg_dma_len(s); 403 unsigned int s_length = sg_dma_len(s);
404 unsigned int s_dma_len = s->length; 404 unsigned int s_dma_len = s->length;
405 405
406 s->offset = s_offset; 406 s->offset += s_offset;
407 s->length = s_length; 407 s->length = s_length;
408 sg_dma_address(s) = dma_addr + s_offset; 408 sg_dma_address(s) = dma_addr + s_offset;
409 dma_addr += s_dma_len; 409 dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
422 422
423 for_each_sg(sg, s, nents, i) { 423 for_each_sg(sg, s, nents, i) {
424 if (sg_dma_address(s) != DMA_ERROR_CODE) 424 if (sg_dma_address(s) != DMA_ERROR_CODE)
425 s->offset = sg_dma_address(s); 425 s->offset += sg_dma_address(s);
426 if (sg_dma_len(s)) 426 if (sg_dma_len(s))
427 s->length = sg_dma_len(s); 427 s->length = sg_dma_len(s);
428 sg_dma_address(s) = DMA_ERROR_CODE; 428 sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f14df2..e1852e845d21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2458 } 2458 }
2459 2459
2460 /* register PCI DMA alias device */ 2460 /* register PCI DMA alias device */
2461 if (req_id != dma_alias && dev_is_pci(dev)) { 2461 if (dev_is_pci(dev) && req_id != dma_alias) {
2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), 2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2463 dma_alias & 0xff, NULL, domain); 2463 dma_alias & 0xff, NULL, domain);
2464 2464
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index bfd4f7c3b1d8..b9df1411c894 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
848 if (!group->default_domain) { 848 if (!group->default_domain) {
849 group->default_domain = __iommu_domain_alloc(dev->bus, 849 group->default_domain = __iommu_domain_alloc(dev->bus,
850 IOMMU_DOMAIN_DMA); 850 IOMMU_DOMAIN_DMA);
851 group->domain = group->default_domain; 851 if (!group->domain)
852 group->domain = group->default_domain;
852 } 853 }
853 854
854 ret = iommu_group_add_device(group, dev); 855 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a6f593a0a29e..5710a06c3049 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -315,8 +315,8 @@ static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
315 int i; 315 int i;
316 316
317 for (i = 0; i < iommu->num_mmu; i++) 317 for (i = 0; i < iommu->num_mmu; i++)
318 active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & 318 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
319 RK_MMU_STATUS_STALL_ACTIVE; 319 RK_MMU_STATUS_STALL_ACTIVE);
320 320
321 return active; 321 return active;
322} 322}
@@ -327,8 +327,8 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
327 int i; 327 int i;
328 328
329 for (i = 0; i < iommu->num_mmu; i++) 329 for (i = 0; i < iommu->num_mmu; i++)
330 enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & 330 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
331 RK_MMU_STATUS_PAGING_ENABLED; 331 RK_MMU_STATUS_PAGING_ENABLED);
332 332
333 return enable; 333 return enable;
334} 334}
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 7fdf78f46433..df7e05ca8f9c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
215 if (count == 0) 215 if (count == 0)
216 count = 32; 216 count = 32;
217 isac_empty_fifo(cs, count); 217 isac_empty_fifo(cs, count);
218 if ((count = cs->rcvidx) > 0) { 218 count = cs->rcvidx;
219 if (count > 0) {
219 cs->rcvidx = 0; 220 cs->rcvidx = 0;
220 if (!(skb = alloc_skb(count, GFP_ATOMIC))) 221 skb = alloc_skb(count, GFP_ATOMIC);
222 if (!skb)
221 printk(KERN_WARNING "HiSax: D receive out of memory\n"); 223 printk(KERN_WARNING "HiSax: D receive out of memory\n");
222 else { 224 else {
223 memcpy(skb_put(skb, count), cs->rcvbuf, count); 225 memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
251 cs->tx_skb = NULL; 253 cs->tx_skb = NULL;
252 } 254 }
253 } 255 }
254 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { 256 cs->tx_skb = skb_dequeue(&cs->sq);
257 if (cs->tx_skb) {
255 cs->tx_cnt = 0; 258 cs->tx_cnt = 0;
256 isac_fill_fifo(cs); 259 isac_fill_fifo(cs);
257 } else 260 } else
@@ -313,7 +316,8 @@ afterXPR:
313#if ARCOFI_USE 316#if ARCOFI_USE
314 if (v1 & 0x08) { 317 if (v1 & 0x08) {
315 if (!cs->dc.isac.mon_rx) { 318 if (!cs->dc.isac.mon_rx) {
316 if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { 319 cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
320 if (!cs->dc.isac.mon_rx) {
317 if (cs->debug & L1_DEB_WARN) 321 if (cs->debug & L1_DEB_WARN)
318 debugl1(cs, "ISAC MON RX out of memory!"); 322 debugl1(cs, "ISAC MON RX out of memory!");
319 cs->dc.isac.mocr &= 0xf0; 323 cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@ afterXPR:
343 afterMONR0: 347 afterMONR0:
344 if (v1 & 0x80) { 348 if (v1 & 0x80) {
345 if (!cs->dc.isac.mon_rx) { 349 if (!cs->dc.isac.mon_rx) {
346 if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { 350 cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
351 if (!cs->dc.isac.mon_rx) {
347 if (cs->debug & L1_DEB_WARN) 352 if (cs->debug & L1_DEB_WARN)
348 debugl1(cs, "ISAC MON RX out of memory!"); 353 debugl1(cs, "ISAC MON RX out of memory!");
349 cs->dc.isac.mocr &= 0x0f; 354 cs->dc.isac.mocr &= 0x0f;
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0ddf638d60f3..043828d541f7 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -361,8 +361,6 @@ static int __init acpi_pcc_probe(void)
361 struct acpi_generic_address *db_reg; 361 struct acpi_generic_address *db_reg;
362 struct acpi_pcct_hw_reduced *pcct_ss; 362 struct acpi_pcct_hw_reduced *pcct_ss;
363 pcc_mbox_channels[i].con_priv = pcct_entry; 363 pcc_mbox_channels[i].con_priv = pcct_entry;
364 pcct_entry = (struct acpi_subtable_header *)
365 ((unsigned long) pcct_entry + pcct_entry->length);
366 364
367 /* If doorbell is in system memory cache the virt address */ 365 /* If doorbell is in system memory cache the virt address */
368 pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry; 366 pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
@@ -370,6 +368,8 @@ static int __init acpi_pcc_probe(void)
370 if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 368 if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
371 pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address, 369 pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
372 db_reg->bit_width/8); 370 db_reg->bit_width/8);
371 pcct_entry = (struct acpi_subtable_header *)
372 ((unsigned long) pcct_entry + pcct_entry->length);
373 } 373 }
374 374
375 pcc_mbox_ctrl.num_chans = count; 375 pcc_mbox_ctrl.num_chans = count;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7df6b4f1548a..3fe86b54d50b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
322{ 322{
323 ClearPagePrivate(page); 323 ClearPagePrivate(page);
324 set_page_private(page, 0); 324 set_page_private(page, 0);
325 page_cache_release(page); 325 put_page(page);
326} 326}
327static void free_buffers(struct page *page) 327static void free_buffers(struct page *page)
328{ 328{
@@ -1673,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
1673 if (!bitmap) /* there was no bitmap */ 1673 if (!bitmap) /* there was no bitmap */
1674 return; 1674 return;
1675 1675
1676 if (bitmap->sysfs_can_clear)
1677 sysfs_put(bitmap->sysfs_can_clear);
1678
1676 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && 1679 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1677 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev)) 1680 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1678 md_cluster_stop(bitmap->mddev); 1681 md_cluster_stop(bitmap->mddev);
@@ -1712,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
1712 if (mddev->thread) 1715 if (mddev->thread)
1713 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; 1716 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1714 1717
1715 if (bitmap->sysfs_can_clear)
1716 sysfs_put(bitmap->sysfs_can_clear);
1717
1718 bitmap_free(bitmap); 1718 bitmap_free(bitmap);
1719} 1719}
1720 1720
1721/* 1721/*
1722 * initialize the bitmap structure 1722 * initialize the bitmap structure
1723 * if this returns an error, bitmap_destroy must be called to do clean up 1723 * if this returns an error, bitmap_destroy must be called to do clean up
1724 * once mddev->bitmap is set
1724 */ 1725 */
1725struct bitmap *bitmap_create(struct mddev *mddev, int slot) 1726struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1726{ 1727{
@@ -1865,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1865 struct bitmap_counts *counts; 1866 struct bitmap_counts *counts;
1866 struct bitmap *bitmap = bitmap_create(mddev, slot); 1867 struct bitmap *bitmap = bitmap_create(mddev, slot);
1867 1868
1868 if (IS_ERR(bitmap)) 1869 if (IS_ERR(bitmap)) {
1870 bitmap_free(bitmap);
1869 return PTR_ERR(bitmap); 1871 return PTR_ERR(bitmap);
1872 }
1870 1873
1871 rv = bitmap_init_from_disk(bitmap, 0); 1874 rv = bitmap_init_from_disk(bitmap, 0);
1872 if (rv) 1875 if (rv)
@@ -2170,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
2170 else { 2173 else {
2171 mddev->bitmap = bitmap; 2174 mddev->bitmap = bitmap;
2172 rv = bitmap_load(mddev); 2175 rv = bitmap_load(mddev);
2173 if (rv) { 2176 if (rv)
2174 bitmap_destroy(mddev);
2175 mddev->bitmap_info.offset = 0; 2177 mddev->bitmap_info.offset = 0;
2176 }
2177 } 2178 }
2178 mddev->pers->quiesce(mddev, 0); 2179 mddev->pers->quiesce(mddev, 0);
2179 if (rv) 2180 if (rv) {
2181 bitmap_destroy(mddev);
2180 return rv; 2182 return rv;
2183 }
2181 } 2184 }
2182 } 2185 }
2183 } 2186 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c068f171b4eb..194580fba7fd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -718,6 +718,7 @@ static void super_written(struct bio *bio)
718 718
719 if (atomic_dec_and_test(&mddev->pending_writes)) 719 if (atomic_dec_and_test(&mddev->pending_writes))
720 wake_up(&mddev->sb_wait); 720 wake_up(&mddev->sb_wait);
721 rdev_dec_pending(rdev, mddev);
721 bio_put(bio); 722 bio_put(bio);
722} 723}
723 724
@@ -732,6 +733,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
732 */ 733 */
733 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 734 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
734 735
736 atomic_inc(&rdev->nr_pending);
737
735 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 738 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
736 bio->bi_iter.bi_sector = sector; 739 bio->bi_iter.bi_sector = sector;
737 bio_add_page(bio, page, size, 0); 740 bio_add_page(bio, page, size, 0);
@@ -6883,7 +6886,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6883 6886
6884 case ADD_NEW_DISK: 6887 case ADD_NEW_DISK:
6885 /* We can support ADD_NEW_DISK on read-only arrays 6888 /* We can support ADD_NEW_DISK on read-only arrays
6886 * on if we are re-adding a preexisting device. 6889 * only if we are re-adding a preexisting device.
6887 * So require mddev->pers and MD_DISK_SYNC. 6890 * So require mddev->pers and MD_DISK_SYNC.
6888 */ 6891 */
6889 if (mddev->pers) { 6892 if (mddev->pers) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 39fb21e048e6..a7f2b9c9f8a0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
570 if (best_dist_disk < 0) { 570 if (best_dist_disk < 0) {
571 if (is_badblock(rdev, this_sector, sectors, 571 if (is_badblock(rdev, this_sector, sectors,
572 &first_bad, &bad_sectors)) { 572 &first_bad, &bad_sectors)) {
573 if (first_bad < this_sector) 573 if (first_bad <= this_sector)
574 /* Cannot use this */ 574 /* Cannot use this */
575 continue; 575 continue;
576 best_good_sectors = first_bad - this_sector; 576 best_good_sectors = first_bad - this_sector;
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
228 "au8522", 0x8e >> 1, NULL); 228 "au8522", 0x8e >> 1, NULL);
229 if (sd == NULL) 229 if (sd == NULL)
230 pr_err("analog subdev registration failed\n"); 230 pr_err("analog subdev registration failed\n");
231#ifdef CONFIG_MEDIA_CONTROLLER
232 if (sd)
233 dev->decoder = &sd->entity;
234#endif
235 } 231 }
236 232
237 /* Setup tuners */ 233 /* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 5dc82e8c8670..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
137#ifdef CONFIG_MEDIA_CONTROLLER 137#ifdef CONFIG_MEDIA_CONTROLLER
138 if (dev->media_dev && 138 if (dev->media_dev &&
139 media_devnode_is_registered(&dev->media_dev->devnode)) { 139 media_devnode_is_registered(&dev->media_dev->devnode)) {
140 /* clear enable_source, disable_source */
141 dev->media_dev->source_priv = NULL;
142 dev->media_dev->enable_source = NULL;
143 dev->media_dev->disable_source = NULL;
144
140 media_device_unregister(dev->media_dev); 145 media_device_unregister(dev->media_dev);
141 media_device_cleanup(dev->media_dev); 146 media_device_cleanup(dev->media_dev);
147 kfree(dev->media_dev);
142 dev->media_dev = NULL; 148 dev->media_dev = NULL;
143 } 149 }
144#endif 150#endif
@@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
166 Set the status so poll routines can check and avoid 172 Set the status so poll routines can check and avoid
167 access after disconnect. 173 access after disconnect.
168 */ 174 */
169 dev->dev_state = DEV_DISCONNECTED; 175 set_bit(DEV_DISCONNECTED, &dev->dev_state);
170 176
171 au0828_rc_unregister(dev); 177 au0828_rc_unregister(dev);
172 /* Digital TV */ 178 /* Digital TV */
@@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev,
192#ifdef CONFIG_MEDIA_CONTROLLER 198#ifdef CONFIG_MEDIA_CONTROLLER
193 struct media_device *mdev; 199 struct media_device *mdev;
194 200
195 mdev = media_device_get_devres(&udev->dev); 201 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
196 if (!mdev) 202 if (!mdev)
197 return -ENOMEM; 203 return -ENOMEM;
198 204
@@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev,
456{ 462{
457#ifdef CONFIG_MEDIA_CONTROLLER 463#ifdef CONFIG_MEDIA_CONTROLLER
458 int ret; 464 int ret;
459 struct media_entity *entity, *demod = NULL, *tuner = NULL; 465 struct media_entity *entity, *demod = NULL;
466 struct media_link *link;
460 467
461 if (!dev->media_dev) 468 if (!dev->media_dev)
462 return 0; 469 return 0;
@@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev,
482 } 489 }
483 490
484 /* 491 /*
485 * Find tuner and demod to disable the link between 492 * Find tuner, decoder and demod.
486 * the two to avoid disable step when tuner is requested 493 *
487 * by video or audio. Note that this step can't be done 494 * The tuner and decoder should be cached, as they'll be used by
488 * until dvb graph is created during dvb register. 495 * au0828_enable_source.
496 *
497 * It also needs to disable the link between tuner and
498 * decoder/demod, to avoid disable step when tuner is requested
499 * by video or audio. Note that this step can't be done until dvb
500 * graph is created during dvb register.
489 */ 501 */
490 media_device_for_each_entity(entity, dev->media_dev) { 502 media_device_for_each_entity(entity, dev->media_dev) {
491 if (entity->function == MEDIA_ENT_F_DTV_DEMOD) 503 switch (entity->function) {
504 case MEDIA_ENT_F_TUNER:
505 dev->tuner = entity;
506 break;
507 case MEDIA_ENT_F_ATV_DECODER:
508 dev->decoder = entity;
509 break;
510 case MEDIA_ENT_F_DTV_DEMOD:
492 demod = entity; 511 demod = entity;
493 else if (entity->function == MEDIA_ENT_F_TUNER) 512 break;
494 tuner = entity; 513 }
495 } 514 }
496 /* Disable link between tuner and demod */
497 if (tuner && demod) {
498 struct media_link *link;
499 515
500 list_for_each_entry(link, &demod->links, list) { 516 /* Disable link between tuner->demod and/or tuner->decoder */
501 if (link->sink->entity == demod && 517 if (dev->tuner) {
502 link->source->entity == tuner) { 518 list_for_each_entry(link, &dev->tuner->links, list) {
519 if (demod && link->sink->entity == demod)
520 media_entity_setup_link(link, 0);
521 if (dev->decoder && link->sink->entity == dev->decoder)
503 media_entity_setup_link(link, 0); 522 media_entity_setup_link(link, 0);
504 }
505 } 523 }
506 } 524 }
507 525
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
130 bool first = true; 130 bool first = true;
131 131
132 /* do nothing if device is disconnected */ 132 /* do nothing if device is disconnected */
133 if (ir->dev->dev_state == DEV_DISCONNECTED) 133 if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
134 return 0; 134 return 0;
135 135
136 /* Check IR int */ 136 /* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
260 cancel_delayed_work_sync(&ir->work); 260 cancel_delayed_work_sync(&ir->work);
261 261
262 /* do nothing if device is disconnected */ 262 /* do nothing if device is disconnected */
263 if (ir->dev->dev_state != DEV_DISCONNECTED) { 263 if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
264 /* Disable IR */ 264 /* Disable IR */
265 au8522_rc_clear(ir, 0xe0, 1 << 4); 265 au8522_rc_clear(ir, 0xe0, 1 << 4);
266 } 266 }
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13f6dab9ccc2..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
106 106
107static int check_dev(struct au0828_dev *dev) 107static int check_dev(struct au0828_dev *dev)
108{ 108{
109 if (dev->dev_state & DEV_DISCONNECTED) { 109 if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
110 pr_info("v4l2 ioctl: device not present\n"); 110 pr_info("v4l2 ioctl: device not present\n");
111 return -ENODEV; 111 return -ENODEV;
112 } 112 }
113 113
114 if (dev->dev_state & DEV_MISCONFIGURED) { 114 if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
115 pr_info("v4l2 ioctl: device is misconfigured; " 115 pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
116 "close and open it again\n");
117 return -EIO; 116 return -EIO;
118 } 117 }
119 return 0; 118 return 0;
@@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
521 if (!dev) 520 if (!dev)
522 return 0; 521 return 0;
523 522
524 if ((dev->dev_state & DEV_DISCONNECTED) || 523 if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
525 (dev->dev_state & DEV_MISCONFIGURED)) 524 test_bit(DEV_MISCONFIGURED, &dev->dev_state))
526 return 0; 525 return 0;
527 526
528 if (urb->status < 0) { 527 if (urb->status < 0) {
@@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
824 int ret = 0; 823 int ret = 0;
825 824
826 dev->stream_state = STREAM_INTERRUPT; 825 dev->stream_state = STREAM_INTERRUPT;
827 if (dev->dev_state == DEV_DISCONNECTED) 826 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
828 return -ENODEV; 827 return -ENODEV;
829 else if (ret) { 828 else if (ret) {
830 dev->dev_state = DEV_MISCONFIGURED; 829 set_bit(DEV_MISCONFIGURED, &dev->dev_state);
831 dprintk(1, "%s device is misconfigured!\n", __func__); 830 dprintk(1, "%s device is misconfigured!\n", __func__);
832 return ret; 831 return ret;
833 } 832 }
@@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
1026 int ret; 1025 int ret;
1027 1026
1028 dprintk(1, 1027 dprintk(1,
1029 "%s called std_set %d dev_state %d stream users %d users %d\n", 1028 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1030 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1029 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1031 dev->streaming_users, dev->users); 1030 dev->streaming_users, dev->users);
1032 1031
@@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
1045 au0828_analog_stream_enable(dev); 1044 au0828_analog_stream_enable(dev);
1046 au0828_analog_stream_reset(dev); 1045 au0828_analog_stream_reset(dev);
1047 dev->stream_state = STREAM_OFF; 1046 dev->stream_state = STREAM_OFF;
1048 dev->dev_state |= DEV_INITIALIZED; 1047 set_bit(DEV_INITIALIZED, &dev->dev_state);
1049 } 1048 }
1050 dev->users++; 1049 dev->users++;
1051 mutex_unlock(&dev->lock); 1050 mutex_unlock(&dev->lock);
@@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
1059 struct video_device *vdev = video_devdata(filp); 1058 struct video_device *vdev = video_devdata(filp);
1060 1059
1061 dprintk(1, 1060 dprintk(1,
1062 "%s called std_set %d dev_state %d stream users %d users %d\n", 1061 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1063 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1062 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1064 dev->streaming_users, dev->users); 1063 dev->streaming_users, dev->users);
1065 1064
@@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp)
1075 del_timer_sync(&dev->vbi_timeout); 1074 del_timer_sync(&dev->vbi_timeout);
1076 } 1075 }
1077 1076
1078 if (dev->dev_state == DEV_DISCONNECTED) 1077 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
1079 goto end; 1078 goto end;
1080 1079
1081 if (dev->users == 1) { 1080 if (dev->users == 1) {
@@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
1135 .type = V4L2_TUNER_ANALOG_TV, 1134 .type = V4L2_TUNER_ANALOG_TV,
1136 }; 1135 };
1137 1136
1138 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1137 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1139 dev->std_set_in_tuner_core, dev->dev_state); 1138 dev->std_set_in_tuner_core, dev->dev_state);
1140 1139
1141 if (dev->std_set_in_tuner_core) 1140 if (dev->std_set_in_tuner_core)
@@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
1207 struct video_device *vdev = video_devdata(file); 1206 struct video_device *vdev = video_devdata(file);
1208 struct au0828_dev *dev = video_drvdata(file); 1207 struct au0828_dev *dev = video_drvdata(file);
1209 1208
1210 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1209 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1211 dev->std_set_in_tuner_core, dev->dev_state); 1210 dev->std_set_in_tuner_core, dev->dev_state);
1212 1211
1213 strlcpy(cap->driver, "au0828", sizeof(cap->driver)); 1212 strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1250{ 1249{
1251 struct au0828_dev *dev = video_drvdata(file); 1250 struct au0828_dev *dev = video_drvdata(file);
1252 1251
1253 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1252 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1254 dev->std_set_in_tuner_core, dev->dev_state); 1253 dev->std_set_in_tuner_core, dev->dev_state);
1255 1254
1256 f->fmt.pix.width = dev->width; 1255 f->fmt.pix.width = dev->width;
@@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1269{ 1268{
1270 struct au0828_dev *dev = video_drvdata(file); 1269 struct au0828_dev *dev = video_drvdata(file);
1271 1270
1272 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1271 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1273 dev->std_set_in_tuner_core, dev->dev_state); 1272 dev->std_set_in_tuner_core, dev->dev_state);
1274 1273
1275 return au0828_set_format(dev, VIDIOC_TRY_FMT, f); 1274 return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1281 struct au0828_dev *dev = video_drvdata(file); 1280 struct au0828_dev *dev = video_drvdata(file);
1282 int rc; 1281 int rc;
1283 1282
1284 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1283 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1285 dev->std_set_in_tuner_core, dev->dev_state); 1284 dev->std_set_in_tuner_core, dev->dev_state);
1286 1285
1287 rc = check_dev(dev); 1286 rc = check_dev(dev);
@@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
1303{ 1302{
1304 struct au0828_dev *dev = video_drvdata(file); 1303 struct au0828_dev *dev = video_drvdata(file);
1305 1304
1306 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1305 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1307 dev->std_set_in_tuner_core, dev->dev_state); 1306 dev->std_set_in_tuner_core, dev->dev_state);
1308 1307
1309 if (norm == dev->std) 1308 if (norm == dev->std)
@@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
1335{ 1334{
1336 struct au0828_dev *dev = video_drvdata(file); 1335 struct au0828_dev *dev = video_drvdata(file);
1337 1336
1338 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1337 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1339 dev->std_set_in_tuner_core, dev->dev_state); 1338 dev->std_set_in_tuner_core, dev->dev_state);
1340 1339
1341 *norm = dev->std; 1340 *norm = dev->std;
@@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
1357 [AU0828_VMUX_DVB] = "DVB", 1356 [AU0828_VMUX_DVB] = "DVB",
1358 }; 1357 };
1359 1358
1360 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1359 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1361 dev->std_set_in_tuner_core, dev->dev_state); 1360 dev->std_set_in_tuner_core, dev->dev_state);
1362 1361
1363 tmp = input->index; 1362 tmp = input->index;
@@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
1387{ 1386{
1388 struct au0828_dev *dev = video_drvdata(file); 1387 struct au0828_dev *dev = video_drvdata(file);
1389 1388
1390 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1389 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1391 dev->std_set_in_tuner_core, dev->dev_state); 1390 dev->std_set_in_tuner_core, dev->dev_state);
1392 1391
1393 *i = dev->ctrl_input; 1392 *i = dev->ctrl_input;
@@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
1398{ 1397{
1399 int i; 1398 int i;
1400 1399
1401 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1400 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1402 dev->std_set_in_tuner_core, dev->dev_state); 1401 dev->std_set_in_tuner_core, dev->dev_state);
1403 1402
1404 switch (AUVI_INPUT(index).type) { 1403 switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
1496{ 1495{
1497 struct au0828_dev *dev = video_drvdata(file); 1496 struct au0828_dev *dev = video_drvdata(file);
1498 1497
1499 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1498 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1500 dev->std_set_in_tuner_core, dev->dev_state); 1499 dev->std_set_in_tuner_core, dev->dev_state);
1501 1500
1502 a->index = dev->ctrl_ainput; 1501 a->index = dev->ctrl_ainput;
@@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
1516 if (a->index != dev->ctrl_ainput) 1515 if (a->index != dev->ctrl_ainput)
1517 return -EINVAL; 1516 return -EINVAL;
1518 1517
1519 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1518 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1520 dev->std_set_in_tuner_core, dev->dev_state); 1519 dev->std_set_in_tuner_core, dev->dev_state);
1521 return 0; 1520 return 0;
1522} 1521}
@@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
1534 if (ret) 1533 if (ret)
1535 return ret; 1534 return ret;
1536 1535
1537 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1536 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1538 dev->std_set_in_tuner_core, dev->dev_state); 1537 dev->std_set_in_tuner_core, dev->dev_state);
1539 1538
1540 strcpy(t->name, "Auvitek tuner"); 1539 strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
1554 if (t->index != 0) 1553 if (t->index != 0)
1555 return -EINVAL; 1554 return -EINVAL;
1556 1555
1557 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1556 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1558 dev->std_set_in_tuner_core, dev->dev_state); 1557 dev->std_set_in_tuner_core, dev->dev_state);
1559 1558
1560 au0828_init_tuner(dev); 1559 au0828_init_tuner(dev);
@@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
1576 1575
1577 if (freq->tuner != 0) 1576 if (freq->tuner != 0)
1578 return -EINVAL; 1577 return -EINVAL;
1579 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1578 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1580 dev->std_set_in_tuner_core, dev->dev_state); 1579 dev->std_set_in_tuner_core, dev->dev_state);
1581 freq->frequency = dev->ctrl_freq; 1580 freq->frequency = dev->ctrl_freq;
1582 return 0; 1581 return 0;
@@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1591 if (freq->tuner != 0) 1590 if (freq->tuner != 0)
1592 return -EINVAL; 1591 return -EINVAL;
1593 1592
1594 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1593 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1595 dev->std_set_in_tuner_core, dev->dev_state); 1594 dev->std_set_in_tuner_core, dev->dev_state);
1596 1595
1597 au0828_init_tuner(dev); 1596 au0828_init_tuner(dev);
@@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
1617{ 1616{
1618 struct au0828_dev *dev = video_drvdata(file); 1617 struct au0828_dev *dev = video_drvdata(file);
1619 1618
1620 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1619 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1621 dev->std_set_in_tuner_core, dev->dev_state); 1620 dev->std_set_in_tuner_core, dev->dev_state);
1622 1621
1623 format->fmt.vbi.samples_per_line = dev->vbi_width; 1622 format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
1643 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1642 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1644 return -EINVAL; 1643 return -EINVAL;
1645 1644
1646 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1645 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1647 dev->std_set_in_tuner_core, dev->dev_state); 1646 dev->std_set_in_tuner_core, dev->dev_state);
1648 1647
1649 cc->bounds.left = 0; 1648 cc->bounds.left = 0;
@@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
1665{ 1664{
1666 struct au0828_dev *dev = video_drvdata(file); 1665 struct au0828_dev *dev = video_drvdata(file);
1667 1666
1668 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1667 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1669 dev->std_set_in_tuner_core, dev->dev_state); 1668 dev->std_set_in_tuner_core, dev->dev_state);
1670 1669
1671 reg->val = au0828_read(dev, reg->reg); 1670 reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
1678{ 1677{
1679 struct au0828_dev *dev = video_drvdata(file); 1678 struct au0828_dev *dev = video_drvdata(file);
1680 1679
1681 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1680 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1682 dev->std_set_in_tuner_core, dev->dev_state); 1681 dev->std_set_in_tuner_core, dev->dev_state);
1683 1682
1684 return au0828_writereg(dev, reg->reg, reg->val); 1683 return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ff7f8510fb77..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 23
24#include <linux/bitops.h>
24#include <linux/usb.h> 25#include <linux/usb.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
26#include <linux/i2c-algo-bit.h> 27#include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
121 122
122/* device state */ 123/* device state */
123enum au0828_dev_state { 124enum au0828_dev_state {
124 DEV_INITIALIZED = 0x01, 125 DEV_INITIALIZED = 0,
125 DEV_DISCONNECTED = 0x02, 126 DEV_DISCONNECTED = 1,
126 DEV_MISCONFIGURED = 0x04 127 DEV_MISCONFIGURED = 2
127}; 128};
128 129
129struct au0828_dev; 130struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
247 int input_type; 248 int input_type;
248 int std_set_in_tuner_core; 249 int std_set_in_tuner_core;
249 unsigned int ctrl_input; 250 unsigned int ctrl_input;
250 enum au0828_dev_state dev_state; 251 long unsigned int dev_state; /* defined at enum au0828_dev_state */;
251 enum au0828_stream_state stream_state; 252 enum au0828_stream_state stream_state;
252 wait_queue_head_t open; 253 wait_queue_head_t open;
253 254
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 2a7b79bc90fd..2228cd3a846e 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -34,7 +34,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
34{ 34{
35 struct media_entity *entity; 35 struct media_entity *entity;
36 struct media_entity *if_vid = NULL, *if_aud = NULL; 36 struct media_entity *if_vid = NULL, *if_aud = NULL;
37 struct media_entity *tuner = NULL, *decoder = NULL, *dtv_demod = NULL; 37 struct media_entity *tuner = NULL, *decoder = NULL;
38 struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; 38 struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
39 bool is_webcam = false; 39 bool is_webcam = false;
40 u32 flags; 40 u32 flags;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index df4c052c6bd6..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
349 349
350 if (dma->pages) { 350 if (dma->pages) {
351 for (i = 0; i < dma->nr_pages; i++) 351 for (i = 0; i < dma->nr_pages; i++)
352 page_cache_release(dma->pages[i]); 352 put_page(dma->pages[i]);
353 kfree(dma->pages); 353 kfree(dma->pages);
354 dma->pages = NULL; 354 dma->pages = NULL;
355 } 355 }
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
116{ 116{
117 struct inode *root; 117 struct inode *root;
118 118
119 sb->s_blocksize = PAGE_CACHE_SIZE; 119 sb->s_blocksize = PAGE_SIZE;
120 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 120 sb->s_blocksize_bits = PAGE_SHIFT;
121 sb->s_magic = IBMASMFS_MAGIC; 121 sb->s_magic = IBMASMFS_MAGIC;
122 sb->s_op = &ibmasmfs_s_ops; 122 sb->s_op = &ibmasmfs_s_ops;
123 sb->s_time_gran = 1; 123 sb->s_time_gran = 1;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
728 if (dirty) 728 if (dirty)
729 set_page_dirty(pages[i]); 729 set_page_dirty(pages[i]);
730 730
731 page_cache_release(pages[i]); 731 put_page(pages[i]);
732 pages[i] = NULL; 732 pages[i] = NULL;
733 } 733 }
734} 734}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d94607611d8..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
356 * They have to set these according to their abilities. 356 * They have to set these according to their abilities.
357 */ 357 */
358 host->max_segs = 1; 358 host->max_segs = 1;
359 host->max_seg_size = PAGE_CACHE_SIZE; 359 host->max_seg_size = PAGE_SIZE;
360 360
361 host->max_req_size = PAGE_CACHE_SIZE; 361 host->max_req_size = PAGE_SIZE;
362 host->max_blk_size = 512; 362 host->max_blk_size = 512;
363 host->max_blk_count = PAGE_CACHE_SIZE / 512; 363 host->max_blk_count = PAGE_SIZE / 512;
364 364
365 return host; 365 return host;
366} 366}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 62aa5d0efcee..79e19017343e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
390 slot->cd_idx = 0; 390 slot->cd_idx = 0;
391 slot->cd_override_level = true; 391 slot->cd_override_level = true;
392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || 392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) 394 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
394 slot->host->mmc_host_ops.get_cd = bxt_get_cd; 395 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
395 396
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
1173 1174
1174 { 1175 {
1175 .vendor = PCI_VENDOR_ID_INTEL, 1176 .vendor = PCI_VENDOR_ID_INTEL,
1177 .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
1178 .subvendor = PCI_ANY_ID,
1179 .subdevice = PCI_ANY_ID,
1180 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1181 },
1182
1183 {
1184 .vendor = PCI_VENDOR_ID_INTEL,
1185 .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
1186 .subvendor = PCI_ANY_ID,
1187 .subdevice = PCI_ANY_ID,
1188 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
1189 },
1190
1191 {
1192 .vendor = PCI_VENDOR_ID_INTEL,
1193 .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
1194 .subvendor = PCI_ANY_ID,
1195 .subdevice = PCI_ANY_ID,
1196 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
1197 },
1198
1199 {
1200 .vendor = PCI_VENDOR_ID_INTEL,
1176 .device = PCI_DEVICE_ID_INTEL_APL_EMMC, 1201 .device = PCI_DEVICE_ID_INTEL_APL_EMMC,
1177 .subvendor = PCI_ANY_ID, 1202 .subvendor = PCI_ANY_ID,
1178 .subdevice = PCI_ANY_ID, 1203 .subdevice = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca 28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc 29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
31#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
32#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
33#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
31#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca 34#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
32#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc 35#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
33#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0 36#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index aca439d3ca83..30132500aa1c 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
309 __func__, uhs, ctrl_2); 309 __func__, uhs, ctrl_2);
310} 310}
311 311
312static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
313 unsigned short vdd)
314{
315 struct mmc_host *mmc = host->mmc;
316 u8 pwr = host->pwr;
317
318 sdhci_set_power(host, mode, vdd);
319
320 if (host->pwr == pwr)
321 return;
322
323 if (host->pwr == 0)
324 vdd = 0;
325
326 if (!IS_ERR(mmc->supply.vmmc)) {
327 spin_unlock_irq(&host->lock);
328 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
329 spin_lock_irq(&host->lock);
330 }
331}
332
312static const struct sdhci_ops pxav3_sdhci_ops = { 333static const struct sdhci_ops pxav3_sdhci_ops = {
313 .set_clock = sdhci_set_clock, 334 .set_clock = sdhci_set_clock,
335 .set_power = pxav3_set_power,
314 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, 336 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
315 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 337 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
316 .set_bus_width = sdhci_set_bus_width, 338 .set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8670f162dec7..6bd3d1794966 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1210,10 +1210,24 @@ clock_set:
1210} 1210}
1211EXPORT_SYMBOL_GPL(sdhci_set_clock); 1211EXPORT_SYMBOL_GPL(sdhci_set_clock);
1212 1212
1213static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1213static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1214 unsigned short vdd) 1214 unsigned short vdd)
1215{ 1215{
1216 struct mmc_host *mmc = host->mmc; 1216 struct mmc_host *mmc = host->mmc;
1217
1218 spin_unlock_irq(&host->lock);
1219 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1220 spin_lock_irq(&host->lock);
1221
1222 if (mode != MMC_POWER_OFF)
1223 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1224 else
1225 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1226}
1227
1228void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1229 unsigned short vdd)
1230{
1217 u8 pwr = 0; 1231 u8 pwr = 0;
1218 1232
1219 if (mode != MMC_POWER_OFF) { 1233 if (mode != MMC_POWER_OFF) {
@@ -1245,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1245 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1259 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1246 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1260 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1247 sdhci_runtime_pm_bus_off(host); 1261 sdhci_runtime_pm_bus_off(host);
1248 vdd = 0;
1249 } else { 1262 } else {
1250 /* 1263 /*
1251 * Spec says that we should clear the power reg before setting 1264 * Spec says that we should clear the power reg before setting
@@ -1276,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1276 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1289 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1277 mdelay(10); 1290 mdelay(10);
1278 } 1291 }
1292}
1293EXPORT_SYMBOL_GPL(sdhci_set_power);
1279 1294
1280 if (!IS_ERR(mmc->supply.vmmc)) { 1295static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1281 spin_unlock_irq(&host->lock); 1296 unsigned short vdd)
1282 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1297{
1283 spin_lock_irq(&host->lock); 1298 struct mmc_host *mmc = host->mmc;
1284 } 1299
1300 if (host->ops->set_power)
1301 host->ops->set_power(host, mode, vdd);
1302 else if (!IS_ERR(mmc->supply.vmmc))
1303 sdhci_set_power_reg(host, mode, vdd);
1304 else
1305 sdhci_set_power(host, mode, vdd);
1285} 1306}
1286 1307
1287/*****************************************************************************\ 1308/*****************************************************************************\
@@ -1431,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1431 } 1452 }
1432 } 1453 }
1433 1454
1434 sdhci_set_power(host, ios->power_mode, ios->vdd); 1455 __sdhci_set_power(host, ios->power_mode, ios->vdd);
1435 1456
1436 if (host->ops->platform_send_init_74_clocks) 1457 if (host->ops->platform_send_init_74_clocks)
1437 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1458 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 3bd28033dbd9..0f39f4f84d10 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -529,6 +529,8 @@ struct sdhci_ops {
529#endif 529#endif
530 530
531 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 531 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
532 void (*set_power)(struct sdhci_host *host, unsigned char mode,
533 unsigned short vdd);
532 534
533 int (*enable_dma)(struct sdhci_host *host); 535 int (*enable_dma)(struct sdhci_host *host);
534 unsigned int (*get_max_clock)(struct sdhci_host *host); 536 unsigned int (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
660} 662}
661 663
662void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); 664void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
665void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
666 unsigned short vdd);
663void sdhci_set_bus_width(struct sdhci_host *host, int width); 667void sdhci_set_bus_width(struct sdhci_host *host, int width);
664void sdhci_reset(struct sdhci_host *host, u8 mask); 668void sdhci_reset(struct sdhci_host *host, u8 mask);
665void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 669void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8d870ce9f944..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1513 mmc->caps |= pd->caps; 1513 mmc->caps |= pd->caps;
1514 mmc->max_segs = 32; 1514 mmc->max_segs = 32;
1515 mmc->max_blk_size = 512; 1515 mmc->max_blk_size = 512;
1516 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1516 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1518 mmc->max_seg_size = mmc->max_req_size; 1518 mmc->max_seg_size = mmc->max_req_size;
1519 1519
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 675435873823..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
63 } 63 }
64 } 64 }
65 65
66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
67 (align & PAGE_MASK))) || !multiple) { 67 (align & PAGE_MASK))) || !multiple) {
68 ret = -EINVAL; 68 ret = -EINVAL;
69 goto pio; 69 goto pio;
@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
133 } 133 }
134 } 134 }
135 135
136 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 136 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
137 (align & PAGE_MASK))) || !multiple) { 137 (align & PAGE_MASK))) || !multiple) {
138 ret = -EINVAL; 138 ret = -EINVAL;
139 goto pio; 139 goto pio;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 03f6e74c1906..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1125 mmc->caps2 |= pdata->capabilities2; 1125 mmc->caps2 |= pdata->capabilities2;
1126 mmc->max_segs = 32; 1126 mmc->max_segs = 32;
1127 mmc->max_blk_size = 512; 1127 mmc->max_blk_size = 512;
1128 mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 1128 mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
1129 mmc->max_segs; 1129 mmc->max_segs;
1130 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1130 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1131 mmc->max_seg_size = mmc->max_req_size; 1131 mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b2752fe711f2..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
1789 /* Set .max_segs to some random number. Feel free to adjust. */ 1789 /* Set .max_segs to some random number. Feel free to adjust. */
1790 mmc->max_segs = 32; 1790 mmc->max_segs = 32;
1791 mmc->max_blk_size = 512; 1791 mmc->max_blk_size = 512;
1792 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1792 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1793 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1793 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1794 /* 1794 /*
1795 * Setting .max_seg_size to 1 page would simplify our page-mapping code, 1795 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
75 break; 75 break;
76 } 76 }
77 77
78 page_cache_release(page); 78 put_page(page);
79 pages--; 79 pages--;
80 index++; 80 index++;
81 } 81 }
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
124 return PTR_ERR(page); 124 return PTR_ERR(page);
125 125
126 memcpy(buf, page_address(page) + offset, cpylen); 126 memcpy(buf, page_address(page) + offset, cpylen);
127 page_cache_release(page); 127 put_page(page);
128 128
129 if (retlen) 129 if (retlen)
130 *retlen += cpylen; 130 *retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
164 unlock_page(page); 164 unlock_page(page);
165 balance_dirty_pages_ratelimited(mapping); 165 balance_dirty_pages_ratelimited(mapping);
166 } 166 }
167 page_cache_release(page); 167 put_page(page);
168 168
169 if (retlen) 169 if (retlen)
170 *retlen += cpylen; 170 *retlen += cpylen;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
1339 int i; 1339 int i;
1340 1340
1341 for (i = 0; i < ns->held_cnt; i++) 1341 for (i = 0; i < ns->held_cnt; i++)
1342 page_cache_release(ns->held_pages[i]); 1342 put_page(ns->held_pages[i]);
1343} 1343}
1344 1344
1345/* Get page cache pages in advance to provide NOFS memory allocation */ 1345/* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
1349 struct page *page; 1349 struct page *page;
1350 struct address_space *mapping = file->f_mapping; 1350 struct address_space *mapping = file->f_mapping;
1351 1351
1352 start_index = pos >> PAGE_CACHE_SHIFT; 1352 start_index = pos >> PAGE_SHIFT;
1353 end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; 1353 end_index = (pos + count - 1) >> PAGE_SHIFT;
1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) 1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1355 return -EINVAL; 1355 return -EINVAL;
1356 ns->held_cnt = 0; 1356 ns->held_cnt = 0;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fa086e09d6b7..50454be86570 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2264,6 +2264,57 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
2264 mutex_unlock(&ps->smi_mutex); 2264 mutex_unlock(&ps->smi_mutex);
2265} 2265}
2266 2266
2267static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2268 int reg, int val)
2269{
2270 int ret;
2271
2272 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2273 if (ret < 0)
2274 goto restore_page_0;
2275
2276 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2277restore_page_0:
2278 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2279
2280 return ret;
2281}
2282
2283static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
2284 int reg)
2285{
2286 int ret;
2287
2288 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2289 if (ret < 0)
2290 goto restore_page_0;
2291
2292 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
2293restore_page_0:
2294 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2295
2296 return ret;
2297}
2298
2299static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
2300{
2301 int ret;
2302
2303 ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2304 MII_BMCR);
2305 if (ret < 0)
2306 return ret;
2307
2308 if (ret & BMCR_PDOWN) {
2309 ret &= ~BMCR_PDOWN;
2310 ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
2311 PAGE_FIBER_SERDES, MII_BMCR,
2312 ret);
2313 }
2314
2315 return ret;
2316}
2317
2267static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) 2318static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2268{ 2319{
2269 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2320 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -2367,6 +2418,23 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2367 goto abort; 2418 goto abort;
2368 } 2419 }
2369 2420
2421 /* If this port is connected to a SerDes, make sure the SerDes is not
2422 * powered down.
2423 */
2424 if (mv88e6xxx_6352_family(ds)) {
2425 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
2426 if (ret < 0)
2427 goto abort;
2428 ret &= PORT_STATUS_CMODE_MASK;
2429 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2430 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2431 (ret == PORT_STATUS_CMODE_SGMII)) {
2432 ret = mv88e6xxx_power_on_serdes(ds);
2433 if (ret < 0)
2434 goto abort;
2435 }
2436 }
2437
2370 /* Port Control 2: don't force a good FCS, set the maximum frame size to 2438 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2371 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or 2439 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2372 * untagged frames on this port, do a destination address lookup on all 2440 * untagged frames on this port, do a destination address lookup on all
@@ -2714,13 +2782,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2714 int ret; 2782 int ret;
2715 2783
2716 mutex_lock(&ps->smi_mutex); 2784 mutex_lock(&ps->smi_mutex);
2717 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); 2785 ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
2718 if (ret < 0)
2719 goto error;
2720 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
2721error:
2722 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2723 mutex_unlock(&ps->smi_mutex); 2786 mutex_unlock(&ps->smi_mutex);
2787
2724 return ret; 2788 return ret;
2725} 2789}
2726 2790
@@ -2731,14 +2795,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2731 int ret; 2795 int ret;
2732 2796
2733 mutex_lock(&ps->smi_mutex); 2797 mutex_lock(&ps->smi_mutex);
2734 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); 2798 ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
2735 if (ret < 0)
2736 goto error;
2737
2738 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2739error:
2740 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2741 mutex_unlock(&ps->smi_mutex); 2799 mutex_unlock(&ps->smi_mutex);
2800
2742 return ret; 2801 return ret;
2743} 2802}
2744 2803
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 9a038aba48fb..26a424acd10f 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -28,6 +28,10 @@
28#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) 28#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
29#define SMI_DATA 0x01 29#define SMI_DATA 0x01
30 30
31/* Fiber/SERDES Registers are located at SMI address F, page 1 */
32#define REG_FIBER_SERDES 0x0f
33#define PAGE_FIBER_SERDES 0x01
34
31#define REG_PORT(p) (0x10 + (p)) 35#define REG_PORT(p) (0x10 + (p))
32#define PORT_STATUS 0x00 36#define PORT_STATUS 0x00
33#define PORT_STATUS_PAUSE_EN BIT(15) 37#define PORT_STATUS_PAUSE_EN BIT(15)
@@ -45,6 +49,10 @@
45#define PORT_STATUS_MGMII BIT(6) /* 6185 */ 49#define PORT_STATUS_MGMII BIT(6) /* 6185 */
46#define PORT_STATUS_TX_PAUSED BIT(5) 50#define PORT_STATUS_TX_PAUSED BIT(5)
47#define PORT_STATUS_FLOW_CTRL BIT(4) 51#define PORT_STATUS_FLOW_CTRL BIT(4)
52#define PORT_STATUS_CMODE_MASK 0x0f
53#define PORT_STATUS_CMODE_100BASE_X 0x8
54#define PORT_STATUS_CMODE_1000BASE_X 0x9
55#define PORT_STATUS_CMODE_SGMII 0xa
48#define PORT_PCS_CTRL 0x01 56#define PORT_PCS_CTRL 0x01
49#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) 57#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
50#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) 58#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aabbd51db981..12a009d720cd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2653,7 +2653,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2653 /* Write request msg to hwrm channel */ 2653 /* Write request msg to hwrm channel */
2654 __iowrite32_copy(bp->bar0, data, msg_len / 4); 2654 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2655 2655
2656 for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) 2656 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
2657 writel(0, bp->bar0 + i); 2657 writel(0, bp->bar0 + i);
2658 2658
2659 /* currently supports only one outstanding message */ 2659 /* currently supports only one outstanding message */
@@ -3391,11 +3391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3391 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3391 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3392 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3392 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3393 3393
3394 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3394 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 3395 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3395 INVALID_STATS_CTX_ID); 3396 INVALID_STATS_CTX_ID);
3396 if (rc) 3397 if (rc)
3397 goto err_out; 3398 goto err_out;
3398 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3399 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3399 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3400 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 3400 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3401 } 3401 }
@@ -3830,6 +3830,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3830 struct hwrm_ver_get_input req = {0}; 3830 struct hwrm_ver_get_input req = {0};
3831 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 3831 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3832 3832
3833 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3833 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 3834 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3834 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 3835 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3835 req.hwrm_intf_min = HWRM_VERSION_MINOR; 3836 req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3855,6 +3856,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3855 if (!bp->hwrm_cmd_timeout) 3856 if (!bp->hwrm_cmd_timeout)
3856 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 3857 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
3857 3858
3859 if (resp->hwrm_intf_maj >= 1)
3860 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
3861
3858hwrm_ver_get_exit: 3862hwrm_ver_get_exit:
3859 mutex_unlock(&bp->hwrm_cmd_lock); 3863 mutex_unlock(&bp->hwrm_cmd_lock);
3860 return rc; 3864 return rc;
@@ -4555,7 +4559,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4555 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 4559 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4556 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4560 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4557 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 4561 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4558 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4562 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4559 req->enables |= 4563 req->enables |=
4560 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 4564 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4561 } else { 4565 } else {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec04c47172b7..709b95b8fcba 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -477,6 +477,7 @@ struct rx_tpa_end_cmp_ext {
477#define RING_CMP(idx) ((idx) & bp->cp_ring_mask) 477#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
478#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) 478#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
479 479
480#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
480#define DFLT_HWRM_CMD_TIMEOUT 500 481#define DFLT_HWRM_CMD_TIMEOUT 500
481#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 482#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
482#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 483#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -953,6 +954,7 @@ struct bnxt {
953 dma_addr_t hw_tx_port_stats_map; 954 dma_addr_t hw_tx_port_stats_map;
954 int hw_port_stats_size; 955 int hw_port_stats_size;
955 956
957 u16 hwrm_max_req_len;
956 int hwrm_cmd_timeout; 958 int hwrm_cmd_timeout;
957 struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ 959 struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
958 struct hwrm_ver_get_output ver_resp; 960 struct hwrm_ver_get_output ver_resp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ada1662b651..2e472f6dbf2d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -855,10 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
855 if (BNXT_VF(bp)) 855 if (BNXT_VF(bp))
856 return; 856 return;
857 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 857 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
858 epause->rx_pause = 858 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
859 ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0); 859 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
860 epause->tx_pause =
861 ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
862} 860}
863 861
864static int bnxt_set_pauseparam(struct net_device *dev, 862static int bnxt_set_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd03cb3a..cf6445d148ca 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1171,6 +1171,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1171 struct enet_cb *tx_cb_ptr; 1171 struct enet_cb *tx_cb_ptr;
1172 struct netdev_queue *txq; 1172 struct netdev_queue *txq;
1173 unsigned int pkts_compl = 0; 1173 unsigned int pkts_compl = 0;
1174 unsigned int bytes_compl = 0;
1174 unsigned int c_index; 1175 unsigned int c_index;
1175 unsigned int txbds_ready; 1176 unsigned int txbds_ready;
1176 unsigned int txbds_processed = 0; 1177 unsigned int txbds_processed = 0;
@@ -1193,16 +1194,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1193 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; 1194 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
1194 if (tx_cb_ptr->skb) { 1195 if (tx_cb_ptr->skb) {
1195 pkts_compl++; 1196 pkts_compl++;
1196 dev->stats.tx_packets++; 1197 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
1197 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1198 dma_unmap_single(&dev->dev, 1198 dma_unmap_single(&dev->dev,
1199 dma_unmap_addr(tx_cb_ptr, dma_addr), 1199 dma_unmap_addr(tx_cb_ptr, dma_addr),
1200 dma_unmap_len(tx_cb_ptr, dma_len), 1200 dma_unmap_len(tx_cb_ptr, dma_len),
1201 DMA_TO_DEVICE); 1201 DMA_TO_DEVICE);
1202 bcmgenet_free_cb(tx_cb_ptr); 1202 bcmgenet_free_cb(tx_cb_ptr);
1203 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 1203 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1204 dev->stats.tx_bytes +=
1205 dma_unmap_len(tx_cb_ptr, dma_len);
1206 dma_unmap_page(&dev->dev, 1204 dma_unmap_page(&dev->dev,
1207 dma_unmap_addr(tx_cb_ptr, dma_addr), 1205 dma_unmap_addr(tx_cb_ptr, dma_addr),
1208 dma_unmap_len(tx_cb_ptr, dma_len), 1206 dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1218,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1220 ring->free_bds += txbds_processed; 1218 ring->free_bds += txbds_processed;
1221 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; 1219 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
1222 1220
1221 dev->stats.tx_packets += pkts_compl;
1222 dev->stats.tx_bytes += bytes_compl;
1223
1223 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { 1224 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1224 txq = netdev_get_tx_queue(dev, ring->queue); 1225 txq = netdev_get_tx_queue(dev, ring->queue);
1225 if (netif_tx_queue_stopped(txq)) 1226 if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1297,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
1296 1297
1297 tx_cb_ptr->skb = skb; 1298 tx_cb_ptr->skb = skb;
1298 1299
1299 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); 1300 skb_len = skb_headlen(skb);
1300 1301
1301 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1302 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1302 ret = dma_mapping_error(kdev, mapping); 1303 ret = dma_mapping_error(kdev, mapping);
@@ -1464,6 +1465,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1464 goto out; 1465 goto out;
1465 } 1466 }
1466 1467
1468 /* Retain how many bytes will be sent on the wire, without TSB inserted
1469 * by transmit checksum offload
1470 */
1471 GENET_CB(skb)->bytes_sent = skb->len;
1472
1467 /* set the SKB transmit checksum */ 1473 /* set the SKB transmit checksum */
1468 if (priv->desc_64b_en) { 1474 if (priv->desc_64b_en) {
1469 skb = bcmgenet_put_tx_csum(dev, skb); 1475 skb = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 967367557309..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
531 u32 flags; 531 u32 flags;
532}; 532};
533 533
534struct bcmgenet_skb_cb {
535 unsigned int bytes_sent; /* bytes on the wire (no TSB) */
536};
537
538#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
539
534struct bcmgenet_tx_ring { 540struct bcmgenet_tx_ring {
535 spinlock_t lock; /* ring lock */ 541 spinlock_t lock; /* ring lock */
536 struct napi_struct napi; /* NAPI per tx queue */ 542 struct napi_struct napi; /* NAPI per tx queue */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 6619178ed77b..48a7d7dee846 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -917,7 +917,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
917 unsigned int frag_len = bp->rx_buffer_size; 917 unsigned int frag_len = bp->rx_buffer_size;
918 918
919 if (offset + frag_len > len) { 919 if (offset + frag_len > len) {
920 BUG_ON(frag != last_frag); 920 if (unlikely(frag != last_frag)) {
921 dev_kfree_skb_any(skb);
922 return -1;
923 }
921 frag_len = len - offset; 924 frag_len = len - offset;
922 } 925 }
923 skb_copy_to_linear_data_offset(skb, offset, 926 skb_copy_to_linear_data_offset(skb, offset,
@@ -945,8 +948,23 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
945 return 0; 948 return 0;
946} 949}
947 950
951static inline void macb_init_rx_ring(struct macb *bp)
952{
953 dma_addr_t addr;
954 int i;
955
956 addr = bp->rx_buffers_dma;
957 for (i = 0; i < RX_RING_SIZE; i++) {
958 bp->rx_ring[i].addr = addr;
959 bp->rx_ring[i].ctrl = 0;
960 addr += bp->rx_buffer_size;
961 }
962 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
963}
964
948static int macb_rx(struct macb *bp, int budget) 965static int macb_rx(struct macb *bp, int budget)
949{ 966{
967 bool reset_rx_queue = false;
950 int received = 0; 968 int received = 0;
951 unsigned int tail; 969 unsigned int tail;
952 int first_frag = -1; 970 int first_frag = -1;
@@ -972,10 +990,18 @@ static int macb_rx(struct macb *bp, int budget)
972 990
973 if (ctrl & MACB_BIT(RX_EOF)) { 991 if (ctrl & MACB_BIT(RX_EOF)) {
974 int dropped; 992 int dropped;
975 BUG_ON(first_frag == -1); 993
994 if (unlikely(first_frag == -1)) {
995 reset_rx_queue = true;
996 continue;
997 }
976 998
977 dropped = macb_rx_frame(bp, first_frag, tail); 999 dropped = macb_rx_frame(bp, first_frag, tail);
978 first_frag = -1; 1000 first_frag = -1;
1001 if (unlikely(dropped < 0)) {
1002 reset_rx_queue = true;
1003 continue;
1004 }
979 if (!dropped) { 1005 if (!dropped) {
980 received++; 1006 received++;
981 budget--; 1007 budget--;
@@ -983,6 +1009,26 @@ static int macb_rx(struct macb *bp, int budget)
983 } 1009 }
984 } 1010 }
985 1011
1012 if (unlikely(reset_rx_queue)) {
1013 unsigned long flags;
1014 u32 ctrl;
1015
1016 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1017
1018 spin_lock_irqsave(&bp->lock, flags);
1019
1020 ctrl = macb_readl(bp, NCR);
1021 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1022
1023 macb_init_rx_ring(bp);
1024 macb_writel(bp, RBQP, bp->rx_ring_dma);
1025
1026 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1027
1028 spin_unlock_irqrestore(&bp->lock, flags);
1029 return received;
1030 }
1031
986 if (first_frag != -1) 1032 if (first_frag != -1)
987 bp->rx_tail = first_frag; 1033 bp->rx_tail = first_frag;
988 else 1034 else
@@ -1100,7 +1146,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1100 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1146 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1101 1147
1102 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1148 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1103 macb_writel(bp, ISR, MACB_BIT(RXUBR)); 1149 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1104 } 1150 }
1105 1151
1106 if (status & MACB_BIT(ISR_ROVR)) { 1152 if (status & MACB_BIT(ISR_ROVR)) {
@@ -1523,15 +1569,8 @@ static void gem_init_rings(struct macb *bp)
1523static void macb_init_rings(struct macb *bp) 1569static void macb_init_rings(struct macb *bp)
1524{ 1570{
1525 int i; 1571 int i;
1526 dma_addr_t addr;
1527 1572
1528 addr = bp->rx_buffers_dma; 1573 macb_init_rx_ring(bp);
1529 for (i = 0; i < RX_RING_SIZE; i++) {
1530 bp->rx_ring[i].addr = addr;
1531 bp->rx_ring[i].ctrl = 0;
1532 addr += bp->rx_buffer_size;
1533 }
1534 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1535 1574
1536 for (i = 0; i < TX_RING_SIZE; i++) { 1575 for (i = 0; i < TX_RING_SIZE; i++) {
1537 bp->queues[0].tx_ring[i].addr = 0; 1576 bp->queues[0].tx_ring[i].addr = 0;
@@ -2957,9 +2996,10 @@ static int macb_probe(struct platform_device *pdev)
2957 phy_node = of_get_next_available_child(np, NULL); 2996 phy_node = of_get_next_available_child(np, NULL);
2958 if (phy_node) { 2997 if (phy_node) {
2959 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); 2998 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
2960 if (gpio_is_valid(gpio)) 2999 if (gpio_is_valid(gpio)) {
2961 bp->reset_gpio = gpio_to_desc(gpio); 3000 bp->reset_gpio = gpio_to_desc(gpio);
2962 gpiod_direction_output(bp->reset_gpio, 1); 3001 gpiod_direction_output(bp->reset_gpio, 1);
3002 }
2963 } 3003 }
2964 of_node_put(phy_node); 3004 of_node_put(phy_node);
2965 3005
@@ -3029,7 +3069,8 @@ static int macb_remove(struct platform_device *pdev)
3029 mdiobus_free(bp->mii_bus); 3069 mdiobus_free(bp->mii_bus);
3030 3070
3031 /* Shutdown the PHY if there is a GPIO reset */ 3071 /* Shutdown the PHY if there is a GPIO reset */
3032 gpiod_set_value(bp->reset_gpio, 0); 3072 if (bp->reset_gpio)
3073 gpiod_set_value(bp->reset_gpio, 0);
3033 3074
3034 unregister_netdev(dev); 3075 unregister_netdev(dev);
3035 clk_disable_unprepare(bp->tx_clk); 3076 clk_disable_unprepare(bp->tx_clk);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ 168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
169 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
169 170
170 /* T6 adapters: 171 /* T6 adapters:
171 */ 172 */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 37c081583084..08243c2ff4b4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -943,8 +943,8 @@ fec_restart(struct net_device *ndev)
943 else 943 else
944 val &= ~FEC_RACC_OPTIONS; 944 val &= ~FEC_RACC_OPTIONS;
945 writel(val, fep->hwp + FEC_RACC); 945 writel(val, fep->hwp + FEC_RACC);
946 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
946 } 947 }
947 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
948#endif 948#endif
949 949
950 /* 950 /*
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 37d0cce392be..e8d36aaea223 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -469,7 +469,7 @@ struct hnae_ae_ops {
469 u32 *tx_usecs, u32 *rx_usecs); 469 u32 *tx_usecs, u32 *rx_usecs);
470 void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle, 470 void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
471 u32 *tx_frames, u32 *rx_frames); 471 u32 *tx_frames, u32 *rx_frames);
472 void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); 472 int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
473 int (*set_coalesce_frames)(struct hnae_handle *handle, 473 int (*set_coalesce_frames)(struct hnae_handle *handle,
474 u32 coalesce_frames); 474 u32 coalesce_frames);
475 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); 475 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 285c893ab135..a1cb461ac45f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
159 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i]; 159 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
160 160
161 ring_pair_cb->used_by_vf = 1; 161 ring_pair_cb->used_by_vf = 1;
162 if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
163 ring_pair_cb->port_id_in_dsa = port_idx;
164 else
165 ring_pair_cb->port_id_in_dsa = 0;
166
167 ring_pair_cb++; 162 ring_pair_cb++;
168 } 163 }
169 164
@@ -453,59 +448,46 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
453static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, 448static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
454 u32 *tx_usecs, u32 *rx_usecs) 449 u32 *tx_usecs, u32 *rx_usecs)
455{ 450{
456 int port; 451 struct ring_pair_cb *ring_pair =
457 452 container_of(handle->qs[0], struct ring_pair_cb, q);
458 port = hns_ae_map_eport_to_dport(handle->eport_id);
459 453
460 *tx_usecs = hns_rcb_get_coalesce_usecs( 454 *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
461 hns_ae_get_dsaf_dev(handle->dev), 455 ring_pair->port_id_in_comm);
462 hns_dsaf_get_comm_idx_by_port(port)); 456 *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
463 *rx_usecs = hns_rcb_get_coalesce_usecs( 457 ring_pair->port_id_in_comm);
464 hns_ae_get_dsaf_dev(handle->dev),
465 hns_dsaf_get_comm_idx_by_port(port));
466} 458}
467 459
468static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, 460static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
469 u32 *tx_frames, u32 *rx_frames) 461 u32 *tx_frames, u32 *rx_frames)
470{ 462{
471 int port; 463 struct ring_pair_cb *ring_pair =
464 container_of(handle->qs[0], struct ring_pair_cb, q);
472 465
473 assert(handle); 466 *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
474 467 ring_pair->port_id_in_comm);
475 port = hns_ae_map_eport_to_dport(handle->eport_id); 468 *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
476 469 ring_pair->port_id_in_comm);
477 *tx_frames = hns_rcb_get_coalesced_frames(
478 hns_ae_get_dsaf_dev(handle->dev), port);
479 *rx_frames = hns_rcb_get_coalesced_frames(
480 hns_ae_get_dsaf_dev(handle->dev), port);
481} 470}
482 471
483static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle, 472static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
484 u32 timeout) 473 u32 timeout)
485{ 474{
486 int port; 475 struct ring_pair_cb *ring_pair =
476 container_of(handle->qs[0], struct ring_pair_cb, q);
487 477
488 assert(handle); 478 return hns_rcb_set_coalesce_usecs(
489 479 ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
490 port = hns_ae_map_eport_to_dport(handle->eport_id);
491
492 hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
493 port, timeout);
494} 480}
495 481
496static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, 482static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
497 u32 coalesce_frames) 483 u32 coalesce_frames)
498{ 484{
499 int port; 485 struct ring_pair_cb *ring_pair =
500 int ret; 486 container_of(handle->qs[0], struct ring_pair_cb, q);
501 487
502 assert(handle); 488 return hns_rcb_set_coalesced_frames(
503 489 ring_pair->rcb_common,
504 port = hns_ae_map_eport_to_dport(handle->eport_id); 490 ring_pair->port_id_in_comm, coalesce_frames);
505
506 ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
507 port, coalesce_frames);
508 return ret;
509} 491}
510 492
511void hns_ae_update_stats(struct hnae_handle *handle, 493void hns_ae_update_stats(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 6e2b76ede075..44abb08de155 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -664,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
664 return; 664 return;
665 665
666 for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { 666 for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
667 snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc); 667 snprintf(buff, ETH_GSTRING_LEN, "%s",
668 g_gmac_stats_string[i].desc);
668 buff = buff + ETH_GSTRING_LEN; 669 buff = buff + ETH_GSTRING_LEN;
669 } 670 }
670} 671}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5c1ac9ba1bf2..5978a5c8ef35 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2219,17 +2219,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2219 /* dsaf onode registers */ 2219 /* dsaf onode registers */
2220 for (i = 0; i < DSAF_XOD_NUM; i++) { 2220 for (i = 0; i < DSAF_XOD_NUM; i++) {
2221 p[311 + i] = dsaf_read_dev(ddev, 2221 p[311 + i] = dsaf_read_dev(ddev,
2222 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90); 2222 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
2223 p[319 + i] = dsaf_read_dev(ddev, 2223 p[319 + i] = dsaf_read_dev(ddev,
2224 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90); 2224 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
2225 p[327 + i] = dsaf_read_dev(ddev, 2225 p[327 + i] = dsaf_read_dev(ddev,
2226 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90); 2226 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
2227 p[335 + i] = dsaf_read_dev(ddev, 2227 p[335 + i] = dsaf_read_dev(ddev,
2228 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90); 2228 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
2229 p[343 + i] = dsaf_read_dev(ddev, 2229 p[343 + i] = dsaf_read_dev(ddev,
2230 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90); 2230 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
2231 p[351 + i] = dsaf_read_dev(ddev, 2231 p[351 + i] = dsaf_read_dev(ddev,
2232 DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90); 2232 DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
2233 } 2233 }
2234 2234
2235 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); 2235 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 607c3be42241..e69b02287c44 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
244 */ 244 */
245phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) 245phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
246{ 246{
247 u32 hilink3_mode; 247 u32 mode;
248 u32 hilink4_mode; 248 u32 reg;
249 u32 shift;
250 bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
249 void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; 251 void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
250 int dev_id = mac_cb->mac_id; 252 int mac_id = mac_cb->mac_id;
251 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; 253 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
252 254
253 hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG); 255 if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
254 hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG); 256 phy_if = PHY_INTERFACE_MODE_SGMII;
255 if (dev_id >= 0 && dev_id <= 3) { 257 } else if (mac_id >= 0 && mac_id <= 3) {
256 if (hilink4_mode == 0) 258 reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
257 phy_if = PHY_INTERFACE_MODE_SGMII; 259 mode = dsaf_read_reg(sys_ctl_vaddr, reg);
258 else 260 /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
261 shift = is_ver1 ? 0 : mac_id;
262 if (dsaf_get_bit(mode, shift))
259 phy_if = PHY_INTERFACE_MODE_XGMII; 263 phy_if = PHY_INTERFACE_MODE_XGMII;
260 } else if (dev_id >= 4 && dev_id <= 5) {
261 if (hilink3_mode == 0)
262 phy_if = PHY_INTERFACE_MODE_SGMII;
263 else 264 else
265 phy_if = PHY_INTERFACE_MODE_SGMII;
266 } else if (mac_id >= 4 && mac_id <= 7) {
267 reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
268 mode = dsaf_read_reg(sys_ctl_vaddr, reg);
269 /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
270 shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
271 if (dsaf_get_bit(mode, shift))
264 phy_if = PHY_INTERFACE_MODE_XGMII; 272 phy_if = PHY_INTERFACE_MODE_XGMII;
265 } else { 273 else
266 phy_if = PHY_INTERFACE_MODE_SGMII; 274 phy_if = PHY_INTERFACE_MODE_SGMII;
267 } 275 }
268
269 dev_dbg(mac_cb->dev,
270 "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
271 hilink3_mode, hilink4_mode, dev_id, phy_if);
272 return phy_if; 276 return phy_if;
273} 277}
274 278
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 12188807468c..28ee26e5c478 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
215 dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, 215 dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
216 bd_size_type); 216 bd_size_type);
217 dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, 217 dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
218 ring_pair->port_id_in_dsa); 218 ring_pair->port_id_in_comm);
219 dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG, 219 dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
220 ring_pair->port_id_in_dsa); 220 ring_pair->port_id_in_comm);
221 } else { 221 } else {
222 dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG, 222 dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
223 (u32)dma); 223 (u32)dma);
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
227 dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, 227 dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
228 bd_size_type); 228 bd_size_type);
229 dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, 229 dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
230 ring_pair->port_id_in_dsa); 230 ring_pair->port_id_in_comm);
231 dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG, 231 dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
232 ring_pair->port_id_in_dsa); 232 ring_pair->port_id_in_comm);
233 } 233 }
234} 234}
235 235
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
256 desc_cnt); 256 desc_cnt);
257} 257}
258 258
259/** 259static void hns_rcb_set_port_timeout(
260 *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames 260 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
261 *@rcb_common: rcb_common device
262 *@port_idx:port index
263 *@coalesced_frames:BD num for coalesced frames
264 */
265static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
266 u32 port_idx,
267 u32 coalesced_frames)
268{
269 if (coalesced_frames >= rcb_common->desc_num ||
270 coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
271 return -EINVAL;
272
273 dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
274 coalesced_frames);
275 return 0;
276}
277
278/**
279 *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
280 *@rcb_common: rcb_common device
281 *@port_idx:port index
282 * return coaleseced frames value
283 */
284static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
285 u32 port_idx)
286{ 261{
287 if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) 262 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
288 port_idx = 0; 263 dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
289 264 timeout * HNS_RCB_CLK_FREQ_MHZ);
290 return dsaf_read_dev(rcb_common, 265 else
291 RCB_CFG_PKTLINE_REG + port_idx * 4); 266 dsaf_write_dev(rcb_common,
292} 267 RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
293 268 timeout);
294/**
295 *hns_rcb_set_timeout - set rcb port coalesced time_out
296 *@rcb_common: rcb_common device
297 *@time_out:time for coalesced time_out
298 */
299static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
300 u32 timeout)
301{
302 dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
303} 269}
304 270
305static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) 271static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
361 327
362 for (i = 0; i < port_num; i++) { 328 for (i = 0; i < port_num; i++) {
363 hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); 329 hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
364 (void)hns_rcb_set_port_coalesced_frames( 330 (void)hns_rcb_set_coalesced_frames(
365 rcb_common, i, rcb_common->coalesced_frames); 331 rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
332 hns_rcb_set_port_timeout(
333 rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
366 } 334 }
367 hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
368 335
369 dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, 336 dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
370 HNS_RCB_COMMON_ENDIAN); 337 HNS_RCB_COMMON_ENDIAN);
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
460 hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING); 427 hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
461} 428}
462 429
463static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) 430static int hns_rcb_get_port_in_comm(
431 struct rcb_common_cb *rcb_common, int ring_idx)
464{ 432{
465 int comm_index = rcb_common->comm_index; 433 int comm_index = rcb_common->comm_index;
466 int port; 434 int port;
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
470 q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; 438 q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
471 port = ring_idx / q_num; 439 port = ring_idx / q_num;
472 } else { 440 } else {
473 port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1; 441 port = 0; /* config debug-ports port_id_in_comm to 0*/
474 } 442 }
475 443
476 return port; 444 return port;
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
518 ring_pair_cb->index = i; 486 ring_pair_cb->index = i;
519 ring_pair_cb->q.io_base = 487 ring_pair_cb->q.io_base =
520 RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); 488 RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
521 ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); 489 ring_pair_cb->port_id_in_comm =
490 hns_rcb_get_port_in_comm(rcb_common, i);
522 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = 491 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
523 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : 492 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
524 platform_get_irq(pdev, base_irq_idx + i * 3 + 1); 493 platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
534/** 503/**
535 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames 504 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
536 *@rcb_common: rcb_common device 505 *@rcb_common: rcb_common device
537 *@comm_index:port index 506 *@port_idx:port id in comm
538 *return coalesced_frames 507 *
508 *Returns: coalesced_frames
539 */ 509 */
540u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port) 510u32 hns_rcb_get_coalesced_frames(
511 struct rcb_common_cb *rcb_common, u32 port_idx)
541{ 512{
542 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 513 return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
543 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
544
545 return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
546} 514}
547 515
548/** 516/**
549 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out 517 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
550 *@rcb_common: rcb_common device 518 *@rcb_common: rcb_common device
551 *@comm_index:port index 519 *@port_idx:port id in comm
552 *return time_out 520 *
521 *Returns: time_out
553 */ 522 */
554u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index) 523u32 hns_rcb_get_coalesce_usecs(
524 struct rcb_common_cb *rcb_common, u32 port_idx)
555{ 525{
556 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; 526 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
557 527 return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
558 return rcb_comm->timeout; 528 HNS_RCB_CLK_FREQ_MHZ;
529 else
530 return dsaf_read_dev(rcb_common,
531 RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
559} 532}
560 533
561/** 534/**
562 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out 535 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
563 *@rcb_common: rcb_common device 536 *@rcb_common: rcb_common device
564 *@comm_index: comm :index 537 *@port_idx:port id in comm
565 *@etx_usecs:tx time for coalesced time_out 538 *@timeout:tx/rx time for coalesced time_out
566 *@rx_usecs:rx time for coalesced time_out 539 *
540 * Returns:
541 * Zero for success, or an error code in case of failure
567 */ 542 */
568void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, 543int hns_rcb_set_coalesce_usecs(
569 int port, u32 timeout) 544 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
570{ 545{
571 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 546 u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
572 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
573 547
574 if (rcb_comm->timeout == timeout) 548 if (timeout == old_timeout)
575 return; 549 return 0;
576 550
577 if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { 551 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
578 dev_err(dsaf_dev->dev, 552 if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
579 "error: not support coalesce_usecs setting!\n"); 553 dev_err(rcb_common->dsaf_dev->dev,
580 return; 554 "error: not support coalesce_usecs setting!\n");
555 return -EINVAL;
556 }
581 } 557 }
582 rcb_comm->timeout = timeout; 558 if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
583 hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout); 559 dev_err(rcb_common->dsaf_dev->dev,
560 "error: not support coalesce %dus!\n", timeout);
561 return -EINVAL;
562 }
563 hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
564 return 0;
584} 565}
585 566
586/** 567/**
587 *hns_rcb_set_coalesced_frames - set rcb coalesced frames 568 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
588 *@rcb_common: rcb_common device 569 *@rcb_common: rcb_common device
589 *@tx_frames:tx BD num for coalesced frames 570 *@port_idx:port id in comm
590 *@rx_frames:rx BD num for coalesced frames 571 *@coalesced_frames:tx/rx BD num for coalesced frames
591 *Return 0 on success, negative on failure 572 *
573 * Returns:
574 * Zero for success, or an error code in case of failure
592 */ 575 */
593int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, 576int hns_rcb_set_coalesced_frames(
594 int port, u32 coalesced_frames) 577 struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
595{ 578{
596 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 579 u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
597 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
598 u32 coalesced_reg_val;
599 int ret;
600 580
601 coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port); 581 if (coalesced_frames == old_waterline)
602
603 if (coalesced_reg_val == coalesced_frames)
604 return 0; 582 return 0;
605 583
606 if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) { 584 if (coalesced_frames >= rcb_common->desc_num ||
607 ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port, 585 coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
608 coalesced_frames); 586 coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
609 return ret; 587 dev_err(rcb_common->dsaf_dev->dev,
610 } else { 588 "error: not support coalesce_frames setting!\n");
611 return -EINVAL; 589 return -EINVAL;
612 } 590 }
591
592 dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
593 coalesced_frames);
594 return 0;
613} 595}
614 596
615/** 597/**
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
749 rcb_common->dsaf_dev = dsaf_dev; 731 rcb_common->dsaf_dev = dsaf_dev;
750 732
751 rcb_common->desc_num = dsaf_dev->desc_num; 733 rcb_common->desc_num = dsaf_dev->desc_num;
752 rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
753 rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
754 734
755 hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); 735 hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
756 rcb_common->max_vfn = max_vfn; 736 rcb_common->max_vfn = max_vfn;
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index)
951void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) 931void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
952{ 932{
953 u32 *regs = data; 933 u32 *regs = data;
934 bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
935 bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
936 u32 reg_tmp;
937 u32 reg_num_tmp;
954 u32 i = 0; 938 u32 i = 0;
955 939
956 /*rcb common registers */ 940 /*rcb common registers */
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
1004 = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); 988 = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
1005 } 989 }
1006 990
1007 regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG); 991 reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
1008 regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); 992 reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
1009 regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); 993 for (i = 0; i < reg_num_tmp; i++)
994 regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
995
996 regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
997 regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
1010 998
1011 /* mark end of rcb common regs */ 999 /* mark end of rcb common regs */
1012 for (i = 73; i < 80; i++) 1000 for (i = 78; i < 80; i++)
1013 regs[i] = 0xcccccccc; 1001 regs[i] = 0xcccccccc;
1014} 1002}
1015 1003
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 81fe9f849973..eb61014ad615 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -38,7 +38,9 @@ struct rcb_common_cb;
38#define HNS_RCB_MAX_COALESCED_FRAMES 1023 38#define HNS_RCB_MAX_COALESCED_FRAMES 1023
39#define HNS_RCB_MIN_COALESCED_FRAMES 1 39#define HNS_RCB_MIN_COALESCED_FRAMES 1
40#define HNS_RCB_DEF_COALESCED_FRAMES 50 40#define HNS_RCB_DEF_COALESCED_FRAMES 50
41#define HNS_RCB_MAX_TIME_OUT 0x500 41#define HNS_RCB_CLK_FREQ_MHZ 350
42#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
43#define HNS_RCB_DEF_COALESCED_USECS 3
42 44
43#define HNS_RCB_COMMON_ENDIAN 1 45#define HNS_RCB_COMMON_ENDIAN 1
44 46
@@ -82,7 +84,7 @@ struct ring_pair_cb {
82 84
83 int virq[HNS_RCB_IRQ_NUM_PER_QUEUE]; 85 int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
84 86
85 u8 port_id_in_dsa; 87 u8 port_id_in_comm;
86 u8 used_by_vf; 88 u8 used_by_vf;
87 89
88 struct hns_ring_hw_stats hw_stats; 90 struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@ struct rcb_common_cb {
97 99
98 u8 comm_index; 100 u8 comm_index;
99 u32 ring_num; 101 u32 ring_num;
100 u32 coalesced_frames; /* frames threshold of rx interrupt */
101 u32 timeout; /* time threshold of rx interrupt */
102 u32 desc_num; /* desc num per queue*/ 102 u32 desc_num; /* desc num per queue*/
103 103
104 struct ring_pair_cb ring_pair_cb[0]; 104 struct ring_pair_cb ring_pair_cb[0];
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
125void hns_rcb_init_hw(struct ring_pair_cb *ring); 125void hns_rcb_init_hw(struct ring_pair_cb *ring);
126void hns_rcb_reset_ring_hw(struct hnae_queue *q); 126void hns_rcb_reset_ring_hw(struct hnae_queue *q);
127void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); 127void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
128 128u32 hns_rcb_get_coalesced_frames(
129u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index); 129 struct rcb_common_cb *rcb_common, u32 port_idx);
130u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index); 130u32 hns_rcb_get_coalesce_usecs(
131void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, 131 struct rcb_common_cb *rcb_common, u32 port_idx);
132 int comm_index, u32 timeout); 132int hns_rcb_set_coalesce_usecs(
133int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, 133 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
134 int comm_index, u32 coalesce_frames); 134int hns_rcb_set_coalesced_frames(
135 struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
135void hns_rcb_update_stats(struct hnae_queue *queue); 136void hns_rcb_update_stats(struct hnae_queue *queue);
136 137
137void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data); 138void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index bf62687e5ea7..7d7204f45e78 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -103,6 +103,8 @@
103/*serdes offset**/ 103/*serdes offset**/
104#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 104#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
105#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 105#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
106#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
107#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
106#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL 108#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
107#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL 109#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
108#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL 110#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
@@ -404,6 +406,7 @@
404#define RCB_CFG_OVERTIME_REG 0x9300 406#define RCB_CFG_OVERTIME_REG 0x9300
405#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 407#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
406#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 408#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
409#define RCB_PORT_CFG_OVERTIME_REG 0x9430
407 410
408#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 411#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
409#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004 412#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 71aa37b4b338..687204b780b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -913,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
913static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) 913static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
914{ 914{
915 struct hnae_ring *ring = ring_data->ring; 915 struct hnae_ring *ring = ring_data->ring;
916 int head = ring->next_to_clean; 916 int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
917
918 /* for hardware bug fixed */
919 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
920 917
921 if (head != ring->next_to_clean) { 918 if (head != ring->next_to_clean) {
922 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( 919 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -959,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
959 napi_complete(napi); 956 napi_complete(napi);
960 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( 957 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
961 ring_data->ring, 0); 958 ring_data->ring, 0);
962 959 if (ring_data->fini_process)
963 ring_data->fini_process(ring_data); 960 ring_data->fini_process(ring_data);
964 return 0; 961 return 0;
965 } 962 }
966 963
@@ -1723,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1723{ 1720{
1724 struct hnae_handle *h = priv->ae_handle; 1721 struct hnae_handle *h = priv->ae_handle;
1725 struct hns_nic_ring_data *rd; 1722 struct hns_nic_ring_data *rd;
1723 bool is_ver1 = AE_IS_VER1(priv->enet_ver);
1726 int i; 1724 int i;
1727 1725
1728 if (h->q_num > NIC_MAX_Q_PER_VF) { 1726 if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1740,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1740 rd->queue_index = i; 1738 rd->queue_index = i;
1741 rd->ring = &h->qs[i]->tx_ring; 1739 rd->ring = &h->qs[i]->tx_ring;
1742 rd->poll_one = hns_nic_tx_poll_one; 1740 rd->poll_one = hns_nic_tx_poll_one;
1743 rd->fini_process = hns_nic_tx_fini_pro; 1741 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
1744 1742
1745 netif_napi_add(priv->netdev, &rd->napi, 1743 netif_napi_add(priv->netdev, &rd->napi,
1746 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 1744 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1752,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1752 rd->ring = &h->qs[i - h->q_num]->rx_ring; 1750 rd->ring = &h->qs[i - h->q_num]->rx_ring;
1753 rd->poll_one = hns_nic_rx_poll_one; 1751 rd->poll_one = hns_nic_rx_poll_one;
1754 rd->ex_process = hns_nic_rx_up_pro; 1752 rd->ex_process = hns_nic_rx_up_pro;
1755 rd->fini_process = hns_nic_rx_fini_pro; 1753 rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
1756 1754
1757 netif_napi_add(priv->netdev, &rd->napi, 1755 netif_napi_add(priv->netdev, &rd->napi,
1758 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); 1756 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1816,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
1816 h = hnae_get_handle(&priv->netdev->dev, 1814 h = hnae_get_handle(&priv->netdev->dev,
1817 priv->ae_node, priv->port_id, NULL); 1815 priv->ae_node, priv->port_id, NULL);
1818 if (IS_ERR_OR_NULL(h)) { 1816 if (IS_ERR_OR_NULL(h)) {
1819 ret = PTR_ERR(h); 1817 ret = -ENODEV;
1820 dev_dbg(priv->dev, "has not handle, register notifier!\n"); 1818 dev_dbg(priv->dev, "has not handle, register notifier!\n");
1821 goto out; 1819 goto out;
1822 } 1820 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 9c3ba65988e1..3d746c887873 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev,
794 (!ops->set_coalesce_frames)) 794 (!ops->set_coalesce_frames))
795 return -ESRCH; 795 return -ESRCH;
796 796
797 ops->set_coalesce_usecs(priv->ae_handle, 797 ret = ops->set_coalesce_usecs(priv->ae_handle,
798 ec->rx_coalesce_usecs); 798 ec->rx_coalesce_usecs);
799 if (ret)
800 return ret;
799 801
800 ret = ops->set_coalesce_frames( 802 ret = ops->set_coalesce_frames(
801 priv->ae_handle, 803 priv->ae_handle,
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
1013 struct phy_device *phy_dev = priv->phy; 1015 struct phy_device *phy_dev = priv->phy;
1014 1016
1015 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); 1017 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
1016 retval = phy_write(phy_dev, HNS_LED_FC_REG, value); 1018 retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
1017 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); 1019 retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
1018 if (retval) { 1020 if (retval) {
1019 netdev_err(netdev, "mdiobus_write fail !\n"); 1021 netdev_err(netdev, "mdiobus_write fail !\n");
1020 return retval; 1022 return retval;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde699ba..ae90d4f12b70 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
3106 return __e1000_maybe_stop_tx(netdev, size); 3106 return __e1000_maybe_stop_tx(netdev, size);
3107} 3107}
3108 3108
3109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 3109#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev) 3111 struct net_device *netdev)
3112{ 3112{
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3256 nr_frags, mss); 3256 nr_frags, mss);
3257 3257
3258 if (count) { 3258 if (count) {
3259 /* The descriptors needed is higher than other Intel drivers
3260 * due to a number of workarounds. The breakdown is below:
3261 * Data descriptors: MAX_SKB_FRAGS + 1
3262 * Context Descriptor: 1
3263 * Keep head from touching tail: 2
3264 * Workarounds: 3
3265 */
3266 int desc_needed = MAX_SKB_FRAGS + 7;
3267
3259 netdev_sent_queue(netdev, skb->len); 3268 netdev_sent_queue(netdev, skb->len);
3260 skb_tx_timestamp(skb); 3269 skb_tx_timestamp(skb);
3261 3270
3262 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3271 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3272
3273 /* 82544 potentially requires twice as many data descriptors
3274 * in order to guarantee buffers don't end on evenly-aligned
3275 * dwords
3276 */
3277 if (adapter->pcix_82544)
3278 desc_needed += MAX_SKB_FRAGS + 1;
3279
3263 /* Make sure there is space in the ring for the next send. */ 3280 /* Make sure there is space in the ring for the next send. */
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3281 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3265 3282
3266 if (!skb->xmit_more || 3283 if (!skb->xmit_more ||
3267 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3284 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 67006431726a..344912957cab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8559,6 +8559,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8560 I40E_FLAG_WB_ON_ITR_CAPABLE | 8560 I40E_FLAG_WB_ON_ITR_CAPABLE |
8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8562 I40E_FLAG_NO_PCI_LINK_CHECK |
8562 I40E_FLAG_100M_SGMII_CAPABLE | 8563 I40E_FLAG_100M_SGMII_CAPABLE |
8563 I40E_FLAG_USE_SET_LLDP_MIB | 8564 I40E_FLAG_USE_SET_LLDP_MIB |
8564 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8565 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 84fa28ceb200..e4949af7dd6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -661,9 +661,7 @@ struct ixgbe_adapter {
661#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 661#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
662#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) 662#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
663#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) 663#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
664#ifdef CONFIG_IXGBE_VXLAN
665#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) 664#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
666#endif
667#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) 665#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
668 666
669 /* Tx fast path data */ 667 /* Tx fast path data */
@@ -675,6 +673,9 @@ struct ixgbe_adapter {
675 int num_rx_queues; 673 int num_rx_queues;
676 u16 rx_itr_setting; 674 u16 rx_itr_setting;
677 675
676 /* Port number used to identify VXLAN traffic */
677 __be16 vxlan_port;
678
678 /* TX */ 679 /* TX */
679 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; 680 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
680 681
@@ -782,9 +783,6 @@ struct ixgbe_adapter {
782 u32 timer_event_accumulator; 783 u32 timer_event_accumulator;
783 u32 vferr_refcount; 784 u32 vferr_refcount;
784 struct ixgbe_mac_addr *mac_table; 785 struct ixgbe_mac_addr *mac_table;
785#ifdef CONFIG_IXGBE_VXLAN
786 u16 vxlan_port;
787#endif
788 struct kobject *info_kobj; 786 struct kobject *info_kobj;
789#ifdef CONFIG_IXGBE_HWMON 787#ifdef CONFIG_IXGBE_HWMON
790 struct hwmon_buff *ixgbe_hwmon_buff; 788 struct hwmon_buff *ixgbe_hwmon_buff;
@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[];
879extern char ixgbe_default_device_descr[]; 877extern char ixgbe_default_device_descr[];
880#endif /* IXGBE_FCOE */ 878#endif /* IXGBE_FCOE */
881 879
880int ixgbe_open(struct net_device *netdev);
881int ixgbe_close(struct net_device *netdev);
882void ixgbe_up(struct ixgbe_adapter *adapter); 882void ixgbe_up(struct ixgbe_adapter *adapter);
883void ixgbe_down(struct ixgbe_adapter *adapter); 883void ixgbe_down(struct ixgbe_adapter *adapter);
884void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 884void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 726e0eeee63b..b3530e1e3ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
2053 2053
2054 if (if_running) 2054 if (if_running)
2055 /* indicate we're in test mode */ 2055 /* indicate we're in test mode */
2056 dev_close(netdev); 2056 ixgbe_close(netdev);
2057 else 2057 else
2058 ixgbe_reset(adapter); 2058 ixgbe_reset(adapter);
2059 2059
@@ -2091,7 +2091,7 @@ skip_loopback:
2091 /* clear testing bit and return adapter to previous state */ 2091 /* clear testing bit and return adapter to previous state */
2092 clear_bit(__IXGBE_TESTING, &adapter->state); 2092 clear_bit(__IXGBE_TESTING, &adapter->state);
2093 if (if_running) 2093 if (if_running)
2094 dev_open(netdev); 2094 ixgbe_open(netdev);
2095 else if (hw->mac.ops.disable_tx_laser) 2095 else if (hw->mac.ops.disable_tx_laser)
2096 hw->mac.ops.disable_tx_laser(hw); 2096 hw->mac.ops.disable_tx_laser(hw);
2097 } else { 2097 } else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 569cb0757c93..7df3fe29b210 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
4531 case ixgbe_mac_X550: 4531 case ixgbe_mac_X550:
4532 case ixgbe_mac_X550EM_x: 4532 case ixgbe_mac_X550EM_x:
4533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); 4533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
4534#ifdef CONFIG_IXGBE_VXLAN
4535 adapter->vxlan_port = 0; 4534 adapter->vxlan_port = 0;
4536#endif
4537 break; 4535 break;
4538 default: 4536 default:
4539 break; 4537 break;
@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5994 * handler is registered with the OS, the watchdog timer is started, 5992 * handler is registered with the OS, the watchdog timer is started,
5995 * and the stack is notified that the interface is ready. 5993 * and the stack is notified that the interface is ready.
5996 **/ 5994 **/
5997static int ixgbe_open(struct net_device *netdev) 5995int ixgbe_open(struct net_device *netdev)
5998{ 5996{
5999 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5997 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6000 struct ixgbe_hw *hw = &adapter->hw; 5998 struct ixgbe_hw *hw = &adapter->hw;
@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6096 * needs to be disabled. A global MAC reset is issued to stop the 6094 * needs to be disabled. A global MAC reset is issued to stop the
6097 * hardware, and all transmit and receive resources are freed. 6095 * hardware, and all transmit and receive resources are freed.
6098 **/ 6096 **/
6099static int ixgbe_close(struct net_device *netdev) 6097int ixgbe_close(struct net_device *netdev)
6100{ 6098{
6101 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6099 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6102 6100
@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7560 struct ipv6hdr *ipv6; 7558 struct ipv6hdr *ipv6;
7561 } hdr; 7559 } hdr;
7562 struct tcphdr *th; 7560 struct tcphdr *th;
7561 unsigned int hlen;
7563 struct sk_buff *skb; 7562 struct sk_buff *skb;
7564#ifdef CONFIG_IXGBE_VXLAN
7565 u8 encap = false;
7566#endif /* CONFIG_IXGBE_VXLAN */
7567 __be16 vlan_id; 7563 __be16 vlan_id;
7564 int l4_proto;
7568 7565
7569 /* if ring doesn't have a interrupt vector, cannot perform ATR */ 7566 /* if ring doesn't have a interrupt vector, cannot perform ATR */
7570 if (!q_vector) 7567 if (!q_vector)
@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7576 7573
7577 ring->atr_count++; 7574 ring->atr_count++;
7578 7575
7576 /* currently only IPv4/IPv6 with TCP is supported */
7577 if ((first->protocol != htons(ETH_P_IP)) &&
7578 (first->protocol != htons(ETH_P_IPV6)))
7579 return;
7580
7579 /* snag network header to get L4 type and address */ 7581 /* snag network header to get L4 type and address */
7580 skb = first->skb; 7582 skb = first->skb;
7581 hdr.network = skb_network_header(skb); 7583 hdr.network = skb_network_header(skb);
7582 if (!skb->encapsulation) {
7583 th = tcp_hdr(skb);
7584 } else {
7585#ifdef CONFIG_IXGBE_VXLAN 7584#ifdef CONFIG_IXGBE_VXLAN
7585 if (skb->encapsulation &&
7586 first->protocol == htons(ETH_P_IP) &&
7587 hdr.ipv4->protocol != IPPROTO_UDP) {
7586 struct ixgbe_adapter *adapter = q_vector->adapter; 7588 struct ixgbe_adapter *adapter = q_vector->adapter;
7587 7589
7588 if (!adapter->vxlan_port) 7590 /* verify the port is recognized as VXLAN */
7589 return; 7591 if (adapter->vxlan_port &&
7590 if (first->protocol != htons(ETH_P_IP) || 7592 udp_hdr(skb)->dest == adapter->vxlan_port)
7591 hdr.ipv4->version != IPVERSION || 7593 hdr.network = skb_inner_network_header(skb);
7592 hdr.ipv4->protocol != IPPROTO_UDP) {
7593 return;
7594 }
7595 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7596 return;
7597 encap = true;
7598 hdr.network = skb_inner_network_header(skb);
7599 th = inner_tcp_hdr(skb);
7600#else
7601 return;
7602#endif /* CONFIG_IXGBE_VXLAN */
7603 } 7594 }
7595#endif /* CONFIG_IXGBE_VXLAN */
7604 7596
7605 /* Currently only IPv4/IPv6 with TCP is supported */ 7597 /* Currently only IPv4/IPv6 with TCP is supported */
7606 switch (hdr.ipv4->version) { 7598 switch (hdr.ipv4->version) {
7607 case IPVERSION: 7599 case IPVERSION:
7608 if (hdr.ipv4->protocol != IPPROTO_TCP) 7600 /* access ihl as u8 to avoid unaligned access on ia64 */
7609 return; 7601 hlen = (hdr.network[0] & 0x0F) << 2;
7602 l4_proto = hdr.ipv4->protocol;
7610 break; 7603 break;
7611 case 6: 7604 case 6:
7612 if (likely((unsigned char *)th - hdr.network == 7605 hlen = hdr.network - skb->data;
7613 sizeof(struct ipv6hdr))) { 7606 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
7614 if (hdr.ipv6->nexthdr != IPPROTO_TCP) 7607 hlen -= hdr.network - skb->data;
7615 return;
7616 } else {
7617 __be16 frag_off;
7618 u8 l4_hdr;
7619
7620 ipv6_skip_exthdr(skb, hdr.network - skb->data +
7621 sizeof(struct ipv6hdr),
7622 &l4_hdr, &frag_off);
7623 if (unlikely(frag_off))
7624 return;
7625 if (l4_hdr != IPPROTO_TCP)
7626 return;
7627 }
7628 break; 7608 break;
7629 default: 7609 default:
7630 return; 7610 return;
7631 } 7611 }
7632 7612
7633 /* skip this packet since it is invalid or the socket is closing */ 7613 if (l4_proto != IPPROTO_TCP)
7634 if (!th || th->fin) 7614 return;
7615
7616 th = (struct tcphdr *)(hdr.network + hlen);
7617
7618 /* skip this packet since the socket is closing */
7619 if (th->fin)
7635 return; 7620 return;
7636 7621
7637 /* sample on all syn packets or once every atr sample count */ 7622 /* sample on all syn packets or once every atr sample count */
@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7682 break; 7667 break;
7683 } 7668 }
7684 7669
7685#ifdef CONFIG_IXGBE_VXLAN 7670 if (hdr.network != skb_network_header(skb))
7686 if (encap)
7687 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; 7671 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7688#endif /* CONFIG_IXGBE_VXLAN */
7689 7672
7690 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 7673 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
7691 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, 7674 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8209static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, 8192static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8210 struct tc_cls_u32_offload *cls) 8193 struct tc_cls_u32_offload *cls)
8211{ 8194{
8195 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8196 u32 loc;
8212 int err; 8197 int err;
8213 8198
8199 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8200 return -EINVAL;
8201
8202 loc = cls->knode.handle & 0xfffff;
8203
8214 spin_lock(&adapter->fdir_perfect_lock); 8204 spin_lock(&adapter->fdir_perfect_lock);
8215 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); 8205 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8216 spin_unlock(&adapter->fdir_perfect_lock); 8206 spin_unlock(&adapter->fdir_perfect_lock);
8217 return err; 8207 return err;
8218} 8208}
@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8221 __be16 protocol, 8211 __be16 protocol,
8222 struct tc_cls_u32_offload *cls) 8212 struct tc_cls_u32_offload *cls)
8223{ 8213{
8214 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8215
8216 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8217 return -EINVAL;
8218
8224 /* This ixgbe devices do not support hash tables at the moment 8219 /* This ixgbe devices do not support hash tables at the moment
8225 * so abort when given hash tables. 8220 * so abort when given hash tables.
8226 */ 8221 */
8227 if (cls->hnode.divisor > 0) 8222 if (cls->hnode.divisor > 0)
8228 return -EINVAL; 8223 return -EINVAL;
8229 8224
8230 set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); 8225 set_bit(uhtid - 1, &adapter->tables);
8231 return 0; 8226 return 0;
8232} 8227}
8233 8228
8234static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, 8229static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8235 struct tc_cls_u32_offload *cls) 8230 struct tc_cls_u32_offload *cls)
8236{ 8231{
8237 clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); 8232 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8233
8234 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8235 return -EINVAL;
8236
8237 clear_bit(uhtid - 1, &adapter->tables);
8238 return 0; 8238 return 0;
8239} 8239}
8240 8240
@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8252#endif 8252#endif
8253 int i, err = 0; 8253 int i, err = 0;
8254 u8 queue; 8254 u8 queue;
8255 u32 handle; 8255 u32 uhtid, link_uhtid;
8256 8256
8257 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 8257 memset(&mask, 0, sizeof(union ixgbe_atr_input));
8258 handle = cls->knode.handle; 8258 uhtid = TC_U32_USERHTID(cls->knode.handle);
8259 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8259 8260
8260 /* At the moment cls_u32 jumps to transport layer and skips past 8261 /* At the moment cls_u32 jumps to network layer and skips past
8261 * L2 headers. The canonical method to match L2 frames is to use 8262 * L2 headers. The canonical method to match L2 frames is to use
8262 * negative values. However this is error prone at best but really 8263 * negative values. However this is error prone at best but really
8263 * just broken because there is no way to "know" what sort of hdr 8264 * just broken because there is no way to "know" what sort of hdr
8264 * is in front of the transport layer. Fix cls_u32 to support L2 8265 * is in front of the network layer. Fix cls_u32 to support L2
8265 * headers when needed. 8266 * headers when needed.
8266 */ 8267 */
8267 if (protocol != htons(ETH_P_IP)) 8268 if (protocol != htons(ETH_P_IP))
8268 return -EINVAL; 8269 return -EINVAL;
8269 8270
8270 if (cls->knode.link_handle || 8271 if (link_uhtid) {
8271 cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
8272 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; 8272 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
8273 u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8274 8273
8275 if (!test_bit(uhtid, &adapter->tables)) 8274 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
8275 return -EINVAL;
8276
8277 if (!test_bit(link_uhtid - 1, &adapter->tables))
8276 return -EINVAL; 8278 return -EINVAL;
8277 8279
8278 for (i = 0; nexthdr[i].jump; i++) { 8280 for (i = 0; nexthdr[i].jump; i++) {
@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8288 nexthdr->mask != cls->knode.sel->keys[0].mask) 8290 nexthdr->mask != cls->knode.sel->keys[0].mask)
8289 return -EINVAL; 8291 return -EINVAL;
8290 8292
8291 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 8293 adapter->jump_tables[link_uhtid] = nexthdr->jump;
8292 return -EINVAL;
8293
8294 adapter->jump_tables[uhtid] = nexthdr->jump;
8295 } 8294 }
8296 return 0; 8295 return 0;
8297 } 8296 }
@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8308 * To add support for new nodes update ixgbe_model.h parse structures 8307 * To add support for new nodes update ixgbe_model.h parse structures
8309 * this function _should_ be generic try not to hardcode values here. 8308 * this function _should_ be generic try not to hardcode values here.
8310 */ 8309 */
8311 if (TC_U32_USERHTID(handle) == 0x800) { 8310 if (uhtid == 0x800) {
8312 field_ptr = adapter->jump_tables[0]; 8311 field_ptr = adapter->jump_tables[0];
8313 } else { 8312 } else {
8314 if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) 8313 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8315 return -EINVAL; 8314 return -EINVAL;
8316 8315
8317 field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; 8316 field_ptr = adapter->jump_tables[uhtid];
8318 } 8317 }
8319 8318
8320 if (!field_ptr) 8319 if (!field_ptr)
@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8332 int j; 8331 int j;
8333 8332
8334 for (j = 0; field_ptr[j].val; j++) { 8333 for (j = 0; field_ptr[j].val; j++) {
8335 if (field_ptr[j].off == off && 8334 if (field_ptr[j].off == off) {
8336 field_ptr[j].mask == m) {
8337 field_ptr[j].val(input, &mask, val, m); 8335 field_ptr[j].val(input, &mask, val, m);
8338 input->filter.formatted.flow_type |= 8336 input->filter.formatted.flow_type |=
8339 field_ptr[j].type; 8337 field_ptr[j].type;
@@ -8393,8 +8391,8 @@ err_out:
8393 return -EINVAL; 8391 return -EINVAL;
8394} 8392}
8395 8393
8396int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 8394static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
8397 struct tc_to_netdev *tc) 8395 struct tc_to_netdev *tc)
8398{ 8396{
8399 struct ixgbe_adapter *adapter = netdev_priv(dev); 8397 struct ixgbe_adapter *adapter = netdev_priv(dev);
8400 8398
@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8554{ 8552{
8555 struct ixgbe_adapter *adapter = netdev_priv(dev); 8553 struct ixgbe_adapter *adapter = netdev_priv(dev);
8556 struct ixgbe_hw *hw = &adapter->hw; 8554 struct ixgbe_hw *hw = &adapter->hw;
8557 u16 new_port = ntohs(port);
8558 8555
8559 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8556 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8560 return; 8557 return;
@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8562 if (sa_family == AF_INET6) 8559 if (sa_family == AF_INET6)
8563 return; 8560 return;
8564 8561
8565 if (adapter->vxlan_port == new_port) 8562 if (adapter->vxlan_port == port)
8566 return; 8563 return;
8567 8564
8568 if (adapter->vxlan_port) { 8565 if (adapter->vxlan_port) {
8569 netdev_info(dev, 8566 netdev_info(dev,
8570 "Hit Max num of VXLAN ports, not adding port %d\n", 8567 "Hit Max num of VXLAN ports, not adding port %d\n",
8571 new_port); 8568 ntohs(port));
8572 return; 8569 return;
8573 } 8570 }
8574 8571
8575 adapter->vxlan_port = new_port; 8572 adapter->vxlan_port = port;
8576 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port); 8573 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
8577} 8574}
8578 8575
8579/** 8576/**
@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8586 __be16 port) 8583 __be16 port)
8587{ 8584{
8588 struct ixgbe_adapter *adapter = netdev_priv(dev); 8585 struct ixgbe_adapter *adapter = netdev_priv(dev);
8589 u16 new_port = ntohs(port);
8590 8586
8591 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8587 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8592 return; 8588 return;
@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8594 if (sa_family == AF_INET6) 8590 if (sa_family == AF_INET6)
8595 return; 8591 return;
8596 8592
8597 if (adapter->vxlan_port != new_port) { 8593 if (adapter->vxlan_port != port) {
8598 netdev_info(dev, "Port %d was not found, not deleting\n", 8594 netdev_info(dev, "Port %d was not found, not deleting\n",
8599 new_port); 8595 ntohs(port));
8600 return; 8596 return;
8601 } 8597 }
8602 8598
@@ -9265,17 +9261,6 @@ skip_sriov:
9265 netdev->priv_flags |= IFF_UNICAST_FLT; 9261 netdev->priv_flags |= IFF_UNICAST_FLT;
9266 netdev->priv_flags |= IFF_SUPP_NOFCS; 9262 netdev->priv_flags |= IFF_SUPP_NOFCS;
9267 9263
9268#ifdef CONFIG_IXGBE_VXLAN
9269 switch (adapter->hw.mac.type) {
9270 case ixgbe_mac_X550:
9271 case ixgbe_mac_X550EM_x:
9272 netdev->hw_enc_features |= NETIF_F_RXCSUM;
9273 break;
9274 default:
9275 break;
9276 }
9277#endif /* CONFIG_IXGBE_VXLAN */
9278
9279#ifdef CONFIG_IXGBE_DCB 9264#ifdef CONFIG_IXGBE_DCB
9280 netdev->dcbnl_ops = &dcbnl_ops; 9265 netdev->dcbnl_ops = &dcbnl_ops;
9281#endif 9266#endif
@@ -9329,6 +9314,8 @@ skip_sriov:
9329 goto err_sw_init; 9314 goto err_sw_init;
9330 } 9315 }
9331 9316
9317 /* Set hw->mac.addr to permanent MAC address */
9318 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
9332 ixgbe_mac_set_default_filter(adapter); 9319 ixgbe_mac_set_default_filter(adapter);
9333 9320
9334 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 9321 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index ce48872d4782..74c53ad9d268 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -32,7 +32,6 @@
32 32
33struct ixgbe_mat_field { 33struct ixgbe_mat_field {
34 unsigned int off; 34 unsigned int off;
35 unsigned int mask;
36 int (*val)(struct ixgbe_fdir_filter *input, 35 int (*val)(struct ixgbe_fdir_filter *input,
37 union ixgbe_atr_input *mask, 36 union ixgbe_atr_input *mask,
38 u32 val, u32 m); 37 u32 val, u32 m);
@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
58} 57}
59 58
60static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { 59static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
61 { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, 60 { .off = 12, .val = ixgbe_mat_prgm_sip,
62 .type = IXGBE_ATR_FLOW_TYPE_IPV4}, 61 .type = IXGBE_ATR_FLOW_TYPE_IPV4},
63 { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, 62 { .off = 16, .val = ixgbe_mat_prgm_dip,
64 .type = IXGBE_ATR_FLOW_TYPE_IPV4}, 63 .type = IXGBE_ATR_FLOW_TYPE_IPV4},
65 { .val = NULL } /* terminal node */ 64 { .val = NULL } /* terminal node */
66}; 65};
67 66
68static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, 67static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
69 union ixgbe_atr_input *mask, 68 union ixgbe_atr_input *mask,
70 u32 val, u32 m) 69 u32 val, u32 m)
71{ 70{
72 input->filter.formatted.src_port = val & 0xffff; 71 input->filter.formatted.src_port = val & 0xffff;
73 mask->formatted.src_port = m & 0xffff; 72 mask->formatted.src_port = m & 0xffff;
74 return 0; 73 input->filter.formatted.dst_port = val >> 16;
75}; 74 mask->formatted.dst_port = m >> 16;
76 75
77static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
78 union ixgbe_atr_input *mask,
79 u32 val, u32 m)
80{
81 input->filter.formatted.dst_port = val & 0xffff;
82 mask->formatted.dst_port = m & 0xffff;
83 return 0; 76 return 0;
84}; 77};
85 78
86static struct ixgbe_mat_field ixgbe_tcp_fields[] = { 79static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
87 {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, 80 {.off = 0, .val = ixgbe_mat_prgm_ports,
88 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
89 {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
90 .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, 81 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
91 { .val = NULL } /* terminal node */ 82 { .val = NULL } /* terminal node */
92}; 83};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 87aca3f7c3de..68a9c646498e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
355 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); 355 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
356 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) 356 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
357 break; 357 break;
358 usleep_range(10, 20); 358 udelay(10);
359 } 359 }
360 if (ctrl) 360 if (ctrl)
361 *ctrl = command; 361 *ctrl = command;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c48aef613b0a..d7aa4b203f40 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
680 680
681 if (if_running) 681 if (if_running)
682 /* indicate we're in test mode */ 682 /* indicate we're in test mode */
683 dev_close(netdev); 683 ixgbevf_close(netdev);
684 else 684 else
685 ixgbevf_reset(adapter); 685 ixgbevf_reset(adapter);
686 686
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
692 692
693 clear_bit(__IXGBEVF_TESTING, &adapter->state); 693 clear_bit(__IXGBEVF_TESTING, &adapter->state);
694 if (if_running) 694 if (if_running)
695 dev_open(netdev); 695 ixgbevf_open(netdev);
696 } else { 696 } else {
697 hw_dbg(&adapter->hw, "online testing starting\n"); 697 hw_dbg(&adapter->hw, "online testing starting\n");
698 /* Online tests */ 698 /* Online tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 68ec7daa04fd..991eeae81473 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
486extern const char ixgbevf_driver_name[]; 486extern const char ixgbevf_driver_name[];
487extern const char ixgbevf_driver_version[]; 487extern const char ixgbevf_driver_version[];
488 488
489int ixgbevf_open(struct net_device *netdev);
490int ixgbevf_close(struct net_device *netdev);
489void ixgbevf_up(struct ixgbevf_adapter *adapter); 491void ixgbevf_up(struct ixgbevf_adapter *adapter);
490void ixgbevf_down(struct ixgbevf_adapter *adapter); 492void ixgbevf_down(struct ixgbevf_adapter *adapter);
491void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 493void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ea14c0a2e74..b0edae94d73d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3122 * handler is registered with the OS, the watchdog timer is started, 3122 * handler is registered with the OS, the watchdog timer is started,
3123 * and the stack is notified that the interface is ready. 3123 * and the stack is notified that the interface is ready.
3124 **/ 3124 **/
3125static int ixgbevf_open(struct net_device *netdev) 3125int ixgbevf_open(struct net_device *netdev)
3126{ 3126{
3127 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3127 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3128 struct ixgbe_hw *hw = &adapter->hw; 3128 struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3205,7 @@ err_setup_reset:
3205 * needs to be disabled. A global MAC reset is issued to stop the 3205 * needs to be disabled. A global MAC reset is issued to stop the
3206 * hardware, and all transmit and receive resources are freed. 3206 * hardware, and all transmit and receive resources are freed.
3207 **/ 3207 **/
3208static int ixgbevf_close(struct net_device *netdev) 3208int ixgbevf_close(struct net_device *netdev)
3209{ 3209{
3210 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3210 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3211 3211
@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3692 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3692 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3693 struct ixgbe_hw *hw = &adapter->hw; 3693 struct ixgbe_hw *hw = &adapter->hw;
3694 struct sockaddr *addr = p; 3694 struct sockaddr *addr = p;
3695 int err;
3695 3696
3696 if (!is_valid_ether_addr(addr->sa_data)) 3697 if (!is_valid_ether_addr(addr->sa_data))
3697 return -EADDRNOTAVAIL; 3698 return -EADDRNOTAVAIL;
3698 3699
3699 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3700 ether_addr_copy(hw->mac.addr, addr->sa_data);
3701
3702 spin_lock_bh(&adapter->mbx_lock); 3700 spin_lock_bh(&adapter->mbx_lock);
3703 3701
3704 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3702 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
3705 3703
3706 spin_unlock_bh(&adapter->mbx_lock); 3704 spin_unlock_bh(&adapter->mbx_lock);
3707 3705
3706 if (err)
3707 return -EPERM;
3708
3709 ether_addr_copy(hw->mac.addr, addr->sa_data);
3710 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3711
3708 return 0; 3712 return 0;
3709} 3713}
3710 3714
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61a98f4c5746..4d613a4f2a7f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
408 408
409 /* if nacked the address was rejected, use "perm_addr" */ 409 /* if nacked the address was rejected, use "perm_addr" */
410 if (!ret_val && 410 if (!ret_val &&
411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) 411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); 412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
413 return IXGBE_ERR_MBX;
414 }
413 415
414 return ret_val; 416 return ret_val;
415} 417}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 577f7ca7deba..7fc490225da5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -260,7 +260,6 @@
260 260
261#define MVNETA_VLAN_TAG_LEN 4 261#define MVNETA_VLAN_TAG_LEN 4
262 262
263#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
264#define MVNETA_TX_CSUM_DEF_SIZE 1600 263#define MVNETA_TX_CSUM_DEF_SIZE 1600
265#define MVNETA_TX_CSUM_MAX_SIZE 9800 264#define MVNETA_TX_CSUM_MAX_SIZE 9800
266#define MVNETA_ACC_MODE_EXT1 1 265#define MVNETA_ACC_MODE_EXT1 1
@@ -300,7 +299,7 @@
300#define MVNETA_RX_PKT_SIZE(mtu) \ 299#define MVNETA_RX_PKT_SIZE(mtu) \
301 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ 300 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
302 ETH_HLEN + ETH_FCS_LEN, \ 301 ETH_HLEN + ETH_FCS_LEN, \
303 MVNETA_CPU_D_CACHE_LINE_SIZE) 302 cache_line_size())
304 303
305#define IS_TSO_HEADER(txq, addr) \ 304#define IS_TSO_HEADER(txq, addr) \
306 ((addr >= txq->tso_hdrs_phys) && \ 305 ((addr >= txq->tso_hdrs_phys) && \
@@ -2764,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
2764 if (rxq->descs == NULL) 2763 if (rxq->descs == NULL)
2765 return -ENOMEM; 2764 return -ENOMEM;
2766 2765
2767 BUG_ON(rxq->descs !=
2768 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2769
2770 rxq->last_desc = rxq->size - 1; 2766 rxq->last_desc = rxq->size - 1;
2771 2767
2772 /* Set Rx descriptors queue starting address */ 2768 /* Set Rx descriptors queue starting address */
@@ -2837,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2837 if (txq->descs == NULL) 2833 if (txq->descs == NULL)
2838 return -ENOMEM; 2834 return -ENOMEM;
2839 2835
2840 /* Make sure descriptor address is cache line size aligned */
2841 BUG_ON(txq->descs !=
2842 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2843
2844 txq->last_desc = txq->size - 1; 2836 txq->last_desc = txq->size - 1;
2845 2837
2846 /* Set maximum bandwidth for enabled TXQs */ 2838 /* Set maximum bandwidth for enabled TXQs */
@@ -3050,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
3050 return mtu; 3042 return mtu;
3051} 3043}
3052 3044
3045static void mvneta_percpu_enable(void *arg)
3046{
3047 struct mvneta_port *pp = arg;
3048
3049 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3050}
3051
3052static void mvneta_percpu_disable(void *arg)
3053{
3054 struct mvneta_port *pp = arg;
3055
3056 disable_percpu_irq(pp->dev->irq);
3057}
3058
3053/* Change the device mtu */ 3059/* Change the device mtu */
3054static int mvneta_change_mtu(struct net_device *dev, int mtu) 3060static int mvneta_change_mtu(struct net_device *dev, int mtu)
3055{ 3061{
@@ -3074,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
3074 * reallocation of the queues 3080 * reallocation of the queues
3075 */ 3081 */
3076 mvneta_stop_dev(pp); 3082 mvneta_stop_dev(pp);
3083 on_each_cpu(mvneta_percpu_disable, pp, true);
3077 3084
3078 mvneta_cleanup_txqs(pp); 3085 mvneta_cleanup_txqs(pp);
3079 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
@@ -3097,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
3097 return ret; 3104 return ret;
3098 } 3105 }
3099 3106
3107 on_each_cpu(mvneta_percpu_enable, pp, true);
3100 mvneta_start_dev(pp); 3108 mvneta_start_dev(pp);
3101 mvneta_port_up(pp); 3109 mvneta_port_up(pp);
3102 3110
@@ -3250,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
3250 pp->phy_dev = NULL; 3258 pp->phy_dev = NULL;
3251} 3259}
3252 3260
3253static void mvneta_percpu_enable(void *arg)
3254{
3255 struct mvneta_port *pp = arg;
3256
3257 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3258}
3259
3260static void mvneta_percpu_disable(void *arg)
3261{
3262 struct mvneta_port *pp = arg;
3263
3264 disable_percpu_irq(pp->dev->irq);
3265}
3266
3267/* Electing a CPU must be done in an atomic way: it should be done 3261/* Electing a CPU must be done in an atomic way: it should be done
3268 * after or before the removal/insertion of a CPU and this function is 3262 * after or before the removal/insertion of a CPU and this function is
3269 * not reentrant. 3263 * not reentrant.
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index c797971aefab..868a957f24bb 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -321,7 +321,6 @@
321/* Lbtd 802.3 type */ 321/* Lbtd 802.3 type */
322#define MVPP2_IP_LBDT_TYPE 0xfffa 322#define MVPP2_IP_LBDT_TYPE 0xfffa
323 323
324#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
325#define MVPP2_TX_CSUM_MAX_SIZE 9800 324#define MVPP2_TX_CSUM_MAX_SIZE 9800
326 325
327/* Timeout constants */ 326/* Timeout constants */
@@ -377,7 +376,7 @@
377 376
378#define MVPP2_RX_PKT_SIZE(mtu) \ 377#define MVPP2_RX_PKT_SIZE(mtu) \
379 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 378 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
380 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 379 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
381 380
382#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 381#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
383#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 382#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4493 if (!aggr_txq->descs) 4492 if (!aggr_txq->descs)
4494 return -ENOMEM; 4493 return -ENOMEM;
4495 4494
4496 /* Make sure descriptor address is cache line size aligned */
4497 BUG_ON(aggr_txq->descs !=
4498 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4499
4500 aggr_txq->last_desc = aggr_txq->size - 1; 4495 aggr_txq->last_desc = aggr_txq->size - 1;
4501 4496
4502 /* Aggr TXQ no reset WA */ 4497 /* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4526 if (!rxq->descs) 4521 if (!rxq->descs)
4527 return -ENOMEM; 4522 return -ENOMEM;
4528 4523
4529 BUG_ON(rxq->descs !=
4530 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4531
4532 rxq->last_desc = rxq->size - 1; 4524 rxq->last_desc = rxq->size - 1;
4533 4525
4534 /* Zero occupied and non-occupied counters - direct access */ 4526 /* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4616 if (!txq->descs) 4608 if (!txq->descs)
4617 return -ENOMEM; 4609 return -ENOMEM;
4618 4610
4619 /* Make sure descriptor address is cache line size aligned */
4620 BUG_ON(txq->descs !=
4621 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4622
4623 txq->last_desc = txq->size - 1; 4611 txq->last_desc = txq->size - 1;
4624 4612
4625 /* Set Tx descriptors queue starting address - indirect access */ 4613 /* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
6059 6047
6060 /* Map physical Rx queue to port's logical Rx queue */ 6048 /* Map physical Rx queue to port's logical Rx queue */
6061 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 6049 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6062 if (!rxq) 6050 if (!rxq) {
6051 err = -ENOMEM;
6063 goto err_free_percpu; 6052 goto err_free_percpu;
6053 }
6064 /* Map this Rx queue to a physical queue */ 6054 /* Map this Rx queue to a physical queue */
6065 rxq->id = port->first_rxq + queue; 6055 rxq->id = port->first_rxq + queue;
6066 rxq->port = port->id; 6056 rxq->port = port->id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index ffd0accc2ec9..2017b0121f5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2750,7 +2750,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
2750int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2750int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2751 enum qed_int_mode int_mode) 2751 enum qed_int_mode int_mode)
2752{ 2752{
2753 int rc; 2753 int rc = 0;
2754 2754
2755 /* Configure AEU signal change to produce attentions */ 2755 /* Configure AEU signal change to produce attentions */
2756 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 2756 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef332708e5f2..6d31f92ef2b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "1.00.00.34" 21#define DRV_VERSION "1.00.00.35"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4e1a7dba7c4a..087e14a3fba7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1377,11 +1377,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1377 1377
1378 /* TAG and timestamp required flag */ 1378 /* TAG and timestamp required flag */
1379 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1379 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1380 skb_tx_timestamp(skb);
1381 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; 1380 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1382 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); 1381 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
1383 } 1382 }
1384 1383
1384 skb_tx_timestamp(skb);
1385 /* Descriptor type must be set after all the above writes */ 1385 /* Descriptor type must be set after all the above writes */
1386 dma_wmb(); 1386 dma_wmb();
1387 desc->die_dt = DT_FEND; 1387 desc->die_dt = DT_FEND;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b02eed12bfc5..73427e29df2a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
155 return 0; 155 return 0;
156 156
157err_rx_irq_unmap: 157err_rx_irq_unmap:
158 while (--i) 158 while (i--)
159 irq_dispose_mapping(priv->rxq[i]->irq_no); 159 irq_dispose_mapping(priv->rxq[i]->irq_no);
160 i = SXGBE_TX_QUEUES; 160 i = SXGBE_TX_QUEUES;
161err_tx_irq_unmap: 161err_tx_irq_unmap:
162 while (--i) 162 while (i--)
163 irq_dispose_mapping(priv->txq[i]->irq_no); 163 irq_dispose_mapping(priv->txq[i]->irq_no);
164 irq_dispose_mapping(priv->irq); 164 irq_dispose_mapping(priv->irq);
165err_drv_remove: 165err_drv_remove:
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index e13228f115f0..011386f6f24d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -199,11 +199,6 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
199{ 199{
200 unsigned int tdes1 = p->des1; 200 unsigned int tdes1 = p->des1;
201 201
202 if (mode == STMMAC_CHAIN_MODE)
203 norm_set_tx_desc_len_on_chain(p, len);
204 else
205 norm_set_tx_desc_len_on_ring(p, len);
206
207 if (is_fs) 202 if (is_fs)
208 tdes1 |= TDES1_FIRST_SEGMENT; 203 tdes1 |= TDES1_FIRST_SEGMENT;
209 else 204 else
@@ -217,10 +212,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
217 if (ls) 212 if (ls)
218 tdes1 |= TDES1_LAST_SEGMENT; 213 tdes1 |= TDES1_LAST_SEGMENT;
219 214
220 if (tx_own)
221 tdes1 |= TDES0_OWN;
222
223 p->des1 = tdes1; 215 p->des1 = tdes1;
216
217 if (mode == STMMAC_CHAIN_MODE)
218 norm_set_tx_desc_len_on_chain(p, len);
219 else
220 norm_set_tx_desc_len_on_ring(p, len);
221
222 if (tx_own)
223 p->des0 |= TDES0_OWN;
224} 224}
225 225
226static void ndesc_set_tx_ic(struct dma_desc *p) 226static void ndesc_set_tx_ic(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c5ce9848ca9..fcbd4be562e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -278,7 +278,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
278 */ 278 */
279bool stmmac_eee_init(struct stmmac_priv *priv) 279bool stmmac_eee_init(struct stmmac_priv *priv)
280{ 280{
281 char *phy_bus_name = priv->plat->phy_bus_name;
282 unsigned long flags; 281 unsigned long flags;
283 bool ret = false; 282 bool ret = false;
284 283
@@ -289,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
289 (priv->pcs == STMMAC_PCS_RTBI)) 288 (priv->pcs == STMMAC_PCS_RTBI))
290 goto out; 289 goto out;
291 290
292 /* Never init EEE in case of a switch is attached */
293 if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
294 goto out;
295
296 /* MAC core supports the EEE feature. */ 291 /* MAC core supports the EEE feature. */
297 if (priv->dma_cap.eee) { 292 if (priv->dma_cap.eee) {
298 int tx_lpi_timer = priv->tx_lpi_timer; 293 int tx_lpi_timer = priv->tx_lpi_timer;
@@ -772,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev)
772 767
773 spin_unlock_irqrestore(&priv->lock, flags); 768 spin_unlock_irqrestore(&priv->lock, flags);
774 769
775 /* At this stage, it could be needed to setup the EEE or adjust some 770 if (phydev->is_pseudo_fixed_link)
776 * MAC related HW registers. 771 /* Stop PHY layer to call the hook to adjust the link in case
777 */ 772 * of a switch is attached to the stmmac driver.
778 priv->eee_enabled = stmmac_eee_init(priv); 773 */
774 phydev->irq = PHY_IGNORE_INTERRUPT;
775 else
776 /* At this stage, init the EEE if supported.
777 * Never called in case of fixed_link.
778 */
779 priv->eee_enabled = stmmac_eee_init(priv);
779} 780}
780 781
781/** 782/**
@@ -827,12 +828,8 @@ static int stmmac_init_phy(struct net_device *dev)
827 phydev = of_phy_connect(dev, priv->plat->phy_node, 828 phydev = of_phy_connect(dev, priv->plat->phy_node,
828 &stmmac_adjust_link, 0, interface); 829 &stmmac_adjust_link, 0, interface);
829 } else { 830 } else {
830 if (priv->plat->phy_bus_name) 831 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
831 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", 832 priv->plat->bus_id);
832 priv->plat->phy_bus_name, priv->plat->bus_id);
833 else
834 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
835 priv->plat->bus_id);
836 833
837 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 834 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
838 priv->plat->phy_addr); 835 priv->plat->phy_addr);
@@ -870,11 +867,6 @@ static int stmmac_init_phy(struct net_device *dev)
870 return -ENODEV; 867 return -ENODEV;
871 } 868 }
872 869
873 /* If attached to a switch, there is no reason to poll phy handler */
874 if (priv->plat->phy_bus_name)
875 if (!strcmp(priv->plat->phy_bus_name, "fixed"))
876 phydev->irq = PHY_IGNORE_INTERRUPT;
877
878 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 870 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
879 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); 871 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
880 872
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ea76129dafc2..06704ca6f9ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev)
198 struct mii_bus *new_bus; 198 struct mii_bus *new_bus;
199 struct stmmac_priv *priv = netdev_priv(ndev); 199 struct stmmac_priv *priv = netdev_priv(ndev);
200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
201 int addr, found;
202 struct device_node *mdio_node = priv->plat->mdio_node; 201 struct device_node *mdio_node = priv->plat->mdio_node;
202 int addr, found;
203 203
204 if (!mdio_bus_data) 204 if (!mdio_bus_data)
205 return 0; 205 return 0;
206 206
207 if (IS_ENABLED(CONFIG_OF)) {
208 if (mdio_node) {
209 netdev_dbg(ndev, "FOUND MDIO subnode\n");
210 } else {
211 netdev_warn(ndev, "No MDIO subnode found\n");
212 }
213 }
214
215 new_bus = mdiobus_alloc(); 207 new_bus = mdiobus_alloc();
216 if (new_bus == NULL) 208 if (new_bus == NULL)
217 return -ENOMEM; 209 return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dcbd2a1601e8..cf37ea558ecc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -132,6 +132,69 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
132} 132}
133 133
134/** 134/**
135 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
136 * @plat: driver data platform structure
137 * @np: device tree node
138 * @dev: device pointer
139 * Description:
140 * The mdio bus will be allocated in case of a phy transceiver is on board;
141 * it will be NULL if the fixed-link is configured.
142 * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
143 * in any case (for DSA, mdio must be registered even if fixed-link).
144 * The table below sums the supported configurations:
145 * -------------------------------
146 * snps,phy-addr | Y
147 * -------------------------------
148 * phy-handle | Y
149 * -------------------------------
150 * fixed-link | N
151 * -------------------------------
152 * snps,dwmac-mdio |
153 * even if | Y
154 * fixed-link |
155 * -------------------------------
156 *
157 * It returns 0 in case of success otherwise -ENODEV.
158 */
159static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
160 struct device_node *np, struct device *dev)
161{
162 bool mdio = true;
163
164 /* If phy-handle property is passed from DT, use it as the PHY */
165 plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
166 if (plat->phy_node)
167 dev_dbg(dev, "Found phy-handle subnode\n");
168
169 /* If phy-handle is not specified, check if we have a fixed-phy */
170 if (!plat->phy_node && of_phy_is_fixed_link(np)) {
171 if ((of_phy_register_fixed_link(np) < 0))
172 return -ENODEV;
173
174 dev_dbg(dev, "Found fixed-link subnode\n");
175 plat->phy_node = of_node_get(np);
176 mdio = false;
177 }
178
179 /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
180 for_each_child_of_node(np, plat->mdio_node) {
181 if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
182 break;
183 }
184
185 if (plat->mdio_node) {
186 dev_dbg(dev, "Found MDIO subnode\n");
187 mdio = true;
188 }
189
190 if (mdio)
191 plat->mdio_bus_data =
192 devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
193 GFP_KERNEL);
194 return 0;
195}
196
197/**
135 * stmmac_probe_config_dt - parse device-tree driver parameters 198 * stmmac_probe_config_dt - parse device-tree driver parameters
136 * @pdev: platform_device structure 199 * @pdev: platform_device structure
137 * @plat: driver data platform structure 200 * @plat: driver data platform structure
@@ -146,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
146 struct device_node *np = pdev->dev.of_node; 209 struct device_node *np = pdev->dev.of_node;
147 struct plat_stmmacenet_data *plat; 210 struct plat_stmmacenet_data *plat;
148 struct stmmac_dma_cfg *dma_cfg; 211 struct stmmac_dma_cfg *dma_cfg;
149 struct device_node *child_node = NULL;
150 212
151 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 213 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
152 if (!plat) 214 if (!plat)
@@ -166,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
166 /* Default to phy auto-detection */ 228 /* Default to phy auto-detection */
167 plat->phy_addr = -1; 229 plat->phy_addr = -1;
168 230
169 /* If we find a phy-handle property, use it as the PHY */
170 plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
171
172 /* If phy-handle is not specified, check if we have a fixed-phy */
173 if (!plat->phy_node && of_phy_is_fixed_link(np)) {
174 if ((of_phy_register_fixed_link(np) < 0))
175 return ERR_PTR(-ENODEV);
176
177 plat->phy_node = of_node_get(np);
178 }
179
180 for_each_child_of_node(np, child_node)
181 if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
182 plat->mdio_node = child_node;
183 break;
184 }
185
186 /* "snps,phy-addr" is not a standard property. Mark it as deprecated 231 /* "snps,phy-addr" is not a standard property. Mark it as deprecated
187 * and warn of its use. Remove this when phy node support is added. 232 * and warn of its use. Remove this when phy node support is added.
188 */ 233 */
189 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) 234 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
190 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); 235 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
191 236
192 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) 237 /* To Configure PHY by using all device-tree supported properties */
193 plat->mdio_bus_data = NULL; 238 if (stmmac_dt_phy(plat, np, &pdev->dev))
194 else 239 return ERR_PTR(-ENODEV);
195 plat->mdio_bus_data =
196 devm_kzalloc(&pdev->dev,
197 sizeof(struct stmmac_mdio_bus_data),
198 GFP_KERNEL);
199 240
200 of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); 241 of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
201 242
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b881a7b1e4f6..9636da0b6efc 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -339,6 +339,8 @@ static struct phy_driver bcm7xxx_driver[] = {
339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), 339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), 340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), 341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
342 BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
343 BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
342 BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), 344 BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
343 BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), 345 BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
344 BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), 346 BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
@@ -348,6 +350,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
348 { PHY_ID_BCM7250, 0xfffffff0, }, 350 { PHY_ID_BCM7250, 0xfffffff0, },
349 { PHY_ID_BCM7364, 0xfffffff0, }, 351 { PHY_ID_BCM7364, 0xfffffff0, },
350 { PHY_ID_BCM7366, 0xfffffff0, }, 352 { PHY_ID_BCM7366, 0xfffffff0, },
353 { PHY_ID_BCM7346, 0xfffffff0, },
354 { PHY_ID_BCM7362, 0xfffffff0, },
351 { PHY_ID_BCM7425, 0xfffffff0, }, 355 { PHY_ID_BCM7425, 0xfffffff0, },
352 { PHY_ID_BCM7429, 0xfffffff0, }, 356 { PHY_ID_BCM7429, 0xfffffff0, },
353 { PHY_ID_BCM7439, 0xfffffff0, }, 357 { PHY_ID_BCM7439, 0xfffffff0, },
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26c64d2782fa..a0f64cba86ba 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1198,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1198 goto err_dev_open; 1198 goto err_dev_open;
1199 } 1199 }
1200 1200
1201 dev_uc_sync_multiple(port_dev, dev);
1202 dev_mc_sync_multiple(port_dev, dev);
1203
1201 err = vlan_vids_add_by_dev(port_dev, dev); 1204 err = vlan_vids_add_by_dev(port_dev, dev);
1202 if (err) { 1205 if (err) {
1203 netdev_err(dev, "Failed to add vlan ids to device %s\n", 1206 netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1261,6 +1264,8 @@ err_enable_netpoll:
1261 vlan_vids_del_by_dev(port_dev, dev); 1264 vlan_vids_del_by_dev(port_dev, dev);
1262 1265
1263err_vids_add: 1266err_vids_add:
1267 dev_uc_unsync(port_dev, dev);
1268 dev_mc_unsync(port_dev, dev);
1264 dev_close(port_dev); 1269 dev_close(port_dev);
1265 1270
1266err_dev_open: 1271err_dev_open:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afdf950617c3..2c9e45f50edb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -622,7 +622,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
622 622
623 /* Re-attach the filter to persist device */ 623 /* Re-attach the filter to persist device */
624 if (!skip_filter && (tun->filter_attached == true)) { 624 if (!skip_filter && (tun->filter_attached == true)) {
625 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 625 err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
626 lockdep_rtnl_is_held());
626 if (!err) 627 if (!err)
627 goto out; 628 goto out;
628 } 629 }
@@ -1014,7 +1015,6 @@ static void tun_net_init(struct net_device *dev)
1014 /* Zero header length */ 1015 /* Zero header length */
1015 dev->type = ARPHRD_NONE; 1016 dev->type = ARPHRD_NONE;
1016 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1017 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1017 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1018 break; 1018 break;
1019 1019
1020 case IFF_TAP: 1020 case IFF_TAP:
@@ -1026,7 +1026,6 @@ static void tun_net_init(struct net_device *dev)
1026 1026
1027 eth_hw_addr_random(dev); 1027 eth_hw_addr_random(dev);
1028 1028
1029 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1030 break; 1029 break;
1031 } 1030 }
1032} 1031}
@@ -1480,6 +1479,8 @@ static void tun_setup(struct net_device *dev)
1480 1479
1481 dev->ethtool_ops = &tun_ethtool_ops; 1480 dev->ethtool_ops = &tun_ethtool_ops;
1482 dev->destructor = tun_free_netdev; 1481 dev->destructor = tun_free_netdev;
1482 /* We prefer our own queue length */
1483 dev->tx_queue_len = TUN_READQ_SIZE;
1483} 1484}
1484 1485
1485/* Trivial set of netlink ops to allow deleting tun or tap 1486/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1822,7 +1823,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
1822 1823
1823 for (i = 0; i < n; i++) { 1824 for (i = 0; i < n; i++) {
1824 tfile = rtnl_dereference(tun->tfiles[i]); 1825 tfile = rtnl_dereference(tun->tfiles[i]);
1825 sk_detach_filter(tfile->socket.sk); 1826 __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
1826 } 1827 }
1827 1828
1828 tun->filter_attached = false; 1829 tun->filter_attached = false;
@@ -1835,7 +1836,8 @@ static int tun_attach_filter(struct tun_struct *tun)
1835 1836
1836 for (i = 0; i < tun->numqueues; i++) { 1837 for (i = 0; i < tun->numqueues; i++) {
1837 tfile = rtnl_dereference(tun->tfiles[i]); 1838 tfile = rtnl_dereference(tun->tfiles[i]);
1838 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 1839 ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
1840 lockdep_rtnl_is_held());
1839 if (ret) { 1841 if (ret) {
1840 tun_detach_filter(tun, i); 1842 tun_detach_filter(tun, i);
1841 return ret; 1843 return ret;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 86ba30ba35e8..2fb31edab125 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = {
1626 .driver_info = (unsigned long) &wwan_info, 1626 .driver_info = (unsigned long) &wwan_info,
1627 }, 1627 },
1628 1628
1629 /* Telit LE910 V2 */
1630 { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
1631 USB_CLASS_COMM,
1632 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1633 .driver_info = (unsigned long)&wwan_noarp_info,
1634 },
1635
1629 /* DW5812 LTE Verizon Mobile Broadband Card 1636 /* DW5812 LTE Verizon Mobile Broadband Card
1630 * Unlike DW5550 this device requires FLAG_NOARP 1637 * Unlike DW5550 this device requires FLAG_NOARP
1631 */ 1638 */
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fcaccf5..22e1a9a99a7d 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -38,7 +38,7 @@
38 * HEADS UP: this handshaking isn't all that robust. This driver 38 * HEADS UP: this handshaking isn't all that robust. This driver
39 * gets confused easily if you unplug one end of the cable then 39 * gets confused easily if you unplug one end of the cable then
40 * try to connect it again; you'll need to restart both ends. The 40 * try to connect it again; you'll need to restart both ends. The
41 * "naplink" software (used by some PlayStation/2 deveopers) does 41 * "naplink" software (used by some PlayStation/2 developers) does
42 * the handshaking much better! Also, sometimes this hardware 42 * the handshaking much better! Also, sometimes this hardware
43 * seems to get wedged under load. Prolific docs are weak, and 43 * seems to get wedged under load. Prolific docs are weak, and
44 * don't identify differences between PL2301 and PL2302, much less 44 * don't identify differences between PL2301 and PL2302, much less
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 7d717c66bcb0..9d1fce8a6e84 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
844 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 844 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
845 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 845 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
846 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 846 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
847 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
847 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 848 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
848 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 849 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
849 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 850 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index c32cbb593600..f068b6513cd2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
1204{ 1204{
1205 struct btt *btt = bdev->bd_disk->private_data; 1205 struct btt *btt = bdev->bd_disk->private_data;
1206 1206
1207 btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector); 1207 btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
1208 page_endio(page, rw & WRITE, 0); 1208 page_endio(page, rw & WRITE, 0);
1209 return 0; 1209 return 0;
1210} 1210}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index fc82743aefb6..19f822d7f652 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
407 [ND_CMD_IMPLEMENTED] = { }, 407 [ND_CMD_IMPLEMENTED] = { },
408 [ND_CMD_SMART] = { 408 [ND_CMD_SMART] = {
409 .out_num = 2, 409 .out_num = 2,
410 .out_sizes = { 4, 8, }, 410 .out_sizes = { 4, 128, },
411 }, 411 },
412 [ND_CMD_SMART_THRESHOLD] = { 412 [ND_CMD_SMART_THRESHOLD] = {
413 .out_num = 2, 413 .out_num = 2,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 79646d0c3277..182a93fe3712 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
417 set_badblock(bb, start_sector, num_sectors); 417 set_badblock(bb, start_sector, num_sectors);
418} 418}
419 419
420static void namespace_add_poison(struct list_head *poison_list, 420static void badblocks_populate(struct list_head *poison_list,
421 struct badblocks *bb, struct resource *res) 421 struct badblocks *bb, const struct resource *res)
422{ 422{
423 struct nd_poison *pl; 423 struct nd_poison *pl;
424 424
@@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list,
460} 460}
461 461
462/** 462/**
463 * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks 463 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
464 * @ndns: the namespace containing poison ranges 464 * @region: parent region of the range to interrogate
465 * @bb: badblocks instance to populate 465 * @bb: badblocks instance to populate
466 * @offset: offset at the start of the namespace before 'sector 0' 466 * @res: resource range to consider
467 * 467 *
468 * The poison list generated during NFIT initialization may contain multiple, 468 * The poison list generated during bus initialization may contain
469 * possibly overlapping ranges in the SPA (System Physical Address) space. 469 * multiple, possibly overlapping physical address ranges. Compare each
470 * Compare each of these ranges to the namespace currently being initialized, 470 * of these ranges to the resource range currently being initialized,
471 * and add badblocks to the gendisk for all matching sub-ranges 471 * and add badblocks entries for all matching sub-ranges
472 */ 472 */
473void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns, 473void nvdimm_badblocks_populate(struct nd_region *nd_region,
474 struct badblocks *bb, resource_size_t offset) 474 struct badblocks *bb, const struct resource *res)
475{ 475{
476 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
477 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
478 struct nvdimm_bus *nvdimm_bus; 476 struct nvdimm_bus *nvdimm_bus;
479 struct list_head *poison_list; 477 struct list_head *poison_list;
480 struct resource res = {
481 .start = nsio->res.start + offset,
482 .end = nsio->res.end,
483 };
484 478
485 nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent); 479 if (!is_nd_pmem(&nd_region->dev)) {
480 dev_WARN_ONCE(&nd_region->dev, 1,
481 "%s only valid for pmem regions\n", __func__);
482 return;
483 }
484 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
486 poison_list = &nvdimm_bus->poison_list; 485 poison_list = &nvdimm_bus->poison_list;
487 486
488 nvdimm_bus_lock(&nvdimm_bus->dev); 487 nvdimm_bus_lock(&nvdimm_bus->dev);
489 namespace_add_poison(poison_list, bb, &res); 488 badblocks_populate(poison_list, bb, res);
490 nvdimm_bus_unlock(&nvdimm_bus->dev); 489 nvdimm_bus_unlock(&nvdimm_bus->dev);
491} 490}
492EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison); 491EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
493 492
494static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) 493static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
495{ 494{
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1799bd97a9ce..875c524fafb0 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
266int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns); 266int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
267const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, 267const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
268 char *name); 268 char *name);
269void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns, 269void nvdimm_badblocks_populate(struct nd_region *nd_region,
270 struct badblocks *bb, resource_size_t offset); 270 struct badblocks *bb, const struct resource *res);
271int nd_blk_region_init(struct nd_region *nd_region); 271int nd_blk_region_init(struct nd_region *nd_region);
272void __nd_iostat_start(struct bio *bio, unsigned long *start); 272void __nd_iostat_start(struct bio *bio, unsigned long *start);
273static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) 273static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 254d3bc13f70..e071e214feba 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
376 } else { 376 } else {
377 /* from init we validate */ 377 /* from init we validate */
378 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) 378 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
379 return -EINVAL; 379 return -ENODEV;
380 } 380 }
381 381
382 if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) { 382 if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ca5721c306bb..8e09c544d892 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -99,7 +99,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
99 if (unlikely(bad_pmem)) 99 if (unlikely(bad_pmem))
100 rc = -EIO; 100 rc = -EIO;
101 else { 101 else {
102 memcpy_from_pmem(mem + off, pmem_addr, len); 102 rc = memcpy_from_pmem(mem + off, pmem_addr, len);
103 flush_dcache_page(page); 103 flush_dcache_page(page);
104 } 104 }
105 } else { 105 } else {
@@ -151,7 +151,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
151 struct pmem_device *pmem = bdev->bd_disk->private_data; 151 struct pmem_device *pmem = bdev->bd_disk->private_data;
152 int rc; 152 int rc;
153 153
154 rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); 154 rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
155 if (rw & WRITE) 155 if (rw & WRITE)
156 wmb_pmem(); 156 wmb_pmem();
157 157
@@ -244,7 +244,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
244static int pmem_attach_disk(struct device *dev, 244static int pmem_attach_disk(struct device *dev,
245 struct nd_namespace_common *ndns, struct pmem_device *pmem) 245 struct nd_namespace_common *ndns, struct pmem_device *pmem)
246{ 246{
247 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
247 int nid = dev_to_node(dev); 248 int nid = dev_to_node(dev);
249 struct resource bb_res;
248 struct gendisk *disk; 250 struct gendisk *disk;
249 251
250 blk_queue_make_request(pmem->pmem_queue, pmem_make_request); 252 blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +273,17 @@ static int pmem_attach_disk(struct device *dev,
271 devm_exit_badblocks(dev, &pmem->bb); 273 devm_exit_badblocks(dev, &pmem->bb);
272 if (devm_init_badblocks(dev, &pmem->bb)) 274 if (devm_init_badblocks(dev, &pmem->bb))
273 return -ENOMEM; 275 return -ENOMEM;
274 nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset); 276 bb_res.start = nsio->res.start + pmem->data_offset;
275 277 bb_res.end = nsio->res.end;
278 if (is_nd_pfn(dev)) {
279 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
280 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
281
282 bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
283 bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
284 }
285 nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
286 &bb_res);
276 disk->bb = &pmem->bb; 287 disk->bb = &pmem->bb;
277 add_disk(disk); 288 add_disk(disk);
278 revalidate_disk(disk); 289 revalidate_disk(disk);
@@ -295,7 +306,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
295 306
296 if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align))) 307 if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
297 return -EIO; 308 return -EIO;
298 memcpy_from_pmem(buf, pmem->virt_addr + offset, size); 309 return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
299 } else { 310 } else {
300 memcpy_to_pmem(pmem->virt_addr + offset, buf, size); 311 memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
301 wmb_pmem(); 312 wmb_pmem();
@@ -553,7 +564,7 @@ static int nd_pmem_probe(struct device *dev)
553 ndns->rw_bytes = pmem_rw_bytes; 564 ndns->rw_bytes = pmem_rw_bytes;
554 if (devm_init_badblocks(dev, &pmem->bb)) 565 if (devm_init_badblocks(dev, &pmem->bb))
555 return -ENOMEM; 566 return -ENOMEM;
556 nvdimm_namespace_add_poison(ndns, &pmem->bb, 0); 567 nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
557 568
558 if (is_nd_btt(dev)) { 569 if (is_nd_btt(dev)) {
559 /* btt allocates its own request_queue */ 570 /* btt allocates its own request_queue */
@@ -595,14 +606,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
595{ 606{
596 struct pmem_device *pmem = dev_get_drvdata(dev); 607 struct pmem_device *pmem = dev_get_drvdata(dev);
597 struct nd_namespace_common *ndns = pmem->ndns; 608 struct nd_namespace_common *ndns = pmem->ndns;
609 struct nd_region *nd_region = to_nd_region(dev->parent);
610 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
611 struct resource res = {
612 .start = nsio->res.start + pmem->data_offset,
613 .end = nsio->res.end,
614 };
598 615
599 if (event != NVDIMM_REVALIDATE_POISON) 616 if (event != NVDIMM_REVALIDATE_POISON)
600 return; 617 return;
601 618
602 if (is_nd_btt(dev)) 619 if (is_nd_pfn(dev)) {
603 nvdimm_namespace_add_poison(ndns, &pmem->bb, 0); 620 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
604 else 621 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
605 nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset); 622
623 res.start += __le32_to_cpu(pfn_sb->start_pad);
624 res.end -= __le32_to_cpu(pfn_sb->end_trunc);
625 }
626
627 nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
606} 628}
607 629
608MODULE_ALIAS("pmem"); 630MODULE_ALIAS("pmem");
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac6300c79..a0e5260bd006 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
239{ 239{
240 struct inode *root_inode; 240 struct inode *root_inode;
241 241
242 sb->s_blocksize = PAGE_CACHE_SIZE; 242 sb->s_blocksize = PAGE_SIZE;
243 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 243 sb->s_blocksize_bits = PAGE_SHIFT;
244 sb->s_magic = OPROFILEFS_MAGIC; 244 sb->s_magic = OPROFILEFS_MAGIC;
245 sb->s_op = &s_ops; 245 sb->s_op = &s_ops;
246 sb->s_time_gran = 1; 246 sb->s_time_gran = 1;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
56 int stschg_irq; /* card-status-change irq */ 56 int stschg_irq; /* card-status-change irq */
57 int card_irq; /* card irq */ 57 int card_irq; /* card irq */
58 int eject_irq; /* db1200/pb1200 have these */ 58 int eject_irq; /* db1200/pb1200 have these */
59 int insert_gpio; /* db1000 carddetect gpio */
59 60
60#define BOARD_TYPE_DEFAULT 0 /* most boards */ 61#define BOARD_TYPE_DEFAULT 0 /* most boards */
61#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */ 62#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
83/* carddetect gpio: low-active */ 84/* carddetect gpio: low-active */
84static int db1000_card_inserted(struct db1x_pcmcia_sock *sock) 85static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
85{ 86{
86 return !gpio_get_value(irq_to_gpio(sock->insert_irq)); 87 return !gpio_get_value(sock->insert_gpio);
87} 88}
88 89
89static int db1x_card_inserted(struct db1x_pcmcia_sock *sock) 90static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
457 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card"); 458 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
458 sock->card_irq = r ? r->start : 0; 459 sock->card_irq = r ? r->start : 0;
459 460
460 /* insert: irq which triggers on card insertion/ejection */ 461 /* insert: irq which triggers on card insertion/ejection
462 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
463 */
461 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert"); 464 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
462 sock->insert_irq = r ? r->start : -1; 465 sock->insert_irq = r ? r->start : -1;
466 if (sock->board_type == BOARD_TYPE_DEFAULT) {
467 sock->insert_gpio = r ? r->start : -1;
468 sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
469 }
463 470
464 /* stschg: irq which trigger on card status change (optional) */ 471 /* stschg: irq which trigger on card status change (optional) */
465 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg"); 472 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 46210512d8ec..9cfa544072b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
762 762
763 if (of_property_read_bool(dev_np, "fsl,input-sel")) { 763 if (of_property_read_bool(dev_np, "fsl,input-sel")) {
764 np = of_parse_phandle(dev_np, "fsl,input-sel", 0); 764 np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
765 if (np) { 765 if (!np) {
766 ipctl->input_sel_base = of_iomap(np, 0);
767 if (IS_ERR(ipctl->input_sel_base)) {
768 of_node_put(np);
769 dev_err(&pdev->dev,
770 "iomuxc input select base address not found\n");
771 return PTR_ERR(ipctl->input_sel_base);
772 }
773 } else {
774 dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n"); 766 dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
775 return -EINVAL; 767 return -EINVAL;
776 } 768 }
769
770 ipctl->input_sel_base = of_iomap(np, 0);
777 of_node_put(np); 771 of_node_put(np);
772 if (!ipctl->input_sel_base) {
773 dev_err(&pdev->dev,
774 "iomuxc input select base address not found\n");
775 return -ENOMEM;
776 }
778 } 777 }
779 778
780 imx_pinctrl_desc.name = dev_name(&pdev->dev); 779 imx_pinctrl_desc.name = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85536b467c25..6c2c816f8e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
665 spin_unlock(&pctrl->lock); 665 spin_unlock(&pctrl->lock);
666} 666}
667 667
668static void intel_gpio_irq_enable(struct irq_data *d)
669{
670 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
671 struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
672 const struct intel_community *community;
673 unsigned pin = irqd_to_hwirq(d);
674 unsigned long flags;
675
676 spin_lock_irqsave(&pctrl->lock, flags);
677
678 community = intel_get_community(pctrl, pin);
679 if (community) {
680 unsigned padno = pin_to_padno(community, pin);
681 unsigned gpp_size = community->gpp_size;
682 unsigned gpp_offset = padno % gpp_size;
683 unsigned gpp = padno / gpp_size;
684 u32 value;
685
686 /* Clear interrupt status first to avoid unexpected interrupt */
687 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
688
689 value = readl(community->regs + community->ie_offset + gpp * 4);
690 value |= BIT(gpp_offset);
691 writel(value, community->regs + community->ie_offset + gpp * 4);
692 }
693
694 spin_unlock_irqrestore(&pctrl->lock, flags);
695}
696
668static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) 697static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
669{ 698{
670 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 699 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
741 value |= PADCFG0_RXINV; 770 value |= PADCFG0_RXINV;
742 } else if (type & IRQ_TYPE_EDGE_RISING) { 771 } else if (type & IRQ_TYPE_EDGE_RISING) {
743 value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT; 772 value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
744 } else if (type & IRQ_TYPE_LEVEL_LOW) { 773 } else if (type & IRQ_TYPE_LEVEL_MASK) {
745 value |= PADCFG0_RXINV; 774 if (type & IRQ_TYPE_LEVEL_LOW)
775 value |= PADCFG0_RXINV;
746 } else { 776 } else {
747 value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT; 777 value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
748 } 778 }
@@ -852,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
852 882
853static struct irq_chip intel_gpio_irqchip = { 883static struct irq_chip intel_gpio_irqchip = {
854 .name = "intel-gpio", 884 .name = "intel-gpio",
885 .irq_enable = intel_gpio_irq_enable,
855 .irq_ack = intel_gpio_irq_ack, 886 .irq_ack = intel_gpio_irq_ack,
856 .irq_mask = intel_gpio_irq_mask, 887 .irq_mask = intel_gpio_irq_mask,
857 .irq_unmask = intel_gpio_irq_unmask, 888 .irq_unmask = intel_gpio_irq_unmask,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352406108fa0..c8969dd49449 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
990 int val; 990 int val;
991 991
992 if (pull) 992 if (pull)
993 pullidx = data_out ? 1 : 2; 993 pullidx = data_out ? 2 : 1;
994 994
995 seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s", 995 seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
996 gpio, 996 gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736cb1a6..2673cd9d106e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
469 "mfio83", 469 "mfio83",
470}; 470};
471 471
472static const char * const pistachio_sys_pll_lock_groups[] = { 472static const char * const pistachio_audio_pll_lock_groups[] = {
473 "mfio84", 473 "mfio84",
474}; 474};
475 475
476static const char * const pistachio_wifi_pll_lock_groups[] = { 476static const char * const pistachio_rpu_v_pll_lock_groups[] = {
477 "mfio85", 477 "mfio85",
478}; 478};
479 479
480static const char * const pistachio_bt_pll_lock_groups[] = { 480static const char * const pistachio_rpu_l_pll_lock_groups[] = {
481 "mfio86", 481 "mfio86",
482}; 482};
483 483
484static const char * const pistachio_rpu_v_pll_lock_groups[] = { 484static const char * const pistachio_sys_pll_lock_groups[] = {
485 "mfio87", 485 "mfio87",
486}; 486};
487 487
488static const char * const pistachio_rpu_l_pll_lock_groups[] = { 488static const char * const pistachio_wifi_pll_lock_groups[] = {
489 "mfio88", 489 "mfio88",
490}; 490};
491 491
492static const char * const pistachio_audio_pll_lock_groups[] = { 492static const char * const pistachio_bt_pll_lock_groups[] = {
493 "mfio89", 493 "mfio89",
494}; 494};
495 495
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
559 PISTACHIO_FUNCTION_DREQ4, 559 PISTACHIO_FUNCTION_DREQ4,
560 PISTACHIO_FUNCTION_DREQ5, 560 PISTACHIO_FUNCTION_DREQ5,
561 PISTACHIO_FUNCTION_MIPS_PLL_LOCK, 561 PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
562 PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
563 PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
564 PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
562 PISTACHIO_FUNCTION_SYS_PLL_LOCK, 565 PISTACHIO_FUNCTION_SYS_PLL_LOCK,
563 PISTACHIO_FUNCTION_WIFI_PLL_LOCK, 566 PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
564 PISTACHIO_FUNCTION_BT_PLL_LOCK, 567 PISTACHIO_FUNCTION_BT_PLL_LOCK,
565 PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
566 PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
567 PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
568 PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND, 568 PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
569 PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND, 569 PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
570 PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND, 570 PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
620 FUNCTION(dreq4), 620 FUNCTION(dreq4),
621 FUNCTION(dreq5), 621 FUNCTION(dreq5),
622 FUNCTION(mips_pll_lock), 622 FUNCTION(mips_pll_lock),
623 FUNCTION(audio_pll_lock),
624 FUNCTION(rpu_v_pll_lock),
625 FUNCTION(rpu_l_pll_lock),
623 FUNCTION(sys_pll_lock), 626 FUNCTION(sys_pll_lock),
624 FUNCTION(wifi_pll_lock), 627 FUNCTION(wifi_pll_lock),
625 FUNCTION(bt_pll_lock), 628 FUNCTION(bt_pll_lock),
626 FUNCTION(rpu_v_pll_lock),
627 FUNCTION(rpu_l_pll_lock),
628 FUNCTION(audio_pll_lock),
629 FUNCTION(debug_raw_cca_ind), 629 FUNCTION(debug_raw_cca_ind),
630 FUNCTION(debug_ed_sec20_cca_ind), 630 FUNCTION(debug_ed_sec20_cca_ind),
631 FUNCTION(debug_ed_sec40_cca_ind), 631 FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b78140a..a13f2b6f6fc0 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
1573 return 0; 1573 return 0;
1574} 1574}
1575 1575
1576/*
1577 * gpiolib gpiod_to_irq callback function.
1578 * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
1579 */
1580static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
1581{
1582 struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
1583 int i;
1584
1585 for (i = 0; i < info->num_exin; i++)
1586 if (info->exin[i] == offset)
1587 return ltq_eiu_get_irq(i);
1588
1589 return -1;
1590}
1591
1576static struct gpio_chip xway_chip = { 1592static struct gpio_chip xway_chip = {
1577 .label = "gpio-xway", 1593 .label = "gpio-xway",
1578 .direction_input = xway_gpio_dir_in, 1594 .direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
1581 .set = xway_gpio_set, 1597 .set = xway_gpio_set,
1582 .request = gpiochip_generic_request, 1598 .request = gpiochip_generic_request,
1583 .free = gpiochip_generic_free, 1599 .free = gpiochip_generic_free,
1600 .to_irq = xway_gpio_to_irq,
1584 .base = -1, 1601 .base = -1,
1585}; 1602};
1586 1603
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b5d81ced6ce6..b68ae424cee2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99);
237 .pins = gpio##id##_pins, \ 237 .pins = gpio##id##_pins, \
238 .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \ 238 .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \
239 .funcs = (int[]){ \ 239 .funcs = (int[]){ \
240 qca_mux_NA, /* gpio mode */ \ 240 qca_mux_gpio, /* gpio mode */ \
241 qca_mux_##f1, \ 241 qca_mux_##f1, \
242 qca_mux_##f2, \ 242 qca_mux_##f2, \
243 qca_mux_##f3, \ 243 qca_mux_##f3, \
@@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99);
254 qca_mux_##f14 \ 254 qca_mux_##f14 \
255 }, \ 255 }, \
256 .nfuncs = 15, \ 256 .nfuncs = 15, \
257 .ctl_reg = 0x1000 + 0x10 * id, \ 257 .ctl_reg = 0x0 + 0x1000 * id, \
258 .io_reg = 0x1004 + 0x10 * id, \ 258 .io_reg = 0x4 + 0x1000 * id, \
259 .intr_cfg_reg = 0x1008 + 0x10 * id, \ 259 .intr_cfg_reg = 0x8 + 0x1000 * id, \
260 .intr_status_reg = 0x100c + 0x10 * id, \ 260 .intr_status_reg = 0xc + 0x1000 * id, \
261 .intr_target_reg = 0x400 + 0x4 * id, \ 261 .intr_target_reg = 0x8 + 0x1000 * id, \
262 .mux_bit = 2, \ 262 .mux_bit = 2, \
263 .pull_bit = 0, \ 263 .pull_bit = 0, \
264 .drv_bit = 6, \ 264 .drv_bit = 6, \
@@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
414 .nfunctions = ARRAY_SIZE(ipq4019_functions), 414 .nfunctions = ARRAY_SIZE(ipq4019_functions),
415 .groups = ipq4019_groups, 415 .groups = ipq4019_groups,
416 .ngroups = ARRAY_SIZE(ipq4019_groups), 416 .ngroups = ARRAY_SIZE(ipq4019_groups),
417 .ngpios = 70, 417 .ngpios = 100,
418}; 418};
419 419
420static int ipq4019_pinctrl_probe(struct platform_device *pdev) 420static int ipq4019_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index dc3609f0c60b..ee0c1f2567d9 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
546 return ret; 546 return ret;
547 } 547 }
548 548
549 pinctrl_provide_dummies(); 549 /* Enable dummy states for those platforms without pinctrl support */
550 if (!of_have_populated_dt())
551 pinctrl_provide_dummies();
550 552
551 ret = sh_pfc_init_ranges(pfc); 553 ret = sh_pfc_init_ranges(pfc);
552 if (ret < 0) 554 if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
485 .pins = sun8i_a33_pins, 485 .pins = sun8i_a33_pins,
486 .npins = ARRAY_SIZE(sun8i_a33_pins), 486 .npins = ARRAY_SIZE(sun8i_a33_pins),
487 .irq_banks = 2, 487 .irq_banks = 2,
488 .irq_bank_base = 1,
488}; 489};
489 490
490static int sun8i_a33_pinctrl_probe(struct platform_device *pdev) 491static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 12a1dfabb1af..3b017dbd289c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
579static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) 579static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
580{ 580{
581 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 581 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
582 u32 reg = sunxi_irq_cfg_reg(d->hwirq); 582 u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
583 u8 index = sunxi_irq_cfg_offset(d->hwirq); 583 u8 index = sunxi_irq_cfg_offset(d->hwirq);
584 unsigned long flags; 584 unsigned long flags;
585 u32 regval; 585 u32 regval;
@@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
626static void sunxi_pinctrl_irq_ack(struct irq_data *d) 626static void sunxi_pinctrl_irq_ack(struct irq_data *d)
627{ 627{
628 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 628 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
629 u32 status_reg = sunxi_irq_status_reg(d->hwirq); 629 u32 status_reg = sunxi_irq_status_reg(d->hwirq,
630 pctl->desc->irq_bank_base);
630 u8 status_idx = sunxi_irq_status_offset(d->hwirq); 631 u8 status_idx = sunxi_irq_status_offset(d->hwirq);
631 632
632 /* Clear the IRQ */ 633 /* Clear the IRQ */
@@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
636static void sunxi_pinctrl_irq_mask(struct irq_data *d) 637static void sunxi_pinctrl_irq_mask(struct irq_data *d)
637{ 638{
638 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 639 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
639 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 640 u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
640 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 641 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
641 unsigned long flags; 642 unsigned long flags;
642 u32 val; 643 u32 val;
@@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
653static void sunxi_pinctrl_irq_unmask(struct irq_data *d) 654static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
654{ 655{
655 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 656 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
656 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 657 u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
657 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 658 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
658 unsigned long flags; 659 unsigned long flags;
659 u32 val; 660 u32 val;
@@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
745 if (bank == pctl->desc->irq_banks) 746 if (bank == pctl->desc->irq_banks)
746 return; 747 return;
747 748
748 reg = sunxi_irq_status_reg_from_bank(bank); 749 reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
749 val = readl(pctl->membase + reg); 750 val = readl(pctl->membase + reg);
750 751
751 if (val) { 752 if (val) {
@@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
1024 1025
1025 for (i = 0; i < pctl->desc->irq_banks; i++) { 1026 for (i = 0; i < pctl->desc->irq_banks; i++) {
1026 /* Mask and clear all IRQs before registering a handler */ 1027 /* Mask and clear all IRQs before registering a handler */
1027 writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i)); 1028 writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
1029 pctl->desc->irq_bank_base));
1028 writel(0xffffffff, 1030 writel(0xffffffff,
1029 pctl->membase + sunxi_irq_status_reg_from_bank(i)); 1031 pctl->membase + sunxi_irq_status_reg_from_bank(i,
1032 pctl->desc->irq_bank_base));
1030 1033
1031 irq_set_chained_handler_and_data(pctl->irq[i], 1034 irq_set_chained_handler_and_data(pctl->irq[i],
1032 sunxi_pinctrl_irq_handler, 1035 sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
97 int npins; 97 int npins;
98 unsigned pin_base; 98 unsigned pin_base;
99 unsigned irq_banks; 99 unsigned irq_banks;
100 unsigned irq_bank_base;
100 bool irq_read_needs_mux; 101 bool irq_read_needs_mux;
101}; 102};
102 103
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
233 return pin_num * PULL_PINS_BITS; 234 return pin_num * PULL_PINS_BITS;
234} 235}
235 236
236static inline u32 sunxi_irq_cfg_reg(u16 irq) 237static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
237{ 238{
238 u8 bank = irq / IRQ_PER_BANK; 239 u8 bank = irq / IRQ_PER_BANK;
239 u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04; 240 u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
240 241
241 return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg; 242 return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
242} 243}
243 244
244static inline u32 sunxi_irq_cfg_offset(u16 irq) 245static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
247 return irq_num * IRQ_CFG_IRQ_BITS; 248 return irq_num * IRQ_CFG_IRQ_BITS;
248} 249}
249 250
250static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank) 251static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
251{ 252{
252 return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE; 253 return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
253} 254}
254 255
255static inline u32 sunxi_irq_ctrl_reg(u16 irq) 256static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
256{ 257{
257 u8 bank = irq / IRQ_PER_BANK; 258 u8 bank = irq / IRQ_PER_BANK;
258 259
259 return sunxi_irq_ctrl_reg_from_bank(bank); 260 return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
260} 261}
261 262
262static inline u32 sunxi_irq_ctrl_offset(u16 irq) 263static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
265 return irq_num * IRQ_CTRL_IRQ_BITS; 266 return irq_num * IRQ_CTRL_IRQ_BITS;
266} 267}
267 268
268static inline u32 sunxi_irq_status_reg_from_bank(u8 bank) 269static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
269{ 270{
270 return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE; 271 return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
271} 272}
272 273
273static inline u32 sunxi_irq_status_reg(u16 irq) 274static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
274{ 275{
275 u8 bank = irq / IRQ_PER_BANK; 276 u8 bank = irq / IRQ_PER_BANK;
276 277
277 return sunxi_irq_status_reg_from_bank(bank); 278 return sunxi_irq_status_reg_from_bank(bank, bank_base);
278} 279}
279 280
280static inline u32 sunxi_irq_status_offset(u16 irq) 281static inline u32 sunxi_irq_status_offset(u16 irq)
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 9973cebb4d6f..07462d79d040 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,8 +309,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
309 * much memory to the process. 309 * much memory to the process.
310 */ 310 */
311 down_read(&current->mm->mmap_sem); 311 down_read(&current->mm->mmap_sem);
312 ret = get_user_pages(current, current->mm, address, 1, 312 ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
313 !is_write, 0, &page, NULL);
314 up_read(&current->mm->mmap_sem); 313 up_read(&current->mm->mmap_sem);
315 if (ret < 0) 314 if (ret < 0)
316 break; 315 break;
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cdfd01f0adb8..8fad0a7044d3 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1091,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
1091 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ 1091 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1092 RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */ 1092 RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
1093 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ 1093 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1094 RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
1094 RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */ 1095 RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
1095 RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */ 1096 RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
1096 RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */ 1097 RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9607bc826460..5d4d91846357 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -886,7 +886,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
886 } 886 }
887 887
888 down_read(&current->mm->mmap_sem); 888 down_read(&current->mm->mmap_sem);
889 pinned = get_user_pages(current, current->mm, 889 pinned = get_user_pages(
890 (unsigned long)xfer->loc_addr & PAGE_MASK, 890 (unsigned long)xfer->loc_addr & PAGE_MASK,
891 nr_pages, dir == DMA_FROM_DEVICE, 0, 891 nr_pages, dir == DMA_FROM_DEVICE, 0,
892 page_list, NULL); 892 page_list, NULL);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 6bb04d453247..6f056caa8a56 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -189,9 +189,9 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
189 } 189 }
190 190
191 ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); 191 ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
192 if (!ddata->boot_base) { 192 if (IS_ERR(ddata->boot_base)) {
193 dev_err(dev, "Boot base not found\n"); 193 dev_err(dev, "Boot base not found\n");
194 return -EINVAL; 194 return PTR_ERR(ddata->boot_base);
195 } 195 }
196 196
197 err = of_property_read_u32_index(np, "st,syscfg", 1, 197 err = of_property_read_u32_index(np, "st,syscfg", 1,
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 17ad5749e91d..1e560188dd13 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
317 struct alias_pav_group *group; 317 struct alias_pav_group *group;
318 struct dasd_uid uid; 318 struct dasd_uid uid;
319 319
320 spin_lock(get_ccwdev_lock(device->cdev));
320 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type; 321 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
321 private->uid.base_unit_addr = 322 private->uid.base_unit_addr =
322 lcu->uac->unit[private->uid.real_unit_addr].base_ua; 323 lcu->uac->unit[private->uid.real_unit_addr].base_ua;
323 uid = private->uid; 324 uid = private->uid;
324 325 spin_unlock(get_ccwdev_lock(device->cdev));
325 /* if we have no PAV anyway, we don't need to bother with PAV groups */ 326 /* if we have no PAV anyway, we don't need to bother with PAV groups */
326 if (lcu->pav == NO_PAV) { 327 if (lcu->pav == NO_PAV) {
327 list_move(&device->alias_list, &lcu->active_devices); 328 list_move(&device->alias_list, &lcu->active_devices);
328 return 0; 329 return 0;
329 } 330 }
330
331 group = _find_group(lcu, &uid); 331 group = _find_group(lcu, &uid);
332 if (!group) { 332 if (!group) {
333 group = kzalloc(sizeof(*group), GFP_ATOMIC); 333 group = kzalloc(sizeof(*group), GFP_ATOMIC);
@@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
397 return 0; 397 return 0;
398} 398}
399 399
400/*
401 * This function tries to lock all devices on an lcu via trylock
402 * return NULL on success otherwise return first failed device
403 */
404static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
405 struct dasd_device *pos)
406
407{
408 struct alias_pav_group *pavgroup;
409 struct dasd_device *device;
410
411 list_for_each_entry(device, &lcu->active_devices, alias_list) {
412 if (device == pos)
413 continue;
414 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
415 return device;
416 }
417 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
418 if (device == pos)
419 continue;
420 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
421 return device;
422 }
423 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
424 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
425 if (device == pos)
426 continue;
427 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
428 return device;
429 }
430 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
431 if (device == pos)
432 continue;
433 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
434 return device;
435 }
436 }
437 return NULL;
438}
439
440/*
441 * unlock all devices except the one that is specified as pos
442 * stop if enddev is specified and reached
443 */
444static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
445 struct dasd_device *pos,
446 struct dasd_device *enddev)
447
448{
449 struct alias_pav_group *pavgroup;
450 struct dasd_device *device;
451
452 list_for_each_entry(device, &lcu->active_devices, alias_list) {
453 if (device == pos)
454 continue;
455 if (device == enddev)
456 return;
457 spin_unlock(get_ccwdev_lock(device->cdev));
458 }
459 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
460 if (device == pos)
461 continue;
462 if (device == enddev)
463 return;
464 spin_unlock(get_ccwdev_lock(device->cdev));
465 }
466 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
467 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
468 if (device == pos)
469 continue;
470 if (device == enddev)
471 return;
472 spin_unlock(get_ccwdev_lock(device->cdev));
473 }
474 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
475 if (device == pos)
476 continue;
477 if (device == enddev)
478 return;
479 spin_unlock(get_ccwdev_lock(device->cdev));
480 }
481 }
482}
483
484/*
485 * this function is needed because the locking order
486 * device lock -> lcu lock
487 * needs to be assured when iterating over devices in an LCU
488 *
489 * if a device is specified in pos then the device lock is already hold
490 */
491static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
492 struct dasd_device *pos,
493 unsigned long *flags)
494{
495 struct dasd_device *failed;
496
497 do {
498 spin_lock_irqsave(&lcu->lock, *flags);
499 failed = _trylock_all_devices_on_lcu(lcu, pos);
500 if (failed) {
501 _unlock_all_devices_on_lcu(lcu, pos, failed);
502 spin_unlock_irqrestore(&lcu->lock, *flags);
503 cpu_relax();
504 }
505 } while (failed);
506}
507
508static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
509 struct dasd_device *pos)
510{
511 struct dasd_device *failed;
512
513 do {
514 spin_lock(&lcu->lock);
515 failed = _trylock_all_devices_on_lcu(lcu, pos);
516 if (failed) {
517 _unlock_all_devices_on_lcu(lcu, pos, failed);
518 spin_unlock(&lcu->lock);
519 cpu_relax();
520 }
521 } while (failed);
522}
523
524static int read_unit_address_configuration(struct dasd_device *device, 400static int read_unit_address_configuration(struct dasd_device *device,
525 struct alias_lcu *lcu) 401 struct alias_lcu *lcu)
526{ 402{
@@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
615 if (rc) 491 if (rc)
616 return rc; 492 return rc;
617 493
618 _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); 494 spin_lock_irqsave(&lcu->lock, flags);
619 lcu->pav = NO_PAV; 495 lcu->pav = NO_PAV;
620 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { 496 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
621 switch (lcu->uac->unit[i].ua_type) { 497 switch (lcu->uac->unit[i].ua_type) {
@@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
634 alias_list) { 510 alias_list) {
635 _add_device_to_lcu(lcu, device, refdev); 511 _add_device_to_lcu(lcu, device, refdev);
636 } 512 }
637 _unlock_all_devices_on_lcu(lcu, NULL, NULL);
638 spin_unlock_irqrestore(&lcu->lock, flags); 513 spin_unlock_irqrestore(&lcu->lock, flags);
639 return 0; 514 return 0;
640} 515}
@@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device)
722 597
723 lcu = private->lcu; 598 lcu = private->lcu;
724 rc = 0; 599 rc = 0;
725 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 600 spin_lock_irqsave(&lcu->lock, flags);
726 spin_lock(&lcu->lock);
727 if (!(lcu->flags & UPDATE_PENDING)) { 601 if (!(lcu->flags & UPDATE_PENDING)) {
728 rc = _add_device_to_lcu(lcu, device, device); 602 rc = _add_device_to_lcu(lcu, device, device);
729 if (rc) 603 if (rc)
@@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device)
733 list_move(&device->alias_list, &lcu->active_devices); 607 list_move(&device->alias_list, &lcu->active_devices);
734 _schedule_lcu_update(lcu, device); 608 _schedule_lcu_update(lcu, device);
735 } 609 }
736 spin_unlock(&lcu->lock); 610 spin_unlock_irqrestore(&lcu->lock, flags);
737 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
738 return rc; 611 return rc;
739} 612}
740 613
@@ -933,15 +806,27 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
933 struct alias_pav_group *pavgroup; 806 struct alias_pav_group *pavgroup;
934 struct dasd_device *device; 807 struct dasd_device *device;
935 808
936 list_for_each_entry(device, &lcu->active_devices, alias_list) 809 list_for_each_entry(device, &lcu->active_devices, alias_list) {
810 spin_lock(get_ccwdev_lock(device->cdev));
937 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 811 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
938 list_for_each_entry(device, &lcu->inactive_devices, alias_list) 812 spin_unlock(get_ccwdev_lock(device->cdev));
813 }
814 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
815 spin_lock(get_ccwdev_lock(device->cdev));
939 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 816 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
817 spin_unlock(get_ccwdev_lock(device->cdev));
818 }
940 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 819 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
941 list_for_each_entry(device, &pavgroup->baselist, alias_list) 820 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
821 spin_lock(get_ccwdev_lock(device->cdev));
942 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 822 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
943 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) 823 spin_unlock(get_ccwdev_lock(device->cdev));
824 }
825 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
826 spin_lock(get_ccwdev_lock(device->cdev));
944 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 827 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
828 spin_unlock(get_ccwdev_lock(device->cdev));
829 }
945 } 830 }
946} 831}
947 832
@@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
950 struct alias_pav_group *pavgroup; 835 struct alias_pav_group *pavgroup;
951 struct dasd_device *device; 836 struct dasd_device *device;
952 837
953 list_for_each_entry(device, &lcu->active_devices, alias_list) 838 list_for_each_entry(device, &lcu->active_devices, alias_list) {
839 spin_lock(get_ccwdev_lock(device->cdev));
954 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 840 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
955 list_for_each_entry(device, &lcu->inactive_devices, alias_list) 841 spin_unlock(get_ccwdev_lock(device->cdev));
842 }
843 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
844 spin_lock(get_ccwdev_lock(device->cdev));
956 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 845 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
846 spin_unlock(get_ccwdev_lock(device->cdev));
847 }
957 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 848 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
958 list_for_each_entry(device, &pavgroup->baselist, alias_list) 849 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
850 spin_lock(get_ccwdev_lock(device->cdev));
959 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 851 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
960 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) 852 spin_unlock(get_ccwdev_lock(device->cdev));
853 }
854 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
855 spin_lock(get_ccwdev_lock(device->cdev));
961 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 856 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
857 spin_unlock(get_ccwdev_lock(device->cdev));
858 }
962 } 859 }
963} 860}
964 861
@@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work)
984 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 881 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
985 reset_summary_unit_check(lcu, device, suc_data->reason); 882 reset_summary_unit_check(lcu, device, suc_data->reason);
986 883
987 _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); 884 spin_lock_irqsave(&lcu->lock, flags);
988 _unstop_all_devices_on_lcu(lcu); 885 _unstop_all_devices_on_lcu(lcu);
989 _restart_all_base_devices_on_lcu(lcu); 886 _restart_all_base_devices_on_lcu(lcu);
990 /* 3. read new alias configuration */ 887 /* 3. read new alias configuration */
991 _schedule_lcu_update(lcu, device); 888 _schedule_lcu_update(lcu, device);
992 lcu->suc_data.device = NULL; 889 lcu->suc_data.device = NULL;
993 dasd_put_device(device); 890 dasd_put_device(device);
994 _unlock_all_devices_on_lcu(lcu, NULL, NULL);
995 spin_unlock_irqrestore(&lcu->lock, flags); 891 spin_unlock_irqrestore(&lcu->lock, flags);
996} 892}
997 893
998/* 894void dasd_alias_handle_summary_unit_check(struct work_struct *work)
999 * note: this will be called from int handler context (cdev locked)
1000 */
1001void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
1002 struct irb *irb)
1003{ 895{
896 struct dasd_device *device = container_of(work, struct dasd_device,
897 suc_work);
1004 struct dasd_eckd_private *private = device->private; 898 struct dasd_eckd_private *private = device->private;
1005 struct alias_lcu *lcu; 899 struct alias_lcu *lcu;
1006 char reason; 900 unsigned long flags;
1007 char *sense;
1008
1009 sense = dasd_get_sense(irb);
1010 if (sense) {
1011 reason = sense[8];
1012 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
1013 "eckd handle summary unit check: reason", reason);
1014 } else {
1015 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1016 "eckd handle summary unit check:"
1017 " no reason code available");
1018 return;
1019 }
1020 901
1021 lcu = private->lcu; 902 lcu = private->lcu;
1022 if (!lcu) { 903 if (!lcu) {
1023 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 904 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1024 "device not ready to handle summary" 905 "device not ready to handle summary"
1025 " unit check (no lcu structure)"); 906 " unit check (no lcu structure)");
1026 return; 907 goto out;
1027 } 908 }
1028 _trylock_and_lock_lcu(lcu, device); 909 spin_lock_irqsave(&lcu->lock, flags);
1029 /* If this device is about to be removed just return and wait for 910 /* If this device is about to be removed just return and wait for
1030 * the next interrupt on a different device 911 * the next interrupt on a different device
1031 */ 912 */
@@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
1033 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 914 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1034 "device is in offline processing," 915 "device is in offline processing,"
1035 " don't do summary unit check handling"); 916 " don't do summary unit check handling");
1036 _unlock_all_devices_on_lcu(lcu, device, NULL); 917 goto out_unlock;
1037 spin_unlock(&lcu->lock);
1038 return;
1039 } 918 }
1040 if (lcu->suc_data.device) { 919 if (lcu->suc_data.device) {
1041 /* already scheduled or running */ 920 /* already scheduled or running */
1042 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 921 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1043 "previous instance of summary unit check worker" 922 "previous instance of summary unit check worker"
1044 " still pending"); 923 " still pending");
1045 _unlock_all_devices_on_lcu(lcu, device, NULL); 924 goto out_unlock;
1046 spin_unlock(&lcu->lock);
1047 return ;
1048 } 925 }
1049 _stop_all_devices_on_lcu(lcu); 926 _stop_all_devices_on_lcu(lcu);
1050 /* prepare for lcu_update */ 927 /* prepare for lcu_update */
1051 private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; 928 lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
1052 lcu->suc_data.reason = reason; 929 lcu->suc_data.reason = private->suc_reason;
1053 lcu->suc_data.device = device; 930 lcu->suc_data.device = device;
1054 dasd_get_device(device); 931 dasd_get_device(device);
1055 _unlock_all_devices_on_lcu(lcu, device, NULL);
1056 spin_unlock(&lcu->lock);
1057 if (!schedule_work(&lcu->suc_data.worker)) 932 if (!schedule_work(&lcu->suc_data.worker))
1058 dasd_put_device(device); 933 dasd_put_device(device);
934out_unlock:
935 spin_unlock_irqrestore(&lcu->lock, flags);
936out:
937 clear_bit(DASD_FLAG_SUC, &device->flags);
938 dasd_put_device(device);
1059}; 939};
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 75c032dcf173..c1b4ae55e129 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1682,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1682 1682
1683 /* setup work queue for validate server*/ 1683 /* setup work queue for validate server*/
1684 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); 1684 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
1685 /* setup work queue for summary unit check */
1686 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
1685 1687
1686 if (!ccw_device_is_pathgroup(device->cdev)) { 1688 if (!ccw_device_is_pathgroup(device->cdev)) {
1687 dev_warn(&device->cdev->dev, 1689 dev_warn(&device->cdev->dev,
@@ -2549,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2549 device->state == DASD_STATE_ONLINE && 2551 device->state == DASD_STATE_ONLINE &&
2550 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2552 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2551 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 2553 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
2552 /*
2553 * the state change could be caused by an alias
2554 * reassignment remove device from alias handling
2555 * to prevent new requests from being scheduled on
2556 * the wrong alias device
2557 */
2558 dasd_alias_remove_device(device);
2559
2560 /* schedule worker to reload device */ 2554 /* schedule worker to reload device */
2561 dasd_reload_device(device); 2555 dasd_reload_device(device);
2562 } 2556 }
@@ -2571,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2571 /* summary unit check */ 2565 /* summary unit check */
2572 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 2566 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
2573 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2567 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
2574 dasd_alias_handle_summary_unit_check(device, irb); 2568 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
2569 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2570 "eckd suc: device already notified");
2571 return;
2572 }
2573 sense = dasd_get_sense(irb);
2574 if (!sense) {
2575 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2576 "eckd suc: no reason code available");
2577 clear_bit(DASD_FLAG_SUC, &device->flags);
2578 return;
2579
2580 }
2581 private->suc_reason = sense[8];
2582 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
2583 "eckd handle summary unit check: reason",
2584 private->suc_reason);
2585 dasd_get_device(device);
2586 if (!schedule_work(&device->suc_work))
2587 dasd_put_device(device);
2588
2575 return; 2589 return;
2576 } 2590 }
2577 2591
@@ -4495,6 +4509,12 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
4495 struct dasd_uid uid; 4509 struct dasd_uid uid;
4496 unsigned long flags; 4510 unsigned long flags;
4497 4511
4512 /*
4513 * remove device from alias handling to prevent new requests
4514 * from being scheduled on the wrong alias device
4515 */
4516 dasd_alias_remove_device(device);
4517
4498 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4518 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
4499 old_base = private->uid.base_unit_addr; 4519 old_base = private->uid.base_unit_addr;
4500 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4520 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index f8f91ee652d3..6d9a6d3517cd 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -525,6 +525,7 @@ struct dasd_eckd_private {
525 int count; 525 int count;
526 526
527 u32 fcx_max_data; 527 u32 fcx_max_data;
528 char suc_reason;
528}; 529};
529 530
530 531
@@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
534int dasd_alias_add_device(struct dasd_device *); 535int dasd_alias_add_device(struct dasd_device *);
535int dasd_alias_remove_device(struct dasd_device *); 536int dasd_alias_remove_device(struct dasd_device *);
536struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); 537struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
537void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); 538void dasd_alias_handle_summary_unit_check(struct work_struct *);
538void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 539void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
539void dasd_alias_lcu_setup_complete(struct dasd_device *); 540void dasd_alias_lcu_setup_complete(struct dasd_device *);
540void dasd_alias_wait_for_lcu_setup(struct dasd_device *); 541void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8de29be32a56..0f0add932e7a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -470,6 +470,7 @@ struct dasd_device {
470 struct work_struct restore_device; 470 struct work_struct restore_device;
471 struct work_struct reload_device; 471 struct work_struct reload_device;
472 struct work_struct kick_validate; 472 struct work_struct kick_validate;
473 struct work_struct suc_work;
473 struct timer_list timer; 474 struct timer_list timer;
474 475
475 debug_info_t *debug_area; 476 debug_info_t *debug_area;
@@ -542,6 +543,7 @@ struct dasd_attention_data {
542#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */ 543#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
543#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */ 544#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
544#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */ 545#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */
546#define DASD_FLAG_SUC 14 /* unhandled summary unit check */
545 547
546#define DASD_SLEEPON_START_TAG ((void *) 1) 548#define DASD_SLEEPON_START_TAG ((void *) 1)
547#define DASD_SLEEPON_END_TAG ((void *) 2) 549#define DASD_SLEEPON_END_TAG ((void *) 2)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 21a67ed047e8..ff6caab8cc8b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -452,10 +452,11 @@ static int aac_slave_configure(struct scsi_device *sdev)
452 else if (depth < 2) 452 else if (depth < 2)
453 depth = 2; 453 depth = 2;
454 scsi_change_queue_depth(sdev, depth); 454 scsi_change_queue_depth(sdev, depth);
455 } else 455 } else {
456 scsi_change_queue_depth(sdev, 1); 456 scsi_change_queue_depth(sdev, 1);
457 457
458 sdev->tagged_supported = 1; 458 sdev->tagged_supported = 1;
459 }
459 460
460 return 0; 461 return 0;
461} 462}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 35968bdb4866..8fb9643fe6e3 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
289 atomic64_set(&afu->room, room); 289 atomic64_set(&afu->room, room);
290 if (room) 290 if (room)
291 goto write_rrin; 291 goto write_rrin;
292 udelay(nretry); 292 udelay(1 << nretry);
293 } while (nretry++ < MC_ROOM_RETRY_CNT); 293 } while (nretry++ < MC_ROOM_RETRY_CNT);
294 294
295 pr_err("%s: no cmd_room to send reset\n", __func__); 295 pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
303 if (rrin != 0x1) 303 if (rrin != 0x1)
304 break; 304 break;
305 /* Double delay each time */ 305 /* Double delay each time */
306 udelay(2 << nretry); 306 udelay(1 << nretry);
307 } while (nretry++ < MC_ROOM_RETRY_CNT); 307 } while (nretry++ < MC_ROOM_RETRY_CNT);
308} 308}
309 309
@@ -338,7 +338,7 @@ retry:
338 atomic64_set(&afu->room, room); 338 atomic64_set(&afu->room, room);
339 if (room) 339 if (room)
340 goto write_ioarrin; 340 goto write_ioarrin;
341 udelay(nretry); 341 udelay(1 << nretry);
342 } while (nretry++ < MC_ROOM_RETRY_CNT); 342 } while (nretry++ < MC_ROOM_RETRY_CNT);
343 343
344 dev_err(dev, "%s: no cmd_room to send 0x%X\n", 344 dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
352 * afu->room. 352 * afu->room.
353 */ 353 */
354 if (nretry++ < MC_ROOM_RETRY_CNT) { 354 if (nretry++ < MC_ROOM_RETRY_CNT) {
355 udelay(nretry); 355 udelay(1 << nretry);
356 goto retry; 356 goto retry;
357 } 357 }
358 358
@@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg)
683} 683}
684 684
685/** 685/**
686 * term_mc() - terminates the master context 686 * term_intr() - disables all AFU interrupts
687 * @cfg: Internal structure associated with the host. 687 * @cfg: Internal structure associated with the host.
688 * @level: Depth of allocation, where to begin waterfall tear down. 688 * @level: Depth of allocation, where to begin waterfall tear down.
689 * 689 *
690 * Safe to call with AFU/MC in partially allocated/initialized state. 690 * Safe to call with AFU/MC in partially allocated/initialized state.
691 */ 691 */
692static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level) 692static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
693{ 693{
694 int rc = 0;
695 struct afu *afu = cfg->afu; 694 struct afu *afu = cfg->afu;
696 struct device *dev = &cfg->dev->dev; 695 struct device *dev = &cfg->dev->dev;
697 696
698 if (!afu || !cfg->mcctx) { 697 if (!afu || !cfg->mcctx) {
699 dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n", 698 dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
700 __func__);
701 return; 699 return;
702 } 700 }
703 701
704 switch (level) { 702 switch (level) {
705 case UNDO_START:
706 rc = cxl_stop_context(cfg->mcctx);
707 BUG_ON(rc);
708 case UNMAP_THREE: 703 case UNMAP_THREE:
709 cxl_unmap_afu_irq(cfg->mcctx, 3, afu); 704 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
710 case UNMAP_TWO: 705 case UNMAP_TWO:
@@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
713 cxl_unmap_afu_irq(cfg->mcctx, 1, afu); 708 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
714 case FREE_IRQ: 709 case FREE_IRQ:
715 cxl_free_afu_irqs(cfg->mcctx); 710 cxl_free_afu_irqs(cfg->mcctx);
716 case RELEASE_CONTEXT: 711 /* fall through */
717 cfg->mcctx = NULL; 712 case UNDO_NOOP:
713 /* No action required */
714 break;
715 }
716}
717
718/**
719 * term_mc() - terminates the master context
720 * @cfg: Internal structure associated with the host.
721 * @level: Depth of allocation, where to begin waterfall tear down.
722 *
723 * Safe to call with AFU/MC in partially allocated/initialized state.
724 */
725static void term_mc(struct cxlflash_cfg *cfg)
726{
727 int rc = 0;
728 struct afu *afu = cfg->afu;
729 struct device *dev = &cfg->dev->dev;
730
731 if (!afu || !cfg->mcctx) {
732 dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
733 return;
718 } 734 }
735
736 rc = cxl_stop_context(cfg->mcctx);
737 WARN_ON(rc);
738 cfg->mcctx = NULL;
719} 739}
720 740
721/** 741/**
@@ -726,10 +746,20 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
726 */ 746 */
727static void term_afu(struct cxlflash_cfg *cfg) 747static void term_afu(struct cxlflash_cfg *cfg)
728{ 748{
749 /*
750 * Tear down is carefully orchestrated to ensure
751 * no interrupts can come in when the problem state
752 * area is unmapped.
753 *
754 * 1) Disable all AFU interrupts
755 * 2) Unmap the problem state area
756 * 3) Stop the master context
757 */
758 term_intr(cfg, UNMAP_THREE);
729 if (cfg->afu) 759 if (cfg->afu)
730 stop_afu(cfg); 760 stop_afu(cfg);
731 761
732 term_mc(cfg, UNDO_START); 762 term_mc(cfg);
733 763
734 pr_debug("%s: returning\n", __func__); 764 pr_debug("%s: returning\n", __func__);
735} 765}
@@ -1597,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg)
1597} 1627}
1598 1628
1599/** 1629/**
1600 * init_mc() - create and register as the master context 1630 * init_intr() - setup interrupt handlers for the master context
1601 * @cfg: Internal structure associated with the host. 1631 * @cfg: Internal structure associated with the host.
1602 * 1632 *
1603 * Return: 0 on success, -errno on failure 1633 * Return: 0 on success, -errno on failure
1604 */ 1634 */
1605static int init_mc(struct cxlflash_cfg *cfg) 1635static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1636 struct cxl_context *ctx)
1606{ 1637{
1607 struct cxl_context *ctx;
1608 struct device *dev = &cfg->dev->dev;
1609 struct afu *afu = cfg->afu; 1638 struct afu *afu = cfg->afu;
1639 struct device *dev = &cfg->dev->dev;
1610 int rc = 0; 1640 int rc = 0;
1611 enum undo_level level; 1641 enum undo_level level = UNDO_NOOP;
1612
1613 ctx = cxl_get_context(cfg->dev);
1614 if (unlikely(!ctx))
1615 return -ENOMEM;
1616 cfg->mcctx = ctx;
1617
1618 /* Set it up as a master with the CXL */
1619 cxl_set_master(ctx);
1620
1621 /* During initialization reset the AFU to start from a clean slate */
1622 rc = cxl_afu_reset(cfg->mcctx);
1623 if (unlikely(rc)) {
1624 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1625 __func__, rc);
1626 level = RELEASE_CONTEXT;
1627 goto out;
1628 }
1629 1642
1630 rc = cxl_allocate_afu_irqs(ctx, 3); 1643 rc = cxl_allocate_afu_irqs(ctx, 3);
1631 if (unlikely(rc)) { 1644 if (unlikely(rc)) {
1632 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", 1645 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1633 __func__, rc); 1646 __func__, rc);
1634 level = RELEASE_CONTEXT; 1647 level = UNDO_NOOP;
1635 goto out; 1648 goto out;
1636 } 1649 }
1637 1650
@@ -1661,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg)
1661 level = UNMAP_TWO; 1674 level = UNMAP_TWO;
1662 goto out; 1675 goto out;
1663 } 1676 }
1677out:
1678 return level;
1679}
1664 1680
1665 rc = 0; 1681/**
1682 * init_mc() - create and register as the master context
1683 * @cfg: Internal structure associated with the host.
1684 *
1685 * Return: 0 on success, -errno on failure
1686 */
1687static int init_mc(struct cxlflash_cfg *cfg)
1688{
1689 struct cxl_context *ctx;
1690 struct device *dev = &cfg->dev->dev;
1691 int rc = 0;
1692 enum undo_level level;
1693
1694 ctx = cxl_get_context(cfg->dev);
1695 if (unlikely(!ctx)) {
1696 rc = -ENOMEM;
1697 goto ret;
1698 }
1699 cfg->mcctx = ctx;
1700
1701 /* Set it up as a master with the CXL */
1702 cxl_set_master(ctx);
1703
1704 /* During initialization reset the AFU to start from a clean slate */
1705 rc = cxl_afu_reset(cfg->mcctx);
1706 if (unlikely(rc)) {
1707 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1708 __func__, rc);
1709 goto ret;
1710 }
1711
1712 level = init_intr(cfg, ctx);
1713 if (unlikely(level)) {
1714 dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
1715 __func__, rc);
1716 goto out;
1717 }
1666 1718
1667 /* This performs the equivalent of the CXL_IOCTL_START_WORK. 1719 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1668 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process 1720 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1678,7 +1730,7 @@ ret:
1678 pr_debug("%s: returning rc=%d\n", __func__, rc); 1730 pr_debug("%s: returning rc=%d\n", __func__, rc);
1679 return rc; 1731 return rc;
1680out: 1732out:
1681 term_mc(cfg, level); 1733 term_intr(cfg, level);
1682 goto ret; 1734 goto ret;
1683} 1735}
1684 1736
@@ -1751,7 +1803,8 @@ out:
1751err2: 1803err2:
1752 kref_put(&afu->mapcount, afu_unmap); 1804 kref_put(&afu->mapcount, afu_unmap);
1753err1: 1805err1:
1754 term_mc(cfg, UNDO_START); 1806 term_intr(cfg, UNMAP_THREE);
1807 term_mc(cfg);
1755 goto out; 1808 goto out;
1756} 1809}
1757 1810
@@ -2488,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2488 if (unlikely(rc)) 2541 if (unlikely(rc))
2489 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", 2542 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2490 __func__, rc); 2543 __func__, rc);
2491 stop_afu(cfg); 2544 term_afu(cfg);
2492 term_mc(cfg, UNDO_START);
2493 return PCI_ERS_RESULT_NEED_RESET; 2545 return PCI_ERS_RESULT_NEED_RESET;
2494 case pci_channel_io_perm_failure: 2546 case pci_channel_io_perm_failure:
2495 cfg->state = STATE_FAILTERM; 2547 cfg->state = STATE_FAILTERM;
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0faed422c7f4..eb9d8f730b38 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -79,12 +79,11 @@
79#define WWPN_BUF_LEN (WWPN_LEN + 1) 79#define WWPN_BUF_LEN (WWPN_LEN + 1)
80 80
81enum undo_level { 81enum undo_level {
82 RELEASE_CONTEXT = 0, 82 UNDO_NOOP = 0,
83 FREE_IRQ, 83 FREE_IRQ,
84 UNMAP_ONE, 84 UNMAP_ONE,
85 UNMAP_TWO, 85 UNMAP_TWO,
86 UNMAP_THREE, 86 UNMAP_THREE
87 UNDO_START
88}; 87};
89 88
90struct dev_dependent_vals { 89struct dev_dependent_vals {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a404a41e871c..8eaed0522aa3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1112,9 +1112,9 @@ static void alua_bus_detach(struct scsi_device *sdev)
1112 h->sdev = NULL; 1112 h->sdev = NULL;
1113 spin_unlock(&h->pg_lock); 1113 spin_unlock(&h->pg_lock);
1114 if (pg) { 1114 if (pg) {
1115 spin_lock(&pg->lock); 1115 spin_lock_irq(&pg->lock);
1116 list_del_rcu(&h->node); 1116 list_del_rcu(&h->node);
1117 spin_unlock(&pg->lock); 1117 spin_unlock_irq(&pg->lock);
1118 kref_put(&pg->kref, release_port_group); 1118 kref_put(&pg->kref, release_port_group);
1119 } 1119 }
1120 sdev->handler_data = NULL; 1120 sdev->handler_data = NULL;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e4db5fb3239a..8c44b9c424af 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5030,7 +5030,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5030static int 5030static int
5031_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 5031_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5032{ 5032{
5033 int r, i; 5033 int r, i, index;
5034 unsigned long flags; 5034 unsigned long flags;
5035 u32 reply_address; 5035 u32 reply_address;
5036 u16 smid; 5036 u16 smid;
@@ -5039,8 +5039,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5039 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; 5039 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
5040 u8 hide_flag; 5040 u8 hide_flag;
5041 struct adapter_reply_queue *reply_q; 5041 struct adapter_reply_queue *reply_q;
5042 long reply_post_free; 5042 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
5043 u32 reply_post_free_sz, index = 0;
5044 5043
5045 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5044 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5046 __func__)); 5045 __func__));
@@ -5124,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5124 _base_assign_reply_queues(ioc); 5123 _base_assign_reply_queues(ioc);
5125 5124
5126 /* initialize Reply Post Free Queue */ 5125 /* initialize Reply Post Free Queue */
5127 reply_post_free_sz = ioc->reply_post_queue_depth * 5126 index = 0;
5128 sizeof(Mpi2DefaultReplyDescriptor_t); 5127 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
5129 reply_post_free = (long)ioc->reply_post[index].reply_post_free;
5130 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 5128 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5129 /*
5130 * If RDPQ is enabled, switch to the next allocation.
5131 * Otherwise advance within the contiguous region.
5132 */
5133 if (ioc->rdpq_array_enable) {
5134 reply_q->reply_post_free =
5135 ioc->reply_post[index++].reply_post_free;
5136 } else {
5137 reply_q->reply_post_free = reply_post_free_contig;
5138 reply_post_free_contig += ioc->reply_post_queue_depth;
5139 }
5140
5131 reply_q->reply_post_host_index = 0; 5141 reply_q->reply_post_host_index = 0;
5132 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
5133 reply_post_free;
5134 for (i = 0; i < ioc->reply_post_queue_depth; i++) 5142 for (i = 0; i < ioc->reply_post_queue_depth; i++)
5135 reply_q->reply_post_free[i].Words = 5143 reply_q->reply_post_free[i].Words =
5136 cpu_to_le64(ULLONG_MAX); 5144 cpu_to_le64(ULLONG_MAX);
5137 if (!_base_is_controller_msix_enabled(ioc)) 5145 if (!_base_is_controller_msix_enabled(ioc))
5138 goto skip_init_reply_post_free_queue; 5146 goto skip_init_reply_post_free_queue;
5139 /*
5140 * If RDPQ is enabled, switch to the next allocation.
5141 * Otherwise advance within the contiguous region.
5142 */
5143 if (ioc->rdpq_array_enable)
5144 reply_post_free = (long)
5145 ioc->reply_post[++index].reply_post_free;
5146 else
5147 reply_post_free += reply_post_free_sz;
5148 } 5147 }
5149 skip_init_reply_post_free_queue: 5148 skip_init_reply_post_free_queue:
5150 5149
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b1bf42b93fcc..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
784 int pg83_supported = 0; 784 int pg83_supported = 0;
785 unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL; 785 unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
786 786
787 if (sdev->skip_vpd_pages) 787 if (!scsi_device_supports_vpd(sdev))
788 return; 788 return;
789
789retry_pg0: 790retry_pg0:
790 vpd_buf = kmalloc(vpd_len, GFP_KERNEL); 791 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
791 if (!vpd_buf) 792 if (!vpd_buf)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 92ffd2406f97..2b642b145be1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,7 @@ const char *scsi_host_state_name(enum scsi_host_state state)
81 return name; 81 return name;
82} 82}
83 83
84#ifdef CONFIG_SCSI_DH
84static const struct { 85static const struct {
85 unsigned char value; 86 unsigned char value;
86 char *name; 87 char *name;
@@ -94,7 +95,7 @@ static const struct {
94 { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, 95 { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
95}; 96};
96 97
97const char *scsi_access_state_name(unsigned char state) 98static const char *scsi_access_state_name(unsigned char state)
98{ 99{
99 int i; 100 int i;
100 char *name = NULL; 101 char *name = NULL;
@@ -107,6 +108,7 @@ const char *scsi_access_state_name(unsigned char state)
107 } 108 }
108 return name; 109 return name;
109} 110}
111#endif
110 112
111static int check_set(unsigned long long *val, char *src) 113static int check_set(unsigned long long *val, char *src)
112{ 114{
@@ -226,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
226} 228}
227 229
228/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ 230/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
229struct device_attribute dev_attr_hstate = 231static struct device_attribute dev_attr_hstate =
230 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); 232 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
231 233
232static ssize_t 234static ssize_t
@@ -401,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
401 NULL 403 NULL
402}; 404};
403 405
404struct attribute_group scsi_shost_attr_group = { 406static struct attribute_group scsi_shost_attr_group = {
405 .attrs = scsi_sysfs_shost_attrs, 407 .attrs = scsi_sysfs_shost_attrs,
406}; 408};
407 409
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a5457ac9cdb..f52b74cf8d1e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1275 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1275 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1276 struct scsi_device *sdp = sdkp->device; 1276 struct scsi_device *sdp = sdkp->device;
1277 struct Scsi_Host *host = sdp->host; 1277 struct Scsi_Host *host = sdp->host;
1278 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
1278 int diskinfo[4]; 1279 int diskinfo[4];
1279 1280
1280 /* default to most commonly used values */ 1281 /* default to most commonly used values */
1281 diskinfo[0] = 0x40; /* 1 << 6 */ 1282 diskinfo[0] = 0x40; /* 1 << 6 */
1282 diskinfo[1] = 0x20; /* 1 << 5 */ 1283 diskinfo[1] = 0x20; /* 1 << 5 */
1283 diskinfo[2] = sdkp->capacity >> 11; 1284 diskinfo[2] = capacity >> 11;
1284 1285
1285 /* override with calculated, extended default, or driver values */ 1286 /* override with calculated, extended default, or driver values */
1286 if (host->hostt->bios_param) 1287 if (host->hostt->bios_param)
1287 host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo); 1288 host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
1288 else 1289 else
1289 scsicam_bios_param(bdev, sdkp->capacity, diskinfo); 1290 scsicam_bios_param(bdev, capacity, diskinfo);
1290 1291
1291 geo->heads = diskinfo[0]; 1292 geo->heads = diskinfo[0];
1292 geo->sectors = diskinfo[1]; 1293 geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
2337 if (sdkp->capacity > 0xffffffff) 2338 if (sdkp->capacity > 0xffffffff)
2338 sdp->use_16_for_rw = 1; 2339 sdp->use_16_for_rw = 1;
2339 2340
2340 /* Rescale capacity to 512-byte units */
2341 if (sector_size == 4096)
2342 sdkp->capacity <<= 3;
2343 else if (sector_size == 2048)
2344 sdkp->capacity <<= 2;
2345 else if (sector_size == 1024)
2346 sdkp->capacity <<= 1;
2347
2348 blk_queue_physical_block_size(sdp->request_queue, 2341 blk_queue_physical_block_size(sdp->request_queue,
2349 sdkp->physical_block_size); 2342 sdkp->physical_block_size);
2350 sdkp->device->sector_size = sector_size; 2343 sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2795 sdkp->ws10 = 1; 2788 sdkp->ws10 = 1;
2796} 2789}
2797 2790
2798static int sd_try_extended_inquiry(struct scsi_device *sdp)
2799{
2800 /* Attempt VPD inquiry if the device blacklist explicitly calls
2801 * for it.
2802 */
2803 if (sdp->try_vpd_pages)
2804 return 1;
2805 /*
2806 * Although VPD inquiries can go to SCSI-2 type devices,
2807 * some USB ones crash on receiving them, and the pages
2808 * we currently ask for are for SPC-3 and beyond
2809 */
2810 if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
2811 return 1;
2812 return 0;
2813}
2814
2815static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
2816{
2817 return blocks << (ilog2(sdev->sector_size) - 9);
2818}
2819
2820/** 2791/**
2821 * sd_revalidate_disk - called the first time a new disk is seen, 2792 * sd_revalidate_disk - called the first time a new disk is seen,
2822 * performs disk spin up, read_capacity, etc. 2793 * performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2856 if (sdkp->media_present) { 2827 if (sdkp->media_present) {
2857 sd_read_capacity(sdkp, buffer); 2828 sd_read_capacity(sdkp, buffer);
2858 2829
2859 if (sd_try_extended_inquiry(sdp)) { 2830 if (scsi_device_supports_vpd(sdp)) {
2860 sd_read_block_provisioning(sdkp); 2831 sd_read_block_provisioning(sdkp);
2861 sd_read_block_limits(sdkp); 2832 sd_read_block_limits(sdkp);
2862 sd_read_block_characteristics(sdkp); 2833 sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2891 if (sdkp->opt_xfer_blocks && 2862 if (sdkp->opt_xfer_blocks &&
2892 sdkp->opt_xfer_blocks <= dev_max && 2863 sdkp->opt_xfer_blocks <= dev_max &&
2893 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && 2864 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2894 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) 2865 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
2895 rw_max = q->limits.io_opt = 2866 rw_max = q->limits.io_opt =
2896 sdkp->opt_xfer_blocks * sdp->sector_size; 2867 sdkp->opt_xfer_blocks * sdp->sector_size;
2897 else 2868 else
@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2900 /* Combine with controller limits */ 2871 /* Combine with controller limits */
2901 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 2872 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
2902 2873
2903 set_capacity(disk, sdkp->capacity); 2874 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
2904 sd_config_write_same(sdkp); 2875 sd_config_write_same(sdkp);
2905 kfree(buffer); 2876 kfree(buffer);
2906 2877
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
65 struct device dev; 65 struct device dev;
66 struct gendisk *disk; 66 struct gendisk *disk;
67 atomic_t openers; 67 atomic_t openers;
68 sector_t capacity; /* size in 512-byte sectors */ 68 sector_t capacity; /* size in logical blocks */
69 u32 max_xfer_blocks; 69 u32 max_xfer_blocks;
70 u32 opt_xfer_blocks; 70 u32 opt_xfer_blocks;
71 u32 max_ws_blocks; 71 u32 max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
146 return 0; 146 return 0;
147} 147}
148 148
149static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
150{
151 return blocks << (ilog2(sdev->sector_size) - 9);
152}
153
149/* 154/*
150 * A DIF-capable target device can be formatted with different 155 * A DIF-capable target device can be formatted with different
151 * protection schemes. Currently 0 through 3 are defined: 156 * protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71c5138ddf94..dbf1882cfbac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4941 out_unmap: 4941 out_unmap:
4942 if (res > 0) { 4942 if (res > 0) {
4943 for (j=0; j < res; j++) 4943 for (j=0; j < res; j++)
4944 page_cache_release(pages[j]); 4944 put_page(pages[j]);
4945 res = 0; 4945 res = 0;
4946 } 4946 }
4947 kfree(pages); 4947 kfree(pages);
@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
4963 /* FIXME: cache flush missing for rw==READ 4963 /* FIXME: cache flush missing for rw==READ
4964 * FIXME: call the correct reference counting function 4964 * FIXME: call the correct reference counting function
4965 */ 4965 */
4966 page_cache_release(page); 4966 put_page(page);
4967 } 4967 }
4968 kfree(STbp->mapped_pages); 4968 kfree(STbp->mapped_pages);
4969 STbp->mapped_pages = NULL; 4969 STbp->mapped_pages = NULL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e7a19be87c38..50769078e72e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
211 struct spi_transfer *transfer) 211 struct spi_transfer *transfer)
212{ 212{
213 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 213 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
214 unsigned int bpw = transfer->bits_per_word; 214 unsigned int bpw;
215 215
216 if (!master->dma_rx) 216 if (!master->dma_rx)
217 return false; 217 return false;
218 218
219 if (!transfer)
220 return false;
221
222 bpw = transfer->bits_per_word;
219 if (!bpw) 223 if (!bpw)
220 bpw = spi->bits_per_word; 224 bpw = spi->bits_per_word;
221 225
@@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
333static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, 337static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
334 struct spi_imx_config *config) 338 struct spi_imx_config *config)
335{ 339{
336 u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0; 340 u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
337 u32 clk = config->speed_hz, delay, reg; 341 u32 clk = config->speed_hz, delay, reg;
342 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
338 343
339 /* 344 /*
340 * The hardware seems to have a race condition when changing modes. The 345 * The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
358 363
359 if (config->mode & SPI_CPHA) 364 if (config->mode & SPI_CPHA)
360 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs); 365 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
366 else
367 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
361 368
362 if (config->mode & SPI_CPOL) { 369 if (config->mode & SPI_CPOL) {
363 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs); 370 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
364 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs); 371 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
372 } else {
373 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
374 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
365 } 375 }
366 if (config->mode & SPI_CS_HIGH) 376 if (config->mode & SPI_CS_HIGH)
367 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs); 377 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
378 else
379 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
368 380
369 if (spi_imx->usedma) 381 if (spi_imx->usedma)
370 ctrl |= MX51_ECSPI_CTRL_SMC; 382 ctrl |= MX51_ECSPI_CTRL_SMC;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8bef46..43a02e377b3b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
423 423
424 if (mcspi_dma->dma_tx) { 424 if (mcspi_dma->dma_tx) {
425 struct dma_async_tx_descriptor *tx; 425 struct dma_async_tx_descriptor *tx;
426 struct scatterlist sg;
427 426
428 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); 427 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
429 428
430 sg_init_table(&sg, 1); 429 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
431 sg_dma_address(&sg) = xfer->tx_dma; 430 xfer->tx_sg.nents, DMA_MEM_TO_DEV,
432 sg_dma_len(&sg) = xfer->len; 431 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
433
434 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
435 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
436 if (tx) { 432 if (tx) {
437 tx->callback = omap2_mcspi_tx_callback; 433 tx->callback = omap2_mcspi_tx_callback;
438 tx->callback_param = spi; 434 tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
478 474
479 if (mcspi_dma->dma_rx) { 475 if (mcspi_dma->dma_rx) {
480 struct dma_async_tx_descriptor *tx; 476 struct dma_async_tx_descriptor *tx;
481 struct scatterlist sg;
482 477
483 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); 478 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
484 479
485 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) 480 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
486 dma_count -= es; 481 dma_count -= es;
487 482
488 sg_init_table(&sg, 1); 483 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
489 sg_dma_address(&sg) = xfer->rx_dma; 484 xfer->rx_sg.nents, DMA_DEV_TO_MEM,
490 sg_dma_len(&sg) = dma_count; 485 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
491
492 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
493 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
494 DMA_CTRL_ACK);
495 if (tx) { 486 if (tx) {
496 tx->callback = omap2_mcspi_rx_callback; 487 tx->callback = omap2_mcspi_rx_callback;
497 tx->callback_param = spi; 488 tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
505 omap2_mcspi_set_dma_req(spi, 1, 1); 496 omap2_mcspi_set_dma_req(spi, 1, 1);
506 497
507 wait_for_completion(&mcspi_dma->dma_rx_completion); 498 wait_for_completion(&mcspi_dma->dma_rx_completion);
508 dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
509 DMA_FROM_DEVICE);
510 499
511 if (mcspi->fifo_depth > 0) 500 if (mcspi->fifo_depth > 0)
512 return count; 501 return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
619 608
620 if (tx != NULL) { 609 if (tx != NULL) {
621 wait_for_completion(&mcspi_dma->dma_tx_completion); 610 wait_for_completion(&mcspi_dma->dma_tx_completion);
622 dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
623 DMA_TO_DEVICE);
624 611
625 if (mcspi->fifo_depth > 0) { 612 if (mcspi->fifo_depth > 0) {
626 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS; 613 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
1087 gpio_free(spi->cs_gpio); 1074 gpio_free(spi->cs_gpio);
1088} 1075}
1089 1076
1077static bool omap2_mcspi_can_dma(struct spi_master *master,
1078 struct spi_device *spi,
1079 struct spi_transfer *xfer)
1080{
1081 if (xfer->len < DMA_MIN_BYTES)
1082 return false;
1083
1084 return true;
1085}
1086
1090static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi, 1087static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1091 struct spi_device *spi, struct spi_transfer *t) 1088 struct spi_device *spi, struct spi_transfer *t)
1092{ 1089{
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
1268 return -EINVAL; 1265 return -EINVAL;
1269 } 1266 }
1270 1267
1271 if (len < DMA_MIN_BYTES)
1272 goto skip_dma_map;
1273
1274 if (mcspi_dma->dma_tx && tx_buf != NULL) {
1275 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1276 len, DMA_TO_DEVICE);
1277 if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1278 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1279 'T', len);
1280 return -EINVAL;
1281 }
1282 }
1283 if (mcspi_dma->dma_rx && rx_buf != NULL) {
1284 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1285 DMA_FROM_DEVICE);
1286 if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1287 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1288 'R', len);
1289 if (tx_buf != NULL)
1290 dma_unmap_single(mcspi->dev, t->tx_dma,
1291 len, DMA_TO_DEVICE);
1292 return -EINVAL;
1293 }
1294 }
1295
1296skip_dma_map:
1297 return omap2_mcspi_work_one(mcspi, spi, t); 1268 return omap2_mcspi_work_one(mcspi, spi, t);
1298} 1269}
1299 1270
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1377 master->transfer_one = omap2_mcspi_transfer_one; 1348 master->transfer_one = omap2_mcspi_transfer_one;
1378 master->set_cs = omap2_mcspi_set_cs; 1349 master->set_cs = omap2_mcspi_set_cs;
1379 master->cleanup = omap2_mcspi_cleanup; 1350 master->cleanup = omap2_mcspi_cleanup;
1351 master->can_dma = omap2_mcspi_can_dma;
1380 master->dev.of_node = node; 1352 master->dev.of_node = node;
1381 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; 1353 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
1382 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; 1354 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 8f50a4020f6f..6c6c0013ec7a 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
534 if (WARN_ON(rs->speed > MAX_SCLK_OUT)) 534 if (WARN_ON(rs->speed > MAX_SCLK_OUT))
535 rs->speed = MAX_SCLK_OUT; 535 rs->speed = MAX_SCLK_OUT;
536 536
537 /* the minimum divsor is 2 */ 537 /* the minimum divisor is 2 */
538 if (rs->max_freq < 2 * rs->speed) { 538 if (rs->max_freq < 2 * rs->speed) {
539 clk_set_rate(rs->spiclk, 2 * rs->speed); 539 clk_set_rate(rs->spiclk, 2 * rs->speed);
540 rs->max_freq = clk_get_rate(rs->spiclk); 540 rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev)
730 master->transfer_one = rockchip_spi_transfer_one; 730 master->transfer_one = rockchip_spi_transfer_one;
731 master->handle_err = rockchip_spi_handle_err; 731 master->handle_err = rockchip_spi_handle_err;
732 732
733 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); 733 rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
734 if (IS_ERR_OR_NULL(rs->dma_tx.ch)) { 734 if (IS_ERR(rs->dma_tx.ch)) {
735 /* Check tx to see if we need defer probing driver */ 735 /* Check tx to see if we need defer probing driver */
736 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) { 736 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
737 ret = -EPROBE_DEFER; 737 ret = -EPROBE_DEFER;
738 goto err_get_fifo_len; 738 goto err_get_fifo_len;
739 } 739 }
740 dev_warn(rs->dev, "Failed to request TX DMA channel\n"); 740 dev_warn(rs->dev, "Failed to request TX DMA channel\n");
741 rs->dma_tx.ch = NULL;
741 } 742 }
742 743
743 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx"); 744 rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
744 if (!rs->dma_rx.ch) { 745 if (IS_ERR(rs->dma_rx.ch)) {
745 if (rs->dma_tx.ch) { 746 if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
746 dma_release_channel(rs->dma_tx.ch); 747 dma_release_channel(rs->dma_tx.ch);
747 rs->dma_tx.ch = NULL; 748 rs->dma_tx.ch = NULL;
749 ret = -EPROBE_DEFER;
750 goto err_get_fifo_len;
748 } 751 }
749 dev_warn(rs->dev, "Failed to request RX DMA channel\n"); 752 dev_warn(rs->dev, "Failed to request RX DMA channel\n");
753 rs->dma_rx.ch = NULL;
750 } 754 }
751 755
752 if (rs->dma_tx.ch && rs->dma_rx.ch) { 756 if (rs->dma_tx.ch && rs->dma_rx.ch) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index de2f2f90d799..0239b45eed92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
1209 struct spi_master *master = 1209 struct spi_master *master =
1210 container_of(work, struct spi_master, pump_messages); 1210 container_of(work, struct spi_master, pump_messages);
1211 1211
1212 __spi_pump_messages(master, true, false); 1212 __spi_pump_messages(master, true, master->bus_lock_flag);
1213} 1213}
1214 1214
1215static int spi_init_queue(struct spi_master *master) 1215static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2853 */ 2853 */
2854int spi_sync(struct spi_device *spi, struct spi_message *message) 2854int spi_sync(struct spi_device *spi, struct spi_message *message)
2855{ 2855{
2856 return __spi_sync(spi, message, 0); 2856 return __spi_sync(spi, message, spi->master->bus_lock_flag);
2857} 2857}
2858EXPORT_SYMBOL_GPL(spi_sync); 2858EXPORT_SYMBOL_GPL(spi_sync);
2859 2859
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index cf84581287b9..5bac28a3944e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,6 +30,8 @@ source "drivers/staging/wlan-ng/Kconfig"
30 30
31source "drivers/staging/comedi/Kconfig" 31source "drivers/staging/comedi/Kconfig"
32 32
33source "drivers/staging/olpc_dcon/Kconfig"
34
33source "drivers/staging/rtl8192u/Kconfig" 35source "drivers/staging/rtl8192u/Kconfig"
34 36
35source "drivers/staging/rtl8192e/Kconfig" 37source "drivers/staging/rtl8192e/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 7d6448d20464..a954242b0f2c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,6 +4,7 @@ obj-y += media/
4obj-$(CONFIG_SLICOSS) += slicoss/ 4obj-$(CONFIG_SLICOSS) += slicoss/
5obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 5obj-$(CONFIG_PRISM2_USB) += wlan-ng/
6obj-$(CONFIG_COMEDI) += comedi/ 6obj-$(CONFIG_COMEDI) += comedi/
7obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
7obj-$(CONFIG_RTL8192U) += rtl8192u/ 8obj-$(CONFIG_RTL8192U) += rtl8192u/
8obj-$(CONFIG_RTL8192E) += rtl8192e/ 9obj-$(CONFIG_RTL8192E) += rtl8192e/
9obj-$(CONFIG_R8712U) += rtl8712/ 10obj-$(CONFIG_R8712U) += rtl8712/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 98430e7108c1..455c54d0d17c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -85,7 +85,6 @@ struct ptldebug_header {
85#define PH_FLAG_FIRST_RECORD 1 85#define PH_FLAG_FIRST_RECORD 1
86 86
87/* Debugging subsystems (32 bits, non-overlapping) */ 87/* Debugging subsystems (32 bits, non-overlapping) */
88/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
89#define S_UNDEFINED 0x00000001 88#define S_UNDEFINED 0x00000001
90#define S_MDC 0x00000002 89#define S_MDC 0x00000002
91#define S_MDS 0x00000004 90#define S_MDS 0x00000004
@@ -118,10 +117,14 @@ struct ptldebug_header {
118#define S_MGS 0x20000000 117#define S_MGS 0x20000000
119#define S_FID 0x40000000 /* b_new_cmd */ 118#define S_FID 0x40000000 /* b_new_cmd */
120#define S_FLD 0x80000000 /* b_new_cmd */ 119#define S_FLD 0x80000000 /* b_new_cmd */
121/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */ 120
121#define LIBCFS_DEBUG_SUBSYS_NAMES { \
122 "undefined", "mdc", "mds", "osc", "ost", "class", "log", \
123 "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", "", \
124 "echo", "ldlm", "lov", "lquota", "osd", "lfsck", "", "", "lmv", \
125 "", "sec", "gss", "", "mgc", "mgs", "fid", "fld", NULL }
122 126
123/* Debugging masks (32 bits, non-overlapping) */ 127/* Debugging masks (32 bits, non-overlapping) */
124/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
125#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */ 128#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
126#define D_INODE 0x00000002 129#define D_INODE 0x00000002
127#define D_SUPER 0x00000004 130#define D_SUPER 0x00000004
@@ -151,9 +154,14 @@ struct ptldebug_header {
151#define D_QUOTA 0x04000000 154#define D_QUOTA 0x04000000
152#define D_SEC 0x08000000 155#define D_SEC 0x08000000
153#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */ 156#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
154/* keep these in sync with lnet/{utils,libcfs}/debug.c */ 157#define D_HSM 0x20000000
155 158
156#define D_HSM D_TRACE 159#define LIBCFS_DEBUG_MASKS_NAMES { \
160 "trace", "inode", "super", "ext2", "malloc", "cache", "info", \
161 "ioctl", "neterror", "net", "warning", "buffs", "other", \
162 "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \
163 "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \
164 "console", "quota", "sec", "lfsck", "hsm", NULL }
157 165
158#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE) 166#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
159 167
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 6f7a276b87b7..ac4e8cfe6c8c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -46,7 +46,7 @@
46#if BITS_PER_LONG == 32 46#if BITS_PER_LONG == 32
47/* limit to lowmem on 32-bit systems */ 47/* limit to lowmem on 32-bit systems */
48#define NUM_CACHEPAGES \ 48#define NUM_CACHEPAGES \
49 min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4) 49 min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
50#else 50#else
51#define NUM_CACHEPAGES totalram_pages 51#define NUM_CACHEPAGES totalram_pages
52#endif 52#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index e18b57b5c64e..2fd2a9690a34 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
88} while (0) 88} while (0)
89 89
90#ifndef LIBCFS_VMALLOC_SIZE 90#ifndef LIBCFS_VMALLOC_SIZE
91#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */ 91#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
92#endif 92#endif
93 93
94#define LIBCFS_ALLOC_PRE(size, mask) \ 94#define LIBCFS_ALLOC_PRE(size, mask) \
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c341c5..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@ typedef struct {
514 /** 514 /**
515 * Starting offset of the fragment within the page. Note that the 515 * Starting offset of the fragment within the page. Note that the
516 * end of the fragment must not pass the end of the page; i.e., 516 * end of the fragment must not pass the end of the page; i.e.,
517 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. 517 * kiov_len + kiov_offset <= PAGE_SIZE.
518 */ 518 */
519 unsigned int kiov_offset; 519 unsigned int kiov_offset;
520} lnet_kiov_t; 520} lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index acebc6fe0dbe..964b4e338fe0 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
291 291
292 for (nob = i = 0; i < niov; i++) { 292 for (nob = i = 0; i < niov; i++) {
293 if ((kiov[i].kiov_offset && i > 0) || 293 if ((kiov[i].kiov_offset && i > 0) ||
294 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) 294 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
295 return NULL; 295 return NULL;
296 296
297 pages[i] = kiov[i].kiov_page; 297 pages[i] = kiov[i].kiov_page;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c90e5102fe06..8c260c3d5da4 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -232,130 +232,24 @@ int libcfs_panic_in_progress;
232static const char * 232static const char *
233libcfs_debug_subsys2str(int subsys) 233libcfs_debug_subsys2str(int subsys)
234{ 234{
235 switch (1 << subsys) { 235 static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
236 default: 236
237 if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
237 return NULL; 238 return NULL;
238 case S_UNDEFINED: 239
239 return "undefined"; 240 return libcfs_debug_subsystems[subsys];
240 case S_MDC:
241 return "mdc";
242 case S_MDS:
243 return "mds";
244 case S_OSC:
245 return "osc";
246 case S_OST:
247 return "ost";
248 case S_CLASS:
249 return "class";
250 case S_LOG:
251 return "log";
252 case S_LLITE:
253 return "llite";
254 case S_RPC:
255 return "rpc";
256 case S_LNET:
257 return "lnet";
258 case S_LND:
259 return "lnd";
260 case S_PINGER:
261 return "pinger";
262 case S_FILTER:
263 return "filter";
264 case S_ECHO:
265 return "echo";
266 case S_LDLM:
267 return "ldlm";
268 case S_LOV:
269 return "lov";
270 case S_LQUOTA:
271 return "lquota";
272 case S_OSD:
273 return "osd";
274 case S_LFSCK:
275 return "lfsck";
276 case S_LMV:
277 return "lmv";
278 case S_SEC:
279 return "sec";
280 case S_GSS:
281 return "gss";
282 case S_MGC:
283 return "mgc";
284 case S_MGS:
285 return "mgs";
286 case S_FID:
287 return "fid";
288 case S_FLD:
289 return "fld";
290 }
291} 241}
292 242
293/* libcfs_debug_token2mask() expects the returned string in lower-case */ 243/* libcfs_debug_token2mask() expects the returned string in lower-case */
294static const char * 244static const char *
295libcfs_debug_dbg2str(int debug) 245libcfs_debug_dbg2str(int debug)
296{ 246{
297 switch (1 << debug) { 247 static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
298 default: 248
249 if (debug >= ARRAY_SIZE(libcfs_debug_masks))
299 return NULL; 250 return NULL;
300 case D_TRACE: 251
301 return "trace"; 252 return libcfs_debug_masks[debug];
302 case D_INODE:
303 return "inode";
304 case D_SUPER:
305 return "super";
306 case D_EXT2:
307 return "ext2";
308 case D_MALLOC:
309 return "malloc";
310 case D_CACHE:
311 return "cache";
312 case D_INFO:
313 return "info";
314 case D_IOCTL:
315 return "ioctl";
316 case D_NETERROR:
317 return "neterror";
318 case D_NET:
319 return "net";
320 case D_WARNING:
321 return "warning";
322 case D_BUFFS:
323 return "buffs";
324 case D_OTHER:
325 return "other";
326 case D_DENTRY:
327 return "dentry";
328 case D_NETTRACE:
329 return "nettrace";
330 case D_PAGE:
331 return "page";
332 case D_DLMTRACE:
333 return "dlmtrace";
334 case D_ERROR:
335 return "error";
336 case D_EMERG:
337 return "emerg";
338 case D_HA:
339 return "ha";
340 case D_RPCTRACE:
341 return "rpctrace";
342 case D_VFSTRACE:
343 return "vfstrace";
344 case D_READA:
345 return "reada";
346 case D_MMAP:
347 return "mmap";
348 case D_CONFIG:
349 return "config";
350 case D_CONSOLE:
351 return "console";
352 case D_QUOTA:
353 return "quota";
354 case D_SEC:
355 return "sec";
356 case D_LFSCK:
357 return "lfsck";
358 }
359} 253}
360 254
361int 255int
@@ -517,7 +411,7 @@ int libcfs_debug_init(unsigned long bufsize)
517 max = TCD_MAX_PAGES; 411 max = TCD_MAX_PAGES;
518 } else { 412 } else {
519 max = max / num_possible_cpus(); 413 max = max / num_possible_cpus();
520 max <<= (20 - PAGE_CACHE_SHIFT); 414 max <<= (20 - PAGE_SHIFT);
521 } 415 }
522 rc = cfs_tracefile_init(max); 416 rc = cfs_tracefile_init(max);
523 417
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 5169597e2c34..7739b9469c5a 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
182 if (tcd->tcd_cur_pages > 0) { 182 if (tcd->tcd_cur_pages > 0) {
183 __LASSERT(!list_empty(&tcd->tcd_pages)); 183 __LASSERT(!list_empty(&tcd->tcd_pages));
184 tage = cfs_tage_from_list(tcd->tcd_pages.prev); 184 tage = cfs_tage_from_list(tcd->tcd_pages.prev);
185 if (tage->used + len <= PAGE_CACHE_SIZE) 185 if (tage->used + len <= PAGE_SIZE)
186 return tage; 186 return tage;
187 } 187 }
188 188
@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
260 * from here: this will lead to infinite recursion. 260 * from here: this will lead to infinite recursion.
261 */ 261 */
262 262
263 if (len > PAGE_CACHE_SIZE) { 263 if (len > PAGE_SIZE) {
264 pr_err("cowardly refusing to write %lu bytes in a page\n", len); 264 pr_err("cowardly refusing to write %lu bytes in a page\n", len);
265 return NULL; 265 return NULL;
266 } 266 }
@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
349 for (i = 0; i < 2; i++) { 349 for (i = 0; i < 2; i++) {
350 tage = cfs_trace_get_tage(tcd, needed + known_size + 1); 350 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
351 if (!tage) { 351 if (!tage) {
352 if (needed + known_size > PAGE_CACHE_SIZE) 352 if (needed + known_size > PAGE_SIZE)
353 mask |= D_ERROR; 353 mask |= D_ERROR;
354 354
355 cfs_trace_put_tcd(tcd); 355 cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
360 string_buf = (char *)page_address(tage->page) + 360 string_buf = (char *)page_address(tage->page) +
361 tage->used + known_size; 361 tage->used + known_size;
362 362
363 max_nob = PAGE_CACHE_SIZE - tage->used - known_size; 363 max_nob = PAGE_SIZE - tage->used - known_size;
364 if (max_nob <= 0) { 364 if (max_nob <= 0) {
365 printk(KERN_EMERG "negative max_nob: %d\n", 365 printk(KERN_EMERG "negative max_nob: %d\n",
366 max_nob); 366 max_nob);
@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
424 __LASSERT(debug_buf == string_buf); 424 __LASSERT(debug_buf == string_buf);
425 425
426 tage->used += needed; 426 tage->used += needed;
427 __LASSERT(tage->used <= PAGE_CACHE_SIZE); 427 __LASSERT(tage->used <= PAGE_SIZE);
428 428
429console: 429console:
430 if ((mask & libcfs_printk) == 0) { 430 if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
835 835
836int cfs_trace_allocate_string_buffer(char **str, int nob) 836int cfs_trace_allocate_string_buffer(char **str, int nob)
837{ 837{
838 if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ 838 if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
839 return -EINVAL; 839 return -EINVAL;
840 840
841 *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); 841 *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
951 } 951 }
952 952
953 mb /= num_possible_cpus(); 953 mb /= num_possible_cpus();
954 pages = mb << (20 - PAGE_CACHE_SHIFT); 954 pages = mb << (20 - PAGE_SHIFT);
955 955
956 cfs_tracefile_write_lock(); 956 cfs_tracefile_write_lock();
957 957
@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
977 977
978 cfs_tracefile_read_unlock(); 978 cfs_tracefile_read_unlock();
979 979
980 return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; 980 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
981} 981}
982 982
983static int tracefiled(void *arg) 983static int tracefiled(void *arg)
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 4c77f9044dd3..ac84e7f4c859 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
87extern int libcfs_panic_in_progress; 87extern int libcfs_panic_in_progress;
88int cfs_trace_max_debug_mb(void); 88int cfs_trace_max_debug_mb(void);
89 89
90#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) 90#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
91#define TCD_STOCK_PAGES (TCD_MAX_PAGES) 91#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
92#define CFS_TRACEFILE_SIZE (500 << 20) 92#define CFS_TRACEFILE_SIZE (500 << 20)
93 93
@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
96/* 96/*
97 * Private declare for tracefile 97 * Private declare for tracefile
98 */ 98 */
99#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) 99#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
100#define TCD_STOCK_PAGES (TCD_MAX_PAGES) 100#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
101 101
102#define CFS_TRACEFILE_SIZE (500 << 20) 102#define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do { \
257do { \ 257do { \
258 __LASSERT(tage); \ 258 __LASSERT(tage); \
259 __LASSERT(tage->page); \ 259 __LASSERT(tage->page); \
260 __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ 260 __LASSERT(tage->used <= PAGE_SIZE); \
261 __LASSERT(page_count(tage->page) > 0); \ 261 __LASSERT(page_count(tage->page) > 0); \
262} while (0) 262} while (0)
263 263
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index c74514f99f90..75d31217bf92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
139 for (i = 0; i < (int)niov; i++) { 139 for (i = 0; i < (int)niov; i++) {
140 /* We take the page pointer on trust */ 140 /* We take the page pointer on trust */
141 if (lmd->md_iov.kiov[i].kiov_offset + 141 if (lmd->md_iov.kiov[i].kiov_offset +
142 lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) 142 lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
143 return -EINVAL; /* invalid length */ 143 return -EINVAL; /* invalid length */
144 144
145 total_length += lmd->md_iov.kiov[i].kiov_len; 145 total_length += lmd->md_iov.kiov[i].kiov_len;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 44e2bd6dba63..c5d5bedb3128 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
549 if (len <= frag_len) { 549 if (len <= frag_len) {
550 dst->kiov_len = len; 550 dst->kiov_len = len;
551 LASSERT(dst->kiov_offset + dst->kiov_len 551 LASSERT(dst->kiov_offset + dst->kiov_len
552 <= PAGE_CACHE_SIZE); 552 <= PAGE_SIZE);
553 return niov; 553 return niov;
554 } 554 }
555 555
556 dst->kiov_len = frag_len; 556 dst->kiov_len = frag_len;
557 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); 557 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
558 558
559 len -= frag_len; 559 len -= frag_len;
560 dst++; 560 dst++;
@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
887 rbp = &the_lnet.ln_rtrpools[cpt][0]; 887 rbp = &the_lnet.ln_rtrpools[cpt][0];
888 888
889 LASSERT(msg->msg_len <= LNET_MTU); 889 LASSERT(msg->msg_len <= LNET_MTU);
890 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { 890 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
891 rbp++; 891 rbp++;
892 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); 892 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
893 } 893 }
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index cc0c2753dd63..891fd59401d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
166 nalloc = 16; /* first guess at max interfaces */ 166 nalloc = 16; /* first guess at max interfaces */
167 toobig = 0; 167 toobig = 0;
168 for (;;) { 168 for (;;) {
169 if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { 169 if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
170 toobig = 1; 170 toobig = 1;
171 nalloc = PAGE_CACHE_SIZE / sizeof(*ifr); 171 nalloc = PAGE_SIZE / sizeof(*ifr);
172 CWARN("Too many interfaces: only enumerating first %d\n", 172 CWARN("Too many interfaces: only enumerating first %d\n",
173 nalloc); 173 nalloc);
174 } 174 }
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 61459cf9d58f..b01dc424c514 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -27,8 +27,8 @@
27#define LNET_NRB_SMALL_PAGES 1 27#define LNET_NRB_SMALL_PAGES 1
28#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ 28#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
29#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) 29#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
30#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \ 30#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
31 PAGE_CACHE_SHIFT) 31 PAGE_SHIFT)
32 32
33static char *forwarding = ""; 33static char *forwarding = "";
34module_param(forwarding, charp, 0444); 34module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1338 return NULL; 1338 return NULL;
1339 } 1339 }
1340 1340
1341 rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; 1341 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
1342 rb->rb_kiov[i].kiov_offset = 0; 1342 rb->rb_kiov[i].kiov_offset = 0;
1343 rb->rb_kiov[i].kiov_page = page; 1343 rb->rb_kiov[i].kiov_page = page;
1344 } 1344 }
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index 1988cee36751..7f539f92321c 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -49,10 +49,10 @@ module_param(brw_inject_errors, int, 0644);
49MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default"); 49MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
50 50
51static void 51static void
52brw_client_fini(sfw_test_instance_t *tsi) 52brw_client_fini(struct sfw_test_instance *tsi)
53{ 53{
54 srpc_bulk_t *bulk; 54 struct srpc_bulk *bulk;
55 sfw_test_unit_t *tsu; 55 struct sfw_test_unit *tsu;
56 56
57 LASSERT(tsi->tsi_is_client); 57 LASSERT(tsi->tsi_is_client);
58 58
@@ -67,15 +67,15 @@ brw_client_fini(sfw_test_instance_t *tsi)
67} 67}
68 68
69static int 69static int
70brw_client_init(sfw_test_instance_t *tsi) 70brw_client_init(struct sfw_test_instance *tsi)
71{ 71{
72 sfw_session_t *sn = tsi->tsi_batch->bat_session; 72 struct sfw_session *sn = tsi->tsi_batch->bat_session;
73 int flags; 73 int flags;
74 int npg; 74 int npg;
75 int len; 75 int len;
76 int opc; 76 int opc;
77 srpc_bulk_t *bulk; 77 struct srpc_bulk *bulk;
78 sfw_test_unit_t *tsu; 78 struct sfw_test_unit *tsu;
79 79
80 LASSERT(sn); 80 LASSERT(sn);
81 LASSERT(tsi->tsi_is_client); 81 LASSERT(tsi->tsi_is_client);
@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
90 * NB: this is not going to work for variable page size, 90 * NB: this is not going to work for variable page size,
91 * but we have to keep it for compatibility 91 * but we have to keep it for compatibility
92 */ 92 */
93 len = npg * PAGE_CACHE_SIZE; 93 len = npg * PAGE_SIZE;
94 } else { 94 } else {
95 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 95 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
96 96
@@ -103,7 +103,7 @@ brw_client_init(sfw_test_instance_t *tsi)
103 opc = breq->blk_opc; 103 opc = breq->blk_opc;
104 flags = breq->blk_flags; 104 flags = breq->blk_flags;
105 len = breq->blk_len; 105 len = breq->blk_len;
106 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 106 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
107 } 107 }
108 108
109 if (npg > LNET_MAX_IOV || npg <= 0) 109 if (npg > LNET_MAX_IOV || npg <= 0)
@@ -166,13 +166,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
166 166
167 if (pattern == LST_BRW_CHECK_SIMPLE) { 167 if (pattern == LST_BRW_CHECK_SIMPLE) {
168 memcpy(addr, &magic, BRW_MSIZE); 168 memcpy(addr, &magic, BRW_MSIZE);
169 addr += PAGE_CACHE_SIZE - BRW_MSIZE; 169 addr += PAGE_SIZE - BRW_MSIZE;
170 memcpy(addr, &magic, BRW_MSIZE); 170 memcpy(addr, &magic, BRW_MSIZE);
171 return; 171 return;
172 } 172 }
173 173
174 if (pattern == LST_BRW_CHECK_FULL) { 174 if (pattern == LST_BRW_CHECK_FULL) {
175 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) 175 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
176 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); 176 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
177 return; 177 return;
178 } 178 }
@@ -197,7 +197,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
197 if (data != magic) 197 if (data != magic)
198 goto bad_data; 198 goto bad_data;
199 199
200 addr += PAGE_CACHE_SIZE - BRW_MSIZE; 200 addr += PAGE_SIZE - BRW_MSIZE;
201 data = *((__u64 *)addr); 201 data = *((__u64 *)addr);
202 if (data != magic) 202 if (data != magic)
203 goto bad_data; 203 goto bad_data;
@@ -206,7 +206,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
206 } 206 }
207 207
208 if (pattern == LST_BRW_CHECK_FULL) { 208 if (pattern == LST_BRW_CHECK_FULL) {
209 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { 209 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
210 data = *(((__u64 *)addr) + i); 210 data = *(((__u64 *)addr) + i);
211 if (data != magic) 211 if (data != magic)
212 goto bad_data; 212 goto bad_data;
@@ -224,7 +224,7 @@ bad_data:
224} 224}
225 225
226static void 226static void
227brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) 227brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
228{ 228{
229 int i; 229 int i;
230 struct page *pg; 230 struct page *pg;
@@ -236,7 +236,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
236} 236}
237 237
238static int 238static int
239brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) 239brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
240{ 240{
241 int i; 241 int i;
242 struct page *pg; 242 struct page *pg;
@@ -254,13 +254,13 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
254} 254}
255 255
256static int 256static int
257brw_client_prep_rpc(sfw_test_unit_t *tsu, 257brw_client_prep_rpc(struct sfw_test_unit *tsu,
258 lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) 258 lnet_process_id_t dest, struct srpc_client_rpc **rpcpp)
259{ 259{
260 srpc_bulk_t *bulk = tsu->tsu_private; 260 struct srpc_bulk *bulk = tsu->tsu_private;
261 sfw_test_instance_t *tsi = tsu->tsu_instance; 261 struct sfw_test_instance *tsi = tsu->tsu_instance;
262 sfw_session_t *sn = tsi->tsi_batch->bat_session; 262 struct sfw_session *sn = tsi->tsi_batch->bat_session;
263 srpc_client_rpc_t *rpc; 263 struct srpc_client_rpc *rpc;
264 srpc_brw_reqst_t *req; 264 srpc_brw_reqst_t *req;
265 int flags; 265 int flags;
266 int npg; 266 int npg;
@@ -277,7 +277,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
277 opc = breq->blk_opc; 277 opc = breq->blk_opc;
278 flags = breq->blk_flags; 278 flags = breq->blk_flags;
279 npg = breq->blk_npg; 279 npg = breq->blk_npg;
280 len = npg * PAGE_CACHE_SIZE; 280 len = npg * PAGE_SIZE;
281 } else { 281 } else {
282 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 282 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
283 283
@@ -290,14 +290,14 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
290 opc = breq->blk_opc; 290 opc = breq->blk_opc;
291 flags = breq->blk_flags; 291 flags = breq->blk_flags;
292 len = breq->blk_len; 292 len = breq->blk_len;
293 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 293 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
294 } 294 }
295 295
296 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); 296 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
297 if (rc) 297 if (rc)
298 return rc; 298 return rc;
299 299
300 memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg])); 300 memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg]));
301 if (opc == LST_BRW_WRITE) 301 if (opc == LST_BRW_WRITE)
302 brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC); 302 brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
303 else 303 else
@@ -313,12 +313,12 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
313} 313}
314 314
315static void 315static void
316brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) 316brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
317{ 317{
318 __u64 magic = BRW_MAGIC; 318 __u64 magic = BRW_MAGIC;
319 sfw_test_instance_t *tsi = tsu->tsu_instance; 319 struct sfw_test_instance *tsi = tsu->tsu_instance;
320 sfw_session_t *sn = tsi->tsi_batch->bat_session; 320 struct sfw_session *sn = tsi->tsi_batch->bat_session;
321 srpc_msg_t *msg = &rpc->crpc_replymsg; 321 struct srpc_msg *msg = &rpc->crpc_replymsg;
322 srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; 322 srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
323 srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; 323 srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
324 324
@@ -361,7 +361,7 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
361static void 361static void
362brw_server_rpc_done(struct srpc_server_rpc *rpc) 362brw_server_rpc_done(struct srpc_server_rpc *rpc)
363{ 363{
364 srpc_bulk_t *blk = rpc->srpc_bulk; 364 struct srpc_bulk *blk = rpc->srpc_bulk;
365 365
366 if (!blk) 366 if (!blk)
367 return; 367 return;
@@ -384,7 +384,7 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
384 __u64 magic = BRW_MAGIC; 384 __u64 magic = BRW_MAGIC;
385 srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply; 385 srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
386 srpc_brw_reqst_t *reqst; 386 srpc_brw_reqst_t *reqst;
387 srpc_msg_t *reqstmsg; 387 struct srpc_msg *reqstmsg;
388 388
389 LASSERT(rpc->srpc_bulk); 389 LASSERT(rpc->srpc_bulk);
390 LASSERT(rpc->srpc_reqstbuf); 390 LASSERT(rpc->srpc_reqstbuf);
@@ -418,8 +418,8 @@ static int
418brw_server_handle(struct srpc_server_rpc *rpc) 418brw_server_handle(struct srpc_server_rpc *rpc)
419{ 419{
420 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 420 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
421 srpc_msg_t *replymsg = &rpc->srpc_replymsg; 421 struct srpc_msg *replymsg = &rpc->srpc_replymsg;
422 srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; 422 struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
423 srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply; 423 srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply;
424 srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst; 424 srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst;
425 int npg; 425 int npg;
@@ -461,10 +461,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
461 reply->brw_status = EINVAL; 461 reply->brw_status = EINVAL;
462 return 0; 462 return 0;
463 } 463 }
464 npg = reqst->brw_len >> PAGE_CACHE_SHIFT; 464 npg = reqst->brw_len >> PAGE_SHIFT;
465 465
466 } else { 466 } else {
467 npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 467 npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
468 } 468 }
469 469
470 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; 470 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
@@ -488,7 +488,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
488 return 0; 488 return 0;
489} 489}
490 490
491sfw_test_client_ops_t brw_test_client; 491struct sfw_test_client_ops brw_test_client;
492
492void brw_init_test_client(void) 493void brw_init_test_client(void)
493{ 494{
494 brw_test_client.tso_init = brw_client_init; 495 brw_test_client.tso_init = brw_client_init;
@@ -497,7 +498,8 @@ void brw_init_test_client(void)
497 brw_test_client.tso_done_rpc = brw_client_done_rpc; 498 brw_test_client.tso_done_rpc = brw_client_done_rpc;
498}; 499};
499 500
500srpc_service_t brw_test_service; 501struct srpc_service brw_test_service;
502
501void brw_init_test_service(void) 503void brw_init_test_service(void)
502{ 504{
503 brw_test_service.sv_id = SRPC_SERVICE_BRW; 505 brw_test_service.sv_id = SRPC_SERVICE_BRW;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 6e2a81d8e730..a76f1c3b86df 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
743 if (args->lstio_tes_param && 743 if (args->lstio_tes_param &&
744 (args->lstio_tes_param_len <= 0 || 744 (args->lstio_tes_param_len <= 0 ||
745 args->lstio_tes_param_len > 745 args->lstio_tes_param_len >
746 PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) 746 PAGE_SIZE - sizeof(lstcon_test_t)))
747 return -EINVAL; 747 return -EINVAL;
748 748
749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); 749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
819 819
820 opc = data->ioc_u32[0]; 820 opc = data->ioc_u32[0];
821 821
822 if (data->ioc_plen1 > PAGE_CACHE_SIZE) 822 if (data->ioc_plen1 > PAGE_SIZE)
823 return -EINVAL; 823 return -EINVAL;
824 824
825 LIBCFS_ALLOC(buf, data->ioc_plen1); 825 LIBCFS_ALLOC(buf, data->ioc_plen1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 3908c100ccb2..31d7b4f4a9e4 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -46,13 +46,13 @@
46#include "conrpc.h" 46#include "conrpc.h"
47#include "console.h" 47#include "console.h"
48 48
49void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *, 49void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *,
50 lstcon_node_t *, lstcon_trans_stat_t *); 50 struct lstcon_node *, lstcon_trans_stat_t *);
51 51
52static void 52static void
53lstcon_rpc_done(srpc_client_rpc_t *rpc) 53lstcon_rpc_done(struct srpc_client_rpc *rpc)
54{ 54{
55 lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv; 55 struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv;
56 56
57 LASSERT(crpc && rpc == crpc->crp_rpc); 57 LASSERT(crpc && rpc == crpc->crp_rpc);
58 LASSERT(crpc->crp_posted && !crpc->crp_finished); 58 LASSERT(crpc->crp_posted && !crpc->crp_finished);
@@ -90,8 +90,8 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
90} 90}
91 91
92static int 92static int
93lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats, 93lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
94 int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc) 94 int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
95{ 95{
96 crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, 96 crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
97 feats, bulk_npg, bulk_len, 97 feats, bulk_npg, bulk_len,
@@ -115,16 +115,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
115} 115}
116 116
117static int 117static int
118lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats, 118lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
119 int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp) 119 int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
120{ 120{
121 lstcon_rpc_t *crpc = NULL; 121 struct lstcon_rpc *crpc = NULL;
122 int rc; 122 int rc;
123 123
124 spin_lock(&console_session.ses_rpc_lock); 124 spin_lock(&console_session.ses_rpc_lock);
125 125
126 crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist, 126 crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
127 lstcon_rpc_t, crp_link); 127 struct lstcon_rpc, crp_link);
128 if (crpc) 128 if (crpc)
129 list_del_init(&crpc->crp_link); 129 list_del_init(&crpc->crp_link);
130 130
@@ -148,9 +148,9 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
148} 148}
149 149
150void 150void
151lstcon_rpc_put(lstcon_rpc_t *crpc) 151lstcon_rpc_put(struct lstcon_rpc *crpc)
152{ 152{
153 srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk; 153 struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk;
154 int i; 154 int i;
155 155
156 LASSERT(list_empty(&crpc->crp_link)); 156 LASSERT(list_empty(&crpc->crp_link));
@@ -183,9 +183,9 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
183} 183}
184 184
185static void 185static void
186lstcon_rpc_post(lstcon_rpc_t *crpc) 186lstcon_rpc_post(struct lstcon_rpc *crpc)
187{ 187{
188 lstcon_rpc_trans_t *trans = crpc->crp_trans; 188 struct lstcon_rpc_trans *trans = crpc->crp_trans;
189 189
190 LASSERT(trans); 190 LASSERT(trans);
191 191
@@ -236,9 +236,9 @@ lstcon_rpc_trans_name(int transop)
236 236
237int 237int
238lstcon_rpc_trans_prep(struct list_head *translist, int transop, 238lstcon_rpc_trans_prep(struct list_head *translist, int transop,
239 lstcon_rpc_trans_t **transpp) 239 struct lstcon_rpc_trans **transpp)
240{ 240{
241 lstcon_rpc_trans_t *trans; 241 struct lstcon_rpc_trans *trans;
242 242
243 if (translist) { 243 if (translist) {
244 list_for_each_entry(trans, translist, tas_link) { 244 list_for_each_entry(trans, translist, tas_link) {
@@ -278,18 +278,18 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop,
278} 278}
279 279
280void 280void
281lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc) 281lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc)
282{ 282{
283 list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list); 283 list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
284 crpc->crp_trans = trans; 284 crpc->crp_trans = trans;
285} 285}
286 286
287void 287void
288lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) 288lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
289{ 289{
290 srpc_client_rpc_t *rpc; 290 struct srpc_client_rpc *rpc;
291 lstcon_rpc_t *crpc; 291 struct lstcon_rpc *crpc;
292 lstcon_node_t *nd; 292 struct lstcon_node *nd;
293 293
294 list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { 294 list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
295 rpc = crpc->crp_rpc; 295 rpc = crpc->crp_rpc;
@@ -326,7 +326,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
326} 326}
327 327
328static int 328static int
329lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans) 329lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans)
330{ 330{
331 if (console_session.ses_shutdown && 331 if (console_session.ses_shutdown &&
332 !list_empty(&trans->tas_olink)) /* Not an end session RPC */ 332 !list_empty(&trans->tas_olink)) /* Not an end session RPC */
@@ -336,9 +336,9 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
336} 336}
337 337
338int 338int
339lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) 339lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
340{ 340{
341 lstcon_rpc_t *crpc; 341 struct lstcon_rpc *crpc;
342 int rc; 342 int rc;
343 343
344 if (list_empty(&trans->tas_rpcs_list)) 344 if (list_empty(&trans->tas_rpcs_list))
@@ -386,10 +386,10 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
386} 386}
387 387
388static int 388static int
389lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) 389lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
390{ 390{
391 lstcon_node_t *nd = crpc->crp_node; 391 struct lstcon_node *nd = crpc->crp_node;
392 srpc_client_rpc_t *rpc = crpc->crp_rpc; 392 struct srpc_client_rpc *rpc = crpc->crp_rpc;
393 srpc_generic_reply_t *rep; 393 srpc_generic_reply_t *rep;
394 394
395 LASSERT(nd && rpc); 395 LASSERT(nd && rpc);
@@ -423,10 +423,10 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
423} 423}
424 424
425void 425void
426lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) 426lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat)
427{ 427{
428 lstcon_rpc_t *crpc; 428 struct lstcon_rpc *crpc;
429 srpc_msg_t *rep; 429 struct srpc_msg *rep;
430 int error; 430 int error;
431 431
432 LASSERT(stat); 432 LASSERT(stat);
@@ -466,7 +466,7 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
466} 466}
467 467
468int 468int
469lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, 469lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
470 struct list_head __user *head_up, 470 struct list_head __user *head_up,
471 lstcon_rpc_readent_func_t readent) 471 lstcon_rpc_readent_func_t readent)
472{ 472{
@@ -474,9 +474,9 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
474 struct list_head __user *next; 474 struct list_head __user *next;
475 lstcon_rpc_ent_t *ent; 475 lstcon_rpc_ent_t *ent;
476 srpc_generic_reply_t *rep; 476 srpc_generic_reply_t *rep;
477 lstcon_rpc_t *crpc; 477 struct lstcon_rpc *crpc;
478 srpc_msg_t *msg; 478 struct srpc_msg *msg;
479 lstcon_node_t *nd; 479 struct lstcon_node *nd;
480 long dur; 480 long dur;
481 struct timeval tv; 481 struct timeval tv;
482 int error; 482 int error;
@@ -539,11 +539,11 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
539} 539}
540 540
541void 541void
542lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) 542lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
543{ 543{
544 srpc_client_rpc_t *rpc; 544 struct srpc_client_rpc *rpc;
545 lstcon_rpc_t *crpc; 545 struct lstcon_rpc *crpc;
546 lstcon_rpc_t *tmp; 546 struct lstcon_rpc *tmp;
547 int count = 0; 547 int count = 0;
548 548
549 list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) { 549 list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
@@ -592,8 +592,8 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
592} 592}
593 593
594int 594int
595lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, 595lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
596 unsigned feats, lstcon_rpc_t **crpc) 596 unsigned feats, struct lstcon_rpc **crpc)
597{ 597{
598 srpc_mksn_reqst_t *msrq; 598 srpc_mksn_reqst_t *msrq;
599 srpc_rmsn_reqst_t *rsrq; 599 srpc_rmsn_reqst_t *rsrq;
@@ -631,7 +631,7 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
631} 631}
632 632
633int 633int
634lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) 634lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
635{ 635{
636 srpc_debug_reqst_t *drq; 636 srpc_debug_reqst_t *drq;
637 int rc; 637 int rc;
@@ -649,10 +649,10 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
649} 649}
650 650
651int 651int
652lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, 652lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
653 lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc) 653 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
654{ 654{
655 lstcon_batch_t *batch; 655 struct lstcon_batch *batch;
656 srpc_batch_reqst_t *brq; 656 srpc_batch_reqst_t *brq;
657 int rc; 657 int rc;
658 658
@@ -675,14 +675,14 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
675 675
676 LASSERT(!tsb->tsb_index); 676 LASSERT(!tsb->tsb_index);
677 677
678 batch = (lstcon_batch_t *)tsb; 678 batch = (struct lstcon_batch *)tsb;
679 brq->bar_arg = batch->bat_arg; 679 brq->bar_arg = batch->bat_arg;
680 680
681 return 0; 681 return 0;
682} 682}
683 683
684int 684int
685lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) 685lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
686{ 686{
687 srpc_stat_reqst_t *srq; 687 srpc_stat_reqst_t *srq;
688 int rc; 688 int rc;
@@ -715,12 +715,12 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
715} 715}
716 716
717static int 717static int
718lstcon_dstnodes_prep(lstcon_group_t *grp, int idx, 718lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
719 int dist, int span, int nkiov, lnet_kiov_t *kiov) 719 int dist, int span, int nkiov, lnet_kiov_t *kiov)
720{ 720{
721 lnet_process_id_packed_t *pid; 721 lnet_process_id_packed_t *pid;
722 lstcon_ndlink_t *ndl; 722 struct lstcon_ndlink *ndl;
723 lstcon_node_t *nd; 723 struct lstcon_node *nd;
724 int start; 724 int start;
725 int end; 725 int end;
726 int i = 0; 726 int i = 0;
@@ -785,8 +785,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
785 test_bulk_req_t *brq = &req->tsr_u.bulk_v0; 785 test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
786 786
787 brq->blk_opc = param->blk_opc; 787 brq->blk_opc = param->blk_opc;
788 brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / 788 brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
789 PAGE_CACHE_SIZE; 789 PAGE_SIZE;
790 brq->blk_flags = param->blk_flags; 790 brq->blk_flags = param->blk_flags;
791 791
792 return 0; 792 return 0;
@@ -806,13 +806,13 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
806} 806}
807 807
808int 808int
809lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, 809lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
810 lstcon_test_t *test, lstcon_rpc_t **crpc) 810 lstcon_test_t *test, struct lstcon_rpc **crpc)
811{ 811{
812 lstcon_group_t *sgrp = test->tes_src_grp; 812 struct lstcon_group *sgrp = test->tes_src_grp;
813 lstcon_group_t *dgrp = test->tes_dst_grp; 813 struct lstcon_group *dgrp = test->tes_dst_grp;
814 srpc_test_reqst_t *trq; 814 srpc_test_reqst_t *trq;
815 srpc_bulk_t *bulk; 815 struct srpc_bulk *bulk;
816 int i; 816 int i;
817 int npg = 0; 817 int npg = 0;
818 int nob = 0; 818 int nob = 0;
@@ -821,7 +821,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
821 if (transop == LST_TRANS_TSBCLIADD) { 821 if (transop == LST_TRANS_TSBCLIADD) {
822 npg = sfw_id_pages(test->tes_span); 822 npg = sfw_id_pages(test->tes_span);
823 nob = !(feats & LST_FEAT_BULK_LEN) ? 823 nob = !(feats & LST_FEAT_BULK_LEN) ?
824 npg * PAGE_CACHE_SIZE : 824 npg * PAGE_SIZE :
825 sizeof(lnet_process_id_packed_t) * test->tes_span; 825 sizeof(lnet_process_id_packed_t) * test->tes_span;
826 } 826 }
827 827
@@ -849,8 +849,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
849 LASSERT(nob > 0); 849 LASSERT(nob > 0);
850 850
851 len = !(feats & LST_FEAT_BULK_LEN) ? 851 len = !(feats & LST_FEAT_BULK_LEN) ?
852 PAGE_CACHE_SIZE : 852 PAGE_SIZE :
853 min_t(int, nob, PAGE_CACHE_SIZE); 853 min_t(int, nob, PAGE_SIZE);
854 nob -= len; 854 nob -= len;
855 855
856 bulk->bk_iovs[i].kiov_offset = 0; 856 bulk->bk_iovs[i].kiov_offset = 0;
@@ -915,8 +915,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
915} 915}
916 916
917static int 917static int
918lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, 918lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans,
919 lstcon_node_t *nd, srpc_msg_t *reply) 919 struct lstcon_node *nd, struct srpc_msg *reply)
920{ 920{
921 srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply; 921 srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
922 int status = mksn_rep->mksn_status; 922 int status = mksn_rep->mksn_status;
@@ -962,8 +962,8 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
962} 962}
963 963
964void 964void
965lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, 965lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg,
966 lstcon_node_t *nd, lstcon_trans_stat_t *stat) 966 struct lstcon_node *nd, lstcon_trans_stat_t *stat)
967{ 967{
968 srpc_rmsn_reply_t *rmsn_rep; 968 srpc_rmsn_reply_t *rmsn_rep;
969 srpc_debug_reply_t *dbg_rep; 969 srpc_debug_reply_t *dbg_rep;
@@ -1083,12 +1083,12 @@ int
1083lstcon_rpc_trans_ndlist(struct list_head *ndlist, 1083lstcon_rpc_trans_ndlist(struct list_head *ndlist,
1084 struct list_head *translist, int transop, 1084 struct list_head *translist, int transop,
1085 void *arg, lstcon_rpc_cond_func_t condition, 1085 void *arg, lstcon_rpc_cond_func_t condition,
1086 lstcon_rpc_trans_t **transpp) 1086 struct lstcon_rpc_trans **transpp)
1087{ 1087{
1088 lstcon_rpc_trans_t *trans; 1088 struct lstcon_rpc_trans *trans;
1089 lstcon_ndlink_t *ndl; 1089 struct lstcon_ndlink *ndl;
1090 lstcon_node_t *nd; 1090 struct lstcon_node *nd;
1091 lstcon_rpc_t *rpc; 1091 struct lstcon_rpc *rpc;
1092 unsigned feats; 1092 unsigned feats;
1093 int rc; 1093 int rc;
1094 1094
@@ -1135,7 +1135,8 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
1135 case LST_TRANS_TSBCLIQRY: 1135 case LST_TRANS_TSBCLIQRY:
1136 case LST_TRANS_TSBSRVQRY: 1136 case LST_TRANS_TSBSRVQRY:
1137 rc = lstcon_batrpc_prep(nd, transop, feats, 1137 rc = lstcon_batrpc_prep(nd, transop, feats,
1138 (lstcon_tsb_hdr_t *)arg, &rpc); 1138 (struct lstcon_tsb_hdr *)arg,
1139 &rpc);
1139 break; 1140 break;
1140 case LST_TRANS_STATQRY: 1141 case LST_TRANS_STATQRY:
1141 rc = lstcon_statrpc_prep(nd, feats, &rpc); 1142 rc = lstcon_statrpc_prep(nd, feats, &rpc);
@@ -1168,12 +1169,12 @@ static void
1168lstcon_rpc_pinger(void *arg) 1169lstcon_rpc_pinger(void *arg)
1169{ 1170{
1170 struct stt_timer *ptimer = (struct stt_timer *)arg; 1171 struct stt_timer *ptimer = (struct stt_timer *)arg;
1171 lstcon_rpc_trans_t *trans; 1172 struct lstcon_rpc_trans *trans;
1172 lstcon_rpc_t *crpc; 1173 struct lstcon_rpc *crpc;
1173 srpc_msg_t *rep; 1174 struct srpc_msg *rep;
1174 srpc_debug_reqst_t *drq; 1175 srpc_debug_reqst_t *drq;
1175 lstcon_ndlink_t *ndl; 1176 struct lstcon_ndlink *ndl;
1176 lstcon_node_t *nd; 1177 struct lstcon_node *nd;
1177 int intv; 1178 int intv;
1178 int count = 0; 1179 int count = 0;
1179 int rc; 1180 int rc;
@@ -1325,9 +1326,9 @@ lstcon_rpc_pinger_stop(void)
1325void 1326void
1326lstcon_rpc_cleanup_wait(void) 1327lstcon_rpc_cleanup_wait(void)
1327{ 1328{
1328 lstcon_rpc_trans_t *trans; 1329 struct lstcon_rpc_trans *trans;
1329 lstcon_rpc_t *crpc; 1330 struct lstcon_rpc *crpc;
1330 lstcon_rpc_t *temp; 1331 struct lstcon_rpc *temp;
1331 struct list_head *pacer; 1332 struct list_head *pacer;
1332 struct list_head zlist; 1333 struct list_head zlist;
1333 1334
@@ -1337,7 +1338,7 @@ lstcon_rpc_cleanup_wait(void)
1337 1338
1338 while (!list_empty(&console_session.ses_trans_list)) { 1339 while (!list_empty(&console_session.ses_trans_list)) {
1339 list_for_each(pacer, &console_session.ses_trans_list) { 1340 list_for_each(pacer, &console_session.ses_trans_list) {
1340 trans = list_entry(pacer, lstcon_rpc_trans_t, 1341 trans = list_entry(pacer, struct lstcon_rpc_trans,
1341 tas_link); 1342 tas_link);
1342 1343
1343 CDEBUG(D_NET, "Session closed, wakeup transaction %s\n", 1344 CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
@@ -1369,7 +1370,7 @@ lstcon_rpc_cleanup_wait(void)
1369 1370
1370 list_for_each_entry_safe(crpc, temp, &zlist, crp_link) { 1371 list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
1371 list_del(&crpc->crp_link); 1372 list_del(&crpc->crp_link);
1372 LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t)); 1373 LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc));
1373 } 1374 }
1374} 1375}
1375 1376
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 3e7839dad5bb..90c3385a355c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -63,9 +63,9 @@ struct lstcon_tsb_hdr;
63struct lstcon_test; 63struct lstcon_test;
64struct lstcon_node; 64struct lstcon_node;
65 65
66typedef struct lstcon_rpc { 66struct lstcon_rpc {
67 struct list_head crp_link; /* chain on rpc transaction */ 67 struct list_head crp_link; /* chain on rpc transaction */
68 srpc_client_rpc_t *crp_rpc; /* client rpc */ 68 struct srpc_client_rpc *crp_rpc; /* client rpc */
69 struct lstcon_node *crp_node; /* destination node */ 69 struct lstcon_node *crp_node; /* destination node */
70 struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ 70 struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
71 71
@@ -76,9 +76,9 @@ typedef struct lstcon_rpc {
76 unsigned int crp_embedded:1; 76 unsigned int crp_embedded:1;
77 int crp_status; /* console rpc errors */ 77 int crp_status; /* console rpc errors */
78 unsigned long crp_stamp; /* replied time stamp */ 78 unsigned long crp_stamp; /* replied time stamp */
79} lstcon_rpc_t; 79};
80 80
81typedef struct lstcon_rpc_trans { 81struct lstcon_rpc_trans {
82 struct list_head tas_olink; /* link chain on owner list */ 82 struct list_head tas_olink; /* link chain on owner list */
83 struct list_head tas_link; /* link chain on global list */ 83 struct list_head tas_link; /* link chain on global list */
84 int tas_opc; /* operation code of transaction */ 84 int tas_opc; /* operation code of transaction */
@@ -87,7 +87,7 @@ typedef struct lstcon_rpc_trans {
87 wait_queue_head_t tas_waitq; /* wait queue head */ 87 wait_queue_head_t tas_waitq; /* wait queue head */
88 atomic_t tas_remaining; /* # of un-scheduled rpcs */ 88 atomic_t tas_remaining; /* # of un-scheduled rpcs */
89 struct list_head tas_rpcs_list; /* queued requests */ 89 struct list_head tas_rpcs_list; /* queued requests */
90} lstcon_rpc_trans_t; 90};
91 91
92#define LST_TRANS_PRIVATE 0x1000 92#define LST_TRANS_PRIVATE 0x1000
93 93
@@ -106,35 +106,35 @@ typedef struct lstcon_rpc_trans {
106#define LST_TRANS_STATQRY 0x21 106#define LST_TRANS_STATQRY 0x21
107 107
108typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *); 108typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
109typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, 109typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
110 lstcon_rpc_ent_t __user *); 110 lstcon_rpc_ent_t __user *);
111 111
112int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, 112int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
113 unsigned version, lstcon_rpc_t **crpc); 113 unsigned version, struct lstcon_rpc **crpc);
114int lstcon_dbgrpc_prep(struct lstcon_node *nd, 114int lstcon_dbgrpc_prep(struct lstcon_node *nd,
115 unsigned version, lstcon_rpc_t **crpc); 115 unsigned version, struct lstcon_rpc **crpc);
116int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version, 116int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
117 struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc); 117 struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
118int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version, 118int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
119 struct lstcon_test *test, lstcon_rpc_t **crpc); 119 struct lstcon_test *test, struct lstcon_rpc **crpc);
120int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version, 120int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
121 lstcon_rpc_t **crpc); 121 struct lstcon_rpc **crpc);
122void lstcon_rpc_put(lstcon_rpc_t *crpc); 122void lstcon_rpc_put(struct lstcon_rpc *crpc);
123int lstcon_rpc_trans_prep(struct list_head *translist, 123int lstcon_rpc_trans_prep(struct list_head *translist,
124 int transop, lstcon_rpc_trans_t **transpp); 124 int transop, struct lstcon_rpc_trans **transpp);
125int lstcon_rpc_trans_ndlist(struct list_head *ndlist, 125int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
126 struct list_head *translist, int transop, 126 struct list_head *translist, int transop,
127 void *arg, lstcon_rpc_cond_func_t condition, 127 void *arg, lstcon_rpc_cond_func_t condition,
128 lstcon_rpc_trans_t **transpp); 128 struct lstcon_rpc_trans **transpp);
129void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, 129void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans,
130 lstcon_trans_stat_t *stat); 130 lstcon_trans_stat_t *stat);
131int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, 131int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
132 struct list_head __user *head_up, 132 struct list_head __user *head_up,
133 lstcon_rpc_readent_func_t readent); 133 lstcon_rpc_readent_func_t readent);
134void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error); 134void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
135void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans); 135void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
136void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req); 136void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
137int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout); 137int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
138int lstcon_rpc_pinger_start(void); 138int lstcon_rpc_pinger_start(void);
139void lstcon_rpc_pinger_stop(void); 139void lstcon_rpc_pinger_stop(void);
140void lstcon_rpc_cleanup_wait(void); 140void lstcon_rpc_cleanup_wait(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index dcfc83d0ad41..03c73b014b2c 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
61struct lstcon_session console_session; 61struct lstcon_session console_session;
62 62
63static void 63static void
64lstcon_node_get(lstcon_node_t *nd) 64lstcon_node_get(struct lstcon_node *nd)
65{ 65{
66 LASSERT(nd->nd_ref >= 1); 66 LASSERT(nd->nd_ref >= 1);
67 67
@@ -69,9 +69,9 @@ lstcon_node_get(lstcon_node_t *nd)
69} 69}
70 70
71static int 71static int
72lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) 72lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
73{ 73{
74 lstcon_ndlink_t *ndl; 74 struct lstcon_ndlink *ndl;
75 unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; 75 unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
76 76
77 LASSERT(id.nid != LNET_NID_ANY); 77 LASSERT(id.nid != LNET_NID_ANY);
@@ -90,11 +90,11 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
90 if (!create) 90 if (!create)
91 return -ENOENT; 91 return -ENOENT;
92 92
93 LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); 93 LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
94 if (!*ndpp) 94 if (!*ndpp)
95 return -ENOMEM; 95 return -ENOMEM;
96 96
97 ndl = (lstcon_ndlink_t *)(*ndpp + 1); 97 ndl = (struct lstcon_ndlink *)(*ndpp + 1);
98 98
99 ndl->ndl_node = *ndpp; 99 ndl->ndl_node = *ndpp;
100 100
@@ -103,7 +103,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
103 ndl->ndl_node->nd_stamp = cfs_time_current(); 103 ndl->ndl_node->nd_stamp = cfs_time_current();
104 ndl->ndl_node->nd_state = LST_NODE_UNKNOWN; 104 ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
105 ndl->ndl_node->nd_timeout = 0; 105 ndl->ndl_node->nd_timeout = 0;
106 memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t)); 106 memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
107 107
108 /* 108 /*
109 * queued in global hash & list, no refcount is taken by 109 * queued in global hash & list, no refcount is taken by
@@ -117,16 +117,16 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
117} 117}
118 118
119static void 119static void
120lstcon_node_put(lstcon_node_t *nd) 120lstcon_node_put(struct lstcon_node *nd)
121{ 121{
122 lstcon_ndlink_t *ndl; 122 struct lstcon_ndlink *ndl;
123 123
124 LASSERT(nd->nd_ref > 0); 124 LASSERT(nd->nd_ref > 0);
125 125
126 if (--nd->nd_ref > 0) 126 if (--nd->nd_ref > 0)
127 return; 127 return;
128 128
129 ndl = (lstcon_ndlink_t *)(nd + 1); 129 ndl = (struct lstcon_ndlink *)(nd + 1);
130 130
131 LASSERT(!list_empty(&ndl->ndl_link)); 131 LASSERT(!list_empty(&ndl->ndl_link));
132 LASSERT(!list_empty(&ndl->ndl_hlink)); 132 LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -135,16 +135,16 @@ lstcon_node_put(lstcon_node_t *nd)
135 list_del(&ndl->ndl_link); 135 list_del(&ndl->ndl_link);
136 list_del(&ndl->ndl_hlink); 136 list_del(&ndl->ndl_hlink);
137 137
138 LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); 138 LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
139} 139}
140 140
141static int 141static int
142lstcon_ndlink_find(struct list_head *hash, 142lstcon_ndlink_find(struct list_head *hash,
143 lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create) 143 lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
144{ 144{
145 unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; 145 unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
146 lstcon_ndlink_t *ndl; 146 struct lstcon_ndlink *ndl;
147 lstcon_node_t *nd; 147 struct lstcon_node *nd;
148 int rc; 148 int rc;
149 149
150 if (id.nid == LNET_NID_ANY) 150 if (id.nid == LNET_NID_ANY)
@@ -168,7 +168,7 @@ lstcon_ndlink_find(struct list_head *hash,
168 if (rc) 168 if (rc)
169 return rc; 169 return rc;
170 170
171 LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t)); 171 LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink));
172 if (!ndl) { 172 if (!ndl) {
173 lstcon_node_put(nd); 173 lstcon_node_put(nd);
174 return -ENOMEM; 174 return -ENOMEM;
@@ -184,7 +184,7 @@ lstcon_ndlink_find(struct list_head *hash,
184} 184}
185 185
186static void 186static void
187lstcon_ndlink_release(lstcon_ndlink_t *ndl) 187lstcon_ndlink_release(struct lstcon_ndlink *ndl)
188{ 188{
189 LASSERT(list_empty(&ndl->ndl_link)); 189 LASSERT(list_empty(&ndl->ndl_link));
190 LASSERT(!list_empty(&ndl->ndl_hlink)); 190 LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -196,12 +196,12 @@ lstcon_ndlink_release(lstcon_ndlink_t *ndl)
196} 196}
197 197
198static int 198static int
199lstcon_group_alloc(char *name, lstcon_group_t **grpp) 199lstcon_group_alloc(char *name, struct lstcon_group **grpp)
200{ 200{
201 lstcon_group_t *grp; 201 struct lstcon_group *grp;
202 int i; 202 int i;
203 203
204 LIBCFS_ALLOC(grp, offsetof(lstcon_group_t, 204 LIBCFS_ALLOC(grp, offsetof(struct lstcon_group,
205 grp_ndl_hash[LST_NODE_HASHSIZE])); 205 grp_ndl_hash[LST_NODE_HASHSIZE]));
206 if (!grp) 206 if (!grp)
207 return -ENOMEM; 207 return -ENOMEM;
@@ -209,7 +209,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
209 grp->grp_ref = 1; 209 grp->grp_ref = 1;
210 if (name) { 210 if (name) {
211 if (strlen(name) > sizeof(grp->grp_name) - 1) { 211 if (strlen(name) > sizeof(grp->grp_name) - 1) {
212 LIBCFS_FREE(grp, offsetof(lstcon_group_t, 212 LIBCFS_FREE(grp, offsetof(struct lstcon_group,
213 grp_ndl_hash[LST_NODE_HASHSIZE])); 213 grp_ndl_hash[LST_NODE_HASHSIZE]));
214 return -E2BIG; 214 return -E2BIG;
215 } 215 }
@@ -229,18 +229,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
229} 229}
230 230
231static void 231static void
232lstcon_group_addref(lstcon_group_t *grp) 232lstcon_group_addref(struct lstcon_group *grp)
233{ 233{
234 grp->grp_ref++; 234 grp->grp_ref++;
235} 235}
236 236
237static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *); 237static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
238 238
239static void 239static void
240lstcon_group_drain(lstcon_group_t *grp, int keep) 240lstcon_group_drain(struct lstcon_group *grp, int keep)
241{ 241{
242 lstcon_ndlink_t *ndl; 242 struct lstcon_ndlink *ndl;
243 lstcon_ndlink_t *tmp; 243 struct lstcon_ndlink *tmp;
244 244
245 list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) { 245 list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
246 if (!(ndl->ndl_node->nd_state & keep)) 246 if (!(ndl->ndl_node->nd_state & keep))
@@ -249,7 +249,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
249} 249}
250 250
251static void 251static void
252lstcon_group_decref(lstcon_group_t *grp) 252lstcon_group_decref(struct lstcon_group *grp)
253{ 253{
254 int i; 254 int i;
255 255
@@ -264,14 +264,14 @@ lstcon_group_decref(lstcon_group_t *grp)
264 for (i = 0; i < LST_NODE_HASHSIZE; i++) 264 for (i = 0; i < LST_NODE_HASHSIZE; i++)
265 LASSERT(list_empty(&grp->grp_ndl_hash[i])); 265 LASSERT(list_empty(&grp->grp_ndl_hash[i]));
266 266
267 LIBCFS_FREE(grp, offsetof(lstcon_group_t, 267 LIBCFS_FREE(grp, offsetof(struct lstcon_group,
268 grp_ndl_hash[LST_NODE_HASHSIZE])); 268 grp_ndl_hash[LST_NODE_HASHSIZE]));
269} 269}
270 270
271static int 271static int
272lstcon_group_find(const char *name, lstcon_group_t **grpp) 272lstcon_group_find(const char *name, struct lstcon_group **grpp)
273{ 273{
274 lstcon_group_t *grp; 274 struct lstcon_group *grp;
275 275
276 list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { 276 list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
277 if (strncmp(grp->grp_name, name, LST_NAME_SIZE)) 277 if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
@@ -286,8 +286,8 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
286} 286}
287 287
288static int 288static int
289lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id, 289lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id,
290 lstcon_ndlink_t **ndlpp, int create) 290 struct lstcon_ndlink **ndlpp, int create)
291{ 291{
292 int rc; 292 int rc;
293 293
@@ -305,7 +305,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
305} 305}
306 306
307static void 307static void
308lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl) 308lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl)
309{ 309{
310 list_del_init(&ndl->ndl_link); 310 list_del_init(&ndl->ndl_link);
311 lstcon_ndlink_release(ndl); 311 lstcon_ndlink_release(ndl);
@@ -313,8 +313,8 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
313} 313}
314 314
315static void 315static void
316lstcon_group_ndlink_move(lstcon_group_t *old, 316lstcon_group_ndlink_move(struct lstcon_group *old,
317 lstcon_group_t *new, lstcon_ndlink_t *ndl) 317 struct lstcon_group *new, struct lstcon_ndlink *ndl)
318{ 318{
319 unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) % 319 unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
320 LST_NODE_HASHSIZE; 320 LST_NODE_HASHSIZE;
@@ -329,21 +329,21 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
329} 329}
330 330
331static void 331static void
332lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new) 332lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new)
333{ 333{
334 lstcon_ndlink_t *ndl; 334 struct lstcon_ndlink *ndl;
335 335
336 while (!list_empty(&old->grp_ndl_list)) { 336 while (!list_empty(&old->grp_ndl_list)) {
337 ndl = list_entry(old->grp_ndl_list.next, 337 ndl = list_entry(old->grp_ndl_list.next,
338 lstcon_ndlink_t, ndl_link); 338 struct lstcon_ndlink, ndl_link);
339 lstcon_group_ndlink_move(old, new, ndl); 339 lstcon_group_ndlink_move(old, new, ndl);
340 } 340 }
341} 341}
342 342
343static int 343static int
344lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg) 344lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg)
345{ 345{
346 lstcon_group_t *grp = (lstcon_group_t *)arg; 346 struct lstcon_group *grp = (struct lstcon_group *)arg;
347 347
348 switch (transop) { 348 switch (transop) {
349 case LST_TRANS_SESNEW: 349 case LST_TRANS_SESNEW:
@@ -370,7 +370,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
370} 370}
371 371
372static int 372static int
373lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, 373lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
374 lstcon_rpc_ent_t __user *ent_up) 374 lstcon_rpc_ent_t __user *ent_up)
375{ 375{
376 srpc_debug_reply_t *rep; 376 srpc_debug_reply_t *rep;
@@ -399,13 +399,13 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
399} 399}
400 400
401static int 401static int
402lstcon_group_nodes_add(lstcon_group_t *grp, 402lstcon_group_nodes_add(struct lstcon_group *grp,
403 int count, lnet_process_id_t __user *ids_up, 403 int count, lnet_process_id_t __user *ids_up,
404 unsigned *featp, struct list_head __user *result_up) 404 unsigned *featp, struct list_head __user *result_up)
405{ 405{
406 lstcon_rpc_trans_t *trans; 406 struct lstcon_rpc_trans *trans;
407 lstcon_ndlink_t *ndl; 407 struct lstcon_ndlink *ndl;
408 lstcon_group_t *tmp; 408 struct lstcon_group *tmp;
409 lnet_process_id_t id; 409 lnet_process_id_t id;
410 int i; 410 int i;
411 int rc; 411 int rc;
@@ -466,13 +466,13 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
466} 466}
467 467
468static int 468static int
469lstcon_group_nodes_remove(lstcon_group_t *grp, 469lstcon_group_nodes_remove(struct lstcon_group *grp,
470 int count, lnet_process_id_t __user *ids_up, 470 int count, lnet_process_id_t __user *ids_up,
471 struct list_head __user *result_up) 471 struct list_head __user *result_up)
472{ 472{
473 lstcon_rpc_trans_t *trans; 473 struct lstcon_rpc_trans *trans;
474 lstcon_ndlink_t *ndl; 474 struct lstcon_ndlink *ndl;
475 lstcon_group_t *tmp; 475 struct lstcon_group *tmp;
476 lnet_process_id_t id; 476 lnet_process_id_t id;
477 int rc; 477 int rc;
478 int i; 478 int i;
@@ -523,7 +523,7 @@ error:
523int 523int
524lstcon_group_add(char *name) 524lstcon_group_add(char *name)
525{ 525{
526 lstcon_group_t *grp; 526 struct lstcon_group *grp;
527 int rc; 527 int rc;
528 528
529 rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST; 529 rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
@@ -548,7 +548,7 @@ int
548lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, 548lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
549 unsigned *featp, struct list_head __user *result_up) 549 unsigned *featp, struct list_head __user *result_up)
550{ 550{
551 lstcon_group_t *grp; 551 struct lstcon_group *grp;
552 int rc; 552 int rc;
553 553
554 LASSERT(count > 0); 554 LASSERT(count > 0);
@@ -578,8 +578,8 @@ lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
578int 578int
579lstcon_group_del(char *name) 579lstcon_group_del(char *name)
580{ 580{
581 lstcon_rpc_trans_t *trans; 581 struct lstcon_rpc_trans *trans;
582 lstcon_group_t *grp; 582 struct lstcon_group *grp;
583 int rc; 583 int rc;
584 584
585 rc = lstcon_group_find(name, &grp); 585 rc = lstcon_group_find(name, &grp);
@@ -621,7 +621,7 @@ lstcon_group_del(char *name)
621int 621int
622lstcon_group_clean(char *name, int args) 622lstcon_group_clean(char *name, int args)
623{ 623{
624 lstcon_group_t *grp = NULL; 624 struct lstcon_group *grp = NULL;
625 int rc; 625 int rc;
626 626
627 rc = lstcon_group_find(name, &grp); 627 rc = lstcon_group_find(name, &grp);
@@ -654,7 +654,7 @@ int
654lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up, 654lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
655 struct list_head __user *result_up) 655 struct list_head __user *result_up)
656{ 656{
657 lstcon_group_t *grp = NULL; 657 struct lstcon_group *grp = NULL;
658 int rc; 658 int rc;
659 659
660 rc = lstcon_group_find(name, &grp); 660 rc = lstcon_group_find(name, &grp);
@@ -683,8 +683,8 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
683int 683int
684lstcon_group_refresh(char *name, struct list_head __user *result_up) 684lstcon_group_refresh(char *name, struct list_head __user *result_up)
685{ 685{
686 lstcon_rpc_trans_t *trans; 686 struct lstcon_rpc_trans *trans;
687 lstcon_group_t *grp; 687 struct lstcon_group *grp;
688 int rc; 688 int rc;
689 689
690 rc = lstcon_group_find(name, &grp); 690 rc = lstcon_group_find(name, &grp);
@@ -725,7 +725,7 @@ lstcon_group_refresh(char *name, struct list_head __user *result_up)
725int 725int
726lstcon_group_list(int index, int len, char __user *name_up) 726lstcon_group_list(int index, int len, char __user *name_up)
727{ 727{
728 lstcon_group_t *grp; 728 struct lstcon_group *grp;
729 729
730 LASSERT(index >= 0); 730 LASSERT(index >= 0);
731 LASSERT(name_up); 731 LASSERT(name_up);
@@ -744,8 +744,8 @@ static int
744lstcon_nodes_getent(struct list_head *head, int *index_p, 744lstcon_nodes_getent(struct list_head *head, int *index_p,
745 int *count_p, lstcon_node_ent_t __user *dents_up) 745 int *count_p, lstcon_node_ent_t __user *dents_up)
746{ 746{
747 lstcon_ndlink_t *ndl; 747 struct lstcon_ndlink *ndl;
748 lstcon_node_t *nd; 748 struct lstcon_node *nd;
749 int count = 0; 749 int count = 0;
750 int index = 0; 750 int index = 0;
751 751
@@ -786,8 +786,8 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
786 lstcon_node_ent_t __user *dents_up) 786 lstcon_node_ent_t __user *dents_up)
787{ 787{
788 lstcon_ndlist_ent_t *gentp; 788 lstcon_ndlist_ent_t *gentp;
789 lstcon_group_t *grp; 789 struct lstcon_group *grp;
790 lstcon_ndlink_t *ndl; 790 struct lstcon_ndlink *ndl;
791 int rc; 791 int rc;
792 792
793 rc = lstcon_group_find(name, &grp); 793 rc = lstcon_group_find(name, &grp);
@@ -828,9 +828,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
828} 828}
829 829
830static int 830static int
831lstcon_batch_find(const char *name, lstcon_batch_t **batpp) 831lstcon_batch_find(const char *name, struct lstcon_batch **batpp)
832{ 832{
833 lstcon_batch_t *bat; 833 struct lstcon_batch *bat;
834 834
835 list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { 835 list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
836 if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) { 836 if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
@@ -845,7 +845,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
845int 845int
846lstcon_batch_add(char *name) 846lstcon_batch_add(char *name)
847{ 847{
848 lstcon_batch_t *bat; 848 struct lstcon_batch *bat;
849 int i; 849 int i;
850 int rc; 850 int rc;
851 851
@@ -855,7 +855,7 @@ lstcon_batch_add(char *name)
855 return rc; 855 return rc;
856 } 856 }
857 857
858 LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t)); 858 LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
859 if (!bat) { 859 if (!bat) {
860 CERROR("Can't allocate descriptor for batch %s\n", name); 860 CERROR("Can't allocate descriptor for batch %s\n", name);
861 return -ENOMEM; 861 return -ENOMEM;
@@ -865,7 +865,7 @@ lstcon_batch_add(char *name)
865 sizeof(struct list_head) * LST_NODE_HASHSIZE); 865 sizeof(struct list_head) * LST_NODE_HASHSIZE);
866 if (!bat->bat_cli_hash) { 866 if (!bat->bat_cli_hash) {
867 CERROR("Can't allocate hash for batch %s\n", name); 867 CERROR("Can't allocate hash for batch %s\n", name);
868 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 868 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
869 869
870 return -ENOMEM; 870 return -ENOMEM;
871 } 871 }
@@ -875,7 +875,7 @@ lstcon_batch_add(char *name)
875 if (!bat->bat_srv_hash) { 875 if (!bat->bat_srv_hash) {
876 CERROR("Can't allocate hash for batch %s\n", name); 876 CERROR("Can't allocate hash for batch %s\n", name);
877 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); 877 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
878 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 878 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
879 879
880 return -ENOMEM; 880 return -ENOMEM;
881 } 881 }
@@ -883,7 +883,7 @@ lstcon_batch_add(char *name)
883 if (strlen(name) > sizeof(bat->bat_name) - 1) { 883 if (strlen(name) > sizeof(bat->bat_name) - 1) {
884 LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE); 884 LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
885 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); 885 LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
886 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 886 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
887 return -E2BIG; 887 return -E2BIG;
888 } 888 }
889 strncpy(bat->bat_name, name, sizeof(bat->bat_name)); 889 strncpy(bat->bat_name, name, sizeof(bat->bat_name));
@@ -911,7 +911,7 @@ lstcon_batch_add(char *name)
911int 911int
912lstcon_batch_list(int index, int len, char __user *name_up) 912lstcon_batch_list(int index, int len, char __user *name_up)
913{ 913{
914 lstcon_batch_t *bat; 914 struct lstcon_batch *bat;
915 915
916 LASSERT(name_up); 916 LASSERT(name_up);
917 LASSERT(index >= 0); 917 LASSERT(index >= 0);
@@ -935,8 +935,8 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
935 struct list_head *clilst; 935 struct list_head *clilst;
936 struct list_head *srvlst; 936 struct list_head *srvlst;
937 lstcon_test_t *test = NULL; 937 lstcon_test_t *test = NULL;
938 lstcon_batch_t *bat; 938 struct lstcon_batch *bat;
939 lstcon_ndlink_t *ndl; 939 struct lstcon_ndlink *ndl;
940 int rc; 940 int rc;
941 941
942 rc = lstcon_batch_find(name, &bat); 942 rc = lstcon_batch_find(name, &bat);
@@ -998,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
998} 998}
999 999
1000static int 1000static int
1001lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg) 1001lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg)
1002{ 1002{
1003 switch (transop) { 1003 switch (transop) {
1004 case LST_TRANS_TSBRUN: 1004 case LST_TRANS_TSBRUN:
@@ -1020,10 +1020,10 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
1020} 1020}
1021 1021
1022static int 1022static int
1023lstcon_batch_op(lstcon_batch_t *bat, int transop, 1023lstcon_batch_op(struct lstcon_batch *bat, int transop,
1024 struct list_head __user *result_up) 1024 struct list_head __user *result_up)
1025{ 1025{
1026 lstcon_rpc_trans_t *trans; 1026 struct lstcon_rpc_trans *trans;
1027 int rc; 1027 int rc;
1028 1028
1029 rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list, 1029 rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
@@ -1046,7 +1046,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
1046int 1046int
1047lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up) 1047lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
1048{ 1048{
1049 lstcon_batch_t *bat; 1049 struct lstcon_batch *bat;
1050 int rc; 1050 int rc;
1051 1051
1052 if (lstcon_batch_find(name, &bat)) { 1052 if (lstcon_batch_find(name, &bat)) {
@@ -1068,7 +1068,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
1068int 1068int
1069lstcon_batch_stop(char *name, int force, struct list_head __user *result_up) 1069lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
1070{ 1070{
1071 lstcon_batch_t *bat; 1071 struct lstcon_batch *bat;
1072 int rc; 1072 int rc;
1073 1073
1074 if (lstcon_batch_find(name, &bat)) { 1074 if (lstcon_batch_find(name, &bat)) {
@@ -1088,9 +1088,9 @@ lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
1088} 1088}
1089 1089
1090static void 1090static void
1091lstcon_batch_destroy(lstcon_batch_t *bat) 1091lstcon_batch_destroy(struct lstcon_batch *bat)
1092{ 1092{
1093 lstcon_ndlink_t *ndl; 1093 struct lstcon_ndlink *ndl;
1094 lstcon_test_t *test; 1094 lstcon_test_t *test;
1095 int i; 1095 int i;
1096 1096
@@ -1114,7 +1114,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1114 1114
1115 while (!list_empty(&bat->bat_cli_list)) { 1115 while (!list_empty(&bat->bat_cli_list)) {
1116 ndl = list_entry(bat->bat_cli_list.next, 1116 ndl = list_entry(bat->bat_cli_list.next,
1117 lstcon_ndlink_t, ndl_link); 1117 struct lstcon_ndlink, ndl_link);
1118 list_del_init(&ndl->ndl_link); 1118 list_del_init(&ndl->ndl_link);
1119 1119
1120 lstcon_ndlink_release(ndl); 1120 lstcon_ndlink_release(ndl);
@@ -1122,7 +1122,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1122 1122
1123 while (!list_empty(&bat->bat_srv_list)) { 1123 while (!list_empty(&bat->bat_srv_list)) {
1124 ndl = list_entry(bat->bat_srv_list.next, 1124 ndl = list_entry(bat->bat_srv_list.next,
1125 lstcon_ndlink_t, ndl_link); 1125 struct lstcon_ndlink, ndl_link);
1126 list_del_init(&ndl->ndl_link); 1126 list_del_init(&ndl->ndl_link);
1127 1127
1128 lstcon_ndlink_release(ndl); 1128 lstcon_ndlink_release(ndl);
@@ -1137,15 +1137,15 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
1137 sizeof(struct list_head) * LST_NODE_HASHSIZE); 1137 sizeof(struct list_head) * LST_NODE_HASHSIZE);
1138 LIBCFS_FREE(bat->bat_srv_hash, 1138 LIBCFS_FREE(bat->bat_srv_hash,
1139 sizeof(struct list_head) * LST_NODE_HASHSIZE); 1139 sizeof(struct list_head) * LST_NODE_HASHSIZE);
1140 LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); 1140 LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
1141} 1141}
1142 1142
1143static int 1143static int
1144lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) 1144lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
1145{ 1145{
1146 lstcon_test_t *test; 1146 lstcon_test_t *test;
1147 lstcon_batch_t *batch; 1147 struct lstcon_batch *batch;
1148 lstcon_ndlink_t *ndl; 1148 struct lstcon_ndlink *ndl;
1149 struct list_head *hash; 1149 struct list_head *hash;
1150 struct list_head *head; 1150 struct list_head *head;
1151 1151
@@ -1187,8 +1187,8 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
1187static int 1187static int
1188lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up) 1188lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
1189{ 1189{
1190 lstcon_rpc_trans_t *trans; 1190 struct lstcon_rpc_trans *trans;
1191 lstcon_group_t *grp; 1191 struct lstcon_group *grp;
1192 int transop; 1192 int transop;
1193 int rc; 1193 int rc;
1194 1194
@@ -1236,7 +1236,7 @@ again:
1236} 1236}
1237 1237
1238static int 1238static int
1239lstcon_verify_batch(const char *name, lstcon_batch_t **batch) 1239lstcon_verify_batch(const char *name, struct lstcon_batch **batch)
1240{ 1240{
1241 int rc; 1241 int rc;
1242 1242
@@ -1255,10 +1255,10 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
1255} 1255}
1256 1256
1257static int 1257static int
1258lstcon_verify_group(const char *name, lstcon_group_t **grp) 1258lstcon_verify_group(const char *name, struct lstcon_group **grp)
1259{ 1259{
1260 int rc; 1260 int rc;
1261 lstcon_ndlink_t *ndl; 1261 struct lstcon_ndlink *ndl;
1262 1262
1263 rc = lstcon_group_find(name, grp); 1263 rc = lstcon_group_find(name, grp);
1264 if (rc) { 1264 if (rc) {
@@ -1285,9 +1285,9 @@ lstcon_test_add(char *batch_name, int type, int loop,
1285{ 1285{
1286 lstcon_test_t *test = NULL; 1286 lstcon_test_t *test = NULL;
1287 int rc; 1287 int rc;
1288 lstcon_group_t *src_grp = NULL; 1288 struct lstcon_group *src_grp = NULL;
1289 lstcon_group_t *dst_grp = NULL; 1289 struct lstcon_group *dst_grp = NULL;
1290 lstcon_batch_t *batch = NULL; 1290 struct lstcon_batch *batch = NULL;
1291 1291
1292 /* 1292 /*
1293 * verify that a batch of the given name exists, and the groups 1293 * verify that a batch of the given name exists, and the groups
@@ -1368,7 +1368,7 @@ out:
1368} 1368}
1369 1369
1370static int 1370static int
1371lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp) 1371lstcon_test_find(struct lstcon_batch *batch, int idx, lstcon_test_t **testpp)
1372{ 1372{
1373 lstcon_test_t *test; 1373 lstcon_test_t *test;
1374 1374
@@ -1383,7 +1383,7 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
1383} 1383}
1384 1384
1385static int 1385static int
1386lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg, 1386lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg,
1387 lstcon_rpc_ent_t __user *ent_up) 1387 lstcon_rpc_ent_t __user *ent_up)
1388{ 1388{
1389 srpc_batch_reply_t *rep = &msg->msg_body.bat_reply; 1389 srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
@@ -1403,11 +1403,11 @@ int
1403lstcon_test_batch_query(char *name, int testidx, int client, 1403lstcon_test_batch_query(char *name, int testidx, int client,
1404 int timeout, struct list_head __user *result_up) 1404 int timeout, struct list_head __user *result_up)
1405{ 1405{
1406 lstcon_rpc_trans_t *trans; 1406 struct lstcon_rpc_trans *trans;
1407 struct list_head *translist; 1407 struct list_head *translist;
1408 struct list_head *ndlist; 1408 struct list_head *ndlist;
1409 lstcon_tsb_hdr_t *hdr; 1409 struct lstcon_tsb_hdr *hdr;
1410 lstcon_batch_t *batch; 1410 struct lstcon_batch *batch;
1411 lstcon_test_t *test = NULL; 1411 lstcon_test_t *test = NULL;
1412 int transop; 1412 int transop;
1413 int rc; 1413 int rc;
@@ -1462,7 +1462,7 @@ lstcon_test_batch_query(char *name, int testidx, int client,
1462} 1462}
1463 1463
1464static int 1464static int
1465lstcon_statrpc_readent(int transop, srpc_msg_t *msg, 1465lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
1466 lstcon_rpc_ent_t __user *ent_up) 1466 lstcon_rpc_ent_t __user *ent_up)
1467{ 1467{
1468 srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; 1468 srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
@@ -1490,7 +1490,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
1490 int timeout, struct list_head __user *result_up) 1490 int timeout, struct list_head __user *result_up)
1491{ 1491{
1492 struct list_head head; 1492 struct list_head head;
1493 lstcon_rpc_trans_t *trans; 1493 struct lstcon_rpc_trans *trans;
1494 int rc; 1494 int rc;
1495 1495
1496 INIT_LIST_HEAD(&head); 1496 INIT_LIST_HEAD(&head);
@@ -1515,7 +1515,7 @@ int
1515lstcon_group_stat(char *grp_name, int timeout, 1515lstcon_group_stat(char *grp_name, int timeout,
1516 struct list_head __user *result_up) 1516 struct list_head __user *result_up)
1517{ 1517{
1518 lstcon_group_t *grp; 1518 struct lstcon_group *grp;
1519 int rc; 1519 int rc;
1520 1520
1521 rc = lstcon_group_find(grp_name, &grp); 1521 rc = lstcon_group_find(grp_name, &grp);
@@ -1535,8 +1535,8 @@ int
1535lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up, 1535lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
1536 int timeout, struct list_head __user *result_up) 1536 int timeout, struct list_head __user *result_up)
1537{ 1537{
1538 lstcon_ndlink_t *ndl; 1538 struct lstcon_ndlink *ndl;
1539 lstcon_group_t *tmp; 1539 struct lstcon_group *tmp;
1540 lnet_process_id_t id; 1540 lnet_process_id_t id;
1541 int i; 1541 int i;
1542 int rc; 1542 int rc;
@@ -1580,7 +1580,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
1580 struct list_head *translist, 1580 struct list_head *translist,
1581 int timeout, struct list_head __user *result_up) 1581 int timeout, struct list_head __user *result_up)
1582{ 1582{
1583 lstcon_rpc_trans_t *trans; 1583 struct lstcon_rpc_trans *trans;
1584 int rc; 1584 int rc;
1585 1585
1586 rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY, 1586 rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
@@ -1610,7 +1610,7 @@ int
1610lstcon_batch_debug(int timeout, char *name, 1610lstcon_batch_debug(int timeout, char *name,
1611 int client, struct list_head __user *result_up) 1611 int client, struct list_head __user *result_up)
1612{ 1612{
1613 lstcon_batch_t *bat; 1613 struct lstcon_batch *bat;
1614 int rc; 1614 int rc;
1615 1615
1616 rc = lstcon_batch_find(name, &bat); 1616 rc = lstcon_batch_find(name, &bat);
@@ -1628,7 +1628,7 @@ int
1628lstcon_group_debug(int timeout, char *name, 1628lstcon_group_debug(int timeout, char *name,
1629 struct list_head __user *result_up) 1629 struct list_head __user *result_up)
1630{ 1630{
1631 lstcon_group_t *grp; 1631 struct lstcon_group *grp;
1632 int rc; 1632 int rc;
1633 1633
1634 rc = lstcon_group_find(name, &grp); 1634 rc = lstcon_group_find(name, &grp);
@@ -1648,8 +1648,8 @@ lstcon_nodes_debug(int timeout,
1648 struct list_head __user *result_up) 1648 struct list_head __user *result_up)
1649{ 1649{
1650 lnet_process_id_t id; 1650 lnet_process_id_t id;
1651 lstcon_ndlink_t *ndl; 1651 struct lstcon_ndlink *ndl;
1652 lstcon_group_t *grp; 1652 struct lstcon_group *grp;
1653 int i; 1653 int i;
1654 int rc; 1654 int rc;
1655 1655
@@ -1757,7 +1757,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
1757 1757
1758 rc = lstcon_rpc_pinger_start(); 1758 rc = lstcon_rpc_pinger_start();
1759 if (rc) { 1759 if (rc) {
1760 lstcon_batch_t *bat = NULL; 1760 struct lstcon_batch *bat = NULL;
1761 1761
1762 lstcon_batch_find(LST_DEFAULT_BATCH, &bat); 1762 lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
1763 lstcon_batch_destroy(bat); 1763 lstcon_batch_destroy(bat);
@@ -1781,7 +1781,7 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
1781 char __user *name_up, int len) 1781 char __user *name_up, int len)
1782{ 1782{
1783 lstcon_ndlist_ent_t *entp; 1783 lstcon_ndlist_ent_t *entp;
1784 lstcon_ndlink_t *ndl; 1784 struct lstcon_ndlink *ndl;
1785 int rc = 0; 1785 int rc = 0;
1786 1786
1787 if (console_session.ses_state != LST_SESSION_ACTIVE) 1787 if (console_session.ses_state != LST_SESSION_ACTIVE)
@@ -1812,9 +1812,9 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
1812int 1812int
1813lstcon_session_end(void) 1813lstcon_session_end(void)
1814{ 1814{
1815 lstcon_rpc_trans_t *trans; 1815 struct lstcon_rpc_trans *trans;
1816 lstcon_group_t *grp; 1816 struct lstcon_group *grp;
1817 lstcon_batch_t *bat; 1817 struct lstcon_batch *bat;
1818 int rc = 0; 1818 int rc = 0;
1819 1819
1820 LASSERT(console_session.ses_state == LST_SESSION_ACTIVE); 1820 LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
@@ -1848,7 +1848,7 @@ lstcon_session_end(void)
1848 /* destroy all batches */ 1848 /* destroy all batches */
1849 while (!list_empty(&console_session.ses_bat_list)) { 1849 while (!list_empty(&console_session.ses_bat_list)) {
1850 bat = list_entry(console_session.ses_bat_list.next, 1850 bat = list_entry(console_session.ses_bat_list.next,
1851 lstcon_batch_t, bat_link); 1851 struct lstcon_batch, bat_link);
1852 1852
1853 lstcon_batch_destroy(bat); 1853 lstcon_batch_destroy(bat);
1854 } 1854 }
@@ -1856,7 +1856,7 @@ lstcon_session_end(void)
1856 /* destroy all groups */ 1856 /* destroy all groups */
1857 while (!list_empty(&console_session.ses_grp_list)) { 1857 while (!list_empty(&console_session.ses_grp_list)) {
1858 grp = list_entry(console_session.ses_grp_list.next, 1858 grp = list_entry(console_session.ses_grp_list.next,
1859 lstcon_group_t, grp_link); 1859 struct lstcon_group, grp_link);
1860 LASSERT(grp->grp_ref == 1); 1860 LASSERT(grp->grp_ref == 1);
1861 1861
1862 lstcon_group_decref(grp); 1862 lstcon_group_decref(grp);
@@ -1905,12 +1905,12 @@ lstcon_session_feats_check(unsigned feats)
1905static int 1905static int
1906lstcon_acceptor_handle(struct srpc_server_rpc *rpc) 1906lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
1907{ 1907{
1908 srpc_msg_t *rep = &rpc->srpc_replymsg; 1908 struct srpc_msg *rep = &rpc->srpc_replymsg;
1909 srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; 1909 struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg;
1910 srpc_join_reqst_t *jreq = &req->msg_body.join_reqst; 1910 srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
1911 srpc_join_reply_t *jrep = &rep->msg_body.join_reply; 1911 srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
1912 lstcon_group_t *grp = NULL; 1912 struct lstcon_group *grp = NULL;
1913 lstcon_ndlink_t *ndl; 1913 struct lstcon_ndlink *ndl;
1914 int rc = 0; 1914 int rc = 0;
1915 1915
1916 sfw_unpack_message(req); 1916 sfw_unpack_message(req);
@@ -1986,7 +1986,8 @@ out:
1986 return rc; 1986 return rc;
1987} 1987}
1988 1988
1989static srpc_service_t lstcon_acceptor_service; 1989static struct srpc_service lstcon_acceptor_service;
1990
1990static void lstcon_init_acceptor_service(void) 1991static void lstcon_init_acceptor_service(void)
1991{ 1992{
1992 /* initialize selftest console acceptor service table */ 1993 /* initialize selftest console acceptor service table */
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 554f582441f1..ccd4982b0cd6 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -50,22 +50,25 @@
50#include "selftest.h" 50#include "selftest.h"
51#include "conrpc.h" 51#include "conrpc.h"
52 52
53typedef struct lstcon_node { 53/* node descriptor */
54struct lstcon_node {
54 lnet_process_id_t nd_id; /* id of the node */ 55 lnet_process_id_t nd_id; /* id of the node */
55 int nd_ref; /* reference count */ 56 int nd_ref; /* reference count */
56 int nd_state; /* state of the node */ 57 int nd_state; /* state of the node */
57 int nd_timeout; /* session timeout */ 58 int nd_timeout; /* session timeout */
58 unsigned long nd_stamp; /* timestamp of last replied RPC */ 59 unsigned long nd_stamp; /* timestamp of last replied RPC */
59 struct lstcon_rpc nd_ping; /* ping rpc */ 60 struct lstcon_rpc nd_ping; /* ping rpc */
60} lstcon_node_t; /* node descriptor */ 61};
61 62
62typedef struct { 63/* node link descriptor */
64struct lstcon_ndlink {
63 struct list_head ndl_link; /* chain on list */ 65 struct list_head ndl_link; /* chain on list */
64 struct list_head ndl_hlink; /* chain on hash */ 66 struct list_head ndl_hlink; /* chain on hash */
65 lstcon_node_t *ndl_node; /* pointer to node */ 67 struct lstcon_node *ndl_node; /* pointer to node */
66} lstcon_ndlink_t; /* node link descriptor */ 68};
67 69
68typedef struct { 70/* (alias of nodes) group descriptor */
71struct lstcon_group {
69 struct list_head grp_link; /* chain on global group list 72 struct list_head grp_link; /* chain on global group list
70 */ 73 */
71 int grp_ref; /* reference count */ 74 int grp_ref; /* reference count */
@@ -76,18 +79,19 @@ typedef struct {
76 struct list_head grp_trans_list; /* transaction list */ 79 struct list_head grp_trans_list; /* transaction list */
77 struct list_head grp_ndl_list; /* nodes list */ 80 struct list_head grp_ndl_list; /* nodes list */
78 struct list_head grp_ndl_hash[0]; /* hash table for nodes */ 81 struct list_head grp_ndl_hash[0]; /* hash table for nodes */
79} lstcon_group_t; /* (alias of nodes) group descriptor */ 82};
80 83
81#define LST_BATCH_IDLE 0xB0 /* idle batch */ 84#define LST_BATCH_IDLE 0xB0 /* idle batch */
82#define LST_BATCH_RUNNING 0xB1 /* running batch */ 85#define LST_BATCH_RUNNING 0xB1 /* running batch */
83 86
84typedef struct lstcon_tsb_hdr { 87struct lstcon_tsb_hdr {
85 lst_bid_t tsb_id; /* batch ID */ 88 lst_bid_t tsb_id; /* batch ID */
86 int tsb_index; /* test index */ 89 int tsb_index; /* test index */
87} lstcon_tsb_hdr_t; 90};
88 91
89typedef struct { 92/* (tests ) batch descriptor */
90 lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ 93struct lstcon_batch {
94 struct lstcon_tsb_hdr bat_hdr; /* test_batch header */
91 struct list_head bat_link; /* chain on session's batches list */ 95 struct list_head bat_link; /* chain on session's batches list */
92 int bat_ntest; /* # of test */ 96 int bat_ntest; /* # of test */
93 int bat_state; /* state of the batch */ 97 int bat_state; /* state of the batch */
@@ -99,16 +103,16 @@ typedef struct {
99 */ 103 */
100 struct list_head bat_trans_list; /* list head of transaction */ 104 struct list_head bat_trans_list; /* list head of transaction */
101 struct list_head bat_cli_list; /* list head of client nodes 105 struct list_head bat_cli_list; /* list head of client nodes
102 * (lstcon_node_t) */ 106 * (struct lstcon_node) */
103 struct list_head *bat_cli_hash; /* hash table of client nodes */ 107 struct list_head *bat_cli_hash; /* hash table of client nodes */
104 struct list_head bat_srv_list; /* list head of server nodes */ 108 struct list_head bat_srv_list; /* list head of server nodes */
105 struct list_head *bat_srv_hash; /* hash table of server nodes */ 109 struct list_head *bat_srv_hash; /* hash table of server nodes */
106} lstcon_batch_t; /* (tests ) batch descriptor */ 110};
107 111
108typedef struct lstcon_test { 112typedef struct lstcon_test {
109 lstcon_tsb_hdr_t tes_hdr; /* test batch header */ 113 struct lstcon_tsb_hdr tes_hdr; /* test batch header */
110 struct list_head tes_link; /* chain on batch's tests list */ 114 struct list_head tes_link; /* chain on batch's tests list */
111 lstcon_batch_t *tes_batch; /* pointer to batch */ 115 struct lstcon_batch *tes_batch; /* pointer to batch */
112 116
113 int tes_type; /* type of the test, i.e: bulk, ping */ 117 int tes_type; /* type of the test, i.e: bulk, ping */
114 int tes_stop_onerr; /* stop on error */ 118 int tes_stop_onerr; /* stop on error */
@@ -120,8 +124,8 @@ typedef struct lstcon_test {
120 int tes_cliidx; /* client index, used for RPC creating */ 124 int tes_cliidx; /* client index, used for RPC creating */
121 125
122 struct list_head tes_trans_list; /* transaction list */ 126 struct list_head tes_trans_list; /* transaction list */
123 lstcon_group_t *tes_src_grp; /* group run the test */ 127 struct lstcon_group *tes_src_grp; /* group run the test */
124 lstcon_group_t *tes_dst_grp; /* target group */ 128 struct lstcon_group *tes_dst_grp; /* target group */
125 129
126 int tes_paramlen; /* test parameter length */ 130 int tes_paramlen; /* test parameter length */
127 char tes_param[0]; /* test parameter */ 131 char tes_param[0]; /* test parameter */
@@ -152,7 +156,7 @@ struct lstcon_session {
152 unsigned ses_expired:1; /* console is timedout */ 156 unsigned ses_expired:1; /* console is timedout */
153 __u64 ses_id_cookie; /* batch id cookie */ 157 __u64 ses_id_cookie; /* batch id cookie */
154 char ses_name[LST_NAME_SIZE];/* session name */ 158 char ses_name[LST_NAME_SIZE];/* session name */
155 lstcon_rpc_trans_t *ses_ping; /* session pinger */ 159 struct lstcon_rpc_trans *ses_ping; /* session pinger */
156 struct stt_timer ses_ping_timer; /* timer for pinger */ 160 struct stt_timer ses_ping_timer; /* timer for pinger */
157 lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ 161 lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
158 162
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index aa646a780f58..ef3cc8264daa 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -109,19 +109,19 @@ static struct smoketest_framework {
109 struct list_head fw_tests; /* registered test cases */ 109 struct list_head fw_tests; /* registered test cases */
110 atomic_t fw_nzombies; /* # zombie sessions */ 110 atomic_t fw_nzombies; /* # zombie sessions */
111 spinlock_t fw_lock; /* serialise */ 111 spinlock_t fw_lock; /* serialise */
112 sfw_session_t *fw_session; /* _the_ session */ 112 struct sfw_session *fw_session; /* _the_ session */
113 int fw_shuttingdown; /* shutdown in progress */ 113 int fw_shuttingdown; /* shutdown in progress */
114 struct srpc_server_rpc *fw_active_srpc;/* running RPC */ 114 struct srpc_server_rpc *fw_active_srpc;/* running RPC */
115} sfw_data; 115} sfw_data;
116 116
117/* forward ref's */ 117/* forward ref's */
118int sfw_stop_batch(sfw_batch_t *tsb, int force); 118int sfw_stop_batch(struct sfw_batch *tsb, int force);
119void sfw_destroy_session(sfw_session_t *sn); 119void sfw_destroy_session(struct sfw_session *sn);
120 120
121static inline sfw_test_case_t * 121static inline struct sfw_test_case *
122sfw_find_test_case(int id) 122sfw_find_test_case(int id)
123{ 123{
124 sfw_test_case_t *tsc; 124 struct sfw_test_case *tsc;
125 125
126 LASSERT(id <= SRPC_SERVICE_MAX_ID); 126 LASSERT(id <= SRPC_SERVICE_MAX_ID);
127 LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID); 127 LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -135,9 +135,9 @@ sfw_find_test_case(int id)
135} 135}
136 136
137static int 137static int
138sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) 138sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
139{ 139{
140 sfw_test_case_t *tsc; 140 struct sfw_test_case *tsc;
141 141
142 if (sfw_find_test_case(service->sv_id)) { 142 if (sfw_find_test_case(service->sv_id)) {
143 CERROR("Failed to register test %s (%d)\n", 143 CERROR("Failed to register test %s (%d)\n",
@@ -145,7 +145,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
145 return -EEXIST; 145 return -EEXIST;
146 } 146 }
147 147
148 LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t)); 148 LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
149 if (!tsc) 149 if (!tsc)
150 return -ENOMEM; 150 return -ENOMEM;
151 151
@@ -159,7 +159,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
159static void 159static void
160sfw_add_session_timer(void) 160sfw_add_session_timer(void)
161{ 161{
162 sfw_session_t *sn = sfw_data.fw_session; 162 struct sfw_session *sn = sfw_data.fw_session;
163 struct stt_timer *timer = &sn->sn_timer; 163 struct stt_timer *timer = &sn->sn_timer;
164 164
165 LASSERT(!sfw_data.fw_shuttingdown); 165 LASSERT(!sfw_data.fw_shuttingdown);
@@ -177,7 +177,7 @@ sfw_add_session_timer(void)
177static int 177static int
178sfw_del_session_timer(void) 178sfw_del_session_timer(void)
179{ 179{
180 sfw_session_t *sn = sfw_data.fw_session; 180 struct sfw_session *sn = sfw_data.fw_session;
181 181
182 if (!sn || !sn->sn_timer_active) 182 if (!sn || !sn->sn_timer_active)
183 return 0; 183 return 0;
@@ -196,10 +196,10 @@ static void
196sfw_deactivate_session(void) 196sfw_deactivate_session(void)
197__must_hold(&sfw_data.fw_lock) 197__must_hold(&sfw_data.fw_lock)
198{ 198{
199 sfw_session_t *sn = sfw_data.fw_session; 199 struct sfw_session *sn = sfw_data.fw_session;
200 int nactive = 0; 200 int nactive = 0;
201 sfw_batch_t *tsb; 201 struct sfw_batch *tsb;
202 sfw_test_case_t *tsc; 202 struct sfw_test_case *tsc;
203 203
204 if (!sn) 204 if (!sn)
205 return; 205 return;
@@ -239,7 +239,7 @@ __must_hold(&sfw_data.fw_lock)
239static void 239static void
240sfw_session_expired(void *data) 240sfw_session_expired(void *data)
241{ 241{
242 sfw_session_t *sn = data; 242 struct sfw_session *sn = data;
243 243
244 spin_lock(&sfw_data.fw_lock); 244 spin_lock(&sfw_data.fw_lock);
245 245
@@ -257,12 +257,12 @@ sfw_session_expired(void *data)
257} 257}
258 258
259static inline void 259static inline void
260sfw_init_session(sfw_session_t *sn, lst_sid_t sid, 260sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
261 unsigned features, const char *name) 261 unsigned features, const char *name)
262{ 262{
263 struct stt_timer *timer = &sn->sn_timer; 263 struct stt_timer *timer = &sn->sn_timer;
264 264
265 memset(sn, 0, sizeof(sfw_session_t)); 265 memset(sn, 0, sizeof(struct sfw_session));
266 INIT_LIST_HEAD(&sn->sn_list); 266 INIT_LIST_HEAD(&sn->sn_list);
267 INIT_LIST_HEAD(&sn->sn_batches); 267 INIT_LIST_HEAD(&sn->sn_batches);
268 atomic_set(&sn->sn_refcount, 1); /* +1 for caller */ 268 atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
@@ -298,7 +298,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
298} 298}
299 299
300static void 300static void
301sfw_client_rpc_fini(srpc_client_rpc_t *rpc) 301sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
302{ 302{
303 LASSERT(!rpc->crpc_bulk.bk_niov); 303 LASSERT(!rpc->crpc_bulk.bk_niov);
304 LASSERT(list_empty(&rpc->crpc_list)); 304 LASSERT(list_empty(&rpc->crpc_list));
@@ -318,11 +318,11 @@ sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
318 spin_unlock(&sfw_data.fw_lock); 318 spin_unlock(&sfw_data.fw_lock);
319} 319}
320 320
321static sfw_batch_t * 321static struct sfw_batch *
322sfw_find_batch(lst_bid_t bid) 322sfw_find_batch(lst_bid_t bid)
323{ 323{
324 sfw_session_t *sn = sfw_data.fw_session; 324 struct sfw_session *sn = sfw_data.fw_session;
325 sfw_batch_t *bat; 325 struct sfw_batch *bat;
326 326
327 LASSERT(sn); 327 LASSERT(sn);
328 328
@@ -334,11 +334,11 @@ sfw_find_batch(lst_bid_t bid)
334 return NULL; 334 return NULL;
335} 335}
336 336
337static sfw_batch_t * 337static struct sfw_batch *
338sfw_bid2batch(lst_bid_t bid) 338sfw_bid2batch(lst_bid_t bid)
339{ 339{
340 sfw_session_t *sn = sfw_data.fw_session; 340 struct sfw_session *sn = sfw_data.fw_session;
341 sfw_batch_t *bat; 341 struct sfw_batch *bat;
342 342
343 LASSERT(sn); 343 LASSERT(sn);
344 344
@@ -346,7 +346,7 @@ sfw_bid2batch(lst_bid_t bid)
346 if (bat) 346 if (bat)
347 return bat; 347 return bat;
348 348
349 LIBCFS_ALLOC(bat, sizeof(sfw_batch_t)); 349 LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
350 if (!bat) 350 if (!bat)
351 return NULL; 351 return NULL;
352 352
@@ -363,9 +363,9 @@ sfw_bid2batch(lst_bid_t bid)
363static int 363static int
364sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) 364sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
365{ 365{
366 sfw_session_t *sn = sfw_data.fw_session; 366 struct sfw_session *sn = sfw_data.fw_session;
367 sfw_counters_t *cnt = &reply->str_fw; 367 sfw_counters_t *cnt = &reply->str_fw;
368 sfw_batch_t *bat; 368 struct sfw_batch *bat;
369 369
370 reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id; 370 reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
371 371
@@ -404,8 +404,8 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
404int 404int
405sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) 405sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
406{ 406{
407 sfw_session_t *sn = sfw_data.fw_session; 407 struct sfw_session *sn = sfw_data.fw_session;
408 srpc_msg_t *msg = container_of(request, srpc_msg_t, 408 struct srpc_msg *msg = container_of(request, struct srpc_msg,
409 msg_body.mksn_reqst); 409 msg_body.mksn_reqst);
410 int cplen = 0; 410 int cplen = 0;
411 411
@@ -438,7 +438,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
438 /* 438 /*
439 * reject the request if it requires unknown features 439 * reject the request if it requires unknown features
440 * NB: old version will always accept all features because it's not 440 * NB: old version will always accept all features because it's not
441 * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also 441 * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
442 * harmless because it will return zero feature to console, and it's 442 * harmless because it will return zero feature to console, and it's
443 * console's responsibility to make sure all nodes in a session have 443 * console's responsibility to make sure all nodes in a session have
444 * same feature mask. 444 * same feature mask.
@@ -449,7 +449,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
449 } 449 }
450 450
451 /* brand new or create by force */ 451 /* brand new or create by force */
452 LIBCFS_ALLOC(sn, sizeof(sfw_session_t)); 452 LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
453 if (!sn) { 453 if (!sn) {
454 CERROR("dropping RPC mksn under memory pressure\n"); 454 CERROR("dropping RPC mksn under memory pressure\n");
455 return -ENOMEM; 455 return -ENOMEM;
@@ -475,7 +475,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
475static int 475static int
476sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) 476sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
477{ 477{
478 sfw_session_t *sn = sfw_data.fw_session; 478 struct sfw_session *sn = sfw_data.fw_session;
479 479
480 reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id; 480 reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
481 481
@@ -507,7 +507,7 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
507static int 507static int
508sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) 508sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
509{ 509{
510 sfw_session_t *sn = sfw_data.fw_session; 510 struct sfw_session *sn = sfw_data.fw_session;
511 511
512 if (!sn) { 512 if (!sn) {
513 reply->dbg_status = ESRCH; 513 reply->dbg_status = ESRCH;
@@ -526,10 +526,10 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
526} 526}
527 527
528static void 528static void
529sfw_test_rpc_fini(srpc_client_rpc_t *rpc) 529sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
530{ 530{
531 sfw_test_unit_t *tsu = rpc->crpc_priv; 531 struct sfw_test_unit *tsu = rpc->crpc_priv;
532 sfw_test_instance_t *tsi = tsu->tsu_instance; 532 struct sfw_test_instance *tsi = tsu->tsu_instance;
533 533
534 /* Called with hold of tsi->tsi_lock */ 534 /* Called with hold of tsi->tsi_lock */
535 LASSERT(list_empty(&rpc->crpc_list)); 535 LASSERT(list_empty(&rpc->crpc_list));
@@ -537,7 +537,7 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
537} 537}
538 538
539static inline int 539static inline int
540sfw_test_buffers(sfw_test_instance_t *tsi) 540sfw_test_buffers(struct sfw_test_instance *tsi)
541{ 541{
542 struct sfw_test_case *tsc; 542 struct sfw_test_case *tsc;
543 struct srpc_service *svc; 543 struct srpc_service *svc;
@@ -614,10 +614,10 @@ sfw_unload_test(struct sfw_test_instance *tsi)
614} 614}
615 615
616static void 616static void
617sfw_destroy_test_instance(sfw_test_instance_t *tsi) 617sfw_destroy_test_instance(struct sfw_test_instance *tsi)
618{ 618{
619 srpc_client_rpc_t *rpc; 619 struct srpc_client_rpc *rpc;
620 sfw_test_unit_t *tsu; 620 struct sfw_test_unit *tsu;
621 621
622 if (!tsi->tsi_is_client) 622 if (!tsi->tsi_is_client)
623 goto clean; 623 goto clean;
@@ -630,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
630 630
631 while (!list_empty(&tsi->tsi_units)) { 631 while (!list_empty(&tsi->tsi_units)) {
632 tsu = list_entry(tsi->tsi_units.next, 632 tsu = list_entry(tsi->tsi_units.next,
633 sfw_test_unit_t, tsu_list); 633 struct sfw_test_unit, tsu_list);
634 list_del(&tsu->tsu_list); 634 list_del(&tsu->tsu_list);
635 LIBCFS_FREE(tsu, sizeof(*tsu)); 635 LIBCFS_FREE(tsu, sizeof(*tsu));
636 } 636 }
637 637
638 while (!list_empty(&tsi->tsi_free_rpcs)) { 638 while (!list_empty(&tsi->tsi_free_rpcs)) {
639 rpc = list_entry(tsi->tsi_free_rpcs.next, 639 rpc = list_entry(tsi->tsi_free_rpcs.next,
640 srpc_client_rpc_t, crpc_list); 640 struct srpc_client_rpc, crpc_list);
641 list_del(&rpc->crpc_list); 641 list_del(&rpc->crpc_list);
642 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); 642 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
643 } 643 }
@@ -648,34 +648,34 @@ clean:
648} 648}
649 649
650static void 650static void
651sfw_destroy_batch(sfw_batch_t *tsb) 651sfw_destroy_batch(struct sfw_batch *tsb)
652{ 652{
653 sfw_test_instance_t *tsi; 653 struct sfw_test_instance *tsi;
654 654
655 LASSERT(!sfw_batch_active(tsb)); 655 LASSERT(!sfw_batch_active(tsb));
656 LASSERT(list_empty(&tsb->bat_list)); 656 LASSERT(list_empty(&tsb->bat_list));
657 657
658 while (!list_empty(&tsb->bat_tests)) { 658 while (!list_empty(&tsb->bat_tests)) {
659 tsi = list_entry(tsb->bat_tests.next, 659 tsi = list_entry(tsb->bat_tests.next,
660 sfw_test_instance_t, tsi_list); 660 struct sfw_test_instance, tsi_list);
661 list_del_init(&tsi->tsi_list); 661 list_del_init(&tsi->tsi_list);
662 sfw_destroy_test_instance(tsi); 662 sfw_destroy_test_instance(tsi);
663 } 663 }
664 664
665 LIBCFS_FREE(tsb, sizeof(sfw_batch_t)); 665 LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
666} 666}
667 667
668void 668void
669sfw_destroy_session(sfw_session_t *sn) 669sfw_destroy_session(struct sfw_session *sn)
670{ 670{
671 sfw_batch_t *batch; 671 struct sfw_batch *batch;
672 672
673 LASSERT(list_empty(&sn->sn_list)); 673 LASSERT(list_empty(&sn->sn_list));
674 LASSERT(sn != sfw_data.fw_session); 674 LASSERT(sn != sfw_data.fw_session);
675 675
676 while (!list_empty(&sn->sn_batches)) { 676 while (!list_empty(&sn->sn_batches)) {
677 batch = list_entry(sn->sn_batches.next, 677 batch = list_entry(sn->sn_batches.next,
678 sfw_batch_t, bat_list); 678 struct sfw_batch, bat_list);
679 list_del_init(&batch->bat_list); 679 list_del_init(&batch->bat_list);
680 sfw_destroy_batch(batch); 680 sfw_destroy_batch(batch);
681 } 681 }
@@ -685,7 +685,7 @@ sfw_destroy_session(sfw_session_t *sn)
685} 685}
686 686
687static void 687static void
688sfw_unpack_addtest_req(srpc_msg_t *msg) 688sfw_unpack_addtest_req(struct srpc_msg *msg)
689{ 689{
690 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; 690 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
691 691
@@ -729,14 +729,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
729} 729}
730 730
731static int 731static int
732sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) 732sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
733{ 733{
734 srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg; 734 struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
735 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; 735 srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
736 srpc_bulk_t *bk = rpc->srpc_bulk; 736 struct srpc_bulk *bk = rpc->srpc_bulk;
737 int ndest = req->tsr_ndest; 737 int ndest = req->tsr_ndest;
738 sfw_test_unit_t *tsu; 738 struct sfw_test_unit *tsu;
739 sfw_test_instance_t *tsi; 739 struct sfw_test_instance *tsi;
740 int i; 740 int i;
741 int rc; 741 int rc;
742 742
@@ -795,7 +795,7 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
795 sfw_unpack_id(id); 795 sfw_unpack_id(id);
796 796
797 for (j = 0; j < tsi->tsi_concur; j++) { 797 for (j = 0; j < tsi->tsi_concur; j++) {
798 LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t)); 798 LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
799 if (!tsu) { 799 if (!tsu) {
800 rc = -ENOMEM; 800 rc = -ENOMEM;
801 CERROR("Can't allocate tsu for %d\n", 801 CERROR("Can't allocate tsu for %d\n",
@@ -824,11 +824,11 @@ error:
824} 824}
825 825
826static void 826static void
827sfw_test_unit_done(sfw_test_unit_t *tsu) 827sfw_test_unit_done(struct sfw_test_unit *tsu)
828{ 828{
829 sfw_test_instance_t *tsi = tsu->tsu_instance; 829 struct sfw_test_instance *tsi = tsu->tsu_instance;
830 sfw_batch_t *tsb = tsi->tsi_batch; 830 struct sfw_batch *tsb = tsi->tsi_batch;
831 sfw_session_t *sn = tsb->bat_session; 831 struct sfw_session *sn = tsb->bat_session;
832 832
833 LASSERT(sfw_test_active(tsi)); 833 LASSERT(sfw_test_active(tsi));
834 834
@@ -866,10 +866,10 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
866} 866}
867 867
868static void 868static void
869sfw_test_rpc_done(srpc_client_rpc_t *rpc) 869sfw_test_rpc_done(struct srpc_client_rpc *rpc)
870{ 870{
871 sfw_test_unit_t *tsu = rpc->crpc_priv; 871 struct sfw_test_unit *tsu = rpc->crpc_priv;
872 sfw_test_instance_t *tsi = tsu->tsu_instance; 872 struct sfw_test_instance *tsi = tsu->tsu_instance;
873 int done = 0; 873 int done = 0;
874 874
875 tsi->tsi_ops->tso_done_rpc(tsu, rpc); 875 tsi->tsi_ops->tso_done_rpc(tsu, rpc);
@@ -900,19 +900,19 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
900} 900}
901 901
902int 902int
903sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, 903sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
904 unsigned features, int nblk, int blklen, 904 unsigned features, int nblk, int blklen,
905 srpc_client_rpc_t **rpcpp) 905 struct srpc_client_rpc **rpcpp)
906{ 906{
907 srpc_client_rpc_t *rpc = NULL; 907 struct srpc_client_rpc *rpc = NULL;
908 sfw_test_instance_t *tsi = tsu->tsu_instance; 908 struct sfw_test_instance *tsi = tsu->tsu_instance;
909 909
910 spin_lock(&tsi->tsi_lock); 910 spin_lock(&tsi->tsi_lock);
911 911
912 LASSERT(sfw_test_active(tsi)); 912 LASSERT(sfw_test_active(tsi));
913 /* pick request from buffer */ 913 /* pick request from buffer */
914 rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs, 914 rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
915 srpc_client_rpc_t, crpc_list); 915 struct srpc_client_rpc, crpc_list);
916 if (rpc) { 916 if (rpc) {
917 LASSERT(nblk == rpc->crpc_bulk.bk_niov); 917 LASSERT(nblk == rpc->crpc_bulk.bk_niov);
918 list_del_init(&rpc->crpc_list); 918 list_del_init(&rpc->crpc_list);
@@ -942,11 +942,11 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
942} 942}
943 943
944static int 944static int
945sfw_run_test(swi_workitem_t *wi) 945sfw_run_test(struct swi_workitem *wi)
946{ 946{
947 sfw_test_unit_t *tsu = wi->swi_workitem.wi_data; 947 struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
948 sfw_test_instance_t *tsi = tsu->tsu_instance; 948 struct sfw_test_instance *tsi = tsu->tsu_instance;
949 srpc_client_rpc_t *rpc = NULL; 949 struct srpc_client_rpc *rpc = NULL;
950 950
951 LASSERT(wi == &tsu->tsu_worker); 951 LASSERT(wi == &tsu->tsu_worker);
952 952
@@ -991,11 +991,11 @@ test_done:
991} 991}
992 992
993static int 993static int
994sfw_run_batch(sfw_batch_t *tsb) 994sfw_run_batch(struct sfw_batch *tsb)
995{ 995{
996 swi_workitem_t *wi; 996 struct swi_workitem *wi;
997 sfw_test_unit_t *tsu; 997 struct sfw_test_unit *tsu;
998 sfw_test_instance_t *tsi; 998 struct sfw_test_instance *tsi;
999 999
1000 if (sfw_batch_active(tsb)) { 1000 if (sfw_batch_active(tsb)) {
1001 CDEBUG(D_NET, "Batch already active: %llu (%d)\n", 1001 CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
@@ -1026,10 +1026,10 @@ sfw_run_batch(sfw_batch_t *tsb)
1026} 1026}
1027 1027
1028int 1028int
1029sfw_stop_batch(sfw_batch_t *tsb, int force) 1029sfw_stop_batch(struct sfw_batch *tsb, int force)
1030{ 1030{
1031 sfw_test_instance_t *tsi; 1031 struct sfw_test_instance *tsi;
1032 srpc_client_rpc_t *rpc; 1032 struct srpc_client_rpc *rpc;
1033 1033
1034 if (!sfw_batch_active(tsb)) { 1034 if (!sfw_batch_active(tsb)) {
1035 CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id); 1035 CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
@@ -1068,9 +1068,9 @@ sfw_stop_batch(sfw_batch_t *tsb, int force)
1068} 1068}
1069 1069
1070static int 1070static int
1071sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply) 1071sfw_query_batch(struct sfw_batch *tsb, int testidx, srpc_batch_reply_t *reply)
1072{ 1072{
1073 sfw_test_instance_t *tsi; 1073 struct sfw_test_instance *tsi;
1074 1074
1075 if (testidx < 0) 1075 if (testidx < 0)
1076 return -EINVAL; 1076 return -EINVAL;
@@ -1115,11 +1115,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
1115static int 1115static int
1116sfw_add_test(struct srpc_server_rpc *rpc) 1116sfw_add_test(struct srpc_server_rpc *rpc)
1117{ 1117{
1118 sfw_session_t *sn = sfw_data.fw_session; 1118 struct sfw_session *sn = sfw_data.fw_session;
1119 srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply; 1119 srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
1120 srpc_test_reqst_t *request; 1120 srpc_test_reqst_t *request;
1121 int rc; 1121 int rc;
1122 sfw_batch_t *bat; 1122 struct sfw_batch *bat;
1123 1123
1124 request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; 1124 request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
1125 reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id; 1125 reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
1161 int len; 1161 int len;
1162 1162
1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { 1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
1164 len = npg * PAGE_CACHE_SIZE; 1164 len = npg * PAGE_SIZE;
1165 1165
1166 } else { 1166 } else {
1167 len = sizeof(lnet_process_id_packed_t) * 1167 len = sizeof(lnet_process_id_packed_t) *
@@ -1185,9 +1185,9 @@ sfw_add_test(struct srpc_server_rpc *rpc)
1185static int 1185static int
1186sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) 1186sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
1187{ 1187{
1188 sfw_session_t *sn = sfw_data.fw_session; 1188 struct sfw_session *sn = sfw_data.fw_session;
1189 int rc = 0; 1189 int rc = 0;
1190 sfw_batch_t *bat; 1190 struct sfw_batch *bat;
1191 1191
1192 reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id; 1192 reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
1193 1193
@@ -1227,8 +1227,8 @@ static int
1227sfw_handle_server_rpc(struct srpc_server_rpc *rpc) 1227sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1228{ 1228{
1229 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 1229 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1230 srpc_msg_t *reply = &rpc->srpc_replymsg; 1230 struct srpc_msg *reply = &rpc->srpc_replymsg;
1231 srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg; 1231 struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
1232 unsigned features = LST_FEATS_MASK; 1232 unsigned features = LST_FEATS_MASK;
1233 int rc = 0; 1233 int rc = 0;
1234 1234
@@ -1261,7 +1261,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1261 1261
1262 if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION && 1262 if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
1263 sv->sv_id != SRPC_SERVICE_DEBUG) { 1263 sv->sv_id != SRPC_SERVICE_DEBUG) {
1264 sfw_session_t *sn = sfw_data.fw_session; 1264 struct sfw_session *sn = sfw_data.fw_session;
1265 1265
1266 if (sn && 1266 if (sn &&
1267 sn->sn_features != request->msg_ses_feats) { 1267 sn->sn_features != request->msg_ses_feats) {
@@ -1377,12 +1377,12 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
1377 return rc; 1377 return rc;
1378} 1378}
1379 1379
1380srpc_client_rpc_t * 1380struct srpc_client_rpc *
1381sfw_create_rpc(lnet_process_id_t peer, int service, 1381sfw_create_rpc(lnet_process_id_t peer, int service,
1382 unsigned features, int nbulkiov, int bulklen, 1382 unsigned features, int nbulkiov, int bulklen,
1383 void (*done)(srpc_client_rpc_t *), void *priv) 1383 void (*done)(struct srpc_client_rpc *), void *priv)
1384{ 1384{
1385 srpc_client_rpc_t *rpc = NULL; 1385 struct srpc_client_rpc *rpc = NULL;
1386 1386
1387 spin_lock(&sfw_data.fw_lock); 1387 spin_lock(&sfw_data.fw_lock);
1388 1388
@@ -1391,7 +1391,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
1391 1391
1392 if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) { 1392 if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
1393 rpc = list_entry(sfw_data.fw_zombie_rpcs.next, 1393 rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
1394 srpc_client_rpc_t, crpc_list); 1394 struct srpc_client_rpc, crpc_list);
1395 list_del(&rpc->crpc_list); 1395 list_del(&rpc->crpc_list);
1396 1396
1397 srpc_init_client_rpc(rpc, peer, service, 0, 0, 1397 srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1415,7 +1415,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
1415} 1415}
1416 1416
1417void 1417void
1418sfw_unpack_message(srpc_msg_t *msg) 1418sfw_unpack_message(struct srpc_msg *msg)
1419{ 1419{
1420 if (msg->msg_magic == SRPC_MSG_MAGIC) 1420 if (msg->msg_magic == SRPC_MSG_MAGIC)
1421 return; /* no flipping needed */ 1421 return; /* no flipping needed */
@@ -1558,7 +1558,7 @@ sfw_unpack_message(srpc_msg_t *msg)
1558} 1558}
1559 1559
1560void 1560void
1561sfw_abort_rpc(srpc_client_rpc_t *rpc) 1561sfw_abort_rpc(struct srpc_client_rpc *rpc)
1562{ 1562{
1563 LASSERT(atomic_read(&rpc->crpc_refcount) > 0); 1563 LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
1564 LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); 1564 LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -1569,7 +1569,7 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
1569} 1569}
1570 1570
1571void 1571void
1572sfw_post_rpc(srpc_client_rpc_t *rpc) 1572sfw_post_rpc(struct srpc_client_rpc *rpc)
1573{ 1573{
1574 spin_lock(&rpc->crpc_lock); 1574 spin_lock(&rpc->crpc_lock);
1575 1575
@@ -1584,7 +1584,7 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
1584 spin_unlock(&rpc->crpc_lock); 1584 spin_unlock(&rpc->crpc_lock);
1585} 1585}
1586 1586
1587static srpc_service_t sfw_services[] = { 1587static struct srpc_service sfw_services[] = {
1588 { 1588 {
1589 /* sv_id */ SRPC_SERVICE_DEBUG, 1589 /* sv_id */ SRPC_SERVICE_DEBUG,
1590 /* sv_name */ "debug", 1590 /* sv_name */ "debug",
@@ -1628,8 +1628,8 @@ sfw_startup(void)
1628 int i; 1628 int i;
1629 int rc; 1629 int rc;
1630 int error; 1630 int error;
1631 srpc_service_t *sv; 1631 struct srpc_service *sv;
1632 sfw_test_case_t *tsc; 1632 struct sfw_test_case *tsc;
1633 1633
1634 if (session_timeout < 0) { 1634 if (session_timeout < 0) {
1635 CERROR("Session timeout must be non-negative: %d\n", 1635 CERROR("Session timeout must be non-negative: %d\n",
@@ -1721,8 +1721,8 @@ sfw_startup(void)
1721void 1721void
1722sfw_shutdown(void) 1722sfw_shutdown(void)
1723{ 1723{
1724 srpc_service_t *sv; 1724 struct srpc_service *sv;
1725 sfw_test_case_t *tsc; 1725 struct sfw_test_case *tsc;
1726 int i; 1726 int i;
1727 1727
1728 spin_lock(&sfw_data.fw_lock); 1728 spin_lock(&sfw_data.fw_lock);
@@ -1759,10 +1759,10 @@ sfw_shutdown(void)
1759 } 1759 }
1760 1760
1761 while (!list_empty(&sfw_data.fw_zombie_rpcs)) { 1761 while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
1762 srpc_client_rpc_t *rpc; 1762 struct srpc_client_rpc *rpc;
1763 1763
1764 rpc = list_entry(sfw_data.fw_zombie_rpcs.next, 1764 rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
1765 srpc_client_rpc_t, crpc_list); 1765 struct srpc_client_rpc, crpc_list);
1766 list_del(&rpc->crpc_list); 1766 list_del(&rpc->crpc_list);
1767 1767
1768 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); 1768 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1778,7 +1778,7 @@ sfw_shutdown(void)
1778 1778
1779 while (!list_empty(&sfw_data.fw_tests)) { 1779 while (!list_empty(&sfw_data.fw_tests)) {
1780 tsc = list_entry(sfw_data.fw_tests.next, 1780 tsc = list_entry(sfw_data.fw_tests.next,
1781 sfw_test_case_t, tsc_list); 1781 struct sfw_test_case, tsc_list);
1782 1782
1783 srpc_wait_service_shutdown(tsc->tsc_srv_service); 1783 srpc_wait_service_shutdown(tsc->tsc_srv_service);
1784 1784
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index c7c50be6dab4..8a9d7a41f7de 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -56,9 +56,9 @@ struct lst_ping_data {
56static struct lst_ping_data lst_ping_data; 56static struct lst_ping_data lst_ping_data;
57 57
58static int 58static int
59ping_client_init(sfw_test_instance_t *tsi) 59ping_client_init(struct sfw_test_instance *tsi)
60{ 60{
61 sfw_session_t *sn = tsi->tsi_batch->bat_session; 61 struct sfw_session *sn = tsi->tsi_batch->bat_session;
62 62
63 LASSERT(tsi->tsi_is_client); 63 LASSERT(tsi->tsi_is_client);
64 LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK)); 64 LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
@@ -70,9 +70,9 @@ ping_client_init(sfw_test_instance_t *tsi)
70} 70}
71 71
72static void 72static void
73ping_client_fini(sfw_test_instance_t *tsi) 73ping_client_fini(struct sfw_test_instance *tsi)
74{ 74{
75 sfw_session_t *sn = tsi->tsi_batch->bat_session; 75 struct sfw_session *sn = tsi->tsi_batch->bat_session;
76 int errors; 76 int errors;
77 77
78 LASSERT(sn); 78 LASSERT(sn);
@@ -86,12 +86,12 @@ ping_client_fini(sfw_test_instance_t *tsi)
86} 86}
87 87
88static int 88static int
89ping_client_prep_rpc(sfw_test_unit_t *tsu, lnet_process_id_t dest, 89ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest,
90 srpc_client_rpc_t **rpc) 90 struct srpc_client_rpc **rpc)
91{ 91{
92 srpc_ping_reqst_t *req; 92 srpc_ping_reqst_t *req;
93 sfw_test_instance_t *tsi = tsu->tsu_instance; 93 struct sfw_test_instance *tsi = tsu->tsu_instance;
94 sfw_session_t *sn = tsi->tsi_batch->bat_session; 94 struct sfw_session *sn = tsi->tsi_batch->bat_session;
95 struct timespec64 ts; 95 struct timespec64 ts;
96 int rc; 96 int rc;
97 97
@@ -118,10 +118,10 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu, lnet_process_id_t dest,
118} 118}
119 119
120static void 120static void
121ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) 121ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
122{ 122{
123 sfw_test_instance_t *tsi = tsu->tsu_instance; 123 struct sfw_test_instance *tsi = tsu->tsu_instance;
124 sfw_session_t *sn = tsi->tsi_batch->bat_session; 124 struct sfw_session *sn = tsi->tsi_batch->bat_session;
125 srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; 125 srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
126 srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; 126 srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
127 struct timespec64 ts; 127 struct timespec64 ts;
@@ -171,8 +171,8 @@ static int
171ping_server_handle(struct srpc_server_rpc *rpc) 171ping_server_handle(struct srpc_server_rpc *rpc)
172{ 172{
173 struct srpc_service *sv = rpc->srpc_scd->scd_svc; 173 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
174 srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; 174 struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
175 srpc_msg_t *replymsg = &rpc->srpc_replymsg; 175 struct srpc_msg *replymsg = &rpc->srpc_replymsg;
176 srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; 176 srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
177 srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply; 177 srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
178 178
@@ -210,7 +210,8 @@ ping_server_handle(struct srpc_server_rpc *rpc)
210 return 0; 210 return 0;
211} 211}
212 212
213sfw_test_client_ops_t ping_test_client; 213struct sfw_test_client_ops ping_test_client;
214
214void ping_init_test_client(void) 215void ping_init_test_client(void)
215{ 216{
216 ping_test_client.tso_init = ping_client_init; 217 ping_test_client.tso_init = ping_client_init;
@@ -219,7 +220,8 @@ void ping_init_test_client(void)
219 ping_test_client.tso_done_rpc = ping_client_done_rpc; 220 ping_test_client.tso_done_rpc = ping_client_done_rpc;
220} 221}
221 222
222srpc_service_t ping_test_service; 223struct srpc_service ping_test_service;
224
223void ping_init_test_service(void) 225void ping_init_test_service(void)
224{ 226{
225 ping_test_service.sv_id = SRPC_SERVICE_PING; 227 ping_test_service.sv_id = SRPC_SERVICE_PING;
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 5d8908d258df..561e28c644b1 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -46,19 +46,19 @@
46 46
47#include "selftest.h" 47#include "selftest.h"
48 48
49typedef enum { 49enum srpc_state {
50 SRPC_STATE_NONE, 50 SRPC_STATE_NONE,
51 SRPC_STATE_NI_INIT, 51 SRPC_STATE_NI_INIT,
52 SRPC_STATE_EQ_INIT, 52 SRPC_STATE_EQ_INIT,
53 SRPC_STATE_RUNNING, 53 SRPC_STATE_RUNNING,
54 SRPC_STATE_STOPPING, 54 SRPC_STATE_STOPPING,
55} srpc_state_t; 55};
56 56
57static struct smoketest_rpc { 57static struct smoketest_rpc {
58 spinlock_t rpc_glock; /* global lock */ 58 spinlock_t rpc_glock; /* global lock */
59 srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1]; 59 struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
60 lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */ 60 lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
61 srpc_state_t rpc_state; 61 enum srpc_state rpc_state;
62 srpc_counters_t rpc_counters; 62 srpc_counters_t rpc_counters;
63 __u64 rpc_matchbits; /* matchbits counter */ 63 __u64 rpc_matchbits; /* matchbits counter */
64} srpc_data; 64} srpc_data;
@@ -71,7 +71,7 @@ srpc_serv_portal(int svc_id)
71} 71}
72 72
73/* forward ref's */ 73/* forward ref's */
74int srpc_handle_rpc(swi_workitem_t *wi); 74int srpc_handle_rpc(struct swi_workitem *wi);
75 75
76void srpc_get_counters(srpc_counters_t *cnt) 76void srpc_get_counters(srpc_counters_t *cnt)
77{ 77{
@@ -88,9 +88,9 @@ void srpc_set_counters(const srpc_counters_t *cnt)
88} 88}
89 89
90static int 90static int
91srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) 91srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
92{ 92{
93 nob = min_t(int, nob, PAGE_CACHE_SIZE); 93 nob = min_t(int, nob, PAGE_SIZE);
94 94
95 LASSERT(nob > 0); 95 LASSERT(nob > 0);
96 LASSERT(i >= 0 && i < bk->bk_niov); 96 LASSERT(i >= 0 && i < bk->bk_niov);
@@ -102,7 +102,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
102} 102}
103 103
104void 104void
105srpc_free_bulk(srpc_bulk_t *bk) 105srpc_free_bulk(struct srpc_bulk *bk)
106{ 106{
107 int i; 107 int i;
108 struct page *pg; 108 struct page *pg;
@@ -117,25 +117,25 @@ srpc_free_bulk(srpc_bulk_t *bk)
117 __free_page(pg); 117 __free_page(pg);
118 } 118 }
119 119
120 LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov])); 120 LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
121} 121}
122 122
123srpc_bulk_t * 123struct srpc_bulk *
124srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) 124srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
125{ 125{
126 srpc_bulk_t *bk; 126 struct srpc_bulk *bk;
127 int i; 127 int i;
128 128
129 LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); 129 LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
130 130
131 LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt, 131 LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
132 offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); 132 offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
133 if (!bk) { 133 if (!bk) {
134 CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); 134 CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
135 return NULL; 135 return NULL;
136 } 136 }
137 137
138 memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); 138 memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
139 bk->bk_sink = sink; 139 bk->bk_sink = sink;
140 bk->bk_len = bulk_len; 140 bk->bk_len = bulk_len;
141 bk->bk_niov = bulk_npg; 141 bk->bk_niov = bulk_npg;
@@ -338,7 +338,7 @@ srpc_add_service(struct srpc_service *sv)
338} 338}
339 339
340int 340int
341srpc_remove_service(srpc_service_t *sv) 341srpc_remove_service(struct srpc_service *sv)
342{ 342{
343 int id = sv->sv_id; 343 int id = sv->sv_id;
344 344
@@ -357,7 +357,7 @@ srpc_remove_service(srpc_service_t *sv)
357static int 357static int
358srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, 358srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
359 int len, int options, lnet_process_id_t peer, 359 int len, int options, lnet_process_id_t peer,
360 lnet_handle_md_t *mdh, srpc_event_t *ev) 360 lnet_handle_md_t *mdh, struct srpc_event *ev)
361{ 361{
362 int rc; 362 int rc;
363 lnet_md_t md; 363 lnet_md_t md;
@@ -396,7 +396,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
396static int 396static int
397srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, 397srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
398 int options, lnet_process_id_t peer, lnet_nid_t self, 398 int options, lnet_process_id_t peer, lnet_nid_t self,
399 lnet_handle_md_t *mdh, srpc_event_t *ev) 399 lnet_handle_md_t *mdh, struct srpc_event *ev)
400{ 400{
401 int rc; 401 int rc;
402 lnet_md_t md; 402 lnet_md_t md;
@@ -449,7 +449,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
449 449
450static int 450static int
451srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, 451srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
452 lnet_handle_md_t *mdh, srpc_event_t *ev) 452 lnet_handle_md_t *mdh, struct srpc_event *ev)
453{ 453{
454 lnet_process_id_t any = { 0 }; 454 lnet_process_id_t any = { 0 };
455 455
@@ -697,7 +697,7 @@ srpc_finish_service(struct srpc_service *sv)
697 697
698/* called with sv->sv_lock held */ 698/* called with sv->sv_lock held */
699static void 699static void
700srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) 700srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
701__must_hold(&scd->scd_lock) 701__must_hold(&scd->scd_lock)
702{ 702{
703 if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { 703 if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -755,11 +755,11 @@ srpc_abort_service(struct srpc_service *sv)
755} 755}
756 756
757void 757void
758srpc_shutdown_service(srpc_service_t *sv) 758srpc_shutdown_service(struct srpc_service *sv)
759{ 759{
760 struct srpc_service_cd *scd; 760 struct srpc_service_cd *scd;
761 struct srpc_server_rpc *rpc; 761 struct srpc_server_rpc *rpc;
762 srpc_buffer_t *buf; 762 struct srpc_buffer *buf;
763 int i; 763 int i;
764 764
765 CDEBUG(D_NET, "Shutting down service: id %d, name %s\n", 765 CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
@@ -792,9 +792,9 @@ srpc_shutdown_service(srpc_service_t *sv)
792} 792}
793 793
794static int 794static int
795srpc_send_request(srpc_client_rpc_t *rpc) 795srpc_send_request(struct srpc_client_rpc *rpc)
796{ 796{
797 srpc_event_t *ev = &rpc->crpc_reqstev; 797 struct srpc_event *ev = &rpc->crpc_reqstev;
798 int rc; 798 int rc;
799 799
800 ev->ev_fired = 0; 800 ev->ev_fired = 0;
@@ -803,7 +803,7 @@ srpc_send_request(srpc_client_rpc_t *rpc)
803 803
804 rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service), 804 rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
805 rpc->crpc_service, &rpc->crpc_reqstmsg, 805 rpc->crpc_service, &rpc->crpc_reqstmsg,
806 sizeof(srpc_msg_t), LNET_MD_OP_PUT, 806 sizeof(struct srpc_msg), LNET_MD_OP_PUT,
807 rpc->crpc_dest, LNET_NID_ANY, 807 rpc->crpc_dest, LNET_NID_ANY,
808 &rpc->crpc_reqstmdh, ev); 808 &rpc->crpc_reqstmdh, ev);
809 if (rc) { 809 if (rc) {
@@ -814,9 +814,9 @@ srpc_send_request(srpc_client_rpc_t *rpc)
814} 814}
815 815
816static int 816static int
817srpc_prepare_reply(srpc_client_rpc_t *rpc) 817srpc_prepare_reply(struct srpc_client_rpc *rpc)
818{ 818{
819 srpc_event_t *ev = &rpc->crpc_replyev; 819 struct srpc_event *ev = &rpc->crpc_replyev;
820 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid; 820 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
821 int rc; 821 int rc;
822 822
@@ -827,7 +827,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
827 *id = srpc_next_id(); 827 *id = srpc_next_id();
828 828
829 rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, 829 rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
830 &rpc->crpc_replymsg, sizeof(srpc_msg_t), 830 &rpc->crpc_replymsg,
831 sizeof(struct srpc_msg),
831 LNET_MD_OP_PUT, rpc->crpc_dest, 832 LNET_MD_OP_PUT, rpc->crpc_dest,
832 &rpc->crpc_replymdh, ev); 833 &rpc->crpc_replymdh, ev);
833 if (rc) { 834 if (rc) {
@@ -838,10 +839,10 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
838} 839}
839 840
840static int 841static int
841srpc_prepare_bulk(srpc_client_rpc_t *rpc) 842srpc_prepare_bulk(struct srpc_client_rpc *rpc)
842{ 843{
843 srpc_bulk_t *bk = &rpc->crpc_bulk; 844 struct srpc_bulk *bk = &rpc->crpc_bulk;
844 srpc_event_t *ev = &rpc->crpc_bulkev; 845 struct srpc_event *ev = &rpc->crpc_bulkev;
845 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; 846 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
846 int rc; 847 int rc;
847 int opt; 848 int opt;
@@ -873,8 +874,8 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc)
873static int 874static int
874srpc_do_bulk(struct srpc_server_rpc *rpc) 875srpc_do_bulk(struct srpc_server_rpc *rpc)
875{ 876{
876 srpc_event_t *ev = &rpc->srpc_ev; 877 struct srpc_event *ev = &rpc->srpc_ev;
877 srpc_bulk_t *bk = rpc->srpc_bulk; 878 struct srpc_bulk *bk = rpc->srpc_bulk;
878 __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid; 879 __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
879 int rc; 880 int rc;
880 int opt; 881 int opt;
@@ -903,7 +904,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
903{ 904{
904 struct srpc_service_cd *scd = rpc->srpc_scd; 905 struct srpc_service_cd *scd = rpc->srpc_scd;
905 struct srpc_service *sv = scd->scd_svc; 906 struct srpc_service *sv = scd->scd_svc;
906 srpc_buffer_t *buffer; 907 struct srpc_buffer *buffer;
907 908
908 LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE); 909 LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
909 910
@@ -948,7 +949,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
948 949
949 if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) { 950 if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
950 buffer = list_entry(scd->scd_buf_blocked.next, 951 buffer = list_entry(scd->scd_buf_blocked.next,
951 srpc_buffer_t, buf_list); 952 struct srpc_buffer, buf_list);
952 list_del(&buffer->buf_list); 953 list_del(&buffer->buf_list);
953 954
954 srpc_init_server_rpc(rpc, scd, buffer); 955 srpc_init_server_rpc(rpc, scd, buffer);
@@ -963,12 +964,12 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
963 964
964/* handles an incoming RPC */ 965/* handles an incoming RPC */
965int 966int
966srpc_handle_rpc(swi_workitem_t *wi) 967srpc_handle_rpc(struct swi_workitem *wi)
967{ 968{
968 struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data; 969 struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
969 struct srpc_service_cd *scd = rpc->srpc_scd; 970 struct srpc_service_cd *scd = rpc->srpc_scd;
970 struct srpc_service *sv = scd->scd_svc; 971 struct srpc_service *sv = scd->scd_svc;
971 srpc_event_t *ev = &rpc->srpc_ev; 972 struct srpc_event *ev = &rpc->srpc_ev;
972 int rc = 0; 973 int rc = 0;
973 974
974 LASSERT(wi == &rpc->srpc_wi); 975 LASSERT(wi == &rpc->srpc_wi);
@@ -995,7 +996,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
995 default: 996 default:
996 LBUG(); 997 LBUG();
997 case SWI_STATE_NEWBORN: { 998 case SWI_STATE_NEWBORN: {
998 srpc_msg_t *msg; 999 struct srpc_msg *msg;
999 srpc_generic_reply_t *reply; 1000 srpc_generic_reply_t *reply;
1000 1001
1001 msg = &rpc->srpc_reqstbuf->buf_msg; 1002 msg = &rpc->srpc_reqstbuf->buf_msg;
@@ -1077,7 +1078,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
1077static void 1078static void
1078srpc_client_rpc_expired(void *data) 1079srpc_client_rpc_expired(void *data)
1079{ 1080{
1080 srpc_client_rpc_t *rpc = data; 1081 struct srpc_client_rpc *rpc = data;
1081 1082
1082 CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n", 1083 CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
1083 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), 1084 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
@@ -1096,7 +1097,7 @@ srpc_client_rpc_expired(void *data)
1096} 1097}
1097 1098
1098static void 1099static void
1099srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc) 1100srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
1100{ 1101{
1101 struct stt_timer *timer = &rpc->crpc_timer; 1102 struct stt_timer *timer = &rpc->crpc_timer;
1102 1103
@@ -1117,7 +1118,7 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
1117 * running on any CPU. 1118 * running on any CPU.
1118 */ 1119 */
1119static void 1120static void
1120srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) 1121srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
1121{ 1122{
1122 /* timer not planted or already exploded */ 1123 /* timer not planted or already exploded */
1123 if (!rpc->crpc_timeout) 1124 if (!rpc->crpc_timeout)
@@ -1138,9 +1139,9 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
1138} 1139}
1139 1140
1140static void 1141static void
1141srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) 1142srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
1142{ 1143{
1143 swi_workitem_t *wi = &rpc->crpc_wi; 1144 struct swi_workitem *wi = &rpc->crpc_wi;
1144 1145
1145 LASSERT(status || wi->swi_state == SWI_STATE_DONE); 1146 LASSERT(status || wi->swi_state == SWI_STATE_DONE);
1146 1147
@@ -1175,11 +1176,11 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
1175 1176
1176/* sends an outgoing RPC */ 1177/* sends an outgoing RPC */
1177int 1178int
1178srpc_send_rpc(swi_workitem_t *wi) 1179srpc_send_rpc(struct swi_workitem *wi)
1179{ 1180{
1180 int rc = 0; 1181 int rc = 0;
1181 srpc_client_rpc_t *rpc; 1182 struct srpc_client_rpc *rpc;
1182 srpc_msg_t *reply; 1183 struct srpc_msg *reply;
1183 int do_bulk; 1184 int do_bulk;
1184 1185
1185 LASSERT(wi); 1186 LASSERT(wi);
@@ -1308,15 +1309,15 @@ abort:
1308 return 0; 1309 return 0;
1309} 1310}
1310 1311
1311srpc_client_rpc_t * 1312struct srpc_client_rpc *
1312srpc_create_client_rpc(lnet_process_id_t peer, int service, 1313srpc_create_client_rpc(lnet_process_id_t peer, int service,
1313 int nbulkiov, int bulklen, 1314 int nbulkiov, int bulklen,
1314 void (*rpc_done)(srpc_client_rpc_t *), 1315 void (*rpc_done)(struct srpc_client_rpc *),
1315 void (*rpc_fini)(srpc_client_rpc_t *), void *priv) 1316 void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
1316{ 1317{
1317 srpc_client_rpc_t *rpc; 1318 struct srpc_client_rpc *rpc;
1318 1319
1319 LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t, 1320 LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
1320 crpc_bulk.bk_iovs[nbulkiov])); 1321 crpc_bulk.bk_iovs[nbulkiov]));
1321 if (!rpc) 1322 if (!rpc)
1322 return NULL; 1323 return NULL;
@@ -1328,7 +1329,7 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
1328 1329
1329/* called with rpc->crpc_lock held */ 1330/* called with rpc->crpc_lock held */
1330void 1331void
1331srpc_abort_rpc(srpc_client_rpc_t *rpc, int why) 1332srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
1332{ 1333{
1333 LASSERT(why); 1334 LASSERT(why);
1334 1335
@@ -1347,7 +1348,7 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
1347 1348
1348/* called with rpc->crpc_lock held */ 1349/* called with rpc->crpc_lock held */
1349void 1350void
1350srpc_post_rpc(srpc_client_rpc_t *rpc) 1351srpc_post_rpc(struct srpc_client_rpc *rpc)
1351{ 1352{
1352 LASSERT(!rpc->crpc_aborted); 1353 LASSERT(!rpc->crpc_aborted);
1353 LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); 1354 LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
@@ -1363,7 +1364,7 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
1363int 1364int
1364srpc_send_reply(struct srpc_server_rpc *rpc) 1365srpc_send_reply(struct srpc_server_rpc *rpc)
1365{ 1366{
1366 srpc_event_t *ev = &rpc->srpc_ev; 1367 struct srpc_event *ev = &rpc->srpc_ev;
1367 struct srpc_msg *msg = &rpc->srpc_replymsg; 1368 struct srpc_msg *msg = &rpc->srpc_replymsg;
1368 struct srpc_buffer *buffer = rpc->srpc_reqstbuf; 1369 struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
1369 struct srpc_service_cd *scd = rpc->srpc_scd; 1370 struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1410,12 +1411,12 @@ static void
1410srpc_lnet_ev_handler(lnet_event_t *ev) 1411srpc_lnet_ev_handler(lnet_event_t *ev)
1411{ 1412{
1412 struct srpc_service_cd *scd; 1413 struct srpc_service_cd *scd;
1413 srpc_event_t *rpcev = ev->md.user_ptr; 1414 struct srpc_event *rpcev = ev->md.user_ptr;
1414 srpc_client_rpc_t *crpc; 1415 struct srpc_client_rpc *crpc;
1415 struct srpc_server_rpc *srpc; 1416 struct srpc_server_rpc *srpc;
1416 srpc_buffer_t *buffer; 1417 struct srpc_buffer *buffer;
1417 srpc_service_t *sv; 1418 struct srpc_service *sv;
1418 srpc_msg_t *msg; 1419 struct srpc_msg *msg;
1419 srpc_msg_type_t type; 1420 srpc_msg_type_t type;
1420 1421
1421 LASSERT(!in_interrupt()); 1422 LASSERT(!in_interrupt());
@@ -1486,7 +1487,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
1486 LASSERT(ev->type != LNET_EVENT_UNLINK || 1487 LASSERT(ev->type != LNET_EVENT_UNLINK ||
1487 sv->sv_shuttingdown); 1488 sv->sv_shuttingdown);
1488 1489
1489 buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg); 1490 buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
1490 buffer->buf_peer = ev->initiator; 1491 buffer->buf_peer = ev->initiator;
1491 buffer->buf_self = ev->target.nid; 1492 buffer->buf_self = ev->target.nid;
1492 1493
@@ -1663,7 +1664,7 @@ srpc_shutdown(void)
1663 spin_lock(&srpc_data.rpc_glock); 1664 spin_lock(&srpc_data.rpc_glock);
1664 1665
1665 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) { 1666 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
1666 srpc_service_t *sv = srpc_data.rpc_services[i]; 1667 struct srpc_service *sv = srpc_data.rpc_services[i];
1667 1668
1668 LASSERTF(!sv, "service not empty: id %d, name %s\n", 1669 LASSERTF(!sv, "service not empty: id %d, name %s\n",
1669 i, sv->sv_name); 1670 i, sv->sv_name);
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index a79c315f2ceb..fdf881ffd9ad 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -242,7 +242,7 @@ typedef struct {
242#define SRPC_MSG_MAGIC 0xeeb0f00d 242#define SRPC_MSG_MAGIC 0xeeb0f00d
243#define SRPC_MSG_VERSION 1 243#define SRPC_MSG_VERSION 1
244 244
245typedef struct srpc_msg { 245struct srpc_msg {
246 __u32 msg_magic; /* magic number */ 246 __u32 msg_magic; /* magic number */
247 __u32 msg_version; /* message version number */ 247 __u32 msg_version; /* message version number */
248 __u32 msg_type; /* type of message body: srpc_msg_type_t */ 248 __u32 msg_type; /* type of message body: srpc_msg_type_t */
@@ -273,10 +273,10 @@ typedef struct srpc_msg {
273 srpc_brw_reqst_t brw_reqst; 273 srpc_brw_reqst_t brw_reqst;
274 srpc_brw_reply_t brw_reply; 274 srpc_brw_reply_t brw_reply;
275 } msg_body; 275 } msg_body;
276} WIRE_ATTR srpc_msg_t; 276} WIRE_ATTR;
277 277
278static inline void 278static inline void
279srpc_unpack_msg_hdr(srpc_msg_t *msg) 279srpc_unpack_msg_hdr(struct srpc_msg *msg)
280{ 280{
281 if (msg->msg_magic == SRPC_MSG_MAGIC) 281 if (msg->msg_magic == SRPC_MSG_MAGIC)
282 return; /* no flipping needed */ 282 return; /* no flipping needed */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index f50580e5b02a..1dac777daf63 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -134,7 +134,7 @@ srpc_service2reply(int service)
134 return srpc_service2request(service) + 1; 134 return srpc_service2request(service) + 1;
135} 135}
136 136
137typedef enum { 137enum srpc_event_type {
138 SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) 138 SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
139 * received */ 139 * received */
140 SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ 140 SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
@@ -143,57 +143,58 @@ typedef enum {
143 SRPC_REPLY_SENT = 5, /* outgoing reply sent */ 143 SRPC_REPLY_SENT = 5, /* outgoing reply sent */
144 SRPC_REQUEST_RCVD = 6, /* incoming request received */ 144 SRPC_REQUEST_RCVD = 6, /* incoming request received */
145 SRPC_REQUEST_SENT = 7, /* outgoing request sent */ 145 SRPC_REQUEST_SENT = 7, /* outgoing request sent */
146} srpc_event_type_t; 146};
147 147
148/* RPC event */ 148/* RPC event */
149typedef struct { 149struct srpc_event {
150 srpc_event_type_t ev_type; /* what's up */ 150 enum srpc_event_type ev_type; /* what's up */
151 lnet_event_kind_t ev_lnet; /* LNet event type */ 151 lnet_event_kind_t ev_lnet; /* LNet event type */
152 int ev_fired; /* LNet event fired? */ 152 int ev_fired; /* LNet event fired? */
153 int ev_status; /* LNet event status */ 153 int ev_status; /* LNet event status */
154 void *ev_data; /* owning server/client RPC */ 154 void *ev_data; /* owning server/client RPC */
155} srpc_event_t; 155};
156 156
157typedef struct { 157/* bulk descriptor */
158struct srpc_bulk {
158 int bk_len; /* len of bulk data */ 159 int bk_len; /* len of bulk data */
159 lnet_handle_md_t bk_mdh; 160 lnet_handle_md_t bk_mdh;
160 int bk_sink; /* sink/source */ 161 int bk_sink; /* sink/source */
161 int bk_niov; /* # iov in bk_iovs */ 162 int bk_niov; /* # iov in bk_iovs */
162 lnet_kiov_t bk_iovs[0]; 163 lnet_kiov_t bk_iovs[0];
163} srpc_bulk_t; /* bulk descriptor */ 164};
164 165
165/* message buffer descriptor */ 166/* message buffer descriptor */
166typedef struct srpc_buffer { 167struct srpc_buffer {
167 struct list_head buf_list; /* chain on srpc_service::*_msgq */ 168 struct list_head buf_list; /* chain on srpc_service::*_msgq */
168 srpc_msg_t buf_msg; 169 struct srpc_msg buf_msg;
169 lnet_handle_md_t buf_mdh; 170 lnet_handle_md_t buf_mdh;
170 lnet_nid_t buf_self; 171 lnet_nid_t buf_self;
171 lnet_process_id_t buf_peer; 172 lnet_process_id_t buf_peer;
172} srpc_buffer_t; 173};
173 174
174struct swi_workitem; 175struct swi_workitem;
175typedef int (*swi_action_t) (struct swi_workitem *); 176typedef int (*swi_action_t) (struct swi_workitem *);
176 177
177typedef struct swi_workitem { 178struct swi_workitem {
178 struct cfs_wi_sched *swi_sched; 179 struct cfs_wi_sched *swi_sched;
179 struct cfs_workitem swi_workitem; 180 struct cfs_workitem swi_workitem;
180 swi_action_t swi_action; 181 swi_action_t swi_action;
181 int swi_state; 182 int swi_state;
182} swi_workitem_t; 183};
183 184
184/* server-side state of a RPC */ 185/* server-side state of a RPC */
185struct srpc_server_rpc { 186struct srpc_server_rpc {
186 /* chain on srpc_service::*_rpcq */ 187 /* chain on srpc_service::*_rpcq */
187 struct list_head srpc_list; 188 struct list_head srpc_list;
188 struct srpc_service_cd *srpc_scd; 189 struct srpc_service_cd *srpc_scd;
189 swi_workitem_t srpc_wi; 190 struct swi_workitem srpc_wi;
190 srpc_event_t srpc_ev; /* bulk/reply event */ 191 struct srpc_event srpc_ev; /* bulk/reply event */
191 lnet_nid_t srpc_self; 192 lnet_nid_t srpc_self;
192 lnet_process_id_t srpc_peer; 193 lnet_process_id_t srpc_peer;
193 srpc_msg_t srpc_replymsg; 194 struct srpc_msg srpc_replymsg;
194 lnet_handle_md_t srpc_replymdh; 195 lnet_handle_md_t srpc_replymdh;
195 srpc_buffer_t *srpc_reqstbuf; 196 struct srpc_buffer *srpc_reqstbuf;
196 srpc_bulk_t *srpc_bulk; 197 struct srpc_bulk *srpc_bulk;
197 198
198 unsigned int srpc_aborted; /* being given up */ 199 unsigned int srpc_aborted; /* being given up */
199 int srpc_status; 200 int srpc_status;
@@ -201,14 +202,14 @@ struct srpc_server_rpc {
201}; 202};
202 203
203/* client-side state of a RPC */ 204/* client-side state of a RPC */
204typedef struct srpc_client_rpc { 205struct srpc_client_rpc {
205 struct list_head crpc_list; /* chain on user's lists */ 206 struct list_head crpc_list; /* chain on user's lists */
206 spinlock_t crpc_lock; /* serialize */ 207 spinlock_t crpc_lock; /* serialize */
207 int crpc_service; 208 int crpc_service;
208 atomic_t crpc_refcount; 209 atomic_t crpc_refcount;
209 int crpc_timeout; /* # seconds to wait for reply */ 210 int crpc_timeout; /* # seconds to wait for reply */
210 struct stt_timer crpc_timer; 211 struct stt_timer crpc_timer;
211 swi_workitem_t crpc_wi; 212 struct swi_workitem crpc_wi;
212 lnet_process_id_t crpc_dest; 213 lnet_process_id_t crpc_dest;
213 214
214 void (*crpc_done)(struct srpc_client_rpc *); 215 void (*crpc_done)(struct srpc_client_rpc *);
@@ -221,20 +222,20 @@ typedef struct srpc_client_rpc {
221 unsigned int crpc_closed:1; /* completed */ 222 unsigned int crpc_closed:1; /* completed */
222 223
223 /* RPC events */ 224 /* RPC events */
224 srpc_event_t crpc_bulkev; /* bulk event */ 225 struct srpc_event crpc_bulkev; /* bulk event */
225 srpc_event_t crpc_reqstev; /* request event */ 226 struct srpc_event crpc_reqstev; /* request event */
226 srpc_event_t crpc_replyev; /* reply event */ 227 struct srpc_event crpc_replyev; /* reply event */
227 228
228 /* bulk, request(reqst), and reply exchanged on wire */ 229 /* bulk, request(reqst), and reply exchanged on wire */
229 srpc_msg_t crpc_reqstmsg; 230 struct srpc_msg crpc_reqstmsg;
230 srpc_msg_t crpc_replymsg; 231 struct srpc_msg crpc_replymsg;
231 lnet_handle_md_t crpc_reqstmdh; 232 lnet_handle_md_t crpc_reqstmdh;
232 lnet_handle_md_t crpc_replymdh; 233 lnet_handle_md_t crpc_replymdh;
233 srpc_bulk_t crpc_bulk; 234 struct srpc_bulk crpc_bulk;
234} srpc_client_rpc_t; 235};
235 236
236#define srpc_client_rpc_size(rpc) \ 237#define srpc_client_rpc_size(rpc) \
237offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) 238offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
238 239
239#define srpc_client_rpc_addref(rpc) \ 240#define srpc_client_rpc_addref(rpc) \
240do { \ 241do { \
@@ -266,13 +267,13 @@ struct srpc_service_cd {
266 /** backref to service */ 267 /** backref to service */
267 struct srpc_service *scd_svc; 268 struct srpc_service *scd_svc;
268 /** event buffer */ 269 /** event buffer */
269 srpc_event_t scd_ev; 270 struct srpc_event scd_ev;
270 /** free RPC descriptors */ 271 /** free RPC descriptors */
271 struct list_head scd_rpc_free; 272 struct list_head scd_rpc_free;
272 /** in-flight RPCs */ 273 /** in-flight RPCs */
273 struct list_head scd_rpc_active; 274 struct list_head scd_rpc_active;
274 /** workitem for posting buffer */ 275 /** workitem for posting buffer */
275 swi_workitem_t scd_buf_wi; 276 struct swi_workitem scd_buf_wi;
276 /** CPT id */ 277 /** CPT id */
277 int scd_cpt; 278 int scd_cpt;
278 /** error code for scd_buf_wi */ 279 /** error code for scd_buf_wi */
@@ -306,7 +307,7 @@ struct srpc_service_cd {
306#define SFW_FRWK_WI_MIN 16 307#define SFW_FRWK_WI_MIN 16
307#define SFW_FRWK_WI_MAX 256 308#define SFW_FRWK_WI_MAX 256
308 309
309typedef struct srpc_service { 310struct srpc_service {
310 int sv_id; /* service id */ 311 int sv_id; /* service id */
311 const char *sv_name; /* human readable name */ 312 const char *sv_name; /* human readable name */
312 int sv_wi_total; /* total server workitems */ 313 int sv_wi_total; /* total server workitems */
@@ -320,9 +321,9 @@ typedef struct srpc_service {
320 */ 321 */
321 int (*sv_handler)(struct srpc_server_rpc *); 322 int (*sv_handler)(struct srpc_server_rpc *);
322 int (*sv_bulk_ready)(struct srpc_server_rpc *, int); 323 int (*sv_bulk_ready)(struct srpc_server_rpc *, int);
323} srpc_service_t; 324};
324 325
325typedef struct { 326struct sfw_session {
326 struct list_head sn_list; /* chain on fw_zombie_sessions */ 327 struct list_head sn_list; /* chain on fw_zombie_sessions */
327 lst_sid_t sn_id; /* unique identifier */ 328 lst_sid_t sn_id; /* unique identifier */
328 unsigned int sn_timeout; /* # seconds' inactivity to expire */ 329 unsigned int sn_timeout; /* # seconds' inactivity to expire */
@@ -335,37 +336,37 @@ typedef struct {
335 atomic_t sn_brw_errors; 336 atomic_t sn_brw_errors;
336 atomic_t sn_ping_errors; 337 atomic_t sn_ping_errors;
337 unsigned long sn_started; 338 unsigned long sn_started;
338} sfw_session_t; 339};
339 340
340#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \ 341#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
341 (sid0).ses_stamp == (sid1).ses_stamp) 342 (sid0).ses_stamp == (sid1).ses_stamp)
342 343
343typedef struct { 344struct sfw_batch {
344 struct list_head bat_list; /* chain on sn_batches */ 345 struct list_head bat_list; /* chain on sn_batches */
345 lst_bid_t bat_id; /* batch id */ 346 lst_bid_t bat_id; /* batch id */
346 int bat_error; /* error code of batch */ 347 int bat_error; /* error code of batch */
347 sfw_session_t *bat_session; /* batch's session */ 348 struct sfw_session *bat_session; /* batch's session */
348 atomic_t bat_nactive; /* # of active tests */ 349 atomic_t bat_nactive; /* # of active tests */
349 struct list_head bat_tests; /* test instances */ 350 struct list_head bat_tests; /* test instances */
350} sfw_batch_t; 351};
351 352
352typedef struct { 353struct sfw_test_client_ops {
353 int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test 354 int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
354 * client */ 355 * client */
355 void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test 356 void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
356 * client */ 357 * client */
357 int (*tso_prep_rpc)(struct sfw_test_unit *tsu, 358 int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
358 lnet_process_id_t dest, 359 lnet_process_id_t dest,
359 srpc_client_rpc_t **rpc); /* prep a tests rpc */ 360 struct srpc_client_rpc **rpc); /* prep a tests rpc */
360 void (*tso_done_rpc)(struct sfw_test_unit *tsu, 361 void (*tso_done_rpc)(struct sfw_test_unit *tsu,
361 srpc_client_rpc_t *rpc); /* done a test rpc */ 362 struct srpc_client_rpc *rpc); /* done a test rpc */
362} sfw_test_client_ops_t; 363};
363 364
364typedef struct sfw_test_instance { 365struct sfw_test_instance {
365 struct list_head tsi_list; /* chain on batch */ 366 struct list_head tsi_list; /* chain on batch */
366 int tsi_service; /* test type */ 367 int tsi_service; /* test type */
367 sfw_batch_t *tsi_batch; /* batch */ 368 struct sfw_batch *tsi_batch; /* batch */
368 sfw_test_client_ops_t *tsi_ops; /* test client operation 369 struct sfw_test_client_ops *tsi_ops; /* test client operation
369 */ 370 */
370 371
371 /* public parameter for all test units */ 372 /* public parameter for all test units */
@@ -388,66 +389,66 @@ typedef struct sfw_test_instance {
388 test_bulk_req_t bulk_v0; /* bulk parameter */ 389 test_bulk_req_t bulk_v0; /* bulk parameter */
389 test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */ 390 test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */
390 } tsi_u; 391 } tsi_u;
391} sfw_test_instance_t; 392};
392 393
393/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at 394/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
394 * the end of pages are not used */ 395 * pages are not used */
395#define SFW_MAX_CONCUR LST_MAX_CONCUR 396#define SFW_MAX_CONCUR LST_MAX_CONCUR
396#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) 397#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
397#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) 398#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
398#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) 399#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
399 400
400typedef struct sfw_test_unit { 401struct sfw_test_unit {
401 struct list_head tsu_list; /* chain on lst_test_instance */ 402 struct list_head tsu_list; /* chain on lst_test_instance */
402 lnet_process_id_t tsu_dest; /* id of dest node */ 403 lnet_process_id_t tsu_dest; /* id of dest node */
403 int tsu_loop; /* loop count of the test */ 404 int tsu_loop; /* loop count of the test */
404 sfw_test_instance_t *tsu_instance; /* pointer to test instance */ 405 struct sfw_test_instance *tsu_instance; /* pointer to test instance */
405 void *tsu_private; /* private data */ 406 void *tsu_private; /* private data */
406 swi_workitem_t tsu_worker; /* workitem of the test unit */ 407 struct swi_workitem tsu_worker; /* workitem of the test unit */
407} sfw_test_unit_t; 408};
408 409
409typedef struct sfw_test_case { 410struct sfw_test_case {
410 struct list_head tsc_list; /* chain on fw_tests */ 411 struct list_head tsc_list; /* chain on fw_tests */
411 srpc_service_t *tsc_srv_service; /* test service */ 412 struct srpc_service *tsc_srv_service; /* test service */
412 sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ 413 struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */
413} sfw_test_case_t; 414};
414 415
415srpc_client_rpc_t * 416struct srpc_client_rpc *
416sfw_create_rpc(lnet_process_id_t peer, int service, 417sfw_create_rpc(lnet_process_id_t peer, int service,
417 unsigned features, int nbulkiov, int bulklen, 418 unsigned features, int nbulkiov, int bulklen,
418 void (*done)(srpc_client_rpc_t *), void *priv); 419 void (*done)(struct srpc_client_rpc *), void *priv);
419int sfw_create_test_rpc(sfw_test_unit_t *tsu, 420int sfw_create_test_rpc(struct sfw_test_unit *tsu,
420 lnet_process_id_t peer, unsigned features, 421 lnet_process_id_t peer, unsigned features,
421 int nblk, int blklen, srpc_client_rpc_t **rpc); 422 int nblk, int blklen, struct srpc_client_rpc **rpc);
422void sfw_abort_rpc(srpc_client_rpc_t *rpc); 423void sfw_abort_rpc(struct srpc_client_rpc *rpc);
423void sfw_post_rpc(srpc_client_rpc_t *rpc); 424void sfw_post_rpc(struct srpc_client_rpc *rpc);
424void sfw_client_rpc_done(srpc_client_rpc_t *rpc); 425void sfw_client_rpc_done(struct srpc_client_rpc *rpc);
425void sfw_unpack_message(srpc_msg_t *msg); 426void sfw_unpack_message(struct srpc_msg *msg);
426void sfw_free_pages(struct srpc_server_rpc *rpc); 427void sfw_free_pages(struct srpc_server_rpc *rpc);
427void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i); 428void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i);
428int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, 429int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
429 int sink); 430 int sink);
430int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); 431int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
431 432
432srpc_client_rpc_t * 433struct srpc_client_rpc *
433srpc_create_client_rpc(lnet_process_id_t peer, int service, 434srpc_create_client_rpc(lnet_process_id_t peer, int service,
434 int nbulkiov, int bulklen, 435 int nbulkiov, int bulklen,
435 void (*rpc_done)(srpc_client_rpc_t *), 436 void (*rpc_done)(struct srpc_client_rpc *),
436 void (*rpc_fini)(srpc_client_rpc_t *), void *priv); 437 void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
437void srpc_post_rpc(srpc_client_rpc_t *rpc); 438void srpc_post_rpc(struct srpc_client_rpc *rpc);
438void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why); 439void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
439void srpc_free_bulk(srpc_bulk_t *bk); 440void srpc_free_bulk(struct srpc_bulk *bk);
440srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, 441struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
441 int sink); 442 unsigned bulk_len, int sink);
442int srpc_send_rpc(swi_workitem_t *wi); 443int srpc_send_rpc(struct swi_workitem *wi);
443int srpc_send_reply(struct srpc_server_rpc *rpc); 444int srpc_send_reply(struct srpc_server_rpc *rpc);
444int srpc_add_service(srpc_service_t *sv); 445int srpc_add_service(struct srpc_service *sv);
445int srpc_remove_service(srpc_service_t *sv); 446int srpc_remove_service(struct srpc_service *sv);
446void srpc_shutdown_service(srpc_service_t *sv); 447void srpc_shutdown_service(struct srpc_service *sv);
447void srpc_abort_service(srpc_service_t *sv); 448void srpc_abort_service(struct srpc_service *sv);
448int srpc_finish_service(srpc_service_t *sv); 449int srpc_finish_service(struct srpc_service *sv);
449int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer); 450int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer);
450void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer); 451void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
451void srpc_get_counters(srpc_counters_t *cnt); 452void srpc_get_counters(srpc_counters_t *cnt);
452void srpc_set_counters(const srpc_counters_t *cnt); 453void srpc_set_counters(const srpc_counters_t *cnt);
453 454
@@ -463,13 +464,15 @@ srpc_serv_is_framework(struct srpc_service *svc)
463static inline int 464static inline int
464swi_wi_action(struct cfs_workitem *wi) 465swi_wi_action(struct cfs_workitem *wi)
465{ 466{
466 swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem); 467 struct swi_workitem *swi;
468
469 swi = container_of(wi, struct swi_workitem, swi_workitem);
467 470
468 return swi->swi_action(swi); 471 return swi->swi_action(swi);
469} 472}
470 473
471static inline void 474static inline void
472swi_init_workitem(swi_workitem_t *swi, void *data, 475swi_init_workitem(struct swi_workitem *swi, void *data,
473 swi_action_t action, struct cfs_wi_sched *sched) 476 swi_action_t action, struct cfs_wi_sched *sched)
474{ 477{
475 swi->swi_sched = sched; 478 swi->swi_sched = sched;
@@ -479,19 +482,19 @@ swi_init_workitem(swi_workitem_t *swi, void *data,
479} 482}
480 483
481static inline void 484static inline void
482swi_schedule_workitem(swi_workitem_t *wi) 485swi_schedule_workitem(struct swi_workitem *wi)
483{ 486{
484 cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem); 487 cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
485} 488}
486 489
487static inline void 490static inline void
488swi_exit_workitem(swi_workitem_t *swi) 491swi_exit_workitem(struct swi_workitem *swi)
489{ 492{
490 cfs_wi_exit(swi->swi_sched, &swi->swi_workitem); 493 cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
491} 494}
492 495
493static inline int 496static inline int
494swi_deschedule_workitem(swi_workitem_t *swi) 497swi_deschedule_workitem(struct swi_workitem *swi)
495{ 498{
496 return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem); 499 return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
497} 500}
@@ -502,7 +505,7 @@ void sfw_shutdown(void);
502void srpc_shutdown(void); 505void srpc_shutdown(void);
503 506
504static inline void 507static inline void
505srpc_destroy_client_rpc(srpc_client_rpc_t *rpc) 508srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
506{ 509{
507 LASSERT(rpc); 510 LASSERT(rpc);
508 LASSERT(!srpc_event_pending(rpc)); 511 LASSERT(!srpc_event_pending(rpc));
@@ -515,14 +518,14 @@ srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
515} 518}
516 519
517static inline void 520static inline void
518srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer, 521srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
519 int service, int nbulkiov, int bulklen, 522 int service, int nbulkiov, int bulklen,
520 void (*rpc_done)(srpc_client_rpc_t *), 523 void (*rpc_done)(struct srpc_client_rpc *),
521 void (*rpc_fini)(srpc_client_rpc_t *), void *priv) 524 void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
522{ 525{
523 LASSERT(nbulkiov <= LNET_MAX_IOV); 526 LASSERT(nbulkiov <= LNET_MAX_IOV);
524 527
525 memset(rpc, 0, offsetof(srpc_client_rpc_t, 528 memset(rpc, 0, offsetof(struct srpc_client_rpc,
526 crpc_bulk.bk_iovs[nbulkiov])); 529 crpc_bulk.bk_iovs[nbulkiov]));
527 530
528 INIT_LIST_HEAD(&rpc->crpc_list); 531 INIT_LIST_HEAD(&rpc->crpc_list);
@@ -592,7 +595,7 @@ do { \
592} while (0) 595} while (0)
593 596
594static inline void 597static inline void
595srpc_wait_service_shutdown(srpc_service_t *sv) 598srpc_wait_service_shutdown(struct srpc_service *sv)
596{ 599{
597 int i = 2; 600 int i = 2;
598 601
@@ -607,16 +610,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
607 } 610 }
608} 611}
609 612
610extern sfw_test_client_ops_t brw_test_client; 613extern struct sfw_test_client_ops brw_test_client;
611void brw_init_test_client(void); 614void brw_init_test_client(void);
612 615
613extern srpc_service_t brw_test_service; 616extern struct srpc_service brw_test_service;
614void brw_init_test_service(void); 617void brw_init_test_service(void);
615 618
616extern sfw_test_client_ops_t ping_test_client; 619extern struct sfw_test_client_ops ping_test_client;
617void ping_init_test_client(void); 620void ping_init_test_client(void);
618 621
619extern srpc_service_t ping_test_service; 622extern struct srpc_service ping_test_service;
620void ping_init_test_service(void); 623void ping_init_test_service(void);
621 624
622#endif /* __SELFTEST_SELFTEST_H__ */ 625#endif /* __SELFTEST_SELFTEST_H__ */
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 062f388cf38a..5a04e99d9249 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -178,8 +178,9 @@ restart_fixup:
178 if (n_range->lsr_end <= c_range->lsr_end) { 178 if (n_range->lsr_end <= c_range->lsr_end) {
179 *n_range = *c_range; 179 *n_range = *c_range;
180 fld_cache_entry_delete(cache, f_curr); 180 fld_cache_entry_delete(cache, f_curr);
181 } else 181 } else {
182 n_range->lsr_start = c_range->lsr_end; 182 n_range->lsr_start = c_range->lsr_end;
183 }
183 } 184 }
184 185
185 /* we could have overlap over next 186 /* we could have overlap over next
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99e1fb4..c6c7f54637fb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
52 return; 52 return;
53 53
54 if (PagePrivate(page)) 54 if (PagePrivate(page))
55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
56 56
57 cancel_dirty_page(page); 57 cancel_dirty_page(page);
58 ClearPageMappedToDisk(page); 58 ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index fcb9db6e1f1a..0f70acd1a750 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@ struct lu_context_key {
1118 { \ 1118 { \
1119 type *value; \ 1119 type *value; \
1120 \ 1120 \
1121 CLASSERT(PAGE_CACHE_SIZE >= sizeof(*value)); \ 1121 CLASSERT(PAGE_SIZE >= sizeof(*value)); \
1122 \ 1122 \
1123 value = kzalloc(sizeof(*value), GFP_NOFS); \ 1123 value = kzalloc(sizeof(*value), GFP_NOFS); \
1124 if (!value) \ 1124 if (!value) \
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 12e6718f2adb..c3565bf0e1fb 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1001,8 +1001,9 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr)
1001 1001
1002 size = (sizeof(struct lu_dirent) + namelen + align) & ~align; 1002 size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
1003 size += sizeof(struct luda_type); 1003 size += sizeof(struct luda_type);
1004 } else 1004 } else {
1005 size = sizeof(struct lu_dirent) + namelen; 1005 size = sizeof(struct lu_dirent) + namelen;
1006 }
1006 1007
1007 return (size + 7) & ~7; 1008 return (size + 7) & ~7;
1008} 1009}
@@ -1022,16 +1023,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
1022 * MDS_READPAGE page size 1023 * MDS_READPAGE page size
1023 * 1024 *
1024 * This is the directory page size packed in MDS_READPAGE RPC. 1025 * This is the directory page size packed in MDS_READPAGE RPC.
1025 * It's different than PAGE_CACHE_SIZE because the client needs to 1026 * It's different than PAGE_SIZE because the client needs to
1026 * access the struct lu_dirpage header packed at the beginning of 1027 * access the struct lu_dirpage header packed at the beginning of
1027 * the "page" and without this there isn't any way to know find the 1028 * the "page" and without this there isn't any way to know find the
1028 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. 1029 * lu_dirpage header is if client and server PAGE_SIZE differ.
1029 */ 1030 */
1030#define LU_PAGE_SHIFT 12 1031#define LU_PAGE_SHIFT 12
1031#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) 1032#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
1032#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) 1033#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
1033 1034
1034#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) 1035#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
1035 1036
1036/** @} lu_dir */ 1037/** @} lu_dir */
1037 1038
@@ -1428,6 +1429,7 @@ enum obdo_flags {
1428 */ 1429 */
1429 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ 1430 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
1430 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ 1431 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
1432 OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
1431 1433
1432 /* Note that while these checksum values are currently separate bits, 1434 /* Note that while these checksum values are currently separate bits,
1433 * in 2.x we can actually allow all values from 1-31 if we wanted. 1435 * in 2.x we can actually allow all values from 1-31 if we wanted.
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 19f2271cc6b9..59ba48ac31a7 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -676,7 +676,12 @@ static inline const char *changelog_type2str(int type)
676#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */ 676#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
677 /* HSM cleaning needed */ 677 /* HSM cleaning needed */
678/* Flags for rename */ 678/* Flags for rename */
679#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */ 679#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of
680 * target
681 */
682#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlink last hardlink of target
683 * has an archive in backend
684 */
680 685
681/* Flags for HSM */ 686/* Flags for HSM */
682/* 12b used (from high weight to low weight): 687/* 12b used (from high weight to low weight):
@@ -833,9 +838,8 @@ struct ioc_data_version {
833 __u64 idv_flags; /* See LL_DV_xxx */ 838 __u64 idv_flags; /* See LL_DV_xxx */
834}; 839};
835 840
836#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling 841#define LL_DV_RD_FLUSH BIT(0) /* Flush dirty pages from clients */
837 * version. Dirty caches are left unchanged. 842#define LL_DV_WR_FLUSH BIT(1) /* Flush all caching pages from clients */
838 */
839 843
840#ifndef offsetof 844#ifndef offsetof
841# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb))) 845# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -1095,7 +1099,7 @@ struct hsm_action_list {
1095 __u32 padding1; 1099 __u32 padding1;
1096 char hal_fsname[0]; /* null-terminated */ 1100 char hal_fsname[0]; /* null-terminated */
1097 /* struct hsm_action_item[hal_count] follows, aligned on 8-byte 1101 /* struct hsm_action_item[hal_count] follows, aligned on 8-byte
1098 * boundaries. See hai_zero 1102 * boundaries. See hai_first
1099 */ 1103 */
1100} __packed; 1104} __packed;
1101 1105
@@ -1109,7 +1113,7 @@ static inline int cfs_size_round(int val)
1109#endif 1113#endif
1110 1114
1111/* Return pointer to first hai in action list */ 1115/* Return pointer to first hai in action list */
1112static inline struct hsm_action_item *hai_zero(struct hsm_action_list *hal) 1116static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
1113{ 1117{
1114 return (struct hsm_action_item *)(hal->hal_fsname + 1118 return (struct hsm_action_item *)(hal->hal_fsname +
1115 cfs_size_round(strlen(hal-> \ 1119 cfs_size_round(strlen(hal-> \
@@ -1131,7 +1135,7 @@ static inline int hal_size(struct hsm_action_list *hal)
1131 struct hsm_action_item *hai; 1135 struct hsm_action_item *hai;
1132 1136
1133 sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1); 1137 sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
1134 hai = hai_zero(hal); 1138 hai = hai_first(hal);
1135 for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai)) 1139 for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
1136 sz += cfs_size_round(hai->hai_len); 1140 sz += cfs_size_round(hai->hai_len);
1137 1141
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index df94f9f3bef2..f267ff8a6ec8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -64,9 +64,27 @@ struct obd_export;
64struct ptlrpc_request; 64struct ptlrpc_request;
65struct obd_device; 65struct obd_device;
66 66
67/**
68 * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
69 *
70 * This mutex is used to implement execute-once semantics on the MDT.
71 * The MDT stores the last transaction ID and result for every client in
72 * its last_rcvd file. If the client doesn't get a reply, it can safely
73 * resend the request and the MDT will reconstruct the reply being aware
74 * that the request has already been executed. Without this lock,
75 * execution status of concurrent in-flight requests would be
76 * overwritten.
77 *
78 * This design limits the extent to which we can keep a full pipeline of
79 * in-flight requests from a single client. This limitation could be
80 * overcome by allowing multiple slots per client in the last_rcvd file.
81 */
67struct mdc_rpc_lock { 82struct mdc_rpc_lock {
83 /** Lock protecting in-flight RPC concurrency. */
68 struct mutex rpcl_mutex; 84 struct mutex rpcl_mutex;
85 /** Intent associated with currently executing request. */
69 struct lookup_intent *rpcl_it; 86 struct lookup_intent *rpcl_it;
87 /** Used for MDS/RPC load testing purposes. */
70 int rpcl_fakes; 88 int rpcl_fakes;
71}; 89};
72 90
@@ -155,12 +173,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
155 if (cli->cl_max_mds_easize < body->max_mdsize) { 173 if (cli->cl_max_mds_easize < body->max_mdsize) {
156 cli->cl_max_mds_easize = body->max_mdsize; 174 cli->cl_max_mds_easize = body->max_mdsize;
157 cli->cl_default_mds_easize = 175 cli->cl_default_mds_easize =
158 min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); 176 min_t(__u32, body->max_mdsize, PAGE_SIZE);
159 } 177 }
160 if (cli->cl_max_mds_cookiesize < body->max_cookiesize) { 178 if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
161 cli->cl_max_mds_cookiesize = body->max_cookiesize; 179 cli->cl_max_mds_cookiesize = body->max_cookiesize;
162 cli->cl_default_mds_cookiesize = 180 cli->cl_default_mds_cookiesize =
163 min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); 181 min_t(__u32, body->max_cookiesize, PAGE_SIZE);
164 } 182 }
165 } 183 }
166} 184}
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 4fa1a18b7d15..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -99,21 +99,21 @@
99 */ 99 */
100#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) 100#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
101#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) 101#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
102#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 102#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
103 103
104#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) 104#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
105#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) 105#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
106#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 106#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
107#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE 107#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
108#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 108#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
109#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) 109#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
110 110
111/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */ 111/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
112# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) 112# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
113# error "PTLRPC_MAX_BRW_PAGES isn't a power of two" 113# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
114# endif 114# endif
115# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) 115# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
116# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" 116# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
117# endif 117# endif
118# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) 118# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
119# error "PTLRPC_MAX_BRW_SIZE too big" 119# error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index ded7b106ac8a..e97e25bbcd2d 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -273,7 +273,7 @@ struct client_obd {
273 int cl_grant_shrink_interval; /* seconds */ 273 int cl_grant_shrink_interval; /* seconds */
274 274
275 /* A chunk is an optimal size used by osc_extent to determine 275 /* A chunk is an optimal size used by osc_extent to determine
276 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) 276 * the extent size. A chunk is max(PAGE_SIZE, OST block size)
277 */ 277 */
278 int cl_chunkbits; 278 int cl_chunkbits;
279 int cl_chunk; 279 int cl_chunk;
@@ -1316,7 +1316,7 @@ bad_format:
1316 1316
1317static inline int cli_brw_size(struct obd_device *obd) 1317static inline int cli_brw_size(struct obd_device *obd)
1318{ 1318{
1319 return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 1319 return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
1320} 1320}
1321 1321
1322#endif /* __OBD_H */ 1322#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 706869f8c98f..40f7a2374865 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -490,8 +490,9 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
490 obd->obd_lu_dev = d; 490 obd->obd_lu_dev = d;
491 d->ld_obd = obd; 491 d->ld_obd = obd;
492 rc = 0; 492 rc = 0;
493 } else 493 } else {
494 rc = PTR_ERR(d); 494 rc = PTR_ERR(d);
495 }
495 } 496 }
496 lu_context_exit(&session_ctx); 497 lu_context_exit(&session_ctx);
497 lu_context_fini(&session_ctx); 498 lu_context_fini(&session_ctx);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 225262fa67b6..f8ee3a3254ba 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
500 500
501#ifdef POISON_BULK 501#ifdef POISON_BULK
502#define POISON_PAGE(page, val) do { \ 502#define POISON_PAGE(page, val) do { \
503 memset(kmap(page), val, PAGE_CACHE_SIZE); \ 503 memset(kmap(page), val, PAGE_SIZE); \
504 kunmap(page); \ 504 kunmap(page); \
505} while (0) 505} while (0)
506#else 506#else
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b88b78606aee..3f97e1ca7192 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -530,12 +530,6 @@ granted:
530 return -EIO; 530 return -EIO;
531 } 531 }
532 532
533 if (rc) {
534 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
535 rc);
536 return rc;
537 }
538
539 LDLM_DEBUG(lock, "client-side enqueue granted"); 533 LDLM_DEBUG(lock, "client-side enqueue granted");
540 534
541 lock_res_and_lock(lock); 535 lock_res_and_lock(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 351f8b44947f..ba643e68dd2d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -218,8 +218,6 @@ enum ldlm_policy_res {
218 LDLM_POLICY_SKIP_LOCK 218 LDLM_POLICY_SKIP_LOCK
219}; 219};
220 220
221typedef enum ldlm_policy_res ldlm_policy_res_t;
222
223#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v) 221#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
224#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; } 222#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
225#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v) 223#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7fedbec43ebf..9e58b1c7177b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
307 cli->cl_avail_grant = 0; 307 cli->cl_avail_grant = 0;
308 /* FIXME: Should limit this for the sum of all cl_dirty_max. */ 308 /* FIXME: Should limit this for the sum of all cl_dirty_max. */
309 cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; 309 cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
310 if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) 310 if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
311 cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); 311 cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
312 INIT_LIST_HEAD(&cli->cl_cache_waiters); 312 INIT_LIST_HEAD(&cli->cl_cache_waiters);
313 INIT_LIST_HEAD(&cli->cl_loi_ready_list); 313 INIT_LIST_HEAD(&cli->cl_loi_ready_list);
314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); 314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
353 * In the future this should likely be increased. LU-1431 353 * In the future this should likely be increased. LU-1431
354 */ 354 */
355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, 355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
356 LNET_MTU >> PAGE_CACHE_SHIFT); 356 LNET_MTU >> PAGE_SHIFT);
357 357
358 if (!strcmp(name, LUSTRE_MDC_NAME)) { 358 if (!strcmp(name, LUSTRE_MDC_NAME)) {
359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; 359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
360 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { 360 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
361 cli->cl_max_rpcs_in_flight = 2; 361 cli->cl_max_rpcs_in_flight = 2;
362 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { 362 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
363 cli->cl_max_rpcs_in_flight = 3; 363 cli->cl_max_rpcs_in_flight = 3;
364 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { 364 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
365 cli->cl_max_rpcs_in_flight = 4; 365 cli->cl_max_rpcs_in_flight = 4;
366 } else { 366 } else {
367 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; 367 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3e937b050203..b913ba9cf97c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
107/* 107/*
108 * 50 ldlm locks for 1MB of RAM. 108 * 50 ldlm locks for 1MB of RAM.
109 */ 109 */
110#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) 110#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
111 111
112/* 112/*
113 * Maximal possible grant step plan in %. 113 * Maximal possible grant step plan in %.
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 5b0e396a9908..880efdc2fa96 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
546{ 546{
547 int avail; 547 int avail;
548 548
549 avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; 549 avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
550 if (likely(avail >= 0)) 550 if (likely(avail >= 0))
551 avail /= (int)sizeof(struct lustre_handle); 551 avail /= (int)sizeof(struct lustre_handle);
552 else 552 else
@@ -1131,12 +1131,11 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
1131 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g. 1131 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
1132 * readahead requests, ...) 1132 * readahead requests, ...)
1133 */ 1133 */
1134static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, 1134static enum ldlm_policy_res
1135 struct ldlm_lock *lock, 1135ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1136 int unused, int added, 1136 int unused, int added, int count)
1137 int count)
1138{ 1137{
1139 ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK; 1138 enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
1140 1139
1141 /* don't check added & count since we want to process all locks 1140 /* don't check added & count since we want to process all locks
1142 * from unused list. 1141 * from unused list.
@@ -1168,10 +1167,10 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
1168 * 1167 *
1169 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1168 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1170 */ 1169 */
1171static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, 1170static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1172 struct ldlm_lock *lock, 1171 struct ldlm_lock *lock,
1173 int unused, int added, 1172 int unused, int added,
1174 int count) 1173 int count)
1175{ 1174{
1176 unsigned long cur = cfs_time_current(); 1175 unsigned long cur = cfs_time_current();
1177 struct ldlm_pool *pl = &ns->ns_pool; 1176 struct ldlm_pool *pl = &ns->ns_pool;
@@ -1214,10 +1213,10 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
1214 * 1213 *
1215 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1214 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1216 */ 1215 */
1217static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns, 1216static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
1218 struct ldlm_lock *lock, 1217 struct ldlm_lock *lock,
1219 int unused, int added, 1218 int unused, int added,
1220 int count) 1219 int count)
1221{ 1220{
1222 /* Stop LRU processing when we reach past @count or have checked all 1221 /* Stop LRU processing when we reach past @count or have checked all
1223 * locks in LRU. 1222 * locks in LRU.
@@ -1235,10 +1234,10 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
1235 * 1234 *
1236 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1235 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1237 */ 1236 */
1238static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns, 1237static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
1239 struct ldlm_lock *lock, 1238 struct ldlm_lock *lock,
1240 int unused, int added, 1239 int unused, int added,
1241 int count) 1240 int count)
1242{ 1241{
1243 if ((added >= count) && 1242 if ((added >= count) &&
1244 time_before(cfs_time_current(), 1243 time_before(cfs_time_current(),
@@ -1251,13 +1250,13 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
1251 return LDLM_POLICY_CANCEL_LOCK; 1250 return LDLM_POLICY_CANCEL_LOCK;
1252} 1251}
1253 1252
1254static ldlm_policy_res_t 1253static enum ldlm_policy_res
1255ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns, 1254ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
1256 struct ldlm_lock *lock, 1255 struct ldlm_lock *lock,
1257 int unused, int added, 1256 int unused, int added,
1258 int count) 1257 int count)
1259{ 1258{
1260 ldlm_policy_res_t result; 1259 enum ldlm_policy_res result;
1261 1260
1262 result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count); 1261 result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
1263 if (result == LDLM_POLICY_KEEP_LOCK) 1262 if (result == LDLM_POLICY_KEEP_LOCK)
@@ -1275,10 +1274,9 @@ ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
1275 * 1274 *
1276 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU 1275 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1277 */ 1276 */
1278static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns, 1277static enum ldlm_policy_res
1279 struct ldlm_lock *lock, 1278ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1280 int unused, int added, 1279 int unused, int added, int count)
1281 int count)
1282{ 1280{
1283 /* Stop LRU processing when we reach past count or have checked all 1281 /* Stop LRU processing when we reach past count or have checked all
1284 * locks in LRU. 1282 * locks in LRU.
@@ -1287,7 +1285,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
1287 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; 1285 LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
1288} 1286}
1289 1287
1290typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *, 1288typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
1289 struct ldlm_namespace *,
1291 struct ldlm_lock *, int, 1290 struct ldlm_lock *, int,
1292 int, int); 1291 int, int);
1293 1292
@@ -1368,7 +1367,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1368 LASSERT(pf); 1367 LASSERT(pf);
1369 1368
1370 while (!list_empty(&ns->ns_unused_list)) { 1369 while (!list_empty(&ns->ns_unused_list)) {
1371 ldlm_policy_res_t result; 1370 enum ldlm_policy_res result;
1372 time_t last_use = 0; 1371 time_t last_use = 0;
1373 1372
1374 /* all unused locks */ 1373 /* all unused locks */
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index b085fb4ffc56..2f0873ee7824 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
134 * a header lu_dirpage which describes the start/end hash, and whether this 134 * a header lu_dirpage which describes the start/end hash, and whether this
135 * page is empty (contains no dir entry) or hash collide with next page. 135 * page is empty (contains no dir entry) or hash collide with next page.
136 * After client receives reply, several pages will be integrated into dir page 136 * After client receives reply, several pages will be integrated into dir page
137 * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the 137 * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
138 * lu_dirpage for this integrated page will be adjusted. See 138 * for this integrated page will be adjusted. See lmv_adjust_dirpages().
139 * lmv_adjust_dirpages().
140 * 139 *
141 */ 140 */
142 141
@@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
153 struct page **page_pool; 152 struct page **page_pool;
154 struct page *page; 153 struct page *page;
155 struct lu_dirpage *dp; 154 struct lu_dirpage *dp;
156 int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; 155 int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
157 int nrdpgs = 0; /* number of pages read actually */ 156 int nrdpgs = 0; /* number of pages read actually */
158 int npages; 157 int npages;
159 int i; 158 int i;
@@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
193 if (body->valid & OBD_MD_FLSIZE) 192 if (body->valid & OBD_MD_FLSIZE)
194 i_size_write(inode, body->size); 193 i_size_write(inode, body->size);
195 194
196 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1) 195 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
197 >> PAGE_CACHE_SHIFT; 196 >> PAGE_SHIFT;
198 SetPageUptodate(page0); 197 SetPageUptodate(page0);
199 } 198 }
200 unlock_page(page0); 199 unlock_page(page0);
@@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
209 page = page_pool[i]; 208 page = page_pool[i];
210 209
211 if (rc < 0 || i >= nrdpgs) { 210 if (rc < 0 || i >= nrdpgs) {
212 page_cache_release(page); 211 put_page(page);
213 continue; 212 continue;
214 } 213 }
215 214
@@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
230 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n", 229 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
231 offset, ret); 230 offset, ret);
232 } 231 }
233 page_cache_release(page); 232 put_page(page);
234 } 233 }
235 234
236 if (page_pool != &page0) 235 if (page_pool != &page0)
@@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
247 truncate_complete_page(page->mapping, page); 246 truncate_complete_page(page->mapping, page);
248 unlock_page(page); 247 unlock_page(page);
249 } 248 }
250 page_cache_release(page); 249 put_page(page);
251} 250}
252 251
253/* 252/*
@@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
273 if (found > 0 && !radix_tree_exceptional_entry(page)) { 272 if (found > 0 && !radix_tree_exceptional_entry(page)) {
274 struct lu_dirpage *dp; 273 struct lu_dirpage *dp;
275 274
276 page_cache_get(page); 275 get_page(page);
277 spin_unlock_irq(&mapping->tree_lock); 276 spin_unlock_irq(&mapping->tree_lock);
278 /* 277 /*
279 * In contrast to find_lock_page() we are sure that directory 278 * In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
313 page = NULL; 312 page = NULL;
314 } 313 }
315 } else { 314 } else {
316 page_cache_release(page); 315 put_page(page);
317 page = ERR_PTR(-EIO); 316 page = ERR_PTR(-EIO);
318 } 317 }
319 318
@@ -612,15 +611,16 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
612 struct inode *inode = file_inode(filp); 611 struct inode *inode = file_inode(filp);
613 struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp); 612 struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
614 struct ll_sb_info *sbi = ll_i2sbi(inode); 613 struct ll_sb_info *sbi = ll_i2sbi(inode);
614 __u64 pos = lfd ? lfd->lfd_pos : 0;
615 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; 615 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
616 int api32 = ll_need_32bit_api(sbi); 616 int api32 = ll_need_32bit_api(sbi);
617 int rc; 617 int rc;
618 618
619 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n", 619 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
620 inode->i_ino, inode->i_generation, 620 inode->i_ino, inode->i_generation,
621 inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32); 621 inode, (unsigned long)pos, i_size_read(inode), api32);
622 622
623 if (lfd->lfd_pos == MDS_DIR_END_OFF) { 623 if (pos == MDS_DIR_END_OFF) {
624 /* 624 /*
625 * end-of-file. 625 * end-of-file.
626 */ 626 */
@@ -628,9 +628,10 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
628 goto out; 628 goto out;
629 } 629 }
630 630
631 ctx->pos = lfd->lfd_pos; 631 ctx->pos = pos;
632 rc = ll_dir_read(inode, ctx); 632 rc = ll_dir_read(inode, ctx);
633 lfd->lfd_pos = ctx->pos; 633 if (lfd)
634 lfd->lfd_pos = ctx->pos;
634 if (ctx->pos == MDS_DIR_END_OFF) { 635 if (ctx->pos == MDS_DIR_END_OFF) {
635 if (api32) 636 if (api32)
636 ctx->pos = LL_DIR_END_OFF_32BIT; 637 ctx->pos = LL_DIR_END_OFF_32BIT;
@@ -939,7 +940,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
939 } 940 }
940 941
941 /* Read current file data version */ 942 /* Read current file data version */
942 rc = ll_data_version(inode, &data_version, 1); 943 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
943 iput(inode); 944 iput(inode);
944 if (rc != 0) { 945 if (rc != 0) {
945 CDEBUG(D_HSM, "Could not read file data version of " 946 CDEBUG(D_HSM, "Could not read file data version of "
@@ -959,6 +960,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
959 } 960 }
960 961
961progress: 962progress:
963 /* On error, the request should be considered as completed */
964 if (hpk.hpk_errval > 0)
965 hpk.hpk_flags |= HP_FLAG_COMPLETED;
962 rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), 966 rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
963 &hpk, NULL); 967 &hpk, NULL);
964 968
@@ -1020,8 +1024,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
1020 goto progress; 1024 goto progress;
1021 } 1025 }
1022 1026
1023 rc = ll_data_version(inode, &data_version, 1027 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
1024 copy->hc_hai.hai_action == HSMA_ARCHIVE);
1025 iput(inode); 1028 iput(inode);
1026 if (rc) { 1029 if (rc) {
1027 CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n"); 1030 CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
@@ -1497,8 +1500,9 @@ free_lmv:
1497 cmd == LL_IOC_MDC_GETINFO)) { 1500 cmd == LL_IOC_MDC_GETINFO)) {
1498 rc = 0; 1501 rc = 0;
1499 goto skip_lmm; 1502 goto skip_lmm;
1500 } else 1503 } else {
1501 goto out_req; 1504 goto out_req;
1505 }
1502 } 1506 }
1503 1507
1504 if (cmd == IOC_MDC_GETFILESTRIPE || 1508 if (cmd == IOC_MDC_GETFILESTRIPE ||
@@ -1529,7 +1533,7 @@ skip_lmm:
1529 st.st_gid = body->gid; 1533 st.st_gid = body->gid;
1530 st.st_rdev = body->rdev; 1534 st.st_rdev = body->rdev;
1531 st.st_size = body->size; 1535 st.st_size = body->size;
1532 st.st_blksize = PAGE_CACHE_SIZE; 1536 st.st_blksize = PAGE_SIZE;
1533 st.st_blocks = body->blocks; 1537 st.st_blocks = body->blocks;
1534 st.st_atime = body->atime; 1538 st.st_atime = body->atime;
1535 st.st_mtime = body->mtime; 1539 st.st_mtime = body->mtime;
@@ -1711,15 +1715,16 @@ out_quotactl:
1711 return ll_flush_ctx(inode); 1715 return ll_flush_ctx(inode);
1712#ifdef CONFIG_FS_POSIX_ACL 1716#ifdef CONFIG_FS_POSIX_ACL
1713 case LL_IOC_RMTACL: { 1717 case LL_IOC_RMTACL: {
1714 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { 1718 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
1715 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1719 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1716 1720
1717 rc = rct_add(&sbi->ll_rct, current_pid(), arg); 1721 rc = rct_add(&sbi->ll_rct, current_pid(), arg);
1718 if (!rc) 1722 if (!rc)
1719 fd->fd_flags |= LL_FILE_RMTACL; 1723 fd->fd_flags |= LL_FILE_RMTACL;
1720 return rc; 1724 return rc;
1721 } else 1725 } else {
1722 return 0; 1726 return 0;
1727 }
1723 } 1728 }
1724#endif 1729#endif
1725 case LL_IOC_GETOBDCOUNT: { 1730 case LL_IOC_GETOBDCOUNT: {
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 69b56a816fcb..9b553d2ab336 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -875,16 +875,19 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
875 return och; 875 return och;
876 876
877out_close: 877out_close:
878 rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL); 878 /* Cancel open lock */
879 if (rc2)
880 CERROR("Close openhandle returned %d\n", rc2);
881
882 /* cancel open lock */
883 if (it.d.lustre.it_lock_mode != 0) { 879 if (it.d.lustre.it_lock_mode != 0) {
884 ldlm_lock_decref_and_cancel(&och->och_lease_handle, 880 ldlm_lock_decref_and_cancel(&och->och_lease_handle,
885 it.d.lustre.it_lock_mode); 881 it.d.lustre.it_lock_mode);
886 it.d.lustre.it_lock_mode = 0; 882 it.d.lustre.it_lock_mode = 0;
883 och->och_lease_handle.cookie = 0ULL;
887 } 884 }
885 rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
886 if (rc2 < 0)
887 CERROR("%s: error closing file "DFID": %d\n",
888 ll_get_fsname(inode->i_sb, NULL, 0),
889 PFID(&ll_i2info(inode)->lli_fid), rc2);
890 och = NULL; /* och has been freed in ll_close_inode_openhandle() */
888out_release_it: 891out_release_it:
889 ll_intent_release(&it); 892 ll_intent_release(&it);
890out: 893out:
@@ -908,7 +911,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
908 lock_res_and_lock(lock); 911 lock_res_and_lock(lock);
909 cancelled = ldlm_is_cancel(lock); 912 cancelled = ldlm_is_cancel(lock);
910 unlock_res_and_lock(lock); 913 unlock_res_and_lock(lock);
911 ldlm_lock_put(lock); 914 LDLM_LOCK_PUT(lock);
912 } 915 }
913 916
914 CDEBUG(D_INODE, "lease for " DFID " broken? %d\n", 917 CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
@@ -926,7 +929,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
926 929
927/* Fills the obdo with the attributes for the lsm */ 930/* Fills the obdo with the attributes for the lsm */
928static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, 931static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
929 struct obdo *obdo, __u64 ioepoch, int sync) 932 struct obdo *obdo, __u64 ioepoch, int dv_flags)
930{ 933{
931 struct ptlrpc_request_set *set; 934 struct ptlrpc_request_set *set;
932 struct obd_info oinfo = { }; 935 struct obd_info oinfo = { };
@@ -945,9 +948,11 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
945 OBD_MD_FLMTIME | OBD_MD_FLCTIME | 948 OBD_MD_FLMTIME | OBD_MD_FLCTIME |
946 OBD_MD_FLGROUP | OBD_MD_FLEPOCH | 949 OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
947 OBD_MD_FLDATAVERSION; 950 OBD_MD_FLDATAVERSION;
948 if (sync) { 951 if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
949 oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS; 952 oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
950 oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK; 953 oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
954 if (dv_flags & LL_DV_WR_FLUSH)
955 oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
951 } 956 }
952 957
953 set = ptlrpc_prep_set(); 958 set = ptlrpc_prep_set();
@@ -960,11 +965,16 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
960 rc = ptlrpc_set_wait(set); 965 rc = ptlrpc_set_wait(set);
961 ptlrpc_set_destroy(set); 966 ptlrpc_set_destroy(set);
962 } 967 }
963 if (rc == 0) 968 if (rc == 0) {
964 oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | 969 oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
965 OBD_MD_FLATIME | OBD_MD_FLMTIME | 970 OBD_MD_FLATIME | OBD_MD_FLMTIME |
966 OBD_MD_FLCTIME | OBD_MD_FLSIZE | 971 OBD_MD_FLCTIME | OBD_MD_FLSIZE |
967 OBD_MD_FLDATAVERSION); 972 OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
973 if (dv_flags & LL_DV_WR_FLUSH &&
974 !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
975 oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
976 return -ENOTSUPP;
977 }
968 return rc; 978 return rc;
969} 979}
970 980
@@ -980,7 +990,7 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
980 990
981 lsm = ccc_inode_lsm_get(inode); 991 lsm = ccc_inode_lsm_get(inode);
982 rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode), 992 rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
983 obdo, ioepoch, sync); 993 obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
984 if (rc == 0) { 994 if (rc == 0) {
985 struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi; 995 struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
986 996
@@ -1363,7 +1373,8 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
1363} 1373}
1364 1374
1365int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, 1375int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1366 int flags, struct lov_user_md *lum, int lum_size) 1376 __u64 flags, struct lov_user_md *lum,
1377 int lum_size)
1367{ 1378{
1368 struct lov_stripe_md *lsm = NULL; 1379 struct lov_stripe_md *lsm = NULL;
1369 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags}; 1380 struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
@@ -1487,7 +1498,7 @@ out:
1487static int ll_lov_setea(struct inode *inode, struct file *file, 1498static int ll_lov_setea(struct inode *inode, struct file *file,
1488 unsigned long arg) 1499 unsigned long arg)
1489{ 1500{
1490 int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE; 1501 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
1491 struct lov_user_md *lump; 1502 struct lov_user_md *lump;
1492 int lum_size = sizeof(struct lov_user_md) + 1503 int lum_size = sizeof(struct lov_user_md) +
1493 sizeof(struct lov_user_ost_data); 1504 sizeof(struct lov_user_ost_data);
@@ -1521,7 +1532,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
1521 struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; 1532 struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
1522 struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; 1533 struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
1523 int lum_size, rc; 1534 int lum_size, rc;
1524 int flags = FMODE_WRITE; 1535 __u64 flags = FMODE_WRITE;
1525 1536
1526 /* first try with v1 which is smaller than v3 */ 1537 /* first try with v1 which is smaller than v3 */
1527 lum_size = sizeof(struct lov_user_md_v1); 1538 lum_size = sizeof(struct lov_user_md_v1);
@@ -1870,11 +1881,12 @@ error:
1870 * This value is computed using stripe object version on OST. 1881 * This value is computed using stripe object version on OST.
1871 * Version is computed using server side locking. 1882 * Version is computed using server side locking.
1872 * 1883 *
1873 * @param extent_lock Take extent lock. Not needed if a process is already 1884 * @param sync if do sync on the OST side;
1874 * holding the OST object group locks. 1885 * 0: no sync
1886 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
1887 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
1875 */ 1888 */
1876int ll_data_version(struct inode *inode, __u64 *data_version, 1889int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
1877 int extent_lock)
1878{ 1890{
1879 struct lov_stripe_md *lsm = NULL; 1891 struct lov_stripe_md *lsm = NULL;
1880 struct ll_sb_info *sbi = ll_i2sbi(inode); 1892 struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -1896,7 +1908,7 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
1896 goto out; 1908 goto out;
1897 } 1909 }
1898 1910
1899 rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, extent_lock); 1911 rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
1900 if (rc == 0) { 1912 if (rc == 0) {
1901 if (!(obdo->o_valid & OBD_MD_FLDATAVERSION)) 1913 if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
1902 rc = -EOPNOTSUPP; 1914 rc = -EOPNOTSUPP;
@@ -1932,7 +1944,7 @@ int ll_hsm_release(struct inode *inode)
1932 } 1944 }
1933 1945
1934 /* Grab latest data_version and [am]time values */ 1946 /* Grab latest data_version and [am]time values */
1935 rc = ll_data_version(inode, &data_version, 1); 1947 rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
1936 if (rc != 0) 1948 if (rc != 0)
1937 goto out; 1949 goto out;
1938 1950
@@ -2340,9 +2352,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2340 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv))) 2352 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
2341 return -EFAULT; 2353 return -EFAULT;
2342 2354
2343 rc = ll_data_version(inode, &idv.idv_version, 2355 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
2344 !(idv.idv_flags & LL_DV_NOFLUSH)); 2356 rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
2345
2346 if (rc == 0 && copy_to_user((char __user *)arg, &idv, 2357 if (rc == 0 && copy_to_user((char __user *)arg, &idv,
2347 sizeof(idv))) 2358 sizeof(idv)))
2348 return -EFAULT; 2359 return -EFAULT;
@@ -2508,7 +2519,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2508 rc = och->och_flags & 2519 rc = och->och_flags &
2509 (FMODE_READ | FMODE_WRITE); 2520 (FMODE_READ | FMODE_WRITE);
2510 unlock_res_and_lock(lock); 2521 unlock_res_and_lock(lock);
2511 ldlm_lock_put(lock); 2522 LDLM_LOCK_PUT(lock);
2512 } 2523 }
2513 } 2524 }
2514 mutex_unlock(&lli->lli_och_mutex); 2525 mutex_unlock(&lli->lli_och_mutex);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 8d2398003c5a..92b73ef3222c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -323,8 +323,9 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
323 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info, 323 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
324 lli_close_list); 324 lli_close_list);
325 list_del_init(&lli->lli_close_list); 325 list_del_init(&lli->lli_close_list);
326 } else if (atomic_read(&lcq->lcq_stop)) 326 } else if (atomic_read(&lcq->lcq_stop)) {
327 lli = ERR_PTR(-EALREADY); 327 lli = ERR_PTR(-EALREADY);
328 }
328 329
329 spin_unlock(&lcq->lcq_lock); 330 spin_unlock(&lcq->lcq_lock);
330 return lli; 331 return lli;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index ba24f09ba1f9..2a11664325ef 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -319,10 +319,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
319/* default to about 40meg of readahead on a given system. That much tied 319/* default to about 40meg of readahead on a given system. That much tied
320 * up in 512k readahead requests serviced at 40ms each is about 1GB/s. 320 * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
321 */ 321 */
322#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) 322#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
323 323
324/* default to read-ahead full files smaller than 2MB on the second read */ 324/* default to read-ahead full files smaller than 2MB on the second read */
325#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) 325#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
326 326
327enum ra_stat { 327enum ra_stat {
328 RA_STAT_HIT = 0, 328 RA_STAT_HIT = 0,
@@ -745,7 +745,7 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type);
745int ll_inode_permission(struct inode *inode, int mask); 745int ll_inode_permission(struct inode *inode, int mask);
746 746
747int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, 747int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
748 int flags, struct lov_user_md *lum, 748 __u64 flags, struct lov_user_md *lum,
749 int lum_size); 749 int lum_size);
750int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, 750int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
751 struct lov_mds_md **lmm, int *lmm_size, 751 struct lov_mds_md **lmm, int *lmm_size,
@@ -757,7 +757,7 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
757int ll_fsync(struct file *file, loff_t start, loff_t end, int data); 757int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
758int ll_merge_attr(const struct lu_env *env, struct inode *inode); 758int ll_merge_attr(const struct lu_env *env, struct inode *inode);
759int ll_fid2path(struct inode *inode, void __user *arg); 759int ll_fid2path(struct inode *inode, void __user *arg);
760int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock); 760int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
761int ll_hsm_release(struct inode *inode); 761int ll_hsm_release(struct inode *inode);
762 762
763/* llite/dcache.c */ 763/* llite/dcache.c */
@@ -901,7 +901,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
901static inline void ll_invalidate_page(struct page *vmpage) 901static inline void ll_invalidate_page(struct page *vmpage)
902{ 902{
903 struct address_space *mapping = vmpage->mapping; 903 struct address_space *mapping = vmpage->mapping;
904 loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; 904 loff_t offset = vmpage->index << PAGE_SHIFT;
905 905
906 LASSERT(PageLocked(vmpage)); 906 LASSERT(PageLocked(vmpage));
907 if (!mapping) 907 if (!mapping)
@@ -911,7 +911,7 @@ static inline void ll_invalidate_page(struct page *vmpage)
911 * truncate_complete_page() calls 911 * truncate_complete_page() calls
912 * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete(). 912 * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
913 */ 913 */
914 ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); 914 ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
915 truncate_complete_page(mapping, vmpage); 915 truncate_complete_page(mapping, vmpage);
916} 916}
917 917
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 95c55c316ec0..b0948a7b860b 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -166,12 +166,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
166 return -ENOMEM; 166 return -ENOMEM;
167 } 167 }
168 168
169 if (llite_root) {
170 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
171 if (err < 0)
172 CERROR("could not register mount in <debugfs>/lustre/llite\n");
173 }
174
175 /* indicate the features supported by this client */ 169 /* indicate the features supported by this client */
176 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH | 170 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
177 OBD_CONNECT_ATTRFID | 171 OBD_CONNECT_ATTRFID |
@@ -269,12 +263,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
269 valid != CLIENT_CONNECT_MDT_REQD) { 263 valid != CLIENT_CONNECT_MDT_REQD) {
270 char *buf; 264 char *buf;
271 265
272 buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 266 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
273 if (!buf) { 267 if (!buf) {
274 err = -ENOMEM; 268 err = -ENOMEM;
275 goto out_md_fid; 269 goto out_md_fid;
276 } 270 }
277 obd_connect_flags2str(buf, PAGE_CACHE_SIZE, 271 obd_connect_flags2str(buf, PAGE_SIZE,
278 valid ^ CLIENT_CONNECT_MDT_REQD, ","); 272 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
279 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n", 273 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
280 sbi->ll_md_exp->exp_obd->obd_name, buf); 274 sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -332,7 +326,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
332 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) 326 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
333 sbi->ll_md_brw_size = data->ocd_brw_size; 327 sbi->ll_md_brw_size = data->ocd_brw_size;
334 else 328 else
335 sbi->ll_md_brw_size = PAGE_CACHE_SIZE; 329 sbi->ll_md_brw_size = PAGE_SIZE;
336 330
337 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { 331 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
338 LCONSOLE_INFO("Layout lock feature supported.\n"); 332 LCONSOLE_INFO("Layout lock feature supported.\n");
@@ -552,6 +546,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
552 kfree(data); 546 kfree(data);
553 kfree(osfs); 547 kfree(osfs);
554 548
549 if (llite_root) {
550 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
551 if (err < 0) {
552 CERROR("%s: could not register mount in debugfs: "
553 "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
554 err = 0;
555 }
556 }
557
555 return err; 558 return err;
556out_root: 559out_root:
557 iput(root); 560 iput(root);
@@ -570,7 +573,6 @@ out_md:
570out: 573out:
571 kfree(data); 574 kfree(data);
572 kfree(osfs); 575 kfree(osfs);
573 ldebugfs_unregister_mountpoint(sbi);
574 return err; 576 return err;
575} 577}
576 578
@@ -1596,8 +1598,9 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
1596 " to the "DFID", inode %lu/%u(%p)\n", 1598 " to the "DFID", inode %lu/%u(%p)\n",
1597 PFID(&lli->lli_fid), PFID(&body->fid1), 1599 PFID(&lli->lli_fid), PFID(&body->fid1),
1598 inode->i_ino, inode->i_generation, inode); 1600 inode->i_ino, inode->i_generation, inode);
1599 } else 1601 } else {
1600 lli->lli_fid = body->fid1; 1602 lli->lli_fid = body->fid1;
1603 }
1601 } 1604 }
1602 1605
1603 LASSERT(fid_seq(&lli->lli_fid) != 0); 1606 LASSERT(fid_seq(&lli->lli_fid) != 0);
@@ -2065,11 +2068,11 @@ int ll_obd_statfs(struct inode *inode, void __user *arg)
2065 } 2068 }
2066 2069
2067 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32)); 2070 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2068 if (type & LL_STATFS_LMV) 2071 if (type & LL_STATFS_LMV) {
2069 exp = sbi->ll_md_exp; 2072 exp = sbi->ll_md_exp;
2070 else if (type & LL_STATFS_LOV) 2073 } else if (type & LL_STATFS_LOV) {
2071 exp = sbi->ll_dt_exp; 2074 exp = sbi->ll_dt_exp;
2072 else { 2075 } else {
2073 rc = -ENODEV; 2076 rc = -ENODEV;
2074 goto out_statfs; 2077 goto out_statfs;
2075 } 2078 }
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 5b4382cca0d7..4f6697a599d7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
58 size_t count) 58 size_t count)
59{ 59{
60 policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) + 60 policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); 61 (vma->vm_pgoff << PAGE_SHIFT);
62 policy->l_extent.end = (policy->l_extent.start + count - 1) | 62 policy->l_extent.end = (policy->l_extent.start + count - 1) |
63 ~PAGE_MASK; 63 ~PAGE_MASK;
64} 64}
@@ -325,7 +325,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
325 325
326 vmpage = vio->u.fault.ft_vmpage; 326 vmpage = vio->u.fault.ft_vmpage;
327 if (result != 0 && vmpage) { 327 if (result != 0 && vmpage) {
328 page_cache_release(vmpage); 328 put_page(vmpage);
329 vmf->page = NULL; 329 vmf->page = NULL;
330 } 330 }
331 } 331 }
@@ -364,7 +364,7 @@ restart:
364 lock_page(vmpage); 364 lock_page(vmpage);
365 if (unlikely(!vmpage->mapping)) { /* unlucky */ 365 if (unlikely(!vmpage->mapping)) { /* unlucky */
366 unlock_page(vmpage); 366 unlock_page(vmpage);
367 page_cache_release(vmpage); 367 put_page(vmpage);
368 vmf->page = NULL; 368 vmf->page = NULL;
369 369
370 if (!printed && ++count > 16) { 370 if (!printed && ++count > 16) {
@@ -461,7 +461,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
461 LASSERTF(last > first, "last %llu first %llu\n", last, first); 461 LASSERTF(last > first, "last %llu first %llu\n", last, first);
462 if (mapping_mapped(mapping)) { 462 if (mapping_mapped(mapping)) {
463 rc = 0; 463 rc = 0;
464 unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, 464 unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
465 last - first + 1, 0); 465 last - first + 1, 0);
466 } 466 }
467 467
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b725fc16cf49..813a9a354e5f 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
218 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; 218 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
219 bio_for_each_segment(bvec, bio, iter) { 219 bio_for_each_segment(bvec, bio, iter) {
220 BUG_ON(bvec.bv_offset != 0); 220 BUG_ON(bvec.bv_offset != 0);
221 BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); 221 BUG_ON(bvec.bv_len != PAGE_SIZE);
222 222
223 pages[page_count] = bvec.bv_page; 223 pages[page_count] = bvec.bv_page;
224 offsets[page_count] = offset; 224 offsets[page_count] = offset;
@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
232 (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ, 232 (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
233 page_count); 233 page_count);
234 234
235 pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; 235 pvec->ldp_size = page_count << PAGE_SHIFT;
236 pvec->ldp_nr = page_count; 236 pvec->ldp_nr = page_count;
237 237
238 /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to 238 /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -274,8 +274,9 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
274 if (lo->lo_biotail) { 274 if (lo->lo_biotail) {
275 lo->lo_biotail->bi_next = bio; 275 lo->lo_biotail->bi_next = bio;
276 lo->lo_biotail = bio; 276 lo->lo_biotail = bio;
277 } else 277 } else {
278 lo->lo_bio = lo->lo_biotail = bio; 278 lo->lo_bio = lo->lo_biotail = bio;
279 }
279 spin_unlock_irqrestore(&lo->lo_lock, flags); 280 spin_unlock_irqrestore(&lo->lo_lock, flags);
280 281
281 atomic_inc(&lo->lo_pending); 282 atomic_inc(&lo->lo_pending);
@@ -507,7 +508,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
507 508
508 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); 509 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
509 510
510 lo->lo_blocksize = PAGE_CACHE_SIZE; 511 lo->lo_blocksize = PAGE_SIZE;
511 lo->lo_device = bdev; 512 lo->lo_device = bdev;
512 lo->lo_flags = lo_flags; 513 lo->lo_flags = lo_flags;
513 lo->lo_backing_file = file; 514 lo->lo_backing_file = file;
@@ -525,11 +526,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
525 lo->lo_queue->queuedata = lo; 526 lo->lo_queue->queuedata = lo;
526 527
527 /* queue parameters */ 528 /* queue parameters */
528 CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); 529 CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
529 blk_queue_logical_block_size(lo->lo_queue, 530 blk_queue_logical_block_size(lo->lo_queue,
530 (unsigned short)PAGE_CACHE_SIZE); 531 (unsigned short)PAGE_SIZE);
531 blk_queue_max_hw_sectors(lo->lo_queue, 532 blk_queue_max_hw_sectors(lo->lo_queue,
532 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); 533 LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
533 blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); 534 blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
534 535
535 set_capacity(disks[lo->lo_number], size); 536 set_capacity(disks[lo->lo_number], size);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 091144fa97dd..d99d8c3d602a 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
233 pages_number = sbi->ll_ra_info.ra_max_pages; 233 pages_number = sbi->ll_ra_info.ra_max_pages;
234 spin_unlock(&sbi->ll_lock); 234 spin_unlock(&sbi->ll_lock);
235 235
236 mult = 1 << (20 - PAGE_CACHE_SHIFT); 236 mult = 1 << (20 - PAGE_SHIFT);
237 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); 237 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
238} 238}
239 239
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
251 if (rc) 251 if (rc)
252 return rc; 252 return rc;
253 253
254 pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ 254 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
255 255
256 if (pages_number > totalram_pages / 2) { 256 if (pages_number > totalram_pages / 2) {
257 257
258 CERROR("can't set file readahead more than %lu MB\n", 258 CERROR("can't set file readahead more than %lu MB\n",
259 totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/ 259 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
260 return -ERANGE; 260 return -ERANGE;
261 } 261 }
262 262
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
281 pages_number = sbi->ll_ra_info.ra_max_pages_per_file; 281 pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
282 spin_unlock(&sbi->ll_lock); 282 spin_unlock(&sbi->ll_lock);
283 283
284 mult = 1 << (20 - PAGE_CACHE_SHIFT); 284 mult = 1 << (20 - PAGE_SHIFT);
285 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); 285 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
286} 286}
287 287
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
326 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; 326 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
327 spin_unlock(&sbi->ll_lock); 327 spin_unlock(&sbi->ll_lock);
328 328
329 mult = 1 << (20 - PAGE_CACHE_SHIFT); 329 mult = 1 << (20 - PAGE_SHIFT);
330 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); 330 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
331} 331}
332 332
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
349 */ 349 */
350 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { 350 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
351 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", 351 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
352 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT)); 352 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
353 return -ERANGE; 353 return -ERANGE;
354 } 354 }
355 355
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
366 struct super_block *sb = m->private; 366 struct super_block *sb = m->private;
367 struct ll_sb_info *sbi = ll_s2sbi(sb); 367 struct ll_sb_info *sbi = ll_s2sbi(sb);
368 struct cl_client_cache *cache = &sbi->ll_cache; 368 struct cl_client_cache *cache = &sbi->ll_cache;
369 int shift = 20 - PAGE_CACHE_SHIFT; 369 int shift = 20 - PAGE_SHIFT;
370 int max_cached_mb; 370 int max_cached_mb;
371 int unused_mb; 371 int unused_mb;
372 372
@@ -407,7 +407,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
407 return -EFAULT; 407 return -EFAULT;
408 kernbuf[count] = 0; 408 kernbuf[count] = 0;
409 409
410 mult = 1 << (20 - PAGE_CACHE_SHIFT); 410 mult = 1 << (20 - PAGE_SHIFT);
411 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) - 411 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
412 kernbuf; 412 kernbuf;
413 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); 413 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -417,7 +417,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
417 if (pages_number < 0 || pages_number > totalram_pages) { 417 if (pages_number < 0 || pages_number > totalram_pages) {
418 CERROR("%s: can't set max cache more than %lu MB\n", 418 CERROR("%s: can't set max cache more than %lu MB\n",
419 ll_get_fsname(sb, NULL, 0), 419 ll_get_fsname(sb, NULL, 0),
420 totalram_pages >> (20 - PAGE_CACHE_SHIFT)); 420 totalram_pages >> (20 - PAGE_SHIFT));
421 return -ERANGE; 421 return -ERANGE;
422 } 422 }
423 423
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index f8f98e4e8258..856170762ef7 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -128,10 +128,12 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
128 if (rc != 0) { 128 if (rc != 0) {
129 iget_failed(inode); 129 iget_failed(inode);
130 inode = NULL; 130 inode = NULL;
131 } else 131 } else {
132 unlock_new_inode(inode); 132 unlock_new_inode(inode);
133 } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) 133 }
134 } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
134 ll_update_inode(inode, md); 135 ll_update_inode(inode, md);
136 }
135 CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n", 137 CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
136 inode, PFID(&md->body->fid1)); 138 inode, PFID(&md->body->fid1));
137 } 139 }
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 7d5dd3848552..fee319ce472d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -132,8 +132,9 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
132 lcc->lcc_page = page; 132 lcc->lcc_page = page;
133 lu_ref_add(&page->cp_reference, "cl_io", io); 133 lu_ref_add(&page->cp_reference, "cl_io", io);
134 result = 0; 134 result = 0;
135 } else 135 } else {
136 result = PTR_ERR(page); 136 result = PTR_ERR(page);
137 }
137 } 138 }
138 if (result) { 139 if (result) {
139 ll_cl_fini(lcc); 140 ll_cl_fini(lcc);
@@ -350,7 +351,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
350 } 351 }
351 if (rc != 1) 352 if (rc != 1)
352 unlock_page(vmpage); 353 unlock_page(vmpage);
353 page_cache_release(vmpage); 354 put_page(vmpage);
354 } else { 355 } else {
355 which = RA_STAT_FAILED_GRAB_PAGE; 356 which = RA_STAT_FAILED_GRAB_PAGE;
356 msg = "g_c_p_n failed"; 357 msg = "g_c_p_n failed";
@@ -373,13 +374,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
373 * striped over, rather than having a constant value for all files here. 374 * striped over, rather than having a constant value for all files here.
374 */ 375 */
375 376
376/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). 377/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
377 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled 378 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
378 * by default, this should be adjusted corresponding with max_read_ahead_mb 379 * by default, this should be adjusted corresponding with max_read_ahead_mb
379 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used 380 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
380 * up quickly which will affect read performance significantly. See LU-2816 381 * up quickly which will affect read performance significantly. See LU-2816
381 */ 382 */
382#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) 383#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
383 384
384static inline int stride_io_mode(struct ll_readahead_state *ras) 385static inline int stride_io_mode(struct ll_readahead_state *ras)
385{ 386{
@@ -488,8 +489,9 @@ static int ll_read_ahead_pages(const struct lu_env *env,
488 if (rc == 1) { 489 if (rc == 1) {
489 (*reserved_pages)--; 490 (*reserved_pages)--;
490 count++; 491 count++;
491 } else if (rc == -ENOLCK) 492 } else if (rc == -ENOLCK) {
492 break; 493 break;
494 }
493 } else if (stride_ria) { 495 } else if (stride_ria) {
494 /* If it is not in the read-ahead window, and it is 496 /* If it is not in the read-ahead window, and it is
495 * read-ahead mode, then check whether it should skip 497 * read-ahead mode, then check whether it should skip
@@ -587,7 +589,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
587 end = rpc_boundary; 589 end = rpc_boundary;
588 590
589 /* Truncate RA window to end of file */ 591 /* Truncate RA window to end of file */
590 end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); 592 end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
591 593
592 ras->ras_next_readahead = max(end, end + 1); 594 ras->ras_next_readahead = max(end, end + 1);
593 RAS_CDEBUG(ras); 595 RAS_CDEBUG(ras);
@@ -643,7 +645,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
643 if (reserved != 0) 645 if (reserved != 0)
644 ll_ra_count_put(ll_i2sbi(inode), reserved); 646 ll_ra_count_put(ll_i2sbi(inode), reserved);
645 647
646 if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) 648 if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
647 ll_ra_stats_inc(inode, RA_STAT_EOF); 649 ll_ra_stats_inc(inode, RA_STAT_EOF);
648 650
649 /* if we didn't get to the end of the region we reserved from 651 /* if we didn't get to the end of the region we reserved from
@@ -852,8 +854,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
852 if (ras->ras_requests == 2 && !ras->ras_request_index) { 854 if (ras->ras_requests == 2 && !ras->ras_request_index) {
853 __u64 kms_pages; 855 __u64 kms_pages;
854 856
855 kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 857 kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
856 PAGE_CACHE_SHIFT; 858 PAGE_SHIFT;
857 859
858 CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, 860 CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
859 ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); 861 ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1043,7 +1045,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
1043 * PageWriteback or clean the page. 1045 * PageWriteback or clean the page.
1044 */ 1046 */
1045 result = cl_sync_file_range(inode, offset, 1047 result = cl_sync_file_range(inode, offset,
1046 offset + PAGE_CACHE_SIZE - 1, 1048 offset + PAGE_SIZE - 1,
1047 CL_FSYNC_LOCAL, 1); 1049 CL_FSYNC_LOCAL, 1);
1048 if (result > 0) { 1050 if (result > 0) {
1049 /* actually we may have written more than one page. 1051 /* actually we may have written more than one page.
@@ -1081,7 +1083,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1081 int ignore_layout = 0; 1083 int ignore_layout = 0;
1082 1084
1083 if (wbc->range_cyclic) { 1085 if (wbc->range_cyclic) {
1084 start = mapping->writeback_index << PAGE_CACHE_SHIFT; 1086 start = mapping->writeback_index << PAGE_SHIFT;
1085 end = OBD_OBJECT_EOF; 1087 end = OBD_OBJECT_EOF;
1086 } else { 1088 } else {
1087 start = wbc->range_start; 1089 start = wbc->range_start;
@@ -1111,7 +1113,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1111 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { 1113 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
1112 if (end == OBD_OBJECT_EOF) 1114 if (end == OBD_OBJECT_EOF)
1113 end = i_size_read(inode); 1115 end = i_size_read(inode);
1114 mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1; 1116 mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
1115 } 1117 }
1116 return result; 1118 return result;
1117} 1119}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 65baeebead72..cad6aa935bb2 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
87 * below because they are run with page locked and all our io is 87 * below because they are run with page locked and all our io is
88 * happening with locked page too 88 * happening with locked page too
89 */ 89 */
90 if (offset == 0 && length == PAGE_CACHE_SIZE) { 90 if (offset == 0 && length == PAGE_SIZE) {
91 env = cl_env_get(&refcheck); 91 env = cl_env_get(&refcheck);
92 if (!IS_ERR(env)) { 92 if (!IS_ERR(env)) {
93 inode = vmpage->mapping->host; 93 inode = vmpage->mapping->host;
@@ -98,8 +98,9 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
98 cl_page_delete(env, page); 98 cl_page_delete(env, page);
99 cl_page_put(env, page); 99 cl_page_put(env, page);
100 } 100 }
101 } else 101 } else {
102 LASSERT(vmpage->private == 0); 102 LASSERT(vmpage->private == 0);
103 }
103 cl_env_put(env, &refcheck); 104 cl_env_put(env, &refcheck);
104 } 105 }
105 } 106 }
@@ -178,8 +179,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
178 return -EFBIG; 179 return -EFBIG;
179 } 180 }
180 181
181 *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 182 *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
182 *max_pages -= user_addr >> PAGE_CACHE_SHIFT; 183 *max_pages -= user_addr >> PAGE_SHIFT;
183 184
184 *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); 185 *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
185 if (*pages) { 186 if (*pages) {
@@ -202,7 +203,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
202 for (i = 0; i < npages; i++) { 203 for (i = 0; i < npages; i++) {
203 if (do_dirty) 204 if (do_dirty)
204 set_page_dirty_lock(pages[i]); 205 set_page_dirty_lock(pages[i]);
205 page_cache_release(pages[i]); 206 put_page(pages[i]);
206 } 207 }
207 kvfree(pages); 208 kvfree(pages);
208} 209}
@@ -342,7 +343,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
342 * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. 343 * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
343 */ 344 */
344#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ 345#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
345 PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) 346 PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
346static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, 347static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
347 loff_t file_offset) 348 loff_t file_offset)
348{ 349{
@@ -367,8 +368,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
367 CDEBUG(D_VFSTRACE, 368 CDEBUG(D_VFSTRACE,
368 "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", 369 "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
369 inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, 370 inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
370 file_offset, file_offset, count >> PAGE_CACHE_SHIFT, 371 file_offset, file_offset, count >> PAGE_SHIFT,
371 MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); 372 MAX_DIO_SIZE >> PAGE_SHIFT);
372 373
373 /* Check that all user buffers are aligned as well */ 374 /* Check that all user buffers are aligned as well */
374 if (iov_iter_alignment(iter) & ~PAGE_MASK) 375 if (iov_iter_alignment(iter) & ~PAGE_MASK)
@@ -417,8 +418,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
417 * page worth of page pointers = 4MB on i386. 418 * page worth of page pointers = 4MB on i386.
418 */ 419 */
419 if (result == -ENOMEM && 420 if (result == -ENOMEM &&
420 size > (PAGE_CACHE_SIZE / sizeof(*pages)) * 421 size > (PAGE_SIZE / sizeof(*pages)) *
421 PAGE_CACHE_SIZE) { 422 PAGE_SIZE) {
422 size = ((((size / 2) - 1) | 423 size = ((((size / 2) - 1) |
423 ~PAGE_MASK) + 1) & 424 ~PAGE_MASK) + 1) &
424 PAGE_MASK; 425 PAGE_MASK;
@@ -493,9 +494,9 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
493 struct cl_io *io; 494 struct cl_io *io;
494 struct cl_page *page; 495 struct cl_page *page;
495 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; 496 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
496 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 497 pgoff_t index = pos >> PAGE_SHIFT;
497 struct page *vmpage = NULL; 498 struct page *vmpage = NULL;
498 unsigned int from = pos & (PAGE_CACHE_SIZE - 1); 499 unsigned int from = pos & (PAGE_SIZE - 1);
499 unsigned int to = from + len; 500 unsigned int to = from + len;
500 int result = 0; 501 int result = 0;
501 502
@@ -524,7 +525,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
524 */ 525 */
525 if (vmpage && plist->pl_nr > 0) { 526 if (vmpage && plist->pl_nr > 0) {
526 unlock_page(vmpage); 527 unlock_page(vmpage);
527 page_cache_release(vmpage); 528 put_page(vmpage);
528 vmpage = NULL; 529 vmpage = NULL;
529 } 530 }
530 531
@@ -577,7 +578,7 @@ out:
577 if (result < 0) { 578 if (result < 0) {
578 if (vmpage) { 579 if (vmpage) {
579 unlock_page(vmpage); 580 unlock_page(vmpage);
580 page_cache_release(vmpage); 581 put_page(vmpage);
581 } 582 }
582 if (!IS_ERR(lcc)) 583 if (!IS_ERR(lcc))
583 ll_cl_fini(lcc); 584 ll_cl_fini(lcc);
@@ -597,11 +598,11 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
597 struct cl_io *io; 598 struct cl_io *io;
598 struct vvp_io *vio; 599 struct vvp_io *vio;
599 struct cl_page *page; 600 struct cl_page *page;
600 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 601 unsigned from = pos & (PAGE_SIZE - 1);
601 bool unplug = false; 602 bool unplug = false;
602 int result = 0; 603 int result = 0;
603 604
604 page_cache_release(vmpage); 605 put_page(vmpage);
605 606
606 env = lcc->lcc_env; 607 env = lcc->lcc_env;
607 page = lcc->lcc_page; 608 page = lcc->lcc_page;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index e35c1a1f272e..644a31f62464 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -342,8 +342,9 @@ int cl_sb_init(struct super_block *sb)
342 sbi->ll_site = cl2lu_dev(cl)->ld_site; 342 sbi->ll_site = cl2lu_dev(cl)->ld_site;
343 } 343 }
344 cl_env_put(env, &refcheck); 344 cl_env_put(env, &refcheck);
345 } else 345 } else {
346 rc = PTR_ERR(env); 346 rc = PTR_ERR(env);
347 }
347 return rc; 348 return rc;
348} 349}
349 350
@@ -498,7 +499,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
498 id.vpi_index = vmpage->index; 499 id.vpi_index = vmpage->index;
499 /* Cant support over 16T file */ 500 /* Cant support over 16T file */
500 nr = !(vmpage->index > 0xffffffff); 501 nr = !(vmpage->index > 0xffffffff);
501 page_cache_release(vmpage); 502 put_page(vmpage);
502 } 503 }
503 504
504 lu_object_ref_del(&clob->co_lu, "dump", current); 505 lu_object_ref_del(&clob->co_lu, "dump", current);
@@ -580,8 +581,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
580 lock_page(vmpage); 581 lock_page(vmpage);
581 page = cl_vmpage_page(vmpage, clob); 582 page = cl_vmpage_page(vmpage, clob);
582 unlock_page(vmpage); 583 unlock_page(vmpage);
583 584 put_page(vmpage);
584 page_cache_release(vmpage);
585 } 585 }
586 586
587 seq_printf(f, "%8x@" DFID ": ", id.vpi_index, 587 seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
@@ -589,16 +589,19 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
589 if (page) { 589 if (page) {
590 vvp_pgcache_page_show(env, f, page); 590 vvp_pgcache_page_show(env, f, page);
591 cl_page_put(env, page); 591 cl_page_put(env, page);
592 } else 592 } else {
593 seq_puts(f, "missing\n"); 593 seq_puts(f, "missing\n");
594 }
594 lu_object_ref_del(&clob->co_lu, "dump", current); 595 lu_object_ref_del(&clob->co_lu, "dump", current);
595 cl_object_put(env, clob); 596 cl_object_put(env, clob);
596 } else 597 } else {
597 seq_printf(f, "%llx missing\n", pos); 598 seq_printf(f, "%llx missing\n", pos);
599 }
598 cl_env_put(env, &refcheck); 600 cl_env_put(env, &refcheck);
599 result = 0; 601 result = 0;
600 } else 602 } else {
601 result = PTR_ERR(env); 603 result = PTR_ERR(env);
604 }
602 return result; 605 return result;
603} 606}
604 607
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index aed7b8e41a51..26dfbf1d2345 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -180,9 +180,8 @@ static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
180 * --bug 17336 180 * --bug 17336
181 */ 181 */
182 loff_t size = i_size_read(inode); 182 loff_t size = i_size_read(inode);
183 loff_t cur_index = start >> PAGE_CACHE_SHIFT; 183 loff_t cur_index = start >> PAGE_SHIFT;
184 loff_t size_index = (size - 1) >> 184 loff_t size_index = (size - 1) >> PAGE_SHIFT;
185 PAGE_CACHE_SHIFT;
186 185
187 if ((size == 0 && cur_index != 0) || 186 if ((size == 0 && cur_index != 0) ||
188 size_index < cur_index) 187 size_index < cur_index)
@@ -714,7 +713,7 @@ static int vvp_io_read_start(const struct lu_env *env,
714 if (!vio->vui_ra_valid) { 713 if (!vio->vui_ra_valid) {
715 vio->vui_ra_valid = true; 714 vio->vui_ra_valid = true;
716 vio->vui_ra_start = cl_index(obj, pos); 715 vio->vui_ra_start = cl_index(obj, pos);
717 vio->vui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); 716 vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
718 ll_ras_enter(file); 717 ll_ras_enter(file);
719 } 718 }
720 719
@@ -1179,8 +1178,9 @@ static int vvp_io_fault_start(const struct lu_env *env,
1179 if (result == -EDQUOT) 1178 if (result == -EDQUOT)
1180 result = -ENOSPC; 1179 result = -ENOSPC;
1181 goto out; 1180 goto out;
1182 } else 1181 } else {
1183 cl_page_disown(env, io, page); 1182 cl_page_disown(env, io, page);
1183 }
1184 } 1184 }
1185 } 1185 }
1186 1186
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 0c92293dbf2e..6cd2af7a958f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -64,7 +64,7 @@ static void vvp_page_fini_common(struct vvp_page *vpg)
64 struct page *vmpage = vpg->vpg_page; 64 struct page *vmpage = vpg->vpg_page;
65 65
66 LASSERT(vmpage); 66 LASSERT(vmpage);
67 page_cache_release(vmpage); 67 put_page(vmpage);
68} 68}
69 69
70static void vvp_page_fini(const struct lu_env *env, 70static void vvp_page_fini(const struct lu_env *env,
@@ -561,7 +561,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
561 CLOBINVRNT(env, obj, vvp_object_invariant(obj)); 561 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
562 562
563 vpg->vpg_page = vmpage; 563 vpg->vpg_page = vmpage;
564 page_cache_get(vmpage); 564 get_page(vmpage);
565 565
566 INIT_LIST_HEAD(&vpg->vpg_pending_linkage); 566 INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
567 if (page->cp_type == CPT_CACHEABLE) { 567 if (page->cp_type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b68dcc921ca2..9f6fcfe8b988 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -181,8 +181,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
181 size = rc; 181 size = rc;
182 182
183 pv = (const char *)new_value; 183 pv = (const char *)new_value;
184 } else 184 } else {
185 return -EOPNOTSUPP; 185 return -EOPNOTSUPP;
186 }
186 187
187 valid |= rce_ops2valid(rce->rce_ops); 188 valid |= rce_ops2valid(rce->rce_ops);
188 } 189 }
@@ -243,12 +244,12 @@ int ll_setxattr(struct dentry *dentry, const char *name,
243 lump->lmm_stripe_offset = -1; 244 lump->lmm_stripe_offset = -1;
244 245
245 if (lump && S_ISREG(inode->i_mode)) { 246 if (lump && S_ISREG(inode->i_mode)) {
246 int flags = FMODE_WRITE; 247 __u64 it_flags = FMODE_WRITE;
247 int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ? 248 int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
248 sizeof(*lump) : sizeof(struct lov_user_md_v3); 249 sizeof(*lump) : sizeof(struct lov_user_md_v3);
249 250
250 rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump, 251 rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
251 lum_size); 252 lump, lum_size);
252 /* b10667: rc always be 0 here for now */ 253 /* b10667: rc always be 0 here for now */
253 rc = 0; 254 rc = 0;
254 } else if (S_ISDIR(inode->i_mode)) { 255 } else if (S_ISDIR(inode->i_mode)) {
@@ -423,8 +424,7 @@ getxattr_nocache:
423 if (rce && rce->rce_ops == RMT_LSETFACL) { 424 if (rce && rce->rce_ops == RMT_LSETFACL) {
424 ext_acl_xattr_header *acl; 425 ext_acl_xattr_header *acl;
425 426
426 acl = lustre_posix_acl_xattr_2ext( 427 acl = lustre_posix_acl_xattr_2ext(buffer, rc);
427 (posix_acl_xattr_header *)buffer, rc);
428 if (IS_ERR(acl)) { 428 if (IS_ERR(acl)) {
429 rc = PTR_ERR(acl); 429 rc = PTR_ERR(acl);
430 goto out; 430 goto out;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 8bd2dc577bf8..2f6457f5e98c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -926,7 +926,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
926 struct obd_quotactl *oqctl; 926 struct obd_quotactl *oqctl;
927 927
928 if (qctl->qc_valid == QC_MDTIDX) { 928 if (qctl->qc_valid == QC_MDTIDX) {
929 if (qctl->qc_idx < 0 || count <= qctl->qc_idx) 929 if (count <= qctl->qc_idx)
930 return -EINVAL; 930 return -EINVAL;
931 931
932 tgt = lmv->tgts[qctl->qc_idx]; 932 tgt = lmv->tgts[qctl->qc_idx];
@@ -1122,8 +1122,9 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
1122 if (!rc) 1122 if (!rc)
1123 rc = err; 1123 rc = err;
1124 } 1124 }
1125 } else 1125 } else {
1126 set = 1; 1126 set = 1;
1127 }
1127 } 1128 }
1128 if (!set && !rc) 1129 if (!set && !rc)
1129 rc = -EIO; 1130 rc = -EIO;
@@ -2017,7 +2018,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2017 * |s|e|f|p|ent| 0 | ... | 0 | 2018 * |s|e|f|p|ent| 0 | ... | 0 |
2018 * '----------------- -----' 2019 * '----------------- -----'
2019 * 2020 *
2020 * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is 2021 * However, on hosts where the native VM page size (PAGE_SIZE) is
2021 * larger than LU_PAGE_SIZE, a single host page may contain multiple 2022 * larger than LU_PAGE_SIZE, a single host page may contain multiple
2022 * lu_dirpages. After reading the lu_dirpages from the MDS, the 2023 * lu_dirpages. After reading the lu_dirpages from the MDS, the
2023 * ldp_hash_end of the first lu_dirpage refers to the one immediately 2024 * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2049,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2048 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span 2049 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
2049 * to the first entry of the next lu_dirpage. 2050 * to the first entry of the next lu_dirpage.
2050 */ 2051 */
2051#if PAGE_CACHE_SIZE > LU_PAGE_SIZE 2052#if PAGE_SIZE > LU_PAGE_SIZE
2052static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) 2053static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
2053{ 2054{
2054 int i; 2055 int i;
@@ -2101,7 +2102,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
2101} 2102}
2102#else 2103#else
2103#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) 2104#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
2104#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ 2105#endif /* PAGE_SIZE > LU_PAGE_SIZE */
2105 2106
2106static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, 2107static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2107 struct page **pages, struct ptlrpc_request **request) 2108 struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2111,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2110 struct lmv_obd *lmv = &obd->u.lmv; 2111 struct lmv_obd *lmv = &obd->u.lmv;
2111 __u64 offset = op_data->op_offset; 2112 __u64 offset = op_data->op_offset;
2112 int rc; 2113 int rc;
2113 int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ 2114 int ncfspgs; /* pages read in PAGE_SIZE */
2114 int nlupgs; /* pages read in LU_PAGE_SIZE */ 2115 int nlupgs; /* pages read in LU_PAGE_SIZE */
2115 struct lmv_tgt_desc *tgt; 2116 struct lmv_tgt_desc *tgt;
2116 2117
@@ -2129,8 +2130,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2129 if (rc != 0) 2130 if (rc != 0)
2130 return rc; 2131 return rc;
2131 2132
2132 ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1) 2133 ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
2133 >> PAGE_CACHE_SHIFT; 2134 >> PAGE_SHIFT;
2134 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; 2135 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
2135 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); 2136 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
2136 LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages); 2137 LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index dccc63496982..dae8e89bcf6d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -262,8 +262,9 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
262 if (lr) { 262 if (lr) {
263 cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops); 263 cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
264 result = 0; 264 result = 0;
265 } else 265 } else {
266 result = -ENOMEM; 266 result = -ENOMEM;
267 }
267 return result; 268 return result;
268} 269}
269 270
@@ -332,14 +333,15 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
332 cl_page_list_init(&em->emrg_page_list); 333 cl_page_list_init(&em->emrg_page_list);
333 em->emrg_env = cl_env_alloc(&em->emrg_refcheck, 334 em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
334 LCT_REMEMBER | LCT_NOREF); 335 LCT_REMEMBER | LCT_NOREF);
335 if (!IS_ERR(em->emrg_env)) 336 if (!IS_ERR(em->emrg_env)) {
336 em->emrg_env->le_ctx.lc_cookie = 0x2; 337 em->emrg_env->le_ctx.lc_cookie = 0x2;
337 else { 338 } else {
338 result = PTR_ERR(em->emrg_env); 339 result = PTR_ERR(em->emrg_env);
339 em->emrg_env = NULL; 340 em->emrg_env = NULL;
340 } 341 }
341 } else 342 } else {
342 result = -ENOMEM; 343 result = -ENOMEM;
344 }
343 } 345 }
344 if (result != 0) { 346 if (result != 0) {
345 lov_emerg_free(emerg, nr); 347 lov_emerg_free(emerg, nr);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 41512372c472..da4784b474e4 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -225,8 +225,9 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
225 if (!sub->sub_io_initialized) { 225 if (!sub->sub_io_initialized) {
226 sub->sub_stripe = stripe; 226 sub->sub_stripe = stripe;
227 rc = lov_io_sub_init(env, lio, sub); 227 rc = lov_io_sub_init(env, lio, sub);
228 } else 228 } else {
229 rc = 0; 229 rc = 0;
230 }
230 if (rc == 0) 231 if (rc == 0)
231 lov_sub_enter(sub); 232 lov_sub_enter(sub);
232 else 233 else
@@ -276,10 +277,11 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
276static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, 277static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
277 struct cl_io *io) 278 struct cl_io *io)
278{ 279{
279 struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; 280 struct lov_stripe_md *lsm;
280 int result; 281 int result;
281 282
282 LASSERT(lio->lis_object); 283 LASSERT(lio->lis_object);
284 lsm = lio->lis_object->lo_lsm;
283 285
284 /* 286 /*
285 * Need to be optimized, we can't afford to allocate a piece of memory 287 * Need to be optimized, we can't afford to allocate a piece of memory
@@ -294,8 +296,9 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
294 lio->lis_single_subio_index = -1; 296 lio->lis_single_subio_index = -1;
295 lio->lis_active_subios = 0; 297 lio->lis_active_subios = 0;
296 result = 0; 298 result = 0;
297 } else 299 } else {
298 result = -ENOMEM; 300 result = -ENOMEM;
301 }
299 return result; 302 return result;
300} 303}
301 304
@@ -413,8 +416,9 @@ static int lov_io_iter_init(const struct lu_env *env,
413 lov_sub_put(sub); 416 lov_sub_put(sub);
414 CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n", 417 CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
415 stripe, start, end); 418 stripe, start, end);
416 } else 419 } else {
417 rc = PTR_ERR(sub); 420 rc = PTR_ERR(sub);
421 }
418 422
419 if (!rc) 423 if (!rc)
420 list_add_tail(&sub->sub_linkage, &lio->lis_active); 424 list_add_tail(&sub->sub_linkage, &lio->lis_active);
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 029cd4d62796..56ef41d17ad7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -154,6 +154,7 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
154 valid &= src->o_valid; 154 valid &= src->o_valid;
155 155
156 if (*set) { 156 if (*set) {
157 tgt->o_valid &= valid;
157 if (valid & OBD_MD_FLSIZE) { 158 if (valid & OBD_MD_FLSIZE) {
158 /* this handles sparse files properly */ 159 /* this handles sparse files properly */
159 u64 lov_size; 160 u64 lov_size;
@@ -172,12 +173,22 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
172 tgt->o_mtime = src->o_mtime; 173 tgt->o_mtime = src->o_mtime;
173 if (valid & OBD_MD_FLDATAVERSION) 174 if (valid & OBD_MD_FLDATAVERSION)
174 tgt->o_data_version += src->o_data_version; 175 tgt->o_data_version += src->o_data_version;
176
177 /* handle flags */
178 if (valid & OBD_MD_FLFLAGS)
179 tgt->o_flags &= src->o_flags;
180 else
181 tgt->o_flags = 0;
175 } else { 182 } else {
176 memcpy(tgt, src, sizeof(*tgt)); 183 memcpy(tgt, src, sizeof(*tgt));
177 tgt->o_oi = lsm->lsm_oi; 184 tgt->o_oi = lsm->lsm_oi;
185 tgt->o_valid = valid;
178 if (valid & OBD_MD_FLSIZE) 186 if (valid & OBD_MD_FLSIZE)
179 tgt->o_size = lov_stripe_size(lsm, src->o_size, 187 tgt->o_size = lov_stripe_size(lsm, src->o_size,
180 stripeno); 188 stripeno);
189 tgt->o_flags = 0;
190 if (valid & OBD_MD_FLFLAGS)
191 tgt->o_flags = src->o_flags;
181 } 192 }
182 193
183 /* data_version needs to be valid on all stripes to be correct! */ 194 /* data_version needs to be valid on all stripes to be correct! */
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 6a353d18eefe..561d493b2cdf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -283,8 +283,9 @@ static int lov_init_raid0(const struct lu_env *env,
283 } 283 }
284 if (result == 0) 284 if (result == 0)
285 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz; 285 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
286 } else 286 } else {
287 result = -ENOMEM; 287 result = -ENOMEM;
288 }
288out: 289out:
289 return result; 290 return result;
290} 291}
@@ -935,8 +936,9 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
935 * for object with different layouts. 936 * for object with different layouts.
936 */ 937 */
937 obj->lo_ops = &lov_lu_obj_ops; 938 obj->lo_ops = &lov_lu_obj_ops;
938 } else 939 } else {
939 obj = NULL; 940 obj = NULL;
941 }
940 return obj; 942 return obj;
941} 943}
942 944
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index cb7b51617498..9302f06c34ef 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -74,9 +74,8 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
74{ 74{
75 loff_t offset; 75 loff_t offset;
76 76
77 offset = lov_stripe_size(lsm, stripe_index << PAGE_CACHE_SHIFT, 77 offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
78 stripe); 78 return offset >> PAGE_SHIFT;
79 return offset >> PAGE_CACHE_SHIFT;
80} 79}
81 80
82/* we have an offset in file backed by an lov and want to find out where 81/* we have an offset in file backed by an lov and want to find out where
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 3925633a99ec..d983a30b1e3a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -444,8 +444,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
444 if (lum.lmm_magic == LOV_USER_MAGIC) { 444 if (lum.lmm_magic == LOV_USER_MAGIC) {
445 /* User request for v1, we need skip lmm_pool_name */ 445 /* User request for v1, we need skip lmm_pool_name */
446 if (lmmk->lmm_magic == LOV_MAGIC_V3) { 446 if (lmmk->lmm_magic == LOV_MAGIC_V3) {
447 memmove((char *)(&lmmk->lmm_stripe_count) + 447 memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
448 sizeof(lmmk->lmm_stripe_count),
449 ((struct lov_mds_md_v3 *)lmmk)->lmm_objects, 448 ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
450 lmmk->lmm_stripe_count * 449 lmmk->lmm_stripe_count *
451 sizeof(struct lov_ost_data_v1)); 450 sizeof(struct lov_ost_data_v1));
@@ -457,9 +456,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
457 } 456 }
458 457
459 /* User wasn't expecting this many OST entries */ 458 /* User wasn't expecting this many OST entries */
460 if (lum.lmm_stripe_count == 0) 459 if (lum.lmm_stripe_count == 0) {
461 lmm_size = lum_size; 460 lmm_size = lum_size;
462 else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) { 461 } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
463 rc = -EOVERFLOW; 462 rc = -EOVERFLOW;
464 goto out_set; 463 goto out_set;
465 } 464 }
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 9634c13a574d..0306f00c3f33 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -83,7 +83,7 @@ static int lov_raid0_page_is_under_lock(const struct lu_env *env,
83 } 83 }
84 84
85 /* calculate the end of current stripe */ 85 /* calculate the end of current stripe */
86 pps = loo->lo_lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT; 86 pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
87 index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1; 87 index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1;
88 88
89 /* never exceed the end of the stripe */ 89 /* never exceed the end of the stripe */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index c335c020f4f4..35f6b1d66ff4 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -151,8 +151,9 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
151 if (lsr) { 151 if (lsr) {
152 cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); 152 cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
153 result = 0; 153 result = 0;
154 } else 154 } else {
155 result = -ENOMEM; 155 result = -ENOMEM;
156 }
156 return result; 157 return result;
157} 158}
158 159
@@ -182,10 +183,12 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
182 d = lovsub2lu_dev(lsd); 183 d = lovsub2lu_dev(lsd);
183 d->ld_ops = &lovsub_lu_ops; 184 d->ld_ops = &lovsub_lu_ops;
184 lsd->acid_cl.cd_ops = &lovsub_cl_ops; 185 lsd->acid_cl.cd_ops = &lovsub_cl_ops;
185 } else 186 } else {
186 d = ERR_PTR(result); 187 d = ERR_PTR(result);
187 } else 188 }
189 } else {
188 d = ERR_PTR(-ENOMEM); 190 d = ERR_PTR(-ENOMEM);
191 }
189 return d; 192 return d;
190} 193}
191 194
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 670d203ab77e..e92edfb618b7 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -77,8 +77,9 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
77 INIT_LIST_HEAD(&lsk->lss_parents); 77 INIT_LIST_HEAD(&lsk->lss_parents);
78 cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); 78 cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
79 result = 0; 79 result = 0;
80 } else 80 } else {
81 result = -ENOMEM; 81 result = -ENOMEM;
82 }
82 return result; 83 return result;
83} 84}
84 85
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 6c5430d938d0..3f51f0db02ad 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -67,8 +67,9 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
67 lu_object_add(obj, below); 67 lu_object_add(obj, below);
68 cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page)); 68 cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
69 result = 0; 69 result = 0;
70 } else 70 } else {
71 result = -ENOMEM; 71 result = -ENOMEM;
72 }
72 return result; 73 return result;
73 74
74} 75}
@@ -154,8 +155,9 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
154 lu_object_add_top(&hdr->coh_lu, obj); 155 lu_object_add_top(&hdr->coh_lu, obj);
155 los->lso_cl.co_ops = &lovsub_ops; 156 los->lso_cl.co_ops = &lovsub_ops;
156 obj->lo_ops = &lovsub_lu_obj_ops; 157 obj->lo_ops = &lovsub_lu_obj_ops;
157 } else 158 } else {
158 obj = NULL; 159 obj = NULL;
160 }
159 return obj; 161 return obj;
160} 162}
161 163
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index be0acf7feee3..53cd56f286b7 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -454,7 +454,7 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req,
454 lock = ldlm_handle2lock(&op_data->op_lease_handle); 454 lock = ldlm_handle2lock(&op_data->op_lease_handle);
455 if (lock) { 455 if (lock) {
456 data->cd_handle = lock->l_remote_handle; 456 data->cd_handle = lock->l_remote_handle;
457 ldlm_lock_put(lock); 457 LDLM_LOCK_PUT(lock);
458 } 458 }
459 ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL); 459 ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
460 460
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 98b27f1f9915..6023c2c1386b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1002,10 +1002,10 @@ restart_bulk:
1002 1002
1003 /* NB req now owns desc and will free it when it gets freed */ 1003 /* NB req now owns desc and will free it when it gets freed */
1004 for (i = 0; i < op_data->op_npages; i++) 1004 for (i = 0; i < op_data->op_npages; i++)
1005 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); 1005 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
1006 1006
1007 mdc_readdir_pack(req, op_data->op_offset, 1007 mdc_readdir_pack(req, op_data->op_offset,
1008 PAGE_CACHE_SIZE * op_data->op_npages, 1008 PAGE_SIZE * op_data->op_npages,
1009 &op_data->op_fid1); 1009 &op_data->op_fid1);
1010 1010
1011 ptlrpc_request_set_replen(req); 1011 ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@ restart_bulk:
1037 if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { 1037 if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
1038 CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", 1038 CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
1039 req->rq_bulk->bd_nob_transferred, 1039 req->rq_bulk->bd_nob_transferred,
1040 PAGE_CACHE_SIZE * op_data->op_npages); 1040 PAGE_SIZE * op_data->op_npages);
1041 ptlrpc_req_finished(req); 1041 ptlrpc_req_finished(req);
1042 return -EPROTO; 1042 return -EPROTO;
1043 } 1043 }
@@ -1952,7 +1952,7 @@ static void lustre_swab_hal(struct hsm_action_list *h)
1952 __swab32s(&h->hal_count); 1952 __swab32s(&h->hal_count);
1953 __swab32s(&h->hal_archive_id); 1953 __swab32s(&h->hal_archive_id);
1954 __swab64s(&h->hal_flags); 1954 __swab64s(&h->hal_flags);
1955 hai = hai_zero(h); 1955 hai = hai_first(h);
1956 for (i = 0; i < h->hal_count; i++, hai = hai_next(hai)) 1956 for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
1957 lustre_swab_hai(hai); 1957 lustre_swab_hai(hai);
1958} 1958}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index b7dc87248032..3924b095bfb0 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
1113} 1113}
1114 1114
1115enum { 1115enum {
1116 CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), 1116 CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
1117 CONFIG_READ_NRPAGES = 4 1117 CONFIG_READ_NRPAGES = 4
1118}; 1118};
1119 1119
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
1137 LASSERT(cfg->cfg_instance); 1137 LASSERT(cfg->cfg_instance);
1138 LASSERT(cfg->cfg_sb == cfg->cfg_instance); 1138 LASSERT(cfg->cfg_sb == cfg->cfg_instance);
1139 1139
1140 inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 1140 inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
1141 if (!inst) 1141 if (!inst)
1142 return -ENOMEM; 1142 return -ENOMEM;
1143 1143
1144 pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); 1144 pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
1145 if (pos >= PAGE_CACHE_SIZE) { 1145 if (pos >= PAGE_SIZE) {
1146 kfree(inst); 1146 kfree(inst);
1147 return -E2BIG; 1147 return -E2BIG;
1148 } 1148 }
1149 1149
1150 ++pos; 1150 ++pos;
1151 buf = inst + pos; 1151 buf = inst + pos;
1152 bufsz = PAGE_CACHE_SIZE - pos; 1152 bufsz = PAGE_SIZE - pos;
1153 1153
1154 while (datalen > 0) { 1154 while (datalen > 0) {
1155 int entry_len = sizeof(*entry); 1155 int entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
1181 /* Keep this swab for normal mixed endian handling. LU-1644 */ 1181 /* Keep this swab for normal mixed endian handling. LU-1644 */
1182 if (mne_swab) 1182 if (mne_swab)
1183 lustre_swab_mgs_nidtbl_entry(entry); 1183 lustre_swab_mgs_nidtbl_entry(entry);
1184 if (entry->mne_length > PAGE_CACHE_SIZE) { 1184 if (entry->mne_length > PAGE_SIZE) {
1185 CERROR("MNE too large (%u)\n", entry->mne_length); 1185 CERROR("MNE too large (%u)\n", entry->mne_length);
1186 break; 1186 break;
1187 } 1187 }
@@ -1371,7 +1371,7 @@ again:
1371 } 1371 }
1372 body->mcb_offset = cfg->cfg_last_idx + 1; 1372 body->mcb_offset = cfg->cfg_last_idx + 1;
1373 body->mcb_type = cld->cld_type; 1373 body->mcb_type = cld->cld_type;
1374 body->mcb_bits = PAGE_CACHE_SHIFT; 1374 body->mcb_bits = PAGE_SHIFT;
1375 body->mcb_units = nrpages; 1375 body->mcb_units = nrpages;
1376 1376
1377 /* allocate bulk transfer descriptor */ 1377 /* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@ again:
1383 } 1383 }
1384 1384
1385 for (i = 0; i < nrpages; i++) 1385 for (i = 0; i < nrpages; i++)
1386 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); 1386 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
1387 1387
1388 ptlrpc_request_set_replen(req); 1388 ptlrpc_request_set_replen(req);
1389 rc = ptlrpc_queue_wait(req); 1389 rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@ again:
1411 goto out; 1411 goto out;
1412 } 1412 }
1413 1413
1414 if (ealen > nrpages << PAGE_CACHE_SHIFT) { 1414 if (ealen > nrpages << PAGE_SHIFT) {
1415 rc = -EINVAL; 1415 rc = -EINVAL;
1416 goto out; 1416 goto out;
1417 } 1417 }
@@ -1439,7 +1439,7 @@ again:
1439 1439
1440 ptr = kmap(pages[i]); 1440 ptr = kmap(pages[i]);
1441 rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, 1441 rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
1442 min_t(int, ealen, PAGE_CACHE_SIZE), 1442 min_t(int, ealen, PAGE_SIZE),
1443 mne_swab); 1443 mne_swab);
1444 kunmap(pages[i]); 1444 kunmap(pages[i]);
1445 if (rc2 < 0) { 1445 if (rc2 < 0) {
@@ -1448,7 +1448,7 @@ again:
1448 break; 1448 break;
1449 } 1449 }
1450 1450
1451 ealen -= PAGE_CACHE_SIZE; 1451 ealen -= PAGE_SIZE;
1452 } 1452 }
1453 1453
1454out: 1454out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f4b3178ec043..583fb5f33889 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -504,9 +504,9 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
504{ 504{
505 int result; 505 int result;
506 506
507 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) 507 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
508 result = 1; 508 result = 1;
509 else { 509 } else {
510 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo); 510 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
511 result = 0; 511 result = 0;
512 } 512 }
@@ -536,8 +536,9 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
536 result = cl_io_lock_add(env, io, link); 536 result = cl_io_lock_add(env, io, link);
537 if (result) /* lock match */ 537 if (result) /* lock match */
538 link->cill_fini(env, link); 538 link->cill_fini(env, link);
539 } else 539 } else {
540 result = -ENOMEM; 540 result = -ENOMEM;
541 }
541 542
542 return result; 543 return result;
543} 544}
@@ -1202,14 +1203,16 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1202 if (req->crq_o) { 1203 if (req->crq_o) {
1203 req->crq_nrobjs = nr_objects; 1204 req->crq_nrobjs = nr_objects;
1204 result = cl_req_init(env, req, page); 1205 result = cl_req_init(env, req, page);
1205 } else 1206 } else {
1206 result = -ENOMEM; 1207 result = -ENOMEM;
1208 }
1207 if (result != 0) { 1209 if (result != 0) {
1208 cl_req_completion(env, req, result); 1210 cl_req_completion(env, req, result);
1209 req = ERR_PTR(result); 1211 req = ERR_PTR(result);
1210 } 1212 }
1211 } else 1213 } else {
1212 req = ERR_PTR(-ENOMEM); 1214 req = ERR_PTR(-ENOMEM);
1215 }
1213 return req; 1216 return req;
1214} 1217}
1215EXPORT_SYMBOL(cl_req_alloc); 1218EXPORT_SYMBOL(cl_req_alloc);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 395b92c6480d..5940f30318ec 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -654,8 +654,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
654 lu_context_enter(&cle->ce_ses); 654 lu_context_enter(&cle->ce_ses);
655 env->le_ses = &cle->ce_ses; 655 env->le_ses = &cle->ce_ses;
656 cl_env_init0(cle, debug); 656 cl_env_init0(cle, debug);
657 } else 657 } else {
658 lu_env_fini(env); 658 lu_env_fini(env);
659 }
659 } 660 }
660 if (rc != 0) { 661 if (rc != 0) {
661 kmem_cache_free(cl_env_kmem, cle); 662 kmem_cache_free(cl_env_kmem, cle);
@@ -664,8 +665,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
664 CL_ENV_INC(create); 665 CL_ENV_INC(create);
665 CL_ENV_INC(total); 666 CL_ENV_INC(total);
666 } 667 }
667 } else 668 } else {
668 env = ERR_PTR(-ENOMEM); 669 env = ERR_PTR(-ENOMEM);
670 }
669 return env; 671 return env;
670} 672}
671 673
@@ -1053,7 +1055,7 @@ void cl_env_percpu_put(struct lu_env *env)
1053} 1055}
1054EXPORT_SYMBOL(cl_env_percpu_put); 1056EXPORT_SYMBOL(cl_env_percpu_put);
1055 1057
1056struct lu_env *cl_env_percpu_get() 1058struct lu_env *cl_env_percpu_get(void)
1057{ 1059{
1058 struct cl_env *cle; 1060 struct cl_env *cle;
1059 1061
@@ -1095,8 +1097,9 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
1095 CERROR("can't init device '%s', %d\n", typename, rc); 1097 CERROR("can't init device '%s', %d\n", typename, rc);
1096 d = ERR_PTR(rc); 1098 d = ERR_PTR(rc);
1097 } 1099 }
1098 } else 1100 } else {
1099 CERROR("Cannot allocate device: '%s'\n", typename); 1101 CERROR("Cannot allocate device: '%s'\n", typename);
1102 }
1100 return lu2cl_dev(d); 1103 return lu2cl_dev(d);
1101} 1104}
1102EXPORT_SYMBOL(cl_type_setup); 1105EXPORT_SYMBOL(cl_type_setup);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 8df39ced1725..39095e7a0a9d 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1033,7 +1033,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1033 /* 1033 /*
1034 * XXX for now. 1034 * XXX for now.
1035 */ 1035 */
1036 return (loff_t)idx << PAGE_CACHE_SHIFT; 1036 return (loff_t)idx << PAGE_SHIFT;
1037} 1037}
1038EXPORT_SYMBOL(cl_offset); 1038EXPORT_SYMBOL(cl_offset);
1039 1039
@@ -1045,13 +1045,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1045 /* 1045 /*
1046 * XXX for now. 1046 * XXX for now.
1047 */ 1047 */
1048 return offset >> PAGE_CACHE_SHIFT; 1048 return offset >> PAGE_SHIFT;
1049} 1049}
1050EXPORT_SYMBOL(cl_index); 1050EXPORT_SYMBOL(cl_index);
1051 1051
1052int cl_page_size(const struct cl_object *obj) 1052int cl_page_size(const struct cl_object *obj)
1053{ 1053{
1054 return 1 << PAGE_CACHE_SHIFT; 1054 return 1 << PAGE_SHIFT;
1055} 1055}
1056EXPORT_SYMBOL(cl_page_size); 1056EXPORT_SYMBOL(cl_page_size);
1057 1057
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index d9844ba8b9be..799e5585b64d 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len); 461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
462 ret = -EINVAL; 462 ret = -EINVAL;
463 } 463 }
464 if ((u64val & ~PAGE_MASK) >= PAGE_CACHE_SIZE) { 464 if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
465 CWARN("mask failed: u64val %llu >= %llu\n", u64val, 465 CWARN("mask failed: u64val %llu >= %llu\n", u64val,
466 (__u64)PAGE_CACHE_SIZE); 466 (__u64)PAGE_SIZE);
467 ret = -EINVAL; 467 ret = -EINVAL;
468 } 468 }
469 469
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
509 * For clients with less memory, a larger fraction is needed 509 * For clients with less memory, a larger fraction is needed
510 * for other purposes (mostly for BGL). 510 * for other purposes (mostly for BGL).
511 */ 511 */
512 if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) 512 if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
513 obd_max_dirty_pages = totalram_pages / 4; 513 obd_max_dirty_pages = totalram_pages / 4;
514 else 514 else
515 obd_max_dirty_pages = totalram_pages / 2; 515 obd_max_dirty_pages = totalram_pages / 2;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09b2b69..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
47#include "../../include/lustre/lustre_idl.h" 47#include "../../include/lustre/lustre_idl.h"
48 48
49#include <linux/fs.h> 49#include <linux/fs.h>
50#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
51 50
52void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) 51void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
53{ 52{
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
71 if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) 70 if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
72 dst->i_blkbits = ffs(src->o_blksize) - 1; 71 dst->i_blkbits = ffs(src->o_blksize) - 1;
73 72
74 if (dst->i_blkbits < PAGE_CACHE_SHIFT) 73 if (dst->i_blkbits < PAGE_SHIFT)
75 dst->i_blkbits = PAGE_CACHE_SHIFT; 74 dst->i_blkbits = PAGE_SHIFT;
76 75
77 /* allocation of space */ 76 /* allocation of space */
78 if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) 77 if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index fd333b9e968c..e6bf414a4444 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
100 char *buf) 100 char *buf)
101{ 101{
102 return sprintf(buf, "%ul\n", 102 return sprintf(buf, "%ul\n",
103 obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT))); 103 obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
104} 104}
105 105
106static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, 106static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
113 if (rc) 113 if (rc)
114 return rc; 114 return rc;
115 115
116 val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */ 116 val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
117 117
118 if (val > ((totalram_pages / 10) * 9)) { 118 if (val > ((totalram_pages / 10) * 9)) {
119 /* Somebody wants to assign too much memory to dirty pages */ 119 /* Somebody wants to assign too much memory to dirty pages */
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 if (val < 4 << (20 - PAGE_CACHE_SHIFT)) { 123 if (val < 4 << (20 - PAGE_SHIFT)) {
124 /* Less than 4 Mb for dirty cache is also bad */ 124 /* Less than 4 Mb for dirty cache is also bad */
125 return -EINVAL; 125 return -EINVAL;
126 } 126 }
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index d93f42fee420..9824c8868cdf 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -694,8 +694,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
694 694
695 do_div(sum, ret.lc_count); 695 do_div(sum, ret.lc_count);
696 ret.lc_sum = sum; 696 ret.lc_sum = sum;
697 } else 697 } else {
698 ret.lc_sum = 0; 698 ret.lc_sum = 0;
699 }
699 seq_printf(m, 700 seq_printf(m,
700 " rpcs:\n" 701 " rpcs:\n"
701 " inflight: %u\n" 702 " inflight: %u\n"
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 770d5bd742d8..990e9394c8f1 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -716,8 +716,9 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
716 obj = lu_object_locate(top->lo_header, dev->ld_type); 716 obj = lu_object_locate(top->lo_header, dev->ld_type);
717 if (!obj) 717 if (!obj)
718 lu_object_put(env, top); 718 lu_object_put(env, top);
719 } else 719 } else {
720 obj = top; 720 obj = top;
721 }
721 return obj; 722 return obj;
722} 723}
723EXPORT_SYMBOL(lu_object_find_slice); 724EXPORT_SYMBOL(lu_object_find_slice);
@@ -841,8 +842,8 @@ static int lu_htable_order(void)
841 842
842#if BITS_PER_LONG == 32 843#if BITS_PER_LONG == 32
843 /* limit hashtable size for lowmem systems to low RAM */ 844 /* limit hashtable size for lowmem systems to low RAM */
844 if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) 845 if (cache_size > 1 << (30 - PAGE_SHIFT))
845 cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; 846 cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
846#endif 847#endif
847 848
848 /* clear off unreasonable cache setting. */ 849 /* clear off unreasonable cache setting. */
@@ -854,7 +855,7 @@ static int lu_htable_order(void)
854 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; 855 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
855 } 856 }
856 cache_size = cache_size / 100 * lu_cache_percent * 857 cache_size = cache_size / 100 * lu_cache_percent *
857 (PAGE_CACHE_SIZE / 1024); 858 (PAGE_SIZE / 1024);
858 859
859 for (bits = 1; (1 << bits) < cache_size; ++bits) { 860 for (bits = 1; (1 << bits) < cache_size; ++bits) {
860 ; 861 ;
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5f812460b3ea..b1abe023bb35 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -163,8 +163,9 @@ int class_del_uuid(const char *uuid)
163 break; 163 break;
164 } 164 }
165 } 165 }
166 } else 166 } else {
167 list_splice_init(&g_uuid_list, &deathrow); 167 list_splice_init(&g_uuid_list, &deathrow);
168 }
168 spin_unlock(&g_uuid_lock); 169 spin_unlock(&g_uuid_lock);
169 170
170 if (uuid && list_empty(&deathrow)) { 171 if (uuid && list_empty(&deathrow)) {
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index a752bb4e946b..b271895d4395 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -269,7 +269,7 @@ static void echo_page_fini(const struct lu_env *env,
269 struct echo_object *eco = cl2echo_obj(slice->cpl_obj); 269 struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
270 270
271 atomic_dec(&eco->eo_npages); 271 atomic_dec(&eco->eo_npages);
272 page_cache_release(slice->cpl_page->cp_vmpage); 272 put_page(slice->cpl_page->cp_vmpage);
273} 273}
274 274
275static int echo_page_prep(const struct lu_env *env, 275static int echo_page_prep(const struct lu_env *env,
@@ -345,7 +345,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
345 struct echo_page *ep = cl_object_page_slice(obj, page); 345 struct echo_page *ep = cl_object_page_slice(obj, page);
346 struct echo_object *eco = cl2echo_obj(obj); 346 struct echo_object *eco = cl2echo_obj(obj);
347 347
348 page_cache_get(page->cp_vmpage); 348 get_page(page->cp_vmpage);
349 mutex_init(&ep->ep_lock); 349 mutex_init(&ep->ep_lock);
350 cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops); 350 cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
351 atomic_inc(&eco->eo_npages); 351 atomic_inc(&eco->eo_npages);
@@ -668,8 +668,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
668 struct obd_device *obd = NULL; /* to keep compiler happy */ 668 struct obd_device *obd = NULL; /* to keep compiler happy */
669 struct obd_device *tgt; 669 struct obd_device *tgt;
670 const char *tgt_type_name; 670 const char *tgt_type_name;
671 int rc; 671 int rc, err;
672 int cleanup = 0;
673 672
674 ed = kzalloc(sizeof(*ed), GFP_NOFS); 673 ed = kzalloc(sizeof(*ed), GFP_NOFS);
675 if (!ed) { 674 if (!ed) {
@@ -677,16 +676,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
677 goto out; 676 goto out;
678 } 677 }
679 678
680 cleanup = 1;
681 cd = &ed->ed_cl; 679 cd = &ed->ed_cl;
682 rc = cl_device_init(cd, t); 680 rc = cl_device_init(cd, t);
683 if (rc) 681 if (rc)
684 goto out; 682 goto out_free;
685 683
686 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops; 684 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
687 cd->cd_ops = &echo_device_cl_ops; 685 cd->cd_ops = &echo_device_cl_ops;
688 686
689 cleanup = 2;
690 obd = class_name2obd(lustre_cfg_string(cfg, 0)); 687 obd = class_name2obd(lustre_cfg_string(cfg, 0));
691 LASSERT(obd); 688 LASSERT(obd);
692 LASSERT(env); 689 LASSERT(env);
@@ -696,28 +693,25 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
696 CERROR("Can not find tgt device %s\n", 693 CERROR("Can not find tgt device %s\n",
697 lustre_cfg_string(cfg, 1)); 694 lustre_cfg_string(cfg, 1));
698 rc = -ENODEV; 695 rc = -ENODEV;
699 goto out; 696 goto out_device_fini;
700 } 697 }
701 698
702 next = tgt->obd_lu_dev; 699 next = tgt->obd_lu_dev;
703 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) { 700 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
704 CERROR("echo MDT client must be run on server\n"); 701 CERROR("echo MDT client must be run on server\n");
705 rc = -EOPNOTSUPP; 702 rc = -EOPNOTSUPP;
706 goto out; 703 goto out_device_fini;
707 } 704 }
708 705
709 rc = echo_site_init(env, ed); 706 rc = echo_site_init(env, ed);
710 if (rc) 707 if (rc)
711 goto out; 708 goto out_device_fini;
712
713 cleanup = 3;
714 709
715 rc = echo_client_setup(env, obd, cfg); 710 rc = echo_client_setup(env, obd, cfg);
716 if (rc) 711 if (rc)
717 goto out; 712 goto out_site_fini;
718 713
719 ed->ed_ec = &obd->u.echo_client; 714 ed->ed_ec = &obd->u.echo_client;
720 cleanup = 4;
721 715
722 /* if echo client is to be stacked upon ost device, the next is 716 /* if echo client is to be stacked upon ost device, the next is
723 * NULL since ost is not a clio device so far 717 * NULL since ost is not a clio device so far
@@ -729,7 +723,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
729 if (next) { 723 if (next) {
730 if (next->ld_site) { 724 if (next->ld_site) {
731 rc = -EBUSY; 725 rc = -EBUSY;
732 goto out; 726 goto out_cleanup;
733 } 727 }
734 728
735 next->ld_site = &ed->ed_site->cs_lu; 729 next->ld_site = &ed->ed_site->cs_lu;
@@ -737,7 +731,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
737 next->ld_type->ldt_name, 731 next->ld_type->ldt_name,
738 NULL); 732 NULL);
739 if (rc) 733 if (rc)
740 goto out; 734 goto out_cleanup;
741 735
742 } else { 736 } else {
743 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0); 737 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
@@ -745,27 +739,19 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
745 739
746 ed->ed_next = next; 740 ed->ed_next = next;
747 return &cd->cd_lu_dev; 741 return &cd->cd_lu_dev;
748out:
749 switch (cleanup) {
750 case 4: {
751 int rc2;
752
753 rc2 = echo_client_cleanup(obd);
754 if (rc2)
755 CERROR("Cleanup obd device %s error(%d)\n",
756 obd->obd_name, rc2);
757 }
758 742
759 case 3: 743out_cleanup:
760 echo_site_fini(env, ed); 744 err = echo_client_cleanup(obd);
761 case 2: 745 if (err)
762 cl_device_fini(&ed->ed_cl); 746 CERROR("Cleanup obd device %s error(%d)\n",
763 case 1: 747 obd->obd_name, err);
764 kfree(ed); 748out_site_fini:
765 case 0: 749 echo_site_fini(env, ed);
766 default: 750out_device_fini:
767 break; 751 cl_device_fini(&ed->ed_cl);
768 } 752out_free:
753 kfree(ed);
754out:
769 return ERR_PTR(rc); 755 return ERR_PTR(rc);
770} 756}
771 757
@@ -1095,7 +1081,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
1095 LASSERT(rc == 0); 1081 LASSERT(rc == 0);
1096 1082
1097 rc = cl_echo_enqueue0(env, eco, offset, 1083 rc = cl_echo_enqueue0(env, eco, offset,
1098 offset + npages * PAGE_CACHE_SIZE - 1, 1084 offset + npages * PAGE_SIZE - 1,
1099 rw == READ ? LCK_PR : LCK_PW, &lh.cookie, 1085 rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
1100 CEF_NEVER); 1086 CEF_NEVER);
1101 if (rc < 0) 1087 if (rc < 0)
@@ -1270,11 +1256,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
1270 int delta; 1256 int delta;
1271 1257
1272 /* no partial pages on the client */ 1258 /* no partial pages on the client */
1273 LASSERT(count == PAGE_CACHE_SIZE); 1259 LASSERT(count == PAGE_SIZE);
1274 1260
1275 addr = kmap(page); 1261 addr = kmap(page);
1276 1262
1277 for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { 1263 for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
1278 if (rw == OBD_BRW_WRITE) { 1264 if (rw == OBD_BRW_WRITE) {
1279 stripe_off = offset + delta; 1265 stripe_off = offset + delta;
1280 stripe_id = id; 1266 stripe_id = id;
@@ -1300,11 +1286,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
1300 int rc2; 1286 int rc2;
1301 1287
1302 /* no partial pages on the client */ 1288 /* no partial pages on the client */
1303 LASSERT(count == PAGE_CACHE_SIZE); 1289 LASSERT(count == PAGE_SIZE);
1304 1290
1305 addr = kmap(page); 1291 addr = kmap(page);
1306 1292
1307 for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { 1293 for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
1308 stripe_off = offset + delta; 1294 stripe_off = offset + delta;
1309 stripe_id = id; 1295 stripe_id = id;
1310 1296
@@ -1350,7 +1336,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1350 return -EINVAL; 1336 return -EINVAL;
1351 1337
1352 /* XXX think again with misaligned I/O */ 1338 /* XXX think again with misaligned I/O */
1353 npages = count >> PAGE_CACHE_SHIFT; 1339 npages = count >> PAGE_SHIFT;
1354 1340
1355 if (rw == OBD_BRW_WRITE) 1341 if (rw == OBD_BRW_WRITE)
1356 brw_flags = OBD_BRW_ASYNC; 1342 brw_flags = OBD_BRW_ASYNC;
@@ -1367,7 +1353,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1367 1353
1368 for (i = 0, pgp = pga, off = offset; 1354 for (i = 0, pgp = pga, off = offset;
1369 i < npages; 1355 i < npages;
1370 i++, pgp++, off += PAGE_CACHE_SIZE) { 1356 i++, pgp++, off += PAGE_SIZE) {
1371 1357
1372 LASSERT(!pgp->pg); /* for cleanup */ 1358 LASSERT(!pgp->pg); /* for cleanup */
1373 1359
@@ -1377,7 +1363,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1377 goto out; 1363 goto out;
1378 1364
1379 pages[i] = pgp->pg; 1365 pages[i] = pgp->pg;
1380 pgp->count = PAGE_CACHE_SIZE; 1366 pgp->count = PAGE_SIZE;
1381 pgp->off = off; 1367 pgp->off = off;
1382 pgp->flag = brw_flags; 1368 pgp->flag = brw_flags;
1383 1369
@@ -1432,8 +1418,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
1432 if (count <= 0 || (count & (~PAGE_MASK)) != 0) 1418 if (count <= 0 || (count & (~PAGE_MASK)) != 0)
1433 return -EINVAL; 1419 return -EINVAL;
1434 1420
1435 npages = batch >> PAGE_CACHE_SHIFT; 1421 npages = batch >> PAGE_SHIFT;
1436 tot_pages = count >> PAGE_CACHE_SHIFT; 1422 tot_pages = count >> PAGE_SHIFT;
1437 1423
1438 lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); 1424 lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
1439 rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); 1425 rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1456,9 +1442,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
1456 if (tot_pages < npages) 1442 if (tot_pages < npages)
1457 npages = tot_pages; 1443 npages = tot_pages;
1458 1444
1459 for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { 1445 for (i = 0; i < npages; i++, off += PAGE_SIZE) {
1460 rnb[i].offset = off; 1446 rnb[i].offset = off;
1461 rnb[i].len = PAGE_CACHE_SIZE; 1447 rnb[i].len = PAGE_SIZE;
1462 rnb[i].flags = brw_flags; 1448 rnb[i].flags = brw_flags;
1463 } 1449 }
1464 1450
@@ -1837,7 +1823,7 @@ static int __init obdecho_init(void)
1837{ 1823{
1838 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); 1824 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
1839 1825
1840 LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); 1826 LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
1841 1827
1842 return echo_client_init(); 1828 return echo_client_init();
1843} 1829}
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 911e5054a9c4..6e57f534117b 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
162 if (rc) 162 if (rc)
163 return rc; 163 return rc;
164 164
165 pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ 165 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
166 166
167 if (pages_number <= 0 || 167 if (pages_number <= 0 ||
168 pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || 168 pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
169 pages_number > totalram_pages / 4) /* 1/4 of RAM */ 169 pages_number > totalram_pages / 4) /* 1/4 of RAM */
170 return -ERANGE; 170 return -ERANGE;
171 171
172 spin_lock(&cli->cl_loi_list_lock); 172 spin_lock(&cli->cl_loi_list_lock);
173 cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT); 173 cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
174 osc_wake_cache_waiters(cli); 174 osc_wake_cache_waiters(cli);
175 spin_unlock(&cli->cl_loi_list_lock); 175 spin_unlock(&cli->cl_loi_list_lock);
176 176
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
182{ 182{
183 struct obd_device *dev = m->private; 183 struct obd_device *dev = m->private;
184 struct client_obd *cli = &dev->u.cli; 184 struct client_obd *cli = &dev->u.cli;
185 int shift = 20 - PAGE_CACHE_SHIFT; 185 int shift = 20 - PAGE_SHIFT;
186 186
187 seq_printf(m, 187 seq_printf(m,
188 "used_mb: %d\n" 188 "used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
211 return -EFAULT; 211 return -EFAULT;
212 kernbuf[count] = 0; 212 kernbuf[count] = 0;
213 213
214 mult = 1 << (20 - PAGE_CACHE_SHIFT); 214 mult = 1 << (20 - PAGE_SHIFT);
215 buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) - 215 buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
216 kernbuf; 216 kernbuf;
217 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); 217 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -577,12 +577,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
577 577
578 /* if the max_pages is specified in bytes, convert to pages */ 578 /* if the max_pages is specified in bytes, convert to pages */
579 if (val >= ONE_MB_BRW_SIZE) 579 if (val >= ONE_MB_BRW_SIZE)
580 val >>= PAGE_CACHE_SHIFT; 580 val >>= PAGE_SHIFT;
581 581
582 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); 582 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
583 /* max_pages_per_rpc must be chunk aligned */ 583 /* max_pages_per_rpc must be chunk aligned */
584 val = (val + ~chunk_mask) & chunk_mask; 584 val = (val + ~chunk_mask) & chunk_mask;
585 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) { 585 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
586 return -ERANGE; 586 return -ERANGE;
587 } 587 }
588 spin_lock(&cli->cl_loi_list_lock); 588 spin_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index d01f2a207a91..ef6882107e93 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -557,7 +557,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
557 return -ERANGE; 557 return -ERANGE;
558 558
559 LASSERT(cur->oe_dlmlock == victim->oe_dlmlock); 559 LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
560 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; 560 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
561 chunk_start = cur->oe_start >> ppc_bits; 561 chunk_start = cur->oe_start >> ppc_bits;
562 chunk_end = cur->oe_end >> ppc_bits; 562 chunk_end = cur->oe_end >> ppc_bits;
563 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && 563 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -664,8 +664,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
664 descr = &olck->ols_cl.cls_lock->cll_descr; 664 descr = &olck->ols_cl.cls_lock->cll_descr;
665 LASSERT(descr->cld_mode >= CLM_WRITE); 665 LASSERT(descr->cld_mode >= CLM_WRITE);
666 666
667 LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); 667 LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
668 ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 668 ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
669 chunk_mask = ~((1 << ppc_bits) - 1); 669 chunk_mask = ~((1 << ppc_bits) - 1);
670 chunksize = 1 << cli->cl_chunkbits; 670 chunksize = 1 << cli->cl_chunkbits;
671 chunk = index >> ppc_bits; 671 chunk = index >> ppc_bits;
@@ -894,8 +894,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
894 894
895 if (!sent) { 895 if (!sent) {
896 lost_grant = ext->oe_grants; 896 lost_grant = ext->oe_grants;
897 } else if (blocksize < PAGE_CACHE_SIZE && 897 } else if (blocksize < PAGE_SIZE &&
898 last_count != PAGE_CACHE_SIZE) { 898 last_count != PAGE_SIZE) {
899 /* For short writes we shouldn't count parts of pages that 899 /* For short writes we shouldn't count parts of pages that
900 * span a whole chunk on the OST side, or our accounting goes 900 * span a whole chunk on the OST side, or our accounting goes
901 * wrong. Should match the code in filter_grant_check. 901 * wrong. Should match the code in filter_grant_check.
@@ -906,7 +906,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
906 if (end) 906 if (end)
907 count += blocksize - end; 907 count += blocksize - end;
908 908
909 lost_grant = PAGE_CACHE_SIZE - count; 909 lost_grant = PAGE_SIZE - count;
910 } 910 }
911 if (ext->oe_grants > 0) 911 if (ext->oe_grants > 0)
912 osc_free_grant(cli, nr_pages, lost_grant); 912 osc_free_grant(cli, nr_pages, lost_grant);
@@ -989,7 +989,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
989 struct osc_async_page *oap; 989 struct osc_async_page *oap;
990 struct osc_async_page *tmp; 990 struct osc_async_page *tmp;
991 int pages_in_chunk = 0; 991 int pages_in_chunk = 0;
992 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 992 int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
993 __u64 trunc_chunk = trunc_index >> ppc_bits; 993 __u64 trunc_chunk = trunc_index >> ppc_bits;
994 int grants = 0; 994 int grants = 0;
995 int nr_pages = 0; 995 int nr_pages = 0;
@@ -1146,7 +1146,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
1146 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { 1146 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
1147 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); 1147 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
1148 LASSERT(last->oap_count > 0); 1148 LASSERT(last->oap_count > 0);
1149 LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); 1149 LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
1150 last->oap_async_flags |= ASYNC_COUNT_STABLE; 1150 last->oap_async_flags |= ASYNC_COUNT_STABLE;
1151 } 1151 }
1152 1152
@@ -1155,7 +1155,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
1155 */ 1155 */
1156 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { 1156 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1157 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { 1157 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
1158 oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; 1158 oap->oap_count = PAGE_SIZE - oap->oap_page_off;
1159 oap->oap_async_flags |= ASYNC_COUNT_STABLE; 1159 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1160 } 1160 }
1161 } 1161 }
@@ -1179,7 +1179,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
1179 struct osc_object *obj = ext->oe_obj; 1179 struct osc_object *obj = ext->oe_obj;
1180 struct client_obd *cli = osc_cli(obj); 1180 struct client_obd *cli = osc_cli(obj);
1181 struct osc_extent *next; 1181 struct osc_extent *next;
1182 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 1182 int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
1183 pgoff_t chunk = index >> ppc_bits; 1183 pgoff_t chunk = index >> ppc_bits;
1184 pgoff_t end_chunk; 1184 pgoff_t end_chunk;
1185 pgoff_t end_index; 1185 pgoff_t end_index;
@@ -1314,9 +1314,9 @@ static int osc_refresh_count(const struct lu_env *env,
1314 return 0; 1314 return 0;
1315 else if (cl_offset(obj, index + 1) > kms) 1315 else if (cl_offset(obj, index + 1) > kms)
1316 /* catch sub-page write at end of file */ 1316 /* catch sub-page write at end of file */
1317 return kms % PAGE_CACHE_SIZE; 1317 return kms % PAGE_SIZE;
1318 else 1318 else
1319 return PAGE_CACHE_SIZE; 1319 return PAGE_SIZE;
1320} 1320}
1321 1321
1322static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, 1322static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1397,10 +1397,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
1397 assert_spin_locked(&cli->cl_loi_list_lock); 1397 assert_spin_locked(&cli->cl_loi_list_lock);
1398 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); 1398 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
1399 atomic_inc(&obd_dirty_pages); 1399 atomic_inc(&obd_dirty_pages);
1400 cli->cl_dirty += PAGE_CACHE_SIZE; 1400 cli->cl_dirty += PAGE_SIZE;
1401 pga->flag |= OBD_BRW_FROM_GRANT; 1401 pga->flag |= OBD_BRW_FROM_GRANT;
1402 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", 1402 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
1403 PAGE_CACHE_SIZE, pga, pga->pg); 1403 PAGE_SIZE, pga, pga->pg);
1404 osc_update_next_shrink(cli); 1404 osc_update_next_shrink(cli);
1405} 1405}
1406 1406
@@ -1417,11 +1417,11 @@ static void osc_release_write_grant(struct client_obd *cli,
1417 1417
1418 pga->flag &= ~OBD_BRW_FROM_GRANT; 1418 pga->flag &= ~OBD_BRW_FROM_GRANT;
1419 atomic_dec(&obd_dirty_pages); 1419 atomic_dec(&obd_dirty_pages);
1420 cli->cl_dirty -= PAGE_CACHE_SIZE; 1420 cli->cl_dirty -= PAGE_SIZE;
1421 if (pga->flag & OBD_BRW_NOCACHE) { 1421 if (pga->flag & OBD_BRW_NOCACHE) {
1422 pga->flag &= ~OBD_BRW_NOCACHE; 1422 pga->flag &= ~OBD_BRW_NOCACHE;
1423 atomic_dec(&obd_dirty_transit_pages); 1423 atomic_dec(&obd_dirty_transit_pages);
1424 cli->cl_dirty_transit -= PAGE_CACHE_SIZE; 1424 cli->cl_dirty_transit -= PAGE_SIZE;
1425 } 1425 }
1426} 1426}
1427 1427
@@ -1477,7 +1477,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
1477 * used, we should return these grants to OST. There're two cases where grants 1477 * used, we should return these grants to OST. There're two cases where grants
1478 * can be lost: 1478 * can be lost:
1479 * 1. truncate; 1479 * 1. truncate;
1480 * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was 1480 * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
1481 * written. In this case OST may use less chunks to serve this partial 1481 * written. In this case OST may use less chunks to serve this partial
1482 * write. OSTs don't actually know the page size on the client side. so 1482 * write. OSTs don't actually know the page size on the client side. so
1483 * clients have to calculate lost grant by the blocksize on the OST. 1483 * clients have to calculate lost grant by the blocksize on the OST.
@@ -1490,7 +1490,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1490 1490
1491 spin_lock(&cli->cl_loi_list_lock); 1491 spin_lock(&cli->cl_loi_list_lock);
1492 atomic_sub(nr_pages, &obd_dirty_pages); 1492 atomic_sub(nr_pages, &obd_dirty_pages);
1493 cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; 1493 cli->cl_dirty -= nr_pages << PAGE_SHIFT;
1494 cli->cl_lost_grant += lost_grant; 1494 cli->cl_lost_grant += lost_grant;
1495 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { 1495 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
1496 /* borrow some grant from truncate to avoid the case that 1496 /* borrow some grant from truncate to avoid the case that
@@ -1533,11 +1533,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
1533 if (rc < 0) 1533 if (rc < 0)
1534 return 0; 1534 return 0;
1535 1535
1536 if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && 1536 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1537 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { 1537 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
1538 osc_consume_write_grant(cli, &oap->oap_brw_page); 1538 osc_consume_write_grant(cli, &oap->oap_brw_page);
1539 if (transient) { 1539 if (transient) {
1540 cli->cl_dirty_transit += PAGE_CACHE_SIZE; 1540 cli->cl_dirty_transit += PAGE_SIZE;
1541 atomic_inc(&obd_dirty_transit_pages); 1541 atomic_inc(&obd_dirty_transit_pages);
1542 oap->oap_brw_flags |= OBD_BRW_NOCACHE; 1542 oap->oap_brw_flags |= OBD_BRW_NOCACHE;
1543 } 1543 }
@@ -1583,7 +1583,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1583 * of queued writes and create a discontiguous rpc stream 1583 * of queued writes and create a discontiguous rpc stream
1584 */ 1584 */
1585 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || 1585 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
1586 cli->cl_dirty_max < PAGE_CACHE_SIZE || 1586 cli->cl_dirty_max < PAGE_SIZE ||
1587 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { 1587 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
1588 rc = -EDQUOT; 1588 rc = -EDQUOT;
1589 goto out; 1589 goto out;
@@ -1653,7 +1653,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
1653 1653
1654 ocw->ocw_rc = -EDQUOT; 1654 ocw->ocw_rc = -EDQUOT;
1655 /* we can't dirty more */ 1655 /* we can't dirty more */
1656 if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || 1656 if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
1657 (atomic_read(&obd_dirty_pages) + 1 > 1657 (atomic_read(&obd_dirty_pages) + 1 >
1658 obd_max_dirty_pages)) { 1658 obd_max_dirty_pages)) {
1659 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", 1659 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index cf7743d2f148..c1efcb306f24 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -321,7 +321,7 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
321 if (cl_io_is_append(io)) 321 if (cl_io_is_append(io))
322 return 0; 322 return 0;
323 323
324 npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT; 324 npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
325 if (io->u.ci_rw.crw_pos & ~PAGE_MASK) 325 if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
326 ++npages; 326 ++npages;
327 327
@@ -842,8 +842,9 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev,
842 if (or) { 842 if (or) {
843 cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); 843 cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
844 result = 0; 844 result = 0;
845 } else 845 } else {
846 result = -ENOMEM; 846 result = -ENOMEM;
847 }
847 return result; 848 return result;
848} 849}
849 850
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 49dfe9f8bc4b..7ea64896a4f6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -624,8 +624,9 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
624 result = -ELDLM_NO_LOCK_DATA; 624 result = -ELDLM_NO_LOCK_DATA;
625 } 625 }
626 cl_env_nested_put(&nest, env); 626 cl_env_nested_put(&nest, env);
627 } else 627 } else {
628 result = PTR_ERR(env); 628 result = PTR_ERR(env);
629 }
629 req->rq_status = result; 630 req->rq_status = result;
630 return result; 631 return result;
631} 632}
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index a06bdf10b6ff..738ab10ab274 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -292,8 +292,9 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
292 lu_object_init(obj, NULL, dev); 292 lu_object_init(obj, NULL, dev);
293 osc->oo_cl.co_ops = &osc_ops; 293 osc->oo_cl.co_ops = &osc_ops;
294 obj->lo_ops = &osc_lu_obj_ops; 294 obj->lo_ops = &osc_lu_obj_ops;
295 } else 295 } else {
296 obj = NULL; 296 obj = NULL;
297 }
297 return obj; 298 return obj;
298} 299}
299 300
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 82979f4039c1..b55f46739dfa 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -309,7 +309,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
309 int result; 309 int result;
310 310
311 opg->ops_from = 0; 311 opg->ops_from = 0;
312 opg->ops_to = PAGE_CACHE_SIZE; 312 opg->ops_to = PAGE_SIZE;
313 313
314 result = osc_prep_async_page(osc, opg, page->cp_vmpage, 314 result = osc_prep_async_page(osc, opg, page->cp_vmpage,
315 cl_offset(obj, index)); 315 cl_offset(obj, index));
@@ -395,9 +395,9 @@ static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
395/* LRU pages are freed in batch mode. OSC should at least free this 395/* LRU pages are freed in batch mode. OSC should at least free this
396 * number of pages to avoid running out of LRU budget, and.. 396 * number of pages to avoid running out of LRU budget, and..
397 */ 397 */
398static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ 398static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
399/* free this number at most otherwise it will take too long time to finish. */ 399/* free this number at most otherwise it will take too long time to finish. */
400static const int lru_shrink_max = 8 << (20 - PAGE_CACHE_SHIFT); /* 8M */ 400static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
401 401
402/* Check if we can free LRU slots from this OSC. If there exists LRU waiters, 402/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
403 * we should free slots aggressively. In this way, slots are freed in a steady 403 * we should free slots aggressively. In this way, slots are freed in a steady
@@ -421,8 +421,9 @@ static int osc_cache_too_much(struct client_obd *cli)
421 return lru_shrink_max; 421 return lru_shrink_max;
422 else if (pages >= budget / 2) 422 else if (pages >= budget / 2)
423 return lru_shrink_min; 423 return lru_shrink_min;
424 } else if (pages >= budget * 2) 424 } else if (pages >= budget * 2) {
425 return lru_shrink_min; 425 return lru_shrink_min;
426 }
426 return 0; 427 return 0;
427} 428}
428 429
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 547539c74a7b..4d0f831990f2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -827,7 +827,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
827 oa->o_undirty = 0; 827 oa->o_undirty = 0;
828 } else { 828 } else {
829 long max_in_flight = (cli->cl_max_pages_per_rpc << 829 long max_in_flight = (cli->cl_max_pages_per_rpc <<
830 PAGE_CACHE_SHIFT)* 830 PAGE_SHIFT)*
831 (cli->cl_max_rpcs_in_flight + 1); 831 (cli->cl_max_rpcs_in_flight + 1);
832 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); 832 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
833 } 833 }
@@ -910,11 +910,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
910static int osc_shrink_grant(struct client_obd *cli) 910static int osc_shrink_grant(struct client_obd *cli)
911{ 911{
912 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * 912 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
913 (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); 913 (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
914 914
915 spin_lock(&cli->cl_loi_list_lock); 915 spin_lock(&cli->cl_loi_list_lock);
916 if (cli->cl_avail_grant <= target_bytes) 916 if (cli->cl_avail_grant <= target_bytes)
917 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 917 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
918 spin_unlock(&cli->cl_loi_list_lock); 918 spin_unlock(&cli->cl_loi_list_lock);
919 919
920 return osc_shrink_grant_to_target(cli, target_bytes); 920 return osc_shrink_grant_to_target(cli, target_bytes);
@@ -930,8 +930,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
930 * We don't want to shrink below a single RPC, as that will negatively 930 * We don't want to shrink below a single RPC, as that will negatively
931 * impact block allocation and long-term performance. 931 * impact block allocation and long-term performance.
932 */ 932 */
933 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) 933 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
934 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 934 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
935 935
936 if (target_bytes >= cli->cl_avail_grant) { 936 if (target_bytes >= cli->cl_avail_grant) {
937 spin_unlock(&cli->cl_loi_list_lock); 937 spin_unlock(&cli->cl_loi_list_lock);
@@ -979,7 +979,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
979 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) 979 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
980 * Keep comment here so that it can be found by searching. 980 * Keep comment here so that it can be found by searching.
981 */ 981 */
982 int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 982 int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
983 983
984 if (client->cl_import->imp_state == LUSTRE_IMP_FULL && 984 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
985 client->cl_avail_grant > brw_size) 985 client->cl_avail_grant > brw_size)
@@ -1053,7 +1053,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1053 } 1053 }
1054 1054
1055 /* determine the appropriate chunk size used by osc_extent. */ 1055 /* determine the appropriate chunk size used by osc_extent. */
1056 cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); 1056 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
1057 spin_unlock(&cli->cl_loi_list_lock); 1057 spin_unlock(&cli->cl_loi_list_lock);
1058 1058
1059 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", 1059 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1315,9 +1315,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1315 LASSERT(pg->count > 0); 1315 LASSERT(pg->count > 0);
1316 /* make sure there is no gap in the middle of page array */ 1316 /* make sure there is no gap in the middle of page array */
1317 LASSERTF(page_count == 1 || 1317 LASSERTF(page_count == 1 ||
1318 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && 1318 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1319 ergo(i > 0 && i < page_count - 1, 1319 ergo(i > 0 && i < page_count - 1,
1320 poff == 0 && pg->count == PAGE_CACHE_SIZE) && 1320 poff == 0 && pg->count == PAGE_SIZE) &&
1321 ergo(i == page_count - 1, poff == 0)), 1321 ergo(i == page_count - 1, poff == 0)),
1322 "i: %d/%d pg: %p off: %llu, count: %u\n", 1322 "i: %d/%d pg: %p off: %llu, count: %u\n",
1323 i, page_count, pg, pg->off, pg->count); 1323 i, page_count, pg, pg->off, pg->count);
@@ -1891,7 +1891,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1891 oap->oap_count; 1891 oap->oap_count;
1892 else 1892 else
1893 LASSERT(oap->oap_page_off + oap->oap_count == 1893 LASSERT(oap->oap_page_off + oap->oap_count ==
1894 PAGE_CACHE_SIZE); 1894 PAGE_SIZE);
1895 } 1895 }
1896 } 1896 }
1897 1897
@@ -2007,7 +2007,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2007 tmp->oap_request = ptlrpc_request_addref(req); 2007 tmp->oap_request = ptlrpc_request_addref(req);
2008 2008
2009 spin_lock(&cli->cl_loi_list_lock); 2009 spin_lock(&cli->cl_loi_list_lock);
2010 starting_offset >>= PAGE_CACHE_SHIFT; 2010 starting_offset >>= PAGE_SHIFT;
2011 if (cmd == OBD_BRW_READ) { 2011 if (cmd == OBD_BRW_READ) {
2012 cli->cl_r_in_flight++; 2012 cli->cl_r_in_flight++;
2013 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); 2013 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2782,12 +2782,12 @@ out:
2782 PAGE_MASK; 2782 PAGE_MASK;
2783 2783
2784 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= 2784 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2785 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) 2785 fm_key->fiemap.fm_start + PAGE_SIZE - 1)
2786 policy.l_extent.end = OBD_OBJECT_EOF; 2786 policy.l_extent.end = OBD_OBJECT_EOF;
2787 else 2787 else
2788 policy.l_extent.end = (fm_key->fiemap.fm_start + 2788 policy.l_extent.end = (fm_key->fiemap.fm_start +
2789 fm_key->fiemap.fm_length + 2789 fm_key->fiemap.fm_length +
2790 PAGE_CACHE_SIZE - 1) & PAGE_MASK; 2790 PAGE_SIZE - 1) & PAGE_MASK;
2791 2791
2792 ostid_build_res_name(&fm_key->oa.o_oi, &res_id); 2792 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2793 mode = ldlm_lock_match(exp->exp_obd->obd_namespace, 2793 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -3065,8 +3065,9 @@ static int osc_import_event(struct obd_device *obd,
3065 3065
3066 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); 3066 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3067 cl_env_put(env, &refcheck); 3067 cl_env_put(env, &refcheck);
3068 } else 3068 } else {
3069 rc = PTR_ERR(env); 3069 rc = PTR_ERR(env);
3070 }
3070 break; 3071 break;
3071 } 3072 }
3072 case IMP_EVENT_ACTIVE: { 3073 case IMP_EVENT_ACTIVE: {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 32a7c8710119..e02d95d1d537 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
174 LASSERT(page); 174 LASSERT(page);
175 LASSERT(pageoffset >= 0); 175 LASSERT(pageoffset >= 0);
176 LASSERT(len > 0); 176 LASSERT(len > 0);
177 LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); 177 LASSERT(pageoffset + len <= PAGE_SIZE);
178 178
179 desc->bd_nob += len; 179 desc->bd_nob += len;
180 180
181 if (pin) 181 if (pin)
182 page_cache_get(page); 182 get_page(page);
183 183
184 ptlrpc_add_bulk_page(desc, page, pageoffset, len); 184 ptlrpc_add_bulk_page(desc, page, pageoffset, len);
185} 185}
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
206 206
207 if (unpin) { 207 if (unpin) {
208 for (i = 0; i < desc->bd_iov_count; i++) 208 for (i = 0; i < desc->bd_iov_count; i++)
209 page_cache_release(desc->bd_iov[i].kiov_page); 209 put_page(desc->bd_iov[i].kiov_page);
210 } 210 }
211 211
212 kfree(desc); 212 kfree(desc);
@@ -595,9 +595,9 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
595 struct obd_import *imp = request->rq_import; 595 struct obd_import *imp = request->rq_import;
596 int rc; 596 int rc;
597 597
598 if (unlikely(ctx)) 598 if (unlikely(ctx)) {
599 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx); 599 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
600 else { 600 } else {
601 rc = sptlrpc_req_get_ctx(request); 601 rc = sptlrpc_req_get_ctx(request);
602 if (rc) 602 if (rc)
603 goto out_free; 603 goto out_free;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index b4eddf291269..cd94fed0ffdf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1092,7 +1092,7 @@ finish:
1092 1092
1093 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) 1093 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1094 cli->cl_max_pages_per_rpc = 1094 cli->cl_max_pages_per_rpc =
1095 min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, 1095 min(ocd->ocd_brw_size >> PAGE_SHIFT,
1096 cli->cl_max_pages_per_rpc); 1096 cli->cl_max_pages_per_rpc);
1097 else if (imp->imp_connect_op == MDS_CONNECT || 1097 else if (imp->imp_connect_op == MDS_CONNECT ||
1098 imp->imp_connect_op == MGS_CONNECT) 1098 imp->imp_connect_op == MGS_CONNECT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cee04efb6fb5..a35b56ec687e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
308 * hose a kernel by allowing the request history to grow too 308 * hose a kernel by allowing the request history to grow too
309 * far. 309 * far.
310 */ 310 */
311 bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 311 bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
312 if (val > totalram_pages / (2 * bufpages)) 312 if (val > totalram_pages / (2 * bufpages))
313 return -ERANGE; 313 return -ERANGE;
314 314
@@ -679,11 +679,11 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
679 /** 679 /**
680 * The second token is either NULL, or an optional [reg|hp] string 680 * The second token is either NULL, or an optional [reg|hp] string
681 */ 681 */
682 if (strcmp(cmd, "reg") == 0) 682 if (strcmp(cmd, "reg") == 0) {
683 queue = PTLRPC_NRS_QUEUE_REG; 683 queue = PTLRPC_NRS_QUEUE_REG;
684 else if (strcmp(cmd, "hp") == 0) 684 } else if (strcmp(cmd, "hp") == 0) {
685 queue = PTLRPC_NRS_QUEUE_HP; 685 queue = PTLRPC_NRS_QUEUE_HP;
686 else { 686 } else {
687 rc = -EINVAL; 687 rc = -EINVAL;
688 goto out; 688 goto out;
689 } 689 }
@@ -693,8 +693,9 @@ default_queue:
693 if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) { 693 if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
694 rc = -ENODEV; 694 rc = -ENODEV;
695 goto out; 695 goto out;
696 } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) 696 } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
697 queue = PTLRPC_NRS_QUEUE_REG; 697 queue = PTLRPC_NRS_QUEUE_REG;
698 }
698 699
699 /** 700 /**
700 * Serialize NRS core lprocfs operations with policy registration/ 701 * Serialize NRS core lprocfs operations with policy registration/
@@ -1226,7 +1227,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
1226 const char prefix[] = "connection="; 1227 const char prefix[] = "connection=";
1227 const int prefix_len = sizeof(prefix) - 1; 1228 const int prefix_len = sizeof(prefix) - 1;
1228 1229
1229 if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) 1230 if (count > PAGE_SIZE - 1 || count <= prefix_len)
1230 return -EINVAL; 1231 return -EINVAL;
1231 1232
1232 kbuf = kzalloc(count + 1, GFP_NOFS); 1233 kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 5f27d9c2e4ef..30d9a164e52d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
195 } 195 }
196 196
197 list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { 197 list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
198 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, 198 LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
199 "req %p bad\n", req); 199 "req %p bad\n", req);
200 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); 200 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
201 if (!ptlrpc_no_resend(req)) 201 if (!ptlrpc_no_resend(req))
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 33d141149afd..02e6cda4c995 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -57,7 +57,7 @@
57 * bulk encryption page pools * 57 * bulk encryption page pools *
58 ****************************************/ 58 ****************************************/
59 59
60#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 60#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
61#define PAGES_PER_POOL (POINTERS_PER_PAGE) 61#define PAGES_PER_POOL (POINTERS_PER_PAGE)
62 62
63#define IDLE_IDX_MAX (100) 63#define IDLE_IDX_MAX (100)
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
new file mode 100644
index 000000000000..d277f048789e
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -0,0 +1,35 @@
1config FB_OLPC_DCON
2 tristate "One Laptop Per Child Display CONtroller support"
3 depends on OLPC && FB
4 depends on I2C
5 depends on (GPIO_CS5535 || GPIO_CS5535=n)
6 select BACKLIGHT_CLASS_DEVICE
7 ---help---
8 In order to support very low power operation, the XO laptop uses a
9 secondary Display CONtroller, or DCON. This secondary controller
10 is present in the video pipeline between the primary display
11 controller (integrate into the processor or chipset) and the LCD
12 panel. It allows the main processor/display controller to be
13 completely powered off while still retaining an image on the display.
14 This controller is only available on OLPC platforms. Unless you have
15 one of these platforms, you will want to say 'N'.
16
17config FB_OLPC_DCON_1
18 bool "OLPC XO-1 DCON support"
19 depends on FB_OLPC_DCON && GPIO_CS5535
20 default y
21 ---help---
22 Enable support for the DCON in XO-1 model laptops. The kernel
23 communicates with the DCON using model-specific code. If you
24 have an XO-1 (or if you're unsure what model you have), you should
25 say 'Y'.
26
27config FB_OLPC_DCON_1_5
28 bool "OLPC XO-1.5 DCON support"
29 depends on FB_OLPC_DCON && ACPI
30 default y
31 ---help---
32 Enable support for the DCON in XO-1.5 model laptops. The kernel
33 communicates with the DCON using model-specific code. If you
34 have an XO-1.5 (or if you're unsure what model you have), you
35 should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
new file mode 100644
index 000000000000..36c7e67fec20
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -0,0 +1,6 @@
1olpc-dcon-objs += olpc_dcon.o
2olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
3olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
4obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
5
6
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
new file mode 100644
index 000000000000..61c2e65ac354
--- /dev/null
+++ b/drivers/staging/olpc_dcon/TODO
@@ -0,0 +1,9 @@
1TODO:
2 - see if vx855 gpio API can be made similar enough to cs5535 so we can
3 share more code
4 - allow simultaneous XO-1 and XO-1.5 support
5
6Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
7copy:
8 Daniel Drake <dsd@laptop.org>
9 Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
new file mode 100644
index 000000000000..f45b2ef05f48
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -0,0 +1,813 @@
1/*
2 * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
3 *
4 * Copyright © 2006-2007 Red Hat, Inc.
5 * Copyright © 2006-2007 Advanced Micro Devices, Inc.
6 * Copyright © 2009 VIA Technology, Inc.
7 * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net>
8 *
9 * This program is free software. You can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/kernel.h>
17#include <linux/fb.h>
18#include <linux/console.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/module.h>
24#include <linux/backlight.h>
25#include <linux/device.h>
26#include <linux/uaccess.h>
27#include <linux/ctype.h>
28#include <linux/reboot.h>
29#include <linux/olpc-ec.h>
30#include <asm/tsc.h>
31#include <asm/olpc.h>
32
33#include "olpc_dcon.h"
34
/* Module definitions */

/* Scanline on which the DCON raises its resume interrupt; tunable at
 * module load (read-only via sysfs thereafter through module_param).
 */
static ushort resumeline = 898;
module_param(resumeline, ushort, 0444);

/* Model-specific (XO-1 vs XO-1.5) hardware hooks, set at module init. */
static struct dcon_platform_data *pdata;

/* I2C structures */

/* Platform devices */
static struct platform_device *dcon_device;

/* The DCON always answers at i2c address 0x0d. */
static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
48
/* Write a 16-bit value to a DCON register over SMBus.
 * Returns 0 on success or a negative errno from the i2c layer.
 */
static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
{
	return i2c_smbus_write_word_data(dcon->client, reg, val);
}
53
/* Read a 16-bit DCON register over SMBus.
 * Returns the register value (0..0xffff) or a negative errno — callers
 * must check for < 0 before truncating to u16.
 */
static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
{
	return i2c_smbus_read_word_data(dcon->client, reg);
}
58
59/* ===== API functions - these are called by a variety of users ==== */
60
61static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
62{
63 u16 ver;
64 int rc = 0;
65
66 ver = dcon_read(dcon, DCON_REG_ID);
67 if ((ver >> 8) != 0xDC) {
68 pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
69 rc = -ENXIO;
70 goto err;
71 }
72
73 if (is_init) {
74 pr_info("Discovered DCON version %x\n", ver & 0xFF);
75 rc = pdata->init(dcon);
76 if (rc != 0) {
77 pr_err("Unable to init.\n");
78 goto err;
79 }
80 }
81
82 if (ver < 0xdc02) {
83 dev_err(&dcon->client->dev,
84 "DCON v1 is unsupported, giving up..\n");
85 rc = -ENODEV;
86 goto err;
87 }
88
89 /* SDRAM setup/hold time */
90 dcon_write(dcon, 0x3a, 0xc040);
91 dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
92 dcon_write(dcon, DCON_REG_MEM_OPT_A,
93 MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
94 dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
95
96 /* Colour swizzle, AA, no passthrough, backlight */
97 if (is_init) {
98 dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
99 MODE_CSWIZZLE | MODE_COL_AA;
100 }
101 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
102
103 /* Set the scanline to interrupt on during resume */
104 dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
105
106err:
107 return rc;
108}
109
/*
 * The smbus doesn't always come back due to what is believed to be
 * hardware (power rail) bugs.  For older models where this is known to
 * occur, our solution is to attempt to wait for the bus to stabilize;
 * if it doesn't happen, cut power to the dcon, repower it, and wait
 * for the bus to stabilize.  Rinse, repeat until we have a working
 * smbus.  For newer models, we simply BUG(); we want to know if this
 * still happens despite the power fixes that have been made!
 *
 * Returns 0 once the bus answers (after re-running dcon_hw_init() if we
 * had to power-cycle), or a negative errno from the EC command.
 */
static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
{
	unsigned long timeout;
	u8 pm;
	int x;

power_up:
	if (is_powered_down) {
		/* Ask the EC to power the DCON rail back up. */
		pm = 1;
		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
		if (x) {
			pr_warn("unable to force dcon to power up: %d!\n", x);
			return x;
		}
		usleep_range(10000, 11000); /* we'll be conservative */
	}

	pdata->bus_stabilize_wiggle();

	/* Poll the ID register for up to ~50ms until a read succeeds. */
	for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
		usleep_range(1000, 1100);
		x = dcon_read(dcon, DCON_REG_ID);
	}
	if (x < 0) {
		pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
		/* Boards >= 0xc2 have the power fix; this must not happen. */
		BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
		pm = 0;
		olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
		msleep(100);
		is_powered_down = 1;
		goto power_up;	/* argh, stupid hardware.. */
	}

	/* After a power cycle the chip lost its state: re-init (not first-time). */
	if (is_powered_down)
		return dcon_hw_init(dcon, 0);
	return 0;
}
156
157static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
158{
159 dcon->bl_val = level;
160 dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
161
162 /* Purposely turn off the backlight when we go to level 0 */
163 if (dcon->bl_val == 0) {
164 dcon->disp_mode &= ~MODE_BL_ENABLE;
165 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
166 } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
167 dcon->disp_mode |= MODE_BL_ENABLE;
168 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
169 }
170}
171
172/* Set the output type to either color or mono */
173static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
174{
175 if (dcon->mono == enable_mono)
176 return 0;
177
178 dcon->mono = enable_mono;
179
180 if (enable_mono) {
181 dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
182 dcon->disp_mode |= MODE_MONO_LUMA;
183 } else {
184 dcon->disp_mode &= ~(MODE_MONO_LUMA);
185 dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
186 }
187
188 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
189 return 0;
190}
191
/* For now, this will be really stupid - we need to address how
 * DCONLOAD works in a sleep and account for it accordingly
 *
 * Put the DCON to sleep (cut its power via the EC) or wake it back up.
 * dcon->asleep is only updated when the EC command / re-init succeeds,
 * so a failed transition can be retried.  No-op on boards older than
 * 0xc2 (power-cycling is unsafe there; see dcon_bus_stabilize()).
 */

static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
{
	int x;

	/* Turn off the backlight and put the DCON to sleep */

	if (dcon->asleep == sleep)
		return;

	if (!olpc_board_at_least(olpc_board(0xc2)))
		return;

	if (sleep) {
		u8 pm = 0;

		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
		if (x)
			pr_warn("unable to force dcon to power down: %d!\n", x);
		else
			dcon->asleep = sleep;
	} else {
		/* Only re-enable the backlight if the backlight value is set */
		if (dcon->bl_val != 0)
			dcon->disp_mode |= MODE_BL_ENABLE;
		/* Waking means the chip was powered down: stabilize + re-init. */
		x = dcon_bus_stabilize(dcon, 1);
		if (x)
			pr_warn("unable to reinit dcon hardware: %d!\n", x);
		else
			dcon->asleep = sleep;

		/* Restore backlight */
		dcon_set_backlight(dcon, dcon->bl_val);
	}

	/* We should turn off some stuff in the framebuffer - but what? */
}
232
233/* the DCON seems to get confused if we change DCONLOAD too
234 * frequently -- i.e., approximately faster than frame time.
235 * normally we don't change it this fast, so in general we won't
236 * delay here.
237 */
238static void dcon_load_holdoff(struct dcon_priv *dcon)
239{
240 ktime_t delta_t, now;
241
242 while (1) {
243 now = ktime_get();
244 delta_t = ktime_sub(now, dcon->load_time);
245 if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
246 break;
247 mdelay(4);
248 }
249}
250
/* Blank or unblank the primary framebuffer on behalf of a source switch.
 * Takes console_lock before the fb_info lock (required ordering for
 * fb_blank), and sets ignore_fb_events so our own backlight callback
 * doesn't react to the blank we trigger here.
 * Returns true on success, false if locking or fb_blank failed.
 */
static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
{
	int err;

	console_lock();
	if (!lock_fb_info(dcon->fbinfo)) {
		console_unlock();
		dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
		return false;
	}

	dcon->ignore_fb_events = true;
	err = fb_blank(dcon->fbinfo,
		       blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
	dcon->ignore_fb_events = false;
	unlock_fb_info(dcon->fbinfo);
	console_unlock();

	if (err) {
		dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
			blank ? "" : "un");
		return false;
	}
	return true;
}
276
/* Set the source of the display (CPU or DCON).
 *
 * Workqueue handler behind dcon_set_source(): performs the actual switch
 * to dcon->pending_src.  The ordering of register writes, scanline-IRQ
 * waits and framebuffer (un)blanking below is hardware-mandated; see the
 * inline comments.  dcon->curr_src is only updated on success.
 */
static void dcon_source_switch(struct work_struct *work)
{
	struct dcon_priv *dcon = container_of(work, struct dcon_priv,
			switch_source);
	int source = dcon->pending_src;

	if (dcon->curr_src == source)
		return;

	/* Respect the minimum spacing between DCONLOAD changes. */
	dcon_load_holdoff(dcon);

	/* Cleared here, set by the scanline/load IRQ handler. */
	dcon->switched = false;

	switch (source) {
	case DCON_SOURCE_CPU:
		pr_info("dcon_source_switch to CPU\n");
		/* Enable the scanline interrupt bit */
		if (dcon_write(dcon, DCON_REG_MODE,
			       dcon->disp_mode | MODE_SCAN_INT))
			pr_err("couldn't enable scanline interrupt!\n");
		else
			/* Wait up to one second for the scanline interrupt */
			wait_event_timeout(dcon->waitq, dcon->switched, HZ);

		if (!dcon->switched)
			pr_err("Timeout entering CPU mode; expect a screen glitch.\n");

		/* Turn off the scanline interrupt */
		if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
			pr_err("couldn't disable scanline interrupt!\n");

		/*
		 * Ideally we'd like to disable interrupts here so that the
		 * fb unblanking and DCON turn on happen at a known time value;
		 * however, we can't do that right now with fb_blank
		 * messing with semaphores.
		 *
		 * For now, we just hope..
		 */
		if (!dcon_blank_fb(dcon, false)) {
			/* Couldn't unblank: leave curr_src untouched and fall
			 * back to DCON as the pending source. */
			pr_err("Failed to enter CPU mode\n");
			dcon->pending_src = DCON_SOURCE_DCON;
			return;
		}

		/* And turn off the DCON */
		pdata->set_dconload(1);
		dcon->load_time = ktime_get();

		pr_info("The CPU has control\n");
		break;
	case DCON_SOURCE_DCON:
	{
		ktime_t delta_t;

		pr_info("dcon_source_switch to DCON\n");

		/* Clear DCONLOAD - this implies that the DCON is in control */
		pdata->set_dconload(0);
		dcon->load_time = ktime_get();

		wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);

		if (!dcon->switched) {
			pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
		} else {
			/* sometimes the DCON doesn't follow its own rules,
			 * and doesn't wait for two vsync pulses before
			 * ack'ing the frame load with an IRQ.  the result
			 * is that the display shows the *previously*
			 * loaded frame.  we can detect this by looking at
			 * the time between asserting DCONLOAD and the IRQ --
			 * if it's less than 20msec, then the DCON couldn't
			 * have seen two VSYNC pulses.  in that case we
			 * deassert and reassert, and hope for the best.
			 * see http://dev.laptop.org/ticket/9664
			 */
			delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
			if (dcon->switched && ktime_to_ns(delta_t)
			    < NSEC_PER_MSEC * 20) {
				pr_err("missed loading, retrying\n");
				pdata->set_dconload(1);
				mdelay(41);
				pdata->set_dconload(0);
				dcon->load_time = ktime_get();
				mdelay(41);
			}
		}

		dcon_blank_fb(dcon, true);
		pr_info("The DCON has control\n");
		break;
	}
	default:
		BUG();
	}

	dcon->curr_src = source;
}
377
378static void dcon_set_source(struct dcon_priv *dcon, int arg)
379{
380 if (dcon->pending_src == arg)
381 return;
382
383 dcon->pending_src = arg;
384
385 if (dcon->curr_src != arg)
386 schedule_work(&dcon->switch_source);
387}
388
/* Synchronous variant of dcon_set_source(): waits for the queued switch
 * work to finish before returning.
 */
static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
{
	dcon_set_source(dcon, arg);
	flush_scheduled_work();
}
394
395static ssize_t dcon_mode_show(struct device *dev,
396 struct device_attribute *attr, char *buf)
397{
398 struct dcon_priv *dcon = dev_get_drvdata(dev);
399
400 return sprintf(buf, "%4.4X\n", dcon->disp_mode);
401}
402
403static ssize_t dcon_sleep_show(struct device *dev,
404 struct device_attribute *attr, char *buf)
405{
406 struct dcon_priv *dcon = dev_get_drvdata(dev);
407
408 return sprintf(buf, "%d\n", dcon->asleep);
409}
410
411static ssize_t dcon_freeze_show(struct device *dev,
412 struct device_attribute *attr, char *buf)
413{
414 struct dcon_priv *dcon = dev_get_drvdata(dev);
415
416 return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
417}
418
419static ssize_t dcon_mono_show(struct device *dev,
420 struct device_attribute *attr, char *buf)
421{
422 struct dcon_priv *dcon = dev_get_drvdata(dev);
423
424 return sprintf(buf, "%d\n", dcon->mono);
425}
426
427static ssize_t dcon_resumeline_show(struct device *dev,
428 struct device_attribute *attr, char *buf)
429{
430 return sprintf(buf, "%d\n", resumeline);
431}
432
433static ssize_t dcon_mono_store(struct device *dev,
434 struct device_attribute *attr, const char *buf, size_t count)
435{
436 unsigned long enable_mono;
437 int rc;
438
439 rc = kstrtoul(buf, 10, &enable_mono);
440 if (rc)
441 return rc;
442
443 dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
444
445 return count;
446}
447
448static ssize_t dcon_freeze_store(struct device *dev,
449 struct device_attribute *attr, const char *buf, size_t count)
450{
451 struct dcon_priv *dcon = dev_get_drvdata(dev);
452 unsigned long output;
453 int ret;
454
455 ret = kstrtoul(buf, 10, &output);
456 if (ret)
457 return ret;
458
459 pr_info("dcon_freeze_store: %lu\n", output);
460
461 switch (output) {
462 case 0:
463 dcon_set_source(dcon, DCON_SOURCE_CPU);
464 break;
465 case 1:
466 dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
467 break;
468 case 2: /* normally unused */
469 dcon_set_source(dcon, DCON_SOURCE_DCON);
470 break;
471 default:
472 return -EINVAL;
473 }
474
475 return count;
476}
477
478static ssize_t dcon_resumeline_store(struct device *dev,
479 struct device_attribute *attr, const char *buf, size_t count)
480{
481 unsigned short rl;
482 int rc;
483
484 rc = kstrtou16(buf, 10, &rl);
485 if (rc)
486 return rc;
487
488 resumeline = rl;
489 dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
490
491 return count;
492}
493
494static ssize_t dcon_sleep_store(struct device *dev,
495 struct device_attribute *attr, const char *buf, size_t count)
496{
497 unsigned long output;
498 int ret;
499
500 ret = kstrtoul(buf, 10, &output);
501 if (ret)
502 return ret;
503
504 dcon_sleep(dev_get_drvdata(dev), output ? true : false);
505 return count;
506}
507
/* sysfs attributes created on the "dcon" platform device by dcon_probe(). */
static struct device_attribute dcon_device_files[] = {
	__ATTR(mode, 0444, dcon_mode_show, NULL),
	__ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
	__ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
	__ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
};
515
516static int dcon_bl_update(struct backlight_device *dev)
517{
518 struct dcon_priv *dcon = bl_get_data(dev);
519 u8 level = dev->props.brightness & 0x0F;
520
521 if (dev->props.power != FB_BLANK_UNBLANK)
522 level = 0;
523
524 if (level != dcon->bl_val)
525 dcon_set_backlight(dcon, level);
526
527 /* power down the DCON when the screen is blanked */
528 if (!dcon->ignore_fb_events)
529 dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
530
531 return 0;
532}
533
/* Backlight-core get_brightness callback: report the cached level. */
static int dcon_bl_get(struct backlight_device *dev)
{
	struct dcon_priv *dcon = bl_get_data(dev);

	return dcon->bl_val;
}
540
/* Operations handed to backlight_device_register() in dcon_probe(). */
static const struct backlight_ops dcon_bl_ops = {
	.update_status = dcon_bl_update,
	.get_brightness = dcon_bl_get,
};

/* Static properties for the "dcon-bl" backlight device; .brightness is
 * filled in from the hardware at probe time (4-bit range, max 15). */
static struct backlight_properties dcon_bl_props = {
	.max_brightness = 15,
	.type = BACKLIGHT_RAW,
	.power = FB_BLANK_UNBLANK,
};
551
552static int dcon_reboot_notify(struct notifier_block *nb,
553 unsigned long foo, void *bar)
554{
555 struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
556
557 if (!dcon || !dcon->client)
558 return NOTIFY_DONE;
559
560 /* Turn off the DCON. Entirely. */
561 dcon_write(dcon, DCON_REG_MODE, 0x39);
562 dcon_write(dcon, DCON_REG_MODE, 0x32);
563 return NOTIFY_DONE;
564}
565
/* Panic notifier: assert DCONLOAD so the CPU regains the display and the
 * panic output is visible even if the screen was frozen on the DCON. */
static int unfreeze_on_panic(struct notifier_block *nb,
			     unsigned long e, void *p)
{
	pdata->set_dconload(1);
	return NOTIFY_DONE;
}

static struct notifier_block dcon_panic_nb = {
	.notifier_call = unfreeze_on_panic,
};
576
/* i2c-core detect callback: we only get here for address 0x0d (normal_i2c),
 * so just report the device type. */
static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);

	return 0;
}
583
584static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
585{
586 struct dcon_priv *dcon;
587 int rc, i, j;
588
589 if (!pdata)
590 return -ENXIO;
591
592 dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
593 if (!dcon)
594 return -ENOMEM;
595
596 dcon->client = client;
597 init_waitqueue_head(&dcon->waitq);
598 INIT_WORK(&dcon->switch_source, dcon_source_switch);
599 dcon->reboot_nb.notifier_call = dcon_reboot_notify;
600 dcon->reboot_nb.priority = -1;
601
602 i2c_set_clientdata(client, dcon);
603
604 if (num_registered_fb < 1) {
605 dev_err(&client->dev, "DCON driver requires a registered fb\n");
606 rc = -EIO;
607 goto einit;
608 }
609 dcon->fbinfo = registered_fb[0];
610
611 rc = dcon_hw_init(dcon, 1);
612 if (rc)
613 goto einit;
614
615 /* Add the DCON device */
616
617 dcon_device = platform_device_alloc("dcon", -1);
618
619 if (!dcon_device) {
620 pr_err("Unable to create the DCON device\n");
621 rc = -ENOMEM;
622 goto eirq;
623 }
624 rc = platform_device_add(dcon_device);
625 platform_set_drvdata(dcon_device, dcon);
626
627 if (rc) {
628 pr_err("Unable to add the DCON device\n");
629 goto edev;
630 }
631
632 for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
633 rc = device_create_file(&dcon_device->dev,
634 &dcon_device_files[i]);
635 if (rc) {
636 dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
637 goto ecreate;
638 }
639 }
640
641 dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
642
643 /* Add the backlight device for the DCON */
644 dcon_bl_props.brightness = dcon->bl_val;
645 dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
646 dcon, &dcon_bl_ops, &dcon_bl_props);
647 if (IS_ERR(dcon->bl_dev)) {
648 dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
649 PTR_ERR(dcon->bl_dev));
650 dcon->bl_dev = NULL;
651 }
652
653 register_reboot_notifier(&dcon->reboot_nb);
654 atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
655
656 return 0;
657
658 ecreate:
659 for (j = 0; j < i; j++)
660 device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
661 edev:
662 platform_device_unregister(dcon_device);
663 dcon_device = NULL;
664 eirq:
665 free_irq(DCON_IRQ, dcon);
666 einit:
667 kfree(dcon);
668 return rc;
669}
670
/*
 * Unbind: tear down in roughly the reverse order of probe - notifiers
 * first (no more callbacks), then the IRQ, backlight, platform device,
 * and finally flush the source-switch work before freeing state.
 */
static int dcon_remove(struct i2c_client *client)
{
	struct dcon_priv *dcon = i2c_get_clientdata(client);

	unregister_reboot_notifier(&dcon->reboot_nb);
	atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);

	free_irq(DCON_IRQ, dcon);

	/* safe even if registration failed: bl_dev is NULL in that case */
	backlight_device_unregister(dcon->bl_dev);

	if (dcon_device)
		platform_device_unregister(dcon_device);
	/* make sure no queued source switch touches dcon after kfree */
	cancel_work_sync(&dcon->switch_source);

	kfree(dcon);

	return 0;
}
690
#ifdef CONFIG_PM
/*
 * Suspend: unless the DCON is already asleep, synchronously hand it the
 * display source before S3 - presumably so the panel keeps showing the
 * frozen image while the CPU is down (TODO confirm against DCON docs).
 */
static int dcon_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct dcon_priv *dcon = i2c_get_clientdata(client);

	if (!dcon->asleep) {
		/* Set up the DCON to have the source */
		dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
	}

	return 0;
}

/*
 * Resume: re-stabilize the bus and hand the display back to the CPU.
 * Skipped when the DCON was asleep at suspend time.
 */
static int dcon_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct dcon_priv *dcon = i2c_get_clientdata(client);

	if (!dcon->asleep) {
		dcon_bus_stabilize(dcon, 0);
		dcon_set_source(dcon, DCON_SOURCE_CPU);
	}

	return 0;
}

#else

/* no PM support: leave the dev_pm_ops callbacks empty */
#define dcon_suspend NULL
#define dcon_resume NULL

#endif /* CONFIG_PM */
724
/*
 * DCON interrupt handler.  Decodes the two status lines (values match
 * the DCONSTAT_* constants) and wakes waiters blocked on a source
 * switch.  Non-static: the XO-1.5 platform code registers it on the
 * shared ACPI SCI interrupt.
 */
irqreturn_t dcon_interrupt(int irq, void *id)
{
	struct dcon_priv *dcon = id;
	u8 status;

	/*
	 * read_status() returns nonzero when the interrupt wasn't ours
	 * (the XO-1.5 variant checks the gpio12 status bit) - pass it on.
	 */
	if (pdata->read_status(&status))
		return IRQ_NONE;

	switch (status & 3) {
	case 3:
		pr_debug("DCONLOAD_MISSED interrupt\n");
		break;

	case 2: /* switch to DCON mode */
	case 1: /* switch to CPU mode */
		/* record completion time and wake dcon_set_source_sync() */
		dcon->switched = true;
		dcon->irq_time = ktime_get();
		wake_up(&dcon->waitq);
		break;

	case 0:
		/* workaround resume case: the DCON (on 1.5) doesn't
		 * ever assert status 0x01 when switching to CPU mode
		 * during resume. this is because DCONLOAD is de-asserted
		 * _immediately_ upon exiting S3, so the actual release
		 * of the DCON happened long before this point.
		 * see http://dev.laptop.org/ticket/9869
		 */
		if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
			dcon->switched = true;
			dcon->irq_time = ktime_get();
			wake_up(&dcon->waitq);
			pr_debug("switching w/ status 0/0\n");
		} else {
			pr_debug("scanline interrupt w/CPU\n");
		}
	}

	return IRQ_HANDLED;
}
765
/* PM callbacks; both compile to NULL when CONFIG_PM is off. */
static const struct dev_pm_ops dcon_pm_ops = {
	.suspend = dcon_suspend,
	.resume = dcon_resume,
};

/* Devices this driver binds to (also used for module autoloading). */
static const struct i2c_device_id dcon_idtable[] = {
	{ "olpc_dcon", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, dcon_idtable);

static struct i2c_driver dcon_driver = {
	.driver = {
		.name	= "olpc_dcon",
		.pm = &dcon_pm_ops,
	},
	/* probe DDC/HWMON-class adapters at normal_i2c via dcon_detect() */
	.class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
	.id_table = dcon_idtable,
	.probe = dcon_probe,
	.remove = dcon_remove,
	.detect = dcon_detect,
	.address_list = normal_i2c,
};
789
/*
 * Module init: pick the board-specific platform data - XO-1.5 for
 * boards >= 0xd0, otherwise fall back to XO-1 - then register the
 * i2c driver.  pdata stays NULL if neither config option matches.
 */
static int __init olpc_dcon_init(void)
{
#ifdef CONFIG_FB_OLPC_DCON_1_5
	/* XO-1.5 */
	if (olpc_board_at_least(olpc_board(0xd0)))
		pdata = &dcon_pdata_xo_1_5;
#endif
#ifdef CONFIG_FB_OLPC_DCON_1
	if (!pdata)
		pdata = &dcon_pdata_xo_1;
#endif

	return i2c_add_driver(&dcon_driver);
}
804
/* Module exit: unregister the i2c driver (unbinds via dcon_remove). */
static void __exit olpc_dcon_exit(void)
{
	i2c_del_driver(&dcon_driver);
}

module_init(olpc_dcon_init);
module_exit(olpc_dcon_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
new file mode 100644
index 000000000000..215e7ec4dea2
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -0,0 +1,111 @@
#ifndef OLPC_DCON_H_
#define OLPC_DCON_H_

#include <linux/notifier.h>
#include <linux/workqueue.h>

/* DCON registers */

#define DCON_REG_ID		 0
#define DCON_REG_MODE		 1

/*
 * DCON_REG_MODE bits.
 * NOTE(review): bits 8-15 (SCAN_INT..SELFTEST) do not fit in the u8
 * disp_mode shadow below - verify whether they are ever shadowed.
 */
#define MODE_PASSTHRU	(1<<0)
#define MODE_SLEEP	(1<<1)
#define MODE_SLEEP_AUTO	(1<<2)
#define MODE_BL_ENABLE	(1<<3)
#define MODE_BLANK	(1<<4)
#define MODE_CSWIZZLE	(1<<5)
#define MODE_COL_AA	(1<<6)
#define MODE_MONO_LUMA	(1<<7)
#define MODE_SCAN_INT	(1<<8)
#define MODE_CLOCKDIV	(1<<9)
#define MODE_DEBUG	(1<<14)
#define MODE_SELFTEST	(1<<15)

#define DCON_REG_HRES		0x2
#define DCON_REG_HTOTAL		0x3
#define DCON_REG_HSYNC_WIDTH	0x4
#define DCON_REG_VRES		0x5
#define DCON_REG_VTOTAL		0x6
#define DCON_REG_VSYNC_WIDTH	0x7
#define DCON_REG_TIMEOUT	0x8
#define DCON_REG_SCAN_INT	0x9
#define DCON_REG_BRIGHT		0xa
#define DCON_REG_MEM_OPT_A	0x41
#define DCON_REG_MEM_OPT_B	0x42

/* Load Delay Locked Loop (DLL) settings for clock delay */
#define MEM_DLL_CLOCK_DELAY	(1<<0)
/* Memory controller power down function */
#define MEM_POWER_DOWN		(1<<8)
/* Memory controller software reset */
#define MEM_SOFT_RESET		(1<<0)

/* Status values (low two bits returned by read_status) */

#define DCONSTAT_SCANINT	0
#define DCONSTAT_SCANINT_DCON	1
#define DCONSTAT_DISPLAYLOAD	2
#define DCONSTAT_MISSED		3

/* Source values */

#define DCON_SOURCE_DCON	0
#define DCON_SOURCE_CPU		1

/* Interrupt */
#define DCON_IRQ		6

/* Per-device driver state, allocated in dcon_probe(). */
struct dcon_priv {
	struct i2c_client *client;
	struct fb_info *fbinfo;
	struct backlight_device *bl_dev;

	/* woken by dcon_interrupt() when a source switch completes */
	wait_queue_head_t waitq;
	struct work_struct switch_source;
	struct notifier_block reboot_nb;

	/* Shadow register for the DCON_REG_MODE register */
	u8 disp_mode;

	/* The current backlight value - this saves us some smbus traffic */
	u8 bl_val;

	/* Current source, initialized at probe time */
	int curr_src;

	/* Desired source */
	int pending_src;

	/* Variables used during switches */
	bool switched;
	ktime_t irq_time;
	ktime_t load_time;

	/* Current output type; true == mono, false == color */
	bool mono;
	bool asleep;
	/* This get set while controlling fb blank state from the driver */
	bool ignore_fb_events;
};

/* Board-specific (XO-1 vs XO-1.5) hardware access hooks. */
struct dcon_platform_data {
	int (*init)(struct dcon_priv *);
	void (*bus_stabilize_wiggle)(void);
	void (*set_dconload)(int);
	int (*read_status)(u8 *);
};

#include <linux/interrupt.h>

irqreturn_t dcon_interrupt(int irq, void *id);

#ifdef CONFIG_FB_OLPC_DCON_1
extern struct dcon_platform_data dcon_pdata_xo_1;
#endif

#ifdef CONFIG_FB_OLPC_DCON_1_5
extern struct dcon_platform_data dcon_pdata_xo_1_5;
#endif

#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
new file mode 100644
index 000000000000..0c5a10c69401
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -0,0 +1,205 @@
1/*
2 * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
3 *
4 * Copyright © 2006-2007 Red Hat, Inc.
5 * Copyright © 2006-2007 Advanced Micro Devices, Inc.
6 * Copyright © 2009 VIA Technology, Inc.
7 * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
8 *
9 * This program is free software. You can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/cs5535.h>
17#include <linux/gpio.h>
18#include <linux/delay.h>
19#include <asm/olpc.h>
20
21#include "olpc_dcon.h"
22
/*
 * XO-1 hardware bring-up: claim the five DCON GPIOs, read the initial
 * source from the LOAD line, wire the cs5535 GPIO event machinery to
 * DCON_IRQ, and install the interrupt handler.  The cs5535 register
 * pokes are order-sensitive; do not reorder without hardware docs.
 * Returns 0 or -EIO, unwinding claimed GPIOs on failure.
 */
static int dcon_init_xo_1(struct dcon_priv *dcon)
{
	unsigned char lob;

	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
		pr_err("failed to request STAT0 GPIO\n");
		return -EIO;
	}
	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
		pr_err("failed to request STAT1 GPIO\n");
		goto err_gp_stat1;
	}
	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
		pr_err("failed to request IRQ GPIO\n");
		goto err_gp_irq;
	}
	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
		pr_err("failed to request LOAD GPIO\n");
		goto err_gp_load;
	}
	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
		pr_err("failed to request BLANK GPIO\n");
		goto err_gp_blank;
	}

	/* Turn off the event enable for GPIO7 just to be safe */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);

	/*
	 * Determine the current state by reading the GPIO bit; earlier
	 * stages of the boot process have established the state.
	 *
	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
	 * this is because OFW will disable input for the pin and set a value..
	 * READ_BACK will only contain a valid value if input is enabled and
	 * then a value is set.  So, future readings of the pin can use
	 * READ_BACK, but the first one cannot.  Awesome, huh?
	 */
	dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
		? DCON_SOURCE_CPU
		: DCON_SOURCE_DCON;
	dcon->pending_src = dcon->curr_src;

	/* Set the directions for the GPIO pins */
	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
	/* drive LOAD with the level matching the detected source */
	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
			      dcon->curr_src == DCON_SOURCE_CPU);

	/* Set up the interrupt mappings */

	/* Set the IRQ to pair 2 */
	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);

	/* Enable group 2 to trigger the DCON interrupt */
	cs5535_gpio_set_irq(2, DCON_IRQ);

	/* Select edge level for interrupt (in PIC) */
	lob = inb(0x4d0);
	lob &= ~(1 << DCON_IRQ);
	outb(lob, 0x4d0);

	/* Register the interrupt handler */
	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
		pr_err("failed to request DCON's irq\n");
		goto err_req_irq;
	}

	/* Clear INV_EN for GPIO7 (DCONIRQ) */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);

	/* Enable filter for GPIO12 (DCONBLANK) */
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);

	/* Disable filter for GPIO7 */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);

	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);

	/* Add GPIO12 to the Filter Event Pair #7 */
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);

	/* Turn off negative Edge Enable for GPIO12 */
	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);

	/* Enable negative Edge Enable for GPIO7 */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);

	/* Zero the filter amount for Filter Event Pair #7 */
	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);

	/* Clear the negative edge status for GPIO7 and GPIO12 */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);

	/* FIXME: Clear the positive status as well, just to be sure */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);

	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);

	return 0;

	/* error unwind: free GPIOs in reverse order of acquisition */
err_req_irq:
	gpio_free(OLPC_GPIO_DCON_BLANK);
err_gp_blank:
	gpio_free(OLPC_GPIO_DCON_LOAD);
err_gp_load:
	gpio_free(OLPC_GPIO_DCON_IRQ);
err_gp_irq:
	gpio_free(OLPC_GPIO_DCON_STAT1);
err_gp_stat1:
	gpio_free(OLPC_GPIO_DCON_STAT0);
	return -EIO;
}
144
/*
 * Bit-bang 16 SMB_CLK cycles with SMB_DATA held high to reset the
 * DCON's i2c state machine, then give the pins back to the SMBus
 * controller (AUX1 reselects the hardware function).
 */
static void dcon_wiggle_xo_1(void)
{
	int x;

	/*
	 * According to HiMax, when powering the DCON up we should hold
	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
	 * state machine to reset to a (sane) initial state.  Mitch Bradley
	 * did some testing and discovered that holding for 16 SMB_CLK cycles
	 * worked a lot more reliably, so that's what we do here.
	 *
	 * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
	 * GPIO15.
	 */
	/* take both pins over as plain GPIO outputs driven high */
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);

	/* 16 clock pulses at roughly 100 kHz (5us half-periods) */
	for (x = 0; x < 16; x++) {
		udelay(5);
		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
		udelay(5);
		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
	}
	udelay(5);
	/* restore the pins to the SMBus controller */
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
}
183
/* Drive DCONLOAD: level 1 selects the CPU as display source, 0 the DCON. */
static void dcon_set_dconload_1(int val)
{
	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
}
188
189static int dcon_read_status_xo_1(u8 *status)
190{
191 *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
192 *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
193
194 /* Clear the negative edge status for GPIO7 */
195 cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
196
197 return 0;
198}
199
/* XO-1 hardware access hooks, selected by olpc_dcon_init(). */
struct dcon_platform_data dcon_pdata_xo_1 = {
	.init = dcon_init_xo_1,
	.bus_stabilize_wiggle = dcon_wiggle_xo_1,
	.set_dconload = dcon_set_dconload_1,
	.read_status = dcon_read_status_xo_1,
};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
new file mode 100644
index 000000000000..6a4d379c16a3
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright (c) 2009,2010 One Laptop per Child
3 *
4 * This program is free software. You can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/acpi.h>
12#include <linux/delay.h>
13#include <linux/gpio.h>
14#include <asm/olpc.h>
15
16/* TODO: this eventually belongs in linux/vx855.h */
17#define NR_VX855_GPI 14
18#define NR_VX855_GPO 13
19#define NR_VX855_GPIO 15
20
21#define VX855_GPI(n) (n)
22#define VX855_GPO(n) (NR_VX855_GPI + (n))
23#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n))
24
25#include "olpc_dcon.h"
26
27/* Hardware setup on the XO 1.5:
28 * DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
29 * DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver
30 * DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
31 * DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
32 * DCONIRQ connects to VX855_GPIO12
33 * DCONSMBDATA connects to VX855 graphics CRTSPD
34 * DCONSMBCLK connects to VX855 graphics CRTSPCLK
35 */
36
37#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
38#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */
39#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */
40#define BIT_GPIO12 0x40
41
42#define PREFIX "OLPC DCON:"
43
/* Ack the gpio12 (DCONIRQ) status bit; the register is write-1-to-clear. */
static void dcon_clear_irq(void)
{
	/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
	outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
}
49
50static int dcon_was_irq(void)
51{
52 u_int8_t tmp;
53
54 /* irq status will appear in PMIO_Rx50[6] on gpio12 */
55 tmp = inb(VX855_GPI_STATUS_CHG);
56 return !!(tmp & BIT_GPIO12);
57
58 return 0;
59}
60
61static int dcon_init_xo_1_5(struct dcon_priv *dcon)
62{
63 unsigned int irq;
64
65 dcon_clear_irq();
66
67 /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
68 outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
69
70 /* Determine the current state of DCONLOAD, likely set by firmware */
71 /* GPIO1 */
72 dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
73 DCON_SOURCE_CPU : DCON_SOURCE_DCON;
74 dcon->pending_src = dcon->curr_src;
75
76 /* we're sharing the IRQ with ACPI */
77 irq = acpi_gbl_FADT.sci_interrupt;
78 if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
79 pr_err("DCON (IRQ%d) allocation failed\n", irq);
80 return 1;
81 }
82
83 return 0;
84}
85
/*
 * Bit-bang the DCON's i2c lines through VX855 CRT sequencer register
 * 0x26 (index port 0x3c4, data port 0x3c5): bit 0x20 drives SCL,
 * bit 0x10 drives SDA.  NOTE(review): the always-set 0x01 bit is
 * presumably an output/pad enable - unconfirmed, check VX855 docs.
 */
static void set_i2c_line(int sda, int scl)
{
	unsigned char tmp;
	unsigned int port = 0x26;

	/* FIXME: This directly accesses the CRT GPIO controller !!! */
	outb(port, 0x3c4);
	tmp = inb(0x3c5);

	if (scl)
		tmp |= 0x20;
	else
		tmp &= ~0x20;

	if (sda)
		tmp |= 0x10;
	else
		tmp &= ~0x10;

	tmp |= 0x01;

	outb(port, 0x3c4);
	outb(tmp, 0x3c5);
}
110
111
/*
 * Reset the DCON's i2c state machine by clocking SCL 16 times with SDA
 * held high, then re-enable SCI/SMI delivery for gpio12.
 */
static void dcon_wiggle_xo_1_5(void)
{
	int x;

	/*
	 * According to HiMax, when powering the DCON up we should hold
	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
	 * state machine to reset to a (sane) initial state.  Mitch Bradley
	 * did some testing and discovered that holding for 16 SMB_CLK cycles
	 * worked a lot more reliably, so that's what we do here.
	 */
	set_i2c_line(1, 1);

	/* 16 clock pulses, 5us half-periods, SDA high throughout */
	for (x = 0; x < 16; x++) {
		udelay(5);
		set_i2c_line(1, 0);
		udelay(5);
		set_i2c_line(1, 1);
	}
	udelay(5);

	/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
}
136
/* Drive DCONLOAD via VX855 GPIO1: 1 == CPU owns the display, 0 == DCON. */
static void dcon_set_dconload_xo_1_5(int val)
{
	gpio_set_value(VX855_GPIO(1), val);
}
141
/*
 * Read the 2-bit DCON status from GPI10/GPI11.  Returns -1 when the
 * shared SCI interrupt wasn't raised by the DCON (lets dcon_interrupt()
 * return IRQ_NONE); acks the gpio12 status bit on success.
 */
static int dcon_read_status_xo_1_5(u8 *status)
{
	if (!dcon_was_irq())
		return -1;

	/* i believe this is the same as "inb(0x44b) & 3" */
	*status = gpio_get_value(VX855_GPI(10));
	*status |= gpio_get_value(VX855_GPI(11)) << 1;

	dcon_clear_irq();

	return 0;
}
155
/* XO-1.5 hardware access hooks, selected by olpc_dcon_init(). */
struct dcon_platform_data dcon_pdata_xo_1_5 = {
	.init = dcon_init_xo_1_5,
	.bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
	.set_dconload = dcon_set_dconload_xo_1_5,
	.read_status = dcon_read_status_xo_1_5,
};
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
index 3e668d852f03..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_HFI1
2 tristate "Intel OPA Gen1 support" 2 tristate "Intel OPA Gen1 support"
3 depends on X86_64 && INFINIBAND_RDMAVT 3 depends on X86_64 && INFINIBAND_RDMAVT
4 select MMU_NOTIFIER 4 select MMU_NOTIFIER
5 select CRC32
5 default m 6 default m
6 ---help--- 7 ---help---
7 This is a low-level driver for Intel OPA Gen1 adapter. 8 This is a low-level driver for Intel OPA Gen1 adapter.
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 3788d167b3c6..db59d9731ec2 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -134,7 +134,6 @@ struct visor_device {
134 struct list_head list_all; 134 struct list_head list_all;
135 struct periodic_work *periodic_work; 135 struct periodic_work *periodic_work;
136 bool being_removed; 136 bool being_removed;
137 bool responded_to_device_create;
138 /* the code will detect and behave appropriately) */ 137 /* the code will detect and behave appropriately) */
139 struct semaphore visordriver_callback_lock; 138 struct semaphore visordriver_callback_lock;
140 bool pausing; 139 bool pausing;
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index b68a904ac617..43373582cf1d 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -40,7 +40,6 @@ struct visorchannel {
40 bool requested; 40 bool requested;
41 struct channel_header chan_hdr; 41 struct channel_header chan_hdr;
42 uuid_le guid; 42 uuid_le guid;
43 ulong size;
44 bool needs_lock; /* channel creator knows if more than one */ 43 bool needs_lock; /* channel creator knows if more than one */
45 /* thread will be inserting or removing */ 44 /* thread will be inserting or removing */
46 spinlock_t insert_lock; /* protect head writes in chan_hdr */ 45 spinlock_t insert_lock; /* protect head writes in chan_hdr */
@@ -134,8 +133,6 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
134 } 133 }
135 134
136 channel->nbytes = channel_bytes; 135 channel->nbytes = channel_bytes;
137
138 channel->size = channel_bytes;
139 channel->guid = guid; 136 channel->guid = guid;
140 return channel; 137 return channel;
141 138
@@ -186,7 +183,7 @@ EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
186ulong 183ulong
187visorchannel_get_nbytes(struct visorchannel *channel) 184visorchannel_get_nbytes(struct visorchannel *channel)
188{ 185{
189 return channel->size; 186 return channel->nbytes;
190} 187}
191EXPORT_SYMBOL_GPL(visorchannel_get_nbytes); 188EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
192 189
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 61877722b1da..c1b872c02974 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -66,7 +66,7 @@ static u32 dump_vhba_bus;
66static int 66static int
67visorchipset_open(struct inode *inode, struct file *file) 67visorchipset_open(struct inode *inode, struct file *file)
68{ 68{
69 unsigned minor_number = iminor(inode); 69 unsigned int minor_number = iminor(inode);
70 70
71 if (minor_number) 71 if (minor_number)
72 return -ENODEV; 72 return -ENODEV;
@@ -530,10 +530,11 @@ static ssize_t toolaction_store(struct device *dev,
530 if (kstrtou8(buf, 10, &tool_action)) 530 if (kstrtou8(buf, 10, &tool_action))
531 return -EINVAL; 531 return -EINVAL;
532 532
533 ret = visorchannel_write(controlvm_channel, 533 ret = visorchannel_write
534 offsetof(struct spar_controlvm_channel_protocol, 534 (controlvm_channel,
535 tool_action), 535 offsetof(struct spar_controlvm_channel_protocol,
536 &tool_action, sizeof(u8)); 536 tool_action),
537 &tool_action, sizeof(u8));
537 538
538 if (ret) 539 if (ret)
539 return ret; 540 return ret;
@@ -565,10 +566,11 @@ static ssize_t boottotool_store(struct device *dev,
565 return -EINVAL; 566 return -EINVAL;
566 567
567 efi_spar_indication.boot_to_tool = val; 568 efi_spar_indication.boot_to_tool = val;
568 ret = visorchannel_write(controlvm_channel, 569 ret = visorchannel_write
569 offsetof(struct spar_controlvm_channel_protocol, 570 (controlvm_channel,
570 efi_spar_ind), &(efi_spar_indication), 571 offsetof(struct spar_controlvm_channel_protocol,
571 sizeof(struct efi_spar_indication)); 572 efi_spar_ind), &(efi_spar_indication),
573 sizeof(struct efi_spar_indication));
572 574
573 if (ret) 575 if (ret)
574 return ret; 576 return ret;
@@ -596,10 +598,11 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
596 if (kstrtou32(buf, 10, &error)) 598 if (kstrtou32(buf, 10, &error))
597 return -EINVAL; 599 return -EINVAL;
598 600
599 ret = visorchannel_write(controlvm_channel, 601 ret = visorchannel_write
600 offsetof(struct spar_controlvm_channel_protocol, 602 (controlvm_channel,
601 installation_error), 603 offsetof(struct spar_controlvm_channel_protocol,
602 &error, sizeof(u32)); 604 installation_error),
605 &error, sizeof(u32));
603 if (ret) 606 if (ret)
604 return ret; 607 return ret;
605 return count; 608 return count;
@@ -610,10 +613,11 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
610{ 613{
611 u32 text_id; 614 u32 text_id;
612 615
613 visorchannel_read(controlvm_channel, 616 visorchannel_read
614 offsetof(struct spar_controlvm_channel_protocol, 617 (controlvm_channel,
615 installation_text_id), 618 offsetof(struct spar_controlvm_channel_protocol,
616 &text_id, sizeof(u32)); 619 installation_text_id),
620 &text_id, sizeof(u32));
617 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id); 621 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
618} 622}
619 623
@@ -626,10 +630,11 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
626 if (kstrtou32(buf, 10, &text_id)) 630 if (kstrtou32(buf, 10, &text_id))
627 return -EINVAL; 631 return -EINVAL;
628 632
629 ret = visorchannel_write(controlvm_channel, 633 ret = visorchannel_write
630 offsetof(struct spar_controlvm_channel_protocol, 634 (controlvm_channel,
631 installation_text_id), 635 offsetof(struct spar_controlvm_channel_protocol,
632 &text_id, sizeof(u32)); 636 installation_text_id),
637 &text_id, sizeof(u32));
633 if (ret) 638 if (ret)
634 return ret; 639 return ret;
635 return count; 640 return count;
@@ -657,10 +662,11 @@ static ssize_t remaining_steps_store(struct device *dev,
657 if (kstrtou16(buf, 10, &remaining_steps)) 662 if (kstrtou16(buf, 10, &remaining_steps))
658 return -EINVAL; 663 return -EINVAL;
659 664
660 ret = visorchannel_write(controlvm_channel, 665 ret = visorchannel_write
661 offsetof(struct spar_controlvm_channel_protocol, 666 (controlvm_channel,
662 installation_remaining_steps), 667 offsetof(struct spar_controlvm_channel_protocol,
663 &remaining_steps, sizeof(u16)); 668 installation_remaining_steps),
669 &remaining_steps, sizeof(u16));
664 if (ret) 670 if (ret)
665 return ret; 671 return ret;
666 return count; 672 return count;
@@ -959,7 +965,6 @@ bus_epilog(struct visor_device *bus_info,
959 u32 cmd, struct controlvm_message_header *msg_hdr, 965 u32 cmd, struct controlvm_message_header *msg_hdr,
960 int response, bool need_response) 966 int response, bool need_response)
961{ 967{
962 bool notified = false;
963 struct controlvm_message_header *pmsg_hdr = NULL; 968 struct controlvm_message_header *pmsg_hdr = NULL;
964 969
965 down(&notifier_lock); 970 down(&notifier_lock);
@@ -997,32 +1002,20 @@ bus_epilog(struct visor_device *bus_info,
997 case CONTROLVM_BUS_CREATE: 1002 case CONTROLVM_BUS_CREATE:
998 if (busdev_notifiers.bus_create) { 1003 if (busdev_notifiers.bus_create) {
999 (*busdev_notifiers.bus_create) (bus_info); 1004 (*busdev_notifiers.bus_create) (bus_info);
1000 notified = true; 1005 goto out_unlock;
1001 } 1006 }
1002 break; 1007 break;
1003 case CONTROLVM_BUS_DESTROY: 1008 case CONTROLVM_BUS_DESTROY:
1004 if (busdev_notifiers.bus_destroy) { 1009 if (busdev_notifiers.bus_destroy) {
1005 (*busdev_notifiers.bus_destroy) (bus_info); 1010 (*busdev_notifiers.bus_destroy) (bus_info);
1006 notified = true; 1011 goto out_unlock;
1007 } 1012 }
1008 break; 1013 break;
1009 } 1014 }
1010 } 1015 }
1011 1016
1012out_respond_and_unlock: 1017out_respond_and_unlock:
1013 if (notified) 1018 bus_responder(cmd, pmsg_hdr, response);
1014 /* The callback function just called above is responsible
1015 * for calling the appropriate visorchipset_busdev_responders
1016 * function, which will call bus_responder()
1017 */
1018 ;
1019 else
1020 /*
1021 * Do not kfree(pmsg_hdr) as this is the failure path.
1022 * The success path ('notified') will call the responder
1023 * directly and kfree() there.
1024 */
1025 bus_responder(cmd, pmsg_hdr, response);
1026 1019
1027out_unlock: 1020out_unlock:
1028 up(&notifier_lock); 1021 up(&notifier_lock);
@@ -1035,30 +1028,30 @@ device_epilog(struct visor_device *dev_info,
1035 bool need_response, bool for_visorbus) 1028 bool need_response, bool for_visorbus)
1036{ 1029{
1037 struct visorchipset_busdev_notifiers *notifiers; 1030 struct visorchipset_busdev_notifiers *notifiers;
1038 bool notified = false;
1039 struct controlvm_message_header *pmsg_hdr = NULL; 1031 struct controlvm_message_header *pmsg_hdr = NULL;
1040 1032
1041 notifiers = &busdev_notifiers; 1033 notifiers = &busdev_notifiers;
1042 1034
1035 down(&notifier_lock);
1043 if (!dev_info) { 1036 if (!dev_info) {
1044 /* relying on a valid passed in response code */ 1037 /* relying on a valid passed in response code */
1045 /* be lazy and re-use msg_hdr for this failure, is this ok?? */ 1038 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
1046 pmsg_hdr = msg_hdr; 1039 pmsg_hdr = msg_hdr;
1047 goto away; 1040 goto out_respond_and_unlock;
1048 } 1041 }
1049 1042
1050 if (dev_info->pending_msg_hdr) { 1043 if (dev_info->pending_msg_hdr) {
1051 /* only non-NULL if dev is still waiting on a response */ 1044 /* only non-NULL if dev is still waiting on a response */
1052 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; 1045 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1053 pmsg_hdr = dev_info->pending_msg_hdr; 1046 pmsg_hdr = dev_info->pending_msg_hdr;
1054 goto away; 1047 goto out_respond_and_unlock;
1055 } 1048 }
1056 1049
1057 if (need_response) { 1050 if (need_response) {
1058 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); 1051 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1059 if (!pmsg_hdr) { 1052 if (!pmsg_hdr) {
1060 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; 1053 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1061 goto away; 1054 goto out_respond_and_unlock;
1062 } 1055 }
1063 1056
1064 memcpy(pmsg_hdr, msg_hdr, 1057 memcpy(pmsg_hdr, msg_hdr,
@@ -1066,13 +1059,12 @@ device_epilog(struct visor_device *dev_info,
1066 dev_info->pending_msg_hdr = pmsg_hdr; 1059 dev_info->pending_msg_hdr = pmsg_hdr;
1067 } 1060 }
1068 1061
1069 down(&notifier_lock);
1070 if (response >= 0) { 1062 if (response >= 0) {
1071 switch (cmd) { 1063 switch (cmd) {
1072 case CONTROLVM_DEVICE_CREATE: 1064 case CONTROLVM_DEVICE_CREATE:
1073 if (notifiers->device_create) { 1065 if (notifiers->device_create) {
1074 (*notifiers->device_create) (dev_info); 1066 (*notifiers->device_create) (dev_info);
1075 notified = true; 1067 goto out_unlock;
1076 } 1068 }
1077 break; 1069 break;
1078 case CONTROLVM_DEVICE_CHANGESTATE: 1070 case CONTROLVM_DEVICE_CHANGESTATE:
@@ -1082,7 +1074,7 @@ device_epilog(struct visor_device *dev_info,
1082 segment_state_running.operating) { 1074 segment_state_running.operating) {
1083 if (notifiers->device_resume) { 1075 if (notifiers->device_resume) {
1084 (*notifiers->device_resume) (dev_info); 1076 (*notifiers->device_resume) (dev_info);
1085 notified = true; 1077 goto out_unlock;
1086 } 1078 }
1087 } 1079 }
1088 /* ServerNotReady / ServerLost / SegmentStateStandby */ 1080 /* ServerNotReady / ServerLost / SegmentStateStandby */
@@ -1094,32 +1086,23 @@ device_epilog(struct visor_device *dev_info,
1094 */ 1086 */
1095 if (notifiers->device_pause) { 1087 if (notifiers->device_pause) {
1096 (*notifiers->device_pause) (dev_info); 1088 (*notifiers->device_pause) (dev_info);
1097 notified = true; 1089 goto out_unlock;
1098 } 1090 }
1099 } 1091 }
1100 break; 1092 break;
1101 case CONTROLVM_DEVICE_DESTROY: 1093 case CONTROLVM_DEVICE_DESTROY:
1102 if (notifiers->device_destroy) { 1094 if (notifiers->device_destroy) {
1103 (*notifiers->device_destroy) (dev_info); 1095 (*notifiers->device_destroy) (dev_info);
1104 notified = true; 1096 goto out_unlock;
1105 } 1097 }
1106 break; 1098 break;
1107 } 1099 }
1108 } 1100 }
1109away: 1101
1110 if (notified) 1102out_respond_and_unlock:
1111 /* The callback function just called above is responsible 1103 device_responder(cmd, pmsg_hdr, response);
1112 * for calling the appropriate visorchipset_busdev_responders 1104
1113 * function, which will call device_responder() 1105out_unlock:
1114 */
1115 ;
1116 else
1117 /*
1118 * Do not kfree(pmsg_hdr) as this is the failure path.
1119 * The success path ('notified') will call the responder
1120 * directly and kfree() there.
1121 */
1122 device_responder(cmd, pmsg_hdr, response);
1123 up(&notifier_lock); 1106 up(&notifier_lock);
1124} 1107}
1125 1108
@@ -1226,8 +1209,9 @@ bus_configure(struct controlvm_message *inmsg,
1226 POSTCODE_SEVERITY_ERR); 1209 POSTCODE_SEVERITY_ERR);
1227 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; 1210 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1228 } else { 1211 } else {
1229 visorchannel_set_clientpartition(bus_info->visorchannel, 1212 visorchannel_set_clientpartition
1230 cmd->configure_bus.guest_handle); 1213 (bus_info->visorchannel,
1214 cmd->configure_bus.guest_handle);
1231 bus_info->partition_uuid = parser_id_get(parser_ctx); 1215 bus_info->partition_uuid = parser_id_get(parser_ctx);
1232 parser_param_start(parser_ctx, PARSERSTRING_NAME); 1216 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1233 bus_info->name = parser_string_get(parser_ctx); 1217 bus_info->name = parser_string_get(parser_ctx);
@@ -1709,9 +1693,10 @@ parahotplug_process_message(struct controlvm_message *inmsg)
1709 * initialization. 1693 * initialization.
1710 */ 1694 */
1711 parahotplug_request_kickoff(req); 1695 parahotplug_request_kickoff(req);
1712 controlvm_respond_physdev_changestate(&inmsg->hdr, 1696 controlvm_respond_physdev_changestate
1713 CONTROLVM_RESP_SUCCESS, 1697 (&inmsg->hdr,
1714 inmsg->cmd.device_change_state.state); 1698 CONTROLVM_RESP_SUCCESS,
1699 inmsg->cmd.device_change_state.state);
1715 parahotplug_request_destroy(req); 1700 parahotplug_request_destroy(req);
1716 } else { 1701 } else {
1717 /* For disable messages, add the request to the 1702 /* For disable messages, add the request to the
@@ -1823,8 +1808,9 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
1823 break; 1808 break;
1824 default: 1809 default:
1825 if (inmsg.hdr.flags.response_expected) 1810 if (inmsg.hdr.flags.response_expected)
1826 controlvm_respond(&inmsg.hdr, 1811 controlvm_respond
1827 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN); 1812 (&inmsg.hdr,
1813 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1828 break; 1814 break;
1829 } 1815 }
1830 1816
@@ -2177,10 +2163,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2177 if (!*file_controlvm_channel) 2163 if (!*file_controlvm_channel)
2178 return -ENXIO; 2164 return -ENXIO;
2179 2165
2180 visorchannel_read(*file_controlvm_channel, 2166 visorchannel_read
2181 offsetof(struct spar_controlvm_channel_protocol, 2167 (*file_controlvm_channel,
2182 gp_control_channel), 2168 offsetof(struct spar_controlvm_channel_protocol,
2183 &addr, sizeof(addr)); 2169 gp_control_channel),
2170 &addr, sizeof(addr));
2184 if (!addr) 2171 if (!addr)
2185 return -ENXIO; 2172 return -ENXIO;
2186 2173
@@ -2280,16 +2267,25 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2280 return 0; 2267 return 0;
2281} 2268}
2282 2269
2270static void
2271visorchipset_file_cleanup(dev_t major_dev)
2272{
2273 if (file_cdev.ops)
2274 cdev_del(&file_cdev);
2275 file_cdev.ops = NULL;
2276 unregister_chrdev_region(major_dev, 1);
2277}
2278
2283static int 2279static int
2284visorchipset_init(struct acpi_device *acpi_device) 2280visorchipset_init(struct acpi_device *acpi_device)
2285{ 2281{
2286 int rc = 0; 2282 int err = -ENODEV;
2287 u64 addr; 2283 u64 addr;
2288 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID; 2284 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2289 2285
2290 addr = controlvm_get_channel_address(); 2286 addr = controlvm_get_channel_address();
2291 if (!addr) 2287 if (!addr)
2292 return -ENODEV; 2288 goto error;
2293 2289
2294 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers)); 2290 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2295 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info)); 2291 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
@@ -2297,22 +2293,19 @@ visorchipset_init(struct acpi_device *acpi_device)
2297 controlvm_channel = visorchannel_create_with_lock(addr, 0, 2293 controlvm_channel = visorchannel_create_with_lock(addr, 0,
2298 GFP_KERNEL, uuid); 2294 GFP_KERNEL, uuid);
2299 if (!controlvm_channel) 2295 if (!controlvm_channel)
2300 return -ENODEV; 2296 goto error;
2297
2301 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT( 2298 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2302 visorchannel_get_header(controlvm_channel))) { 2299 visorchannel_get_header(controlvm_channel))) {
2303 initialize_controlvm_payload(); 2300 initialize_controlvm_payload();
2304 } else { 2301 } else {
2305 visorchannel_destroy(controlvm_channel); 2302 goto error_destroy_channel;
2306 controlvm_channel = NULL;
2307 return -ENODEV;
2308 } 2303 }
2309 2304
2310 major_dev = MKDEV(visorchipset_major, 0); 2305 major_dev = MKDEV(visorchipset_major, 0);
2311 rc = visorchipset_file_init(major_dev, &controlvm_channel); 2306 err = visorchipset_file_init(major_dev, &controlvm_channel);
2312 if (rc < 0) { 2307 if (err < 0)
2313 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR); 2308 goto error_destroy_payload;
2314 goto cleanup;
2315 }
2316 2309
2317 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header)); 2310 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2318 2311
@@ -2331,27 +2324,33 @@ visorchipset_init(struct acpi_device *acpi_device)
2331 visorchipset_platform_device.dev.devt = major_dev; 2324 visorchipset_platform_device.dev.devt = major_dev;
2332 if (platform_device_register(&visorchipset_platform_device) < 0) { 2325 if (platform_device_register(&visorchipset_platform_device) < 0) {
2333 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR); 2326 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2334 rc = -ENODEV; 2327 err = -ENODEV;
2335 goto cleanup; 2328 goto error_cancel_work;
2336 } 2329 }
2337 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO); 2330 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2338 2331
2339 rc = visorbus_init(); 2332 err = visorbus_init();
2340cleanup: 2333 if (err < 0)
2341 if (rc) { 2334 goto error_unregister;
2342 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2343 POSTCODE_SEVERITY_ERR);
2344 }
2345 return rc;
2346}
2347 2335
2348static void 2336 return 0;
2349visorchipset_file_cleanup(dev_t major_dev) 2337
2350{ 2338error_unregister:
2351 if (file_cdev.ops) 2339 platform_device_unregister(&visorchipset_platform_device);
2352 cdev_del(&file_cdev); 2340
2353 file_cdev.ops = NULL; 2341error_cancel_work:
2354 unregister_chrdev_region(major_dev, 1); 2342 cancel_delayed_work_sync(&periodic_controlvm_work);
2343 visorchipset_file_cleanup(major_dev);
2344
2345error_destroy_payload:
2346 destroy_controlvm_payload_info(&controlvm_payload_info);
2347
2348error_destroy_channel:
2349 visorchannel_destroy(controlvm_channel);
2350
2351error:
2352 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
2353 return err;
2355} 2354}
2356 2355
2357static int 2356static int
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 3299cf502aa0..dc94261b31f9 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -123,9 +123,9 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
123 [38] = KEY_L, 123 [38] = KEY_L,
124 [39] = KEY_SEMICOLON, 124 [39] = KEY_SEMICOLON,
125 [40] = KEY_APOSTROPHE, 125 [40] = KEY_APOSTROPHE,
126 [41] = KEY_GRAVE, /* FIXME, '#' */ 126 [41] = KEY_GRAVE,
127 [42] = KEY_LEFTSHIFT, 127 [42] = KEY_LEFTSHIFT,
128 [43] = KEY_BACKSLASH, /* FIXME, '~' */ 128 [43] = KEY_BACKSLASH,
129 [44] = KEY_Z, 129 [44] = KEY_Z,
130 [45] = KEY_X, 130 [45] = KEY_X,
131 [46] = KEY_C, 131 [46] = KEY_C,
@@ -173,7 +173,7 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
173 [88] = KEY_F12, 173 [88] = KEY_F12,
174 [90] = KEY_KPLEFTPAREN, 174 [90] = KEY_KPLEFTPAREN,
175 [91] = KEY_KPRIGHTPAREN, 175 [91] = KEY_KPRIGHTPAREN,
176 [92] = KEY_KPASTERISK, /* FIXME */ 176 [92] = KEY_KPASTERISK,
177 [93] = KEY_KPASTERISK, 177 [93] = KEY_KPASTERISK,
178 [94] = KEY_KPPLUS, 178 [94] = KEY_KPPLUS,
179 [95] = KEY_HELP, 179 [95] = KEY_HELP,
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index be0d057346c3..0ec952ac0dac 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -356,28 +356,38 @@ visornic_serverdown(struct visornic_devdata *devdata,
356 visorbus_state_complete_func complete_func) 356 visorbus_state_complete_func complete_func)
357{ 357{
358 unsigned long flags; 358 unsigned long flags;
359 int err;
359 360
360 spin_lock_irqsave(&devdata->priv_lock, flags); 361 spin_lock_irqsave(&devdata->priv_lock, flags);
361 if (!devdata->server_down && !devdata->server_change_state) { 362 if (devdata->server_change_state) {
362 if (devdata->going_away) {
363 spin_unlock_irqrestore(&devdata->priv_lock, flags);
364 dev_dbg(&devdata->dev->device,
365 "%s aborting because device removal pending\n",
366 __func__);
367 return -ENODEV;
368 }
369 devdata->server_change_state = true;
370 devdata->server_down_complete_func = complete_func;
371 spin_unlock_irqrestore(&devdata->priv_lock, flags);
372 visornic_serverdown_complete(devdata);
373 } else if (devdata->server_change_state) {
374 dev_dbg(&devdata->dev->device, "%s changing state\n", 363 dev_dbg(&devdata->dev->device, "%s changing state\n",
375 __func__); 364 __func__);
376 spin_unlock_irqrestore(&devdata->priv_lock, flags); 365 err = -EINVAL;
377 return -EINVAL; 366 goto err_unlock;
367 }
368 if (devdata->server_down) {
369 dev_dbg(&devdata->dev->device, "%s already down\n",
370 __func__);
371 err = -EINVAL;
372 goto err_unlock;
378 } 373 }
374 if (devdata->going_away) {
375 dev_dbg(&devdata->dev->device,
376 "%s aborting because device removal pending\n",
377 __func__);
378 err = -ENODEV;
379 goto err_unlock;
380 }
381 devdata->server_change_state = true;
382 devdata->server_down_complete_func = complete_func;
379 spin_unlock_irqrestore(&devdata->priv_lock, flags); 383 spin_unlock_irqrestore(&devdata->priv_lock, flags);
384
385 visornic_serverdown_complete(devdata);
380 return 0; 386 return 0;
387
388err_unlock:
389 spin_unlock_irqrestore(&devdata->priv_lock, flags);
390 return err;
381} 391}
382 392
383/** 393/**
@@ -436,8 +446,8 @@ post_skb(struct uiscmdrsp *cmdrsp,
436 cmdrsp->net.type = NET_RCV_POST; 446 cmdrsp->net.type = NET_RCV_POST;
437 cmdrsp->cmdtype = CMD_NET_TYPE; 447 cmdrsp->cmdtype = CMD_NET_TYPE;
438 if (visorchannel_signalinsert(devdata->dev->visorchannel, 448 if (visorchannel_signalinsert(devdata->dev->visorchannel,
439 IOCHAN_TO_IOPART, 449 IOCHAN_TO_IOPART,
440 cmdrsp)) { 450 cmdrsp)) {
441 atomic_inc(&devdata->num_rcvbuf_in_iovm); 451 atomic_inc(&devdata->num_rcvbuf_in_iovm);
442 devdata->chstat.sent_post++; 452 devdata->chstat.sent_post++;
443 } else { 453 } else {
@@ -465,8 +475,8 @@ send_enbdis(struct net_device *netdev, int state,
465 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS; 475 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
466 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE; 476 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
467 if (visorchannel_signalinsert(devdata->dev->visorchannel, 477 if (visorchannel_signalinsert(devdata->dev->visorchannel,
468 IOCHAN_TO_IOPART, 478 IOCHAN_TO_IOPART,
469 devdata->cmdrsp_rcv)) 479 devdata->cmdrsp_rcv))
470 devdata->chstat.sent_enbdis++; 480 devdata->chstat.sent_enbdis++;
471} 481}
472 482
@@ -1647,8 +1657,9 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1647 * the lower watermark for 1657 * the lower watermark for
1648 * netif_wake_queue() 1658 * netif_wake_queue()
1649 */ 1659 */
1650 if (vnic_hit_low_watermark(devdata, 1660 if (vnic_hit_low_watermark
1651 devdata->lower_threshold_net_xmits)) { 1661 (devdata,
1662 devdata->lower_threshold_net_xmits)) {
1652 /* enough NET_XMITs completed 1663 /* enough NET_XMITs completed
1653 * so can restart netif queue 1664 * so can restart netif queue
1654 */ 1665 */
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index a24443ba59ea..97e5b69e0668 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -779,14 +779,6 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
779 return 0; 779 return 0;
780} 780}
781 781
782static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
783{
784 struct iscsi_node_acl *acl = container_of(se_nacl,
785 struct iscsi_node_acl, se_node_acl);
786
787 configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group);
788}
789
790/* End items for lio_target_acl_cit */ 782/* End items for lio_target_acl_cit */
791 783
792/* Start items for lio_target_tpg_attrib_cit */ 784/* Start items for lio_target_tpg_attrib_cit */
@@ -1247,6 +1239,16 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
1247 if (IS_ERR(tiqn)) 1239 if (IS_ERR(tiqn))
1248 return ERR_CAST(tiqn); 1240 return ERR_CAST(tiqn);
1249 1241
1242 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1243 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1244 " %s\n", name);
1245 return &tiqn->tiqn_wwn;
1246}
1247
1248static void lio_target_add_wwn_groups(struct se_wwn *wwn)
1249{
1250 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1251
1250 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, 1252 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
1251 "iscsi_instance", &iscsi_stat_instance_cit); 1253 "iscsi_instance", &iscsi_stat_instance_cit);
1252 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group, 1254 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
@@ -1271,12 +1273,6 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
1271 "iscsi_logout_stats", &iscsi_stat_logout_cit); 1273 "iscsi_logout_stats", &iscsi_stat_logout_cit);
1272 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, 1274 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
1273 &tiqn->tiqn_wwn.fabric_stat_group); 1275 &tiqn->tiqn_wwn.fabric_stat_group);
1274
1275
1276 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1277 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1278 " %s\n", name);
1279 return &tiqn->tiqn_wwn;
1280} 1276}
1281 1277
1282static void lio_target_call_coredeltiqn( 1278static void lio_target_call_coredeltiqn(
@@ -1284,8 +1280,6 @@ static void lio_target_call_coredeltiqn(
1284{ 1280{
1285 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); 1281 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1286 1282
1287 configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group);
1288
1289 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n", 1283 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
1290 tiqn->tiqn); 1284 tiqn->tiqn);
1291 iscsit_del_tiqn(tiqn); 1285 iscsit_del_tiqn(tiqn);
@@ -1660,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = {
1660 .aborted_task = lio_aborted_task, 1654 .aborted_task = lio_aborted_task,
1661 .fabric_make_wwn = lio_target_call_coreaddtiqn, 1655 .fabric_make_wwn = lio_target_call_coreaddtiqn,
1662 .fabric_drop_wwn = lio_target_call_coredeltiqn, 1656 .fabric_drop_wwn = lio_target_call_coredeltiqn,
1657 .add_wwn_groups = lio_target_add_wwn_groups,
1663 .fabric_make_tpg = lio_target_tiqn_addtpg, 1658 .fabric_make_tpg = lio_target_tiqn_addtpg,
1664 .fabric_drop_tpg = lio_target_tiqn_deltpg, 1659 .fabric_drop_tpg = lio_target_tiqn_deltpg,
1665 .fabric_make_np = lio_target_call_addnptotpg, 1660 .fabric_make_np = lio_target_call_addnptotpg,
1666 .fabric_drop_np = lio_target_call_delnpfromtpg, 1661 .fabric_drop_np = lio_target_call_delnpfromtpg,
1667 .fabric_init_nodeacl = lio_target_init_nodeacl, 1662 .fabric_init_nodeacl = lio_target_init_nodeacl,
1668 .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl,
1669 1663
1670 .tfc_discovery_attrs = lio_target_discovery_auth_attrs, 1664 .tfc_discovery_attrs = lio_target_discovery_auth_attrs,
1671 .tfc_wwn_attrs = lio_target_wwn_attrs, 1665 .tfc_wwn_attrs = lio_target_wwn_attrs,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 1bd5c72b663e..31a096aa16ab 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -338,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item)
338{ 338{
339 struct se_node_acl *se_nacl = container_of(to_config_group(item), 339 struct se_node_acl *se_nacl = container_of(to_config_group(item),
340 struct se_node_acl, acl_group); 340 struct se_node_acl, acl_group);
341 struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
342 341
343 if (tf->tf_ops->fabric_cleanup_nodeacl) 342 configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
344 tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
345 core_tpg_del_initiator_node_acl(se_nacl); 343 core_tpg_del_initiator_node_acl(se_nacl);
346} 344}
347 345
@@ -383,14 +381,6 @@ static struct config_group *target_fabric_make_nodeacl(
383 if (IS_ERR(se_nacl)) 381 if (IS_ERR(se_nacl))
384 return ERR_CAST(se_nacl); 382 return ERR_CAST(se_nacl);
385 383
386 if (tf->tf_ops->fabric_init_nodeacl) {
387 int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
388 if (ret) {
389 core_tpg_del_initiator_node_acl(se_nacl);
390 return ERR_PTR(ret);
391 }
392 }
393
394 config_group_init_type_name(&se_nacl->acl_group, name, 384 config_group_init_type_name(&se_nacl->acl_group, name,
395 &tf->tf_tpg_nacl_base_cit); 385 &tf->tf_tpg_nacl_base_cit);
396 386
@@ -414,6 +404,15 @@ static struct config_group *target_fabric_make_nodeacl(
414 configfs_add_default_group(&se_nacl->acl_fabric_stat_group, 404 configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
415 &se_nacl->acl_group); 405 &se_nacl->acl_group);
416 406
407 if (tf->tf_ops->fabric_init_nodeacl) {
408 int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
409 if (ret) {
410 configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
411 core_tpg_del_initiator_node_acl(se_nacl);
412 return ERR_PTR(ret);
413 }
414 }
415
417 return &se_nacl->acl_group; 416 return &se_nacl->acl_group;
418} 417}
419 418
@@ -892,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item)
892 struct se_wwn, wwn_group); 891 struct se_wwn, wwn_group);
893 struct target_fabric_configfs *tf = wwn->wwn_tf; 892 struct target_fabric_configfs *tf = wwn->wwn_tf;
894 893
894 configfs_remove_default_groups(&wwn->fabric_stat_group);
895 tf->tf_ops->fabric_drop_wwn(wwn); 895 tf->tf_ops->fabric_drop_wwn(wwn);
896} 896}
897 897
@@ -945,6 +945,8 @@ static struct config_group *target_fabric_make_wwn(
945 &tf->tf_wwn_fabric_stats_cit); 945 &tf->tf_wwn_fabric_stats_cit);
946 configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group); 946 configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
947 947
948 if (tf->tf_ops->add_wwn_groups)
949 tf->tf_ops->add_wwn_groups(wwn);
948 return &wwn->wwn_group; 950 return &wwn->wwn_group;
949} 951}
950 952
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d26ed79bb4c..9b04d72e752e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2049,14 +2049,13 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
2049 if (tty) { 2049 if (tty) {
2050 mutex_unlock(&tty_mutex); 2050 mutex_unlock(&tty_mutex);
2051 retval = tty_lock_interruptible(tty); 2051 retval = tty_lock_interruptible(tty);
2052 tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
2052 if (retval) { 2053 if (retval) {
2053 if (retval == -EINTR) 2054 if (retval == -EINTR)
2054 retval = -ERESTARTSYS; 2055 retval = -ERESTARTSYS;
2055 tty = ERR_PTR(retval); 2056 tty = ERR_PTR(retval);
2056 goto out; 2057 goto out;
2057 } 2058 }
2058 /* safe to drop the kref from tty_driver_lookup_tty() */
2059 tty_kref_put(tty);
2060 retval = tty_reopen(tty); 2059 retval = tty_reopen(tty);
2061 if (retval < 0) { 2060 if (retval < 0) {
2062 tty_unlock(tty); 2061 tty_unlock(tty);
@@ -2158,7 +2157,7 @@ retry_open:
2158 read_lock(&tasklist_lock); 2157 read_lock(&tasklist_lock);
2159 spin_lock_irq(&current->sighand->siglock); 2158 spin_lock_irq(&current->sighand->siglock);
2160 noctty = (filp->f_flags & O_NOCTTY) || 2159 noctty = (filp->f_flags & O_NOCTTY) ||
2161 device == MKDEV(TTY_MAJOR, 0) || 2160 (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
2162 device == MKDEV(TTYAUX_MAJOR, 1) || 2161 device == MKDEV(TTYAUX_MAJOR, 1) ||
2163 (tty->driver->type == TTY_DRIVER_TYPE_PTY && 2162 (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
2164 tty->driver->subtype == PTY_TYPE_MASTER); 2163 tty->driver->subtype == PTY_TYPE_MASTER);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5eb1a87228b4..31ccdccd7a04 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -75,8 +75,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
75 * be the first thing immediately following the endpoint descriptor. 75 * be the first thing immediately following the endpoint descriptor.
76 */ 76 */
77 desc = (struct usb_ss_ep_comp_descriptor *) buffer; 77 desc = (struct usb_ss_ep_comp_descriptor *) buffer;
78 buffer += desc->bLength;
79 size -= desc->bLength;
80 78
81 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || 79 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
82 size < USB_DT_SS_EP_COMP_SIZE) { 80 size < USB_DT_SS_EP_COMP_SIZE) {
@@ -100,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
100 ep->desc.wMaxPacketSize; 98 ep->desc.wMaxPacketSize;
101 return; 99 return;
102 } 100 }
103 101 buffer += desc->bLength;
102 size -= desc->bLength;
104 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); 103 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
105 104
106 /* Check the various values */ 105 /* Check the various values */
@@ -146,12 +145,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
146 ep->ss_ep_comp.bmAttributes = 2; 145 ep->ss_ep_comp.bmAttributes = 2;
147 } 146 }
148 147
149 /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
150 if (usb_endpoint_xfer_isoc(&ep->desc) &&
151 USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
152 usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
153 ep, buffer, size);
154
155 if (usb_endpoint_xfer_isoc(&ep->desc)) 148 if (usb_endpoint_xfer_isoc(&ep->desc))
156 max_tx = (desc->bMaxBurst + 1) * 149 max_tx = (desc->bMaxBurst + 1) *
157 (USB_SS_MULT(desc->bmAttributes)) * 150 (USB_SS_MULT(desc->bmAttributes)) *
@@ -171,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
171 max_tx); 164 max_tx);
172 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); 165 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
173 } 166 }
167 /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
168 if (usb_endpoint_xfer_isoc(&ep->desc) &&
169 USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
170 usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
171 ep, buffer, size);
174} 172}
175 173
176static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, 174static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e9940dd004e4..818f158232bb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2254{ 2254{
2255 u32 intmsk; 2255 u32 intmsk;
2256 u32 val; 2256 u32 val;
2257 u32 usbcfg;
2257 2258
2258 /* Kill any ep0 requests as controller will be reinitialized */ 2259 /* Kill any ep0 requests as controller will be reinitialized */
2259 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); 2260 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2267 * set configuration. 2268 * set configuration.
2268 */ 2269 */
2269 2270
2271 /* keep other bits untouched (so e.g. forced modes are not lost) */
2272 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2273 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
2274 GUSBCFG_HNPCAP);
2275
2270 /* set the PLL on, remove the HNP/SRP and set the PHY */ 2276 /* set the PLL on, remove the HNP/SRP and set the PHY */
2271 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 2277 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
2272 dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | 2278 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
2273 (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG); 2279 (val << GUSBCFG_USBTRDTIM_SHIFT);
2280 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
2274 2281
2275 dwc2_hsotg_init_fifo(hsotg); 2282 dwc2_hsotg_init_fifo(hsotg);
2276 2283
@@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = {
3031static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) 3038static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
3032{ 3039{
3033 u32 trdtim; 3040 u32 trdtim;
3041 u32 usbcfg;
3034 /* unmask subset of endpoint interrupts */ 3042 /* unmask subset of endpoint interrupts */
3035 3043
3036 dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | 3044 dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
3054 3062
3055 dwc2_hsotg_init_fifo(hsotg); 3063 dwc2_hsotg_init_fifo(hsotg);
3056 3064
3065 /* keep other bits untouched (so e.g. forced modes are not lost) */
3066 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3067 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3068 GUSBCFG_HNPCAP);
3069
3057 /* set the PLL on, remove the HNP/SRP and set the PHY */ 3070 /* set the PLL on, remove the HNP/SRP and set the PHY */
3058 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 3071 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3059 dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | 3072 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
3060 (trdtim << GUSBCFG_USBTRDTIM_SHIFT), 3073 (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
3061 hsotg->regs + GUSBCFG); 3074 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
3062 3075
3063 if (using_dma(hsotg)) 3076 if (using_dma(hsotg))
3064 __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN); 3077 __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 17fd81447c9f..fa20f5a99d12 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
67static int dwc3_core_soft_reset(struct dwc3 *dwc) 67static int dwc3_core_soft_reset(struct dwc3 *dwc)
68{ 68{
69 u32 reg; 69 u32 reg;
70 int retries = 1000;
70 int ret; 71 int ret;
71 72
72 /* Before Resetting PHY, put Core in Reset */
73 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
74 reg |= DWC3_GCTL_CORESOFTRESET;
75 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
76
77 /* Assert USB3 PHY reset */
78 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
79 reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
80 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
81
82 /* Assert USB2 PHY reset */
83 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
84 reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
85 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
86
87 usb_phy_init(dwc->usb2_phy); 73 usb_phy_init(dwc->usb2_phy);
88 usb_phy_init(dwc->usb3_phy); 74 usb_phy_init(dwc->usb3_phy);
89 ret = phy_init(dwc->usb2_generic_phy); 75 ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
95 phy_exit(dwc->usb2_generic_phy); 81 phy_exit(dwc->usb2_generic_phy);
96 return ret; 82 return ret;
97 } 83 }
98 mdelay(100);
99 84
100 /* Clear USB3 PHY reset */ 85 /*
101 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); 86 * We're resetting only the device side because, if we're in host mode,
102 reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; 87 * XHCI driver will reset the host block. If dwc3 was configured for
103 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); 88 * host-only mode, then we can return early.
89 */
90 if (dwc->dr_mode == USB_DR_MODE_HOST)
91 return 0;
104 92
105 /* Clear USB2 PHY reset */ 93 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
106 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 94 reg |= DWC3_DCTL_CSFTRST;
107 reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; 95 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
108 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
109 96
110 mdelay(100); 97 do {
98 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
99 if (!(reg & DWC3_DCTL_CSFTRST))
100 return 0;
111 101
112 /* After PHYs are stable we can take Core out of reset state */ 102 udelay(1);
113 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 103 } while (--retries);
114 reg &= ~DWC3_GCTL_CORESOFTRESET;
115 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
116 104
117 return 0; 105 return -ETIMEDOUT;
118} 106}
119 107
120/** 108/**
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..72664700b8a2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -39,8 +39,6 @@
39#define USBSS_IRQ_COREIRQ_EN BIT(0) 39#define USBSS_IRQ_COREIRQ_EN BIT(0)
40#define USBSS_IRQ_COREIRQ_CLR BIT(0) 40#define USBSS_IRQ_COREIRQ_CLR BIT(0)
41 41
42static u64 kdwc3_dma_mask;
43
44struct dwc3_keystone { 42struct dwc3_keystone {
45 struct device *dev; 43 struct device *dev;
46 struct clk *clk; 44 struct clk *clk;
@@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev)
108 if (IS_ERR(kdwc->usbss)) 106 if (IS_ERR(kdwc->usbss))
109 return PTR_ERR(kdwc->usbss); 107 return PTR_ERR(kdwc->usbss);
110 108
111 kdwc3_dma_mask = dma_get_mask(dev);
112 dev->dma_mask = &kdwc3_dma_mask;
113
114 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb");
115 110
116 error = clk_prepare_enable(kdwc->clk); 111 error = clk_prepare_enable(kdwc->clk);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..adc1e8a624cb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -35,6 +35,7 @@
35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa 37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
38#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
38#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
39 40
40static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 41static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
213 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, 216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
217 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 218 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
217 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 219 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
218 { } /* Terminating Entry */ 220 { } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3ac170f9d94d..d54a028cdfeb 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
569 569
570 if (!usb_endpoint_xfer_isoc(desc)) 570 if (!usb_endpoint_xfer_isoc(desc))
571 return 0; 571 goto out;
572 572
573 /* Link TRB for ISOC. The HWO bit is never reset */ 573 /* Link TRB for ISOC. The HWO bit is never reset */
574 trb_st_hw = &dep->trb_pool[0]; 574 trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
582 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 582 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
583 } 583 }
584 584
585out:
585 switch (usb_endpoint_type(desc)) { 586 switch (usb_endpoint_type(desc)) {
586 case USB_ENDPOINT_XFER_CONTROL: 587 case USB_ENDPOINT_XFER_CONTROL:
587 strlcat(dep->name, "-control", sizeof(dep->name)); 588 /* don't change name */
588 break; 589 break;
589 case USB_ENDPOINT_XFER_ISOC: 590 case USB_ENDPOINT_XFER_ISOC:
590 strlcat(dep->name, "-isoc", sizeof(dep->name)); 591 strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -2487,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2487 * implemented. 2488 * implemented.
2488 */ 2489 */
2489 2490
2490 dwc->gadget_driver->resume(&dwc->gadget); 2491 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2492 spin_unlock(&dwc->lock);
2493 dwc->gadget_driver->resume(&dwc->gadget);
2494 spin_lock(&dwc->lock);
2495 }
2491} 2496}
2492 2497
2493static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2498static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a5c62093c26c..de9ffd60fcfa 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -656,7 +656,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
656 ssp_cap->bmAttributes = cpu_to_le32(1); 656 ssp_cap->bmAttributes = cpu_to_le32(1);
657 657
658 /* Min RX/TX Lane Count = 1 */ 658 /* Min RX/TX Lane Count = 1 */
659 ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12); 659 ssp_cap->wFunctionalitySupport =
660 cpu_to_le16((1 << 8) | (1 << 12));
660 661
661 /* 662 /*
662 * bmSublinkSpeedAttr[0]: 663 * bmSublinkSpeedAttr[0]:
@@ -666,7 +667,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
666 * LSM = 10 (10 Gbps) 667 * LSM = 10 (10 Gbps)
667 */ 668 */
668 ssp_cap->bmSublinkSpeedAttr[0] = 669 ssp_cap->bmSublinkSpeedAttr[0] =
669 (3 << 4) | (1 << 14) | (0xa << 16); 670 cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
670 /* 671 /*
671 * bmSublinkSpeedAttr[1] = 672 * bmSublinkSpeedAttr[1] =
672 * ST = Symmetric, TX 673 * ST = Symmetric, TX
@@ -675,7 +676,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
675 * LSM = 10 (10 Gbps) 676 * LSM = 10 (10 Gbps)
676 */ 677 */
677 ssp_cap->bmSublinkSpeedAttr[1] = 678 ssp_cap->bmSublinkSpeedAttr[1] =
678 (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7); 679 cpu_to_le32((3 << 4) | (1 << 14) |
680 (0xa << 16) | (1 << 7));
679 } 681 }
680 682
681 return le16_to_cpu(bos->wTotalLength); 683 return le16_to_cpu(bos->wTotalLength);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8cfce105c7ee..e21ca2bd6839 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1147,8 +1147,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1147 ffs->sb = sb; 1147 ffs->sb = sb;
1148 data->ffs_data = NULL; 1148 data->ffs_data = NULL;
1149 sb->s_fs_info = ffs; 1149 sb->s_fs_info = ffs;
1150 sb->s_blocksize = PAGE_CACHE_SIZE; 1150 sb->s_blocksize = PAGE_SIZE;
1151 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1151 sb->s_blocksize_bits = PAGE_SHIFT;
1152 sb->s_magic = FUNCTIONFS_MAGIC; 1152 sb->s_magic = FUNCTIONFS_MAGIC;
1153 sb->s_op = &ffs_sb_operations; 1153 sb->s_op = &ffs_sb_operations;
1154 sb->s_time_gran = 1; 1154 sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 84c0ee5ebd1e..58fc199a18ec 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -24,6 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/kfifo.h> 26#include <linux/kfifo.h>
27#include <linux/spinlock.h>
27 28
28#include <sound/core.h> 29#include <sound/core.h>
29#include <sound/initval.h> 30#include <sound/initval.h>
@@ -89,6 +90,7 @@ struct f_midi {
89 unsigned int buflen, qlen; 90 unsigned int buflen, qlen;
90 /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */ 91 /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
91 DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *); 92 DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
93 spinlock_t transmit_lock;
92 unsigned int in_last_port; 94 unsigned int in_last_port;
93 95
94 struct gmidi_in_port in_ports_array[/* in_ports */]; 96 struct gmidi_in_port in_ports_array[/* in_ports */];
@@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
358 /* allocate a bunch of read buffers and queue them all at once. */ 360 /* allocate a bunch of read buffers and queue them all at once. */
359 for (i = 0; i < midi->qlen && err == 0; i++) { 361 for (i = 0; i < midi->qlen && err == 0; i++) {
360 struct usb_request *req = 362 struct usb_request *req =
361 midi_alloc_ep_req(midi->out_ep, midi->buflen); 363 midi_alloc_ep_req(midi->out_ep,
364 max_t(unsigned, midi->buflen,
365 bulk_out_desc.wMaxPacketSize));
362 if (req == NULL) 366 if (req == NULL)
363 return -ENOMEM; 367 return -ENOMEM;
364 368
@@ -597,17 +601,24 @@ static void f_midi_transmit(struct f_midi *midi)
597{ 601{
598 struct usb_ep *ep = midi->in_ep; 602 struct usb_ep *ep = midi->in_ep;
599 int ret; 603 int ret;
604 unsigned long flags;
600 605
601 /* We only care about USB requests if IN endpoint is enabled */ 606 /* We only care about USB requests if IN endpoint is enabled */
602 if (!ep || !ep->enabled) 607 if (!ep || !ep->enabled)
603 goto drop_out; 608 goto drop_out;
604 609
610 spin_lock_irqsave(&midi->transmit_lock, flags);
611
605 do { 612 do {
606 ret = f_midi_do_transmit(midi, ep); 613 ret = f_midi_do_transmit(midi, ep);
607 if (ret < 0) 614 if (ret < 0) {
615 spin_unlock_irqrestore(&midi->transmit_lock, flags);
608 goto drop_out; 616 goto drop_out;
617 }
609 } while (ret); 618 } while (ret);
610 619
620 spin_unlock_irqrestore(&midi->transmit_lock, flags);
621
611 return; 622 return;
612 623
613drop_out: 624drop_out:
@@ -1201,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1201 if (status) 1212 if (status)
1202 goto setup_fail; 1213 goto setup_fail;
1203 1214
1215 spin_lock_init(&midi->transmit_lock);
1216
1204 ++opts->refcnt; 1217 ++opts->refcnt;
1205 mutex_unlock(&opts->lock); 1218 mutex_unlock(&opts->lock);
1206 1219
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5cdaf0150a4e..e64479f882a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
1954 return -ENODEV; 1954 return -ENODEV;
1955 1955
1956 /* superblock */ 1956 /* superblock */
1957 sb->s_blocksize = PAGE_CACHE_SIZE; 1957 sb->s_blocksize = PAGE_SIZE;
1958 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1958 sb->s_blocksize_bits = PAGE_SHIFT;
1959 sb->s_magic = GADGETFS_MAGIC; 1959 sb->s_magic = GADGETFS_MAGIC;
1960 sb->s_op = &gadget_fs_operations; 1960 sb->s_op = &gadget_fs_operations;
1961 sb->s_time_gran = 1; 1961 sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 81d42cce885a..18569de06b04 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc)
1045 list_del_init(&req->queue); 1045 list_del_init(&req->queue);
1046 request_complete(ep, req, -ECONNRESET); 1046 request_complete(ep, req, -ECONNRESET);
1047 } 1047 }
1048
1049 /* NOTE: normally, the next call to the gadget driver is in
1050 * charge of disabling endpoints... usually disconnect().
1051 * The exception would be entering a high speed test mode.
1052 *
1053 * FIXME remove this code ... and retest thoroughly.
1054 */
1055 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1056 if (ep->ep.desc) {
1057 spin_unlock(&udc->lock);
1058 usba_ep_disable(&ep->ep);
1059 spin_lock(&udc->lock);
1060 }
1061 }
1062} 1048}
1063 1049
1064static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) 1050static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 4151597e9d28..e4e70e11d0f6 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
371 INIT_WORK(&gadget->work, usb_gadget_state_work); 371 INIT_WORK(&gadget->work, usb_gadget_state_work);
372 gadget->dev.parent = parent; 372 gadget->dev.parent = parent;
373 373
374#ifdef CONFIG_HAS_DMA
375 dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
376 gadget->dev.dma_parms = parent->dma_parms;
377 gadget->dev.dma_mask = parent->dma_mask;
378#endif
379
380 if (release) 374 if (release)
381 gadget->dev.release = release; 375 gadget->dev.release = release;
382 else 376 else
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 579587d97217..3d7af85aecb9 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -65,9 +65,7 @@ struct phy_8x16 {
65 void __iomem *regs; 65 void __iomem *regs;
66 struct clk *core_clk; 66 struct clk *core_clk;
67 struct clk *iface_clk; 67 struct clk *iface_clk;
68 struct regulator *v3p3; 68 struct regulator_bulk_data regulator[3];
69 struct regulator *v1p8;
70 struct regulator *vdd;
71 69
72 struct reset_control *phy_reset; 70 struct reset_control *phy_reset;
73 71
@@ -78,51 +76,6 @@ struct phy_8x16 {
78 struct notifier_block reboot_notify; 76 struct notifier_block reboot_notify;
79}; 77};
80 78
81static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
82{
83 int ret;
84
85 ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
86 if (ret)
87 return ret;
88
89 ret = regulator_enable(qphy->vdd);
90 if (ret)
91 return ret;
92
93 ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
94 if (ret)
95 goto off_vdd;
96
97 ret = regulator_enable(qphy->v3p3);
98 if (ret)
99 goto off_vdd;
100
101 ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
102 if (ret)
103 goto off_3p3;
104
105 ret = regulator_enable(qphy->v1p8);
106 if (ret)
107 goto off_3p3;
108
109 return 0;
110
111off_3p3:
112 regulator_disable(qphy->v3p3);
113off_vdd:
114 regulator_disable(qphy->vdd);
115
116 return ret;
117}
118
119static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
120{
121 regulator_disable(qphy->v1p8);
122 regulator_disable(qphy->v3p3);
123 regulator_disable(qphy->vdd);
124}
125
126static int phy_8x16_notify_connect(struct usb_phy *phy, 79static int phy_8x16_notify_connect(struct usb_phy *phy,
127 enum usb_device_speed speed) 80 enum usb_device_speed speed)
128{ 81{
@@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy)
261 214
262static int phy_8x16_read_devicetree(struct phy_8x16 *qphy) 215static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
263{ 216{
264 struct regulator_bulk_data regs[3];
265 struct device *dev = qphy->phy.dev; 217 struct device *dev = qphy->phy.dev;
266 int ret; 218 int ret;
267 219
@@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
273 if (IS_ERR(qphy->iface_clk)) 225 if (IS_ERR(qphy->iface_clk))
274 return PTR_ERR(qphy->iface_clk); 226 return PTR_ERR(qphy->iface_clk);
275 227
276 regs[0].supply = "v3p3"; 228 qphy->regulator[0].supply = "v3p3";
277 regs[1].supply = "v1p8"; 229 qphy->regulator[1].supply = "v1p8";
278 regs[2].supply = "vddcx"; 230 qphy->regulator[2].supply = "vddcx";
279 231
280 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs); 232 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
233 qphy->regulator);
281 if (ret) 234 if (ret)
282 return ret; 235 return ret;
283 236
284 qphy->v3p3 = regs[0].consumer;
285 qphy->v1p8 = regs[1].consumer;
286 qphy->vdd = regs[2].consumer;
287
288 qphy->phy_reset = devm_reset_control_get(dev, "phy"); 237 qphy->phy_reset = devm_reset_control_get(dev, "phy");
289 if (IS_ERR(qphy->phy_reset)) 238 if (IS_ERR(qphy->phy_reset))
290 return PTR_ERR(qphy->phy_reset); 239 return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev)
364 if (ret < 0) 313 if (ret < 0)
365 goto off_core; 314 goto off_core;
366 315
367 ret = phy_8x16_regulators_enable(qphy); 316 ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
368 if (0 && ret) 317 qphy->regulator);
318 if (WARN_ON(ret))
369 goto off_clks; 319 goto off_clks;
370 320
371 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify; 321 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@ off_extcon:
387 extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB, 337 extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
388 &qphy->vbus_notify); 338 &qphy->vbus_notify);
389off_power: 339off_power:
390 phy_8x16_regulators_disable(qphy); 340 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
391off_clks: 341off_clks:
392 clk_disable_unprepare(qphy->iface_clk); 342 clk_disable_unprepare(qphy->iface_clk);
393off_core: 343off_core:
@@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev)
413 363
414 clk_disable_unprepare(qphy->iface_clk); 364 clk_disable_unprepare(qphy->iface_clk);
415 clk_disable_unprepare(qphy->core_clk); 365 clk_disable_unprepare(qphy->core_clk);
416 phy_8x16_regulators_disable(qphy); 366 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
417 return 0; 367 return 0;
418} 368}
419 369
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b4de70ee16d3..000f9750149f 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
190 goto __usbhs_pkt_handler_end; 190 goto __usbhs_pkt_handler_end;
191 } 191 }
192 192
193 ret = func(pkt, &is_done); 193 if (likely(func))
194 ret = func(pkt, &is_done);
194 195
195 if (is_done) 196 if (is_done)
196 __usbhsf_pkt_del(pkt); 197 __usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
889 890
890 pkt->trans = len; 891 pkt->trans = len;
891 892
893 usbhsf_tx_irq_ctrl(pipe, 0);
892 INIT_WORK(&pkt->work, xfer_work); 894 INIT_WORK(&pkt->work, xfer_work);
893 schedule_work(&pkt->work); 895 schedule_work(&pkt->work);
894 896
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 664b263e4b20..53d104b56ef1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
158 struct usbhs_pipe *pipe = pkt->pipe; 158 struct usbhs_pipe *pipe = pkt->pipe;
159 struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); 159 struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
160 struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); 160 struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
161 unsigned long flags;
161 162
162 ureq->req.actual = pkt->actual; 163 ureq->req.actual = pkt->actual;
163 164
164 usbhsg_queue_pop(uep, ureq, 0); 165 usbhs_lock(priv, flags);
166 if (uep)
167 __usbhsg_queue_pop(uep, ureq, 0);
168 usbhs_unlock(priv, flags);
165} 169}
166 170
167static void usbhsg_queue_push(struct usbhsg_uep *uep, 171static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbfe761c7fba..dd47823bb014 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
168 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
168 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ 169 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
169 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 170 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
170 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 171 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index b283eb8b86d6..bbeeb2bd55a8 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
447 struct usb_serial *serial = port->serial; 447 struct usb_serial *serial = port->serial;
448 struct cypress_private *priv; 448 struct cypress_private *priv;
449 449
450 if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
451 dev_err(&port->dev, "required endpoint is missing\n");
452 return -ENODEV;
453 }
454
450 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); 455 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
451 if (!priv) 456 if (!priv)
452 return -ENOMEM; 457 return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
606 cypress_set_termios(tty, port, &priv->tmp_termios); 611 cypress_set_termios(tty, port, &priv->tmp_termios);
607 612
608 /* setup the port and start reading from the device */ 613 /* setup the port and start reading from the device */
609 if (!port->interrupt_in_urb) {
610 dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
611 __func__);
612 return -1;
613 }
614
615 usb_fill_int_urb(port->interrupt_in_urb, serial->dev, 614 usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
616 usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), 615 usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
617 port->interrupt_in_urb->transfer_buffer, 616 port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 010a42a92688..16e8e37b3b36 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
1251 1251
1252static int digi_startup(struct usb_serial *serial) 1252static int digi_startup(struct usb_serial *serial)
1253{ 1253{
1254 struct device *dev = &serial->interface->dev;
1254 struct digi_serial *serial_priv; 1255 struct digi_serial *serial_priv;
1255 int ret; 1256 int ret;
1257 int i;
1258
1259 /* check whether the device has the expected number of endpoints */
1260 if (serial->num_port_pointers < serial->type->num_ports + 1) {
1261 dev_err(dev, "OOB endpoints missing\n");
1262 return -ENODEV;
1263 }
1264
1265 for (i = 0; i < serial->type->num_ports + 1 ; i++) {
1266 if (!serial->port[i]->read_urb) {
1267 dev_err(dev, "bulk-in endpoint missing\n");
1268 return -ENODEV;
1269 }
1270 if (!serial->port[i]->write_urb) {
1271 dev_err(dev, "bulk-out endpoint missing\n");
1272 return -ENODEV;
1273 }
1274 }
1256 1275
1257 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL); 1276 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
1258 if (!serial_priv) 1277 if (!serial_priv)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 427ae43ee898..3a814e802dee 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
1004 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, 1004 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
1005 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, 1005 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
1006 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, 1006 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
1007 /* ICP DAS I-756xU devices */
1008 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
1009 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
1010 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
1007 { } /* Terminating entry */ 1011 { } /* Terminating entry */
1008}; 1012};
1009 1013
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513994..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
872#define NOVITUS_BONO_E_PID 0x6010 872#define NOVITUS_BONO_E_PID 0x6010
873 873
874/* 874/*
875 * ICPDAS I-756*U devices
876 */
877#define ICPDAS_VID 0x1b5c
878#define ICPDAS_I7560U_PID 0x0103
879#define ICPDAS_I7561U_PID 0x0104
880#define ICPDAS_I7563U_PID 0x0105
881
882/*
875 * RT Systems programming cables for various ham radios 883 * RT Systems programming cables for various ham radios
876 */ 884 */
877#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 885#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 4446b8d70ac2..885655315de1 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
376 376
377static int mct_u232_port_probe(struct usb_serial_port *port) 377static int mct_u232_port_probe(struct usb_serial_port *port)
378{ 378{
379 struct usb_serial *serial = port->serial;
379 struct mct_u232_private *priv; 380 struct mct_u232_private *priv;
380 381
382 /* check first to simplify error handling */
383 if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
384 dev_err(&port->dev, "expected endpoint missing\n");
385 return -ENODEV;
386 }
387
381 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 388 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
382 if (!priv) 389 if (!priv)
383 return -ENOMEM; 390 return -ENOMEM;
384 391
385 /* Use second interrupt-in endpoint for reading. */ 392 /* Use second interrupt-in endpoint for reading. */
386 priv->read_urb = port->serial->port[1]->interrupt_in_urb; 393 priv->read_urb = serial->port[1]->interrupt_in_urb;
387 priv->read_urb->context = port; 394 priv->read_urb->context = port;
388 395
389 spin_lock_init(&priv->lock); 396 spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834b83..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
1818 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, 1818 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1819 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, 1819 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1820 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1820 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1821 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
1822 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1821 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1823 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1822 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1824 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1823 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1825 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e2..90901861bfc0 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
123 unsigned int max_sectors = 64; 123 unsigned int max_sectors = 64;
124 124
125 if (us->fflags & US_FL_MAX_SECTORS_MIN) 125 if (us->fflags & US_FL_MAX_SECTORS_MIN)
126 max_sectors = PAGE_CACHE_SIZE >> 9; 126 max_sectors = PAGE_SIZE >> 9;
127 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors) 127 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
128 blk_queue_max_hw_sectors(sdev->request_queue, 128 blk_queue_max_hw_sectors(sdev->request_queue,
129 max_sectors); 129 max_sectors);
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf003f19..e40da7759a0e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
741 if (!(size > 0)) 741 if (!(size > 0))
742 return 0; 742 return 0;
743 743
744 if (size > urb->transfer_buffer_length) {
745 /* should not happen, probably malicious packet */
746 if (ud->side == USBIP_STUB) {
747 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
748 return 0;
749 } else {
750 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
751 return -EPIPE;
752 }
753 }
754
744 ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); 755 ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
745 if (ret != size) { 756 if (ret != size) {
746 dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); 757 dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 71a923e53f93..3b1ca4411073 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -735,7 +735,7 @@ out:
735 735
736out_unmap: 736out_unmap:
737 for (i = 0; i < nr_pages; i++) 737 for (i = 0; i < nr_pages; i++)
738 page_cache_release(pages[i]); 738 put_page(pages[i]);
739 739
740 kfree(pages); 740 kfree(pages);
741 741
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f6f28cc7eb45..e76bd91a29da 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/delay.h>
20#define VIRTIO_PCI_NO_LEGACY 21#define VIRTIO_PCI_NO_LEGACY
21#include "virtio_pci_common.h" 22#include "virtio_pci_common.h"
22 23
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
271 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 272 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
272 /* 0 status means a reset. */ 273 /* 0 status means a reset. */
273 vp_iowrite8(0, &vp_dev->common->device_status); 274 vp_iowrite8(0, &vp_dev->common->device_status);
274 /* Flush out the status write, and flush in device writes, 275 /* After writing 0 to device_status, the driver MUST wait for a read of
275 * including MSI-X interrupts, if any. */ 276 * device_status to return 0 before reinitializing the device.
276 vp_ioread8(&vp_dev->common->device_status); 277 * This will flush out the status write, and flush in device writes,
278 * including MSI-X interrupts, if any.
279 */
280 while (vp_ioread8(&vp_dev->common->device_status))
281 msleep(1);
277 /* Flush pending VQ/configuration callbacks. */ 282 /* Flush pending VQ/configuration callbacks. */
278 vp_synchronize_vectors(vdev); 283 vp_synchronize_vectors(vdev);
279} 284}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a0806a..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
484 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; 484 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
485 int rc = 0; 485 int rc = 0;
486 486
487 irq_move_irq(data); 487 if (!VALID_EVTCHN(evtchn))
488 return;
488 489
489 if (VALID_EVTCHN(evtchn)) 490 if (unlikely(irqd_is_setaffinity_pending(data))) {
491 int masked = test_and_set_mask(evtchn);
492
493 clear_evtchn(evtchn);
494
495 irq_move_masked_irq(data);
496
497 if (!masked)
498 unmask_evtchn(evtchn);
499 } else
490 clear_evtchn(evtchn); 500 clear_evtchn(evtchn);
491 501
492 if (pirq_needs_eoi(data->irq)) { 502 if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
1357{ 1367{
1358 int evtchn = evtchn_from_irq(data->irq); 1368 int evtchn = evtchn_from_irq(data->irq);
1359 1369
1360 irq_move_irq(data); 1370 if (!VALID_EVTCHN(evtchn))
1371 return;
1361 1372
1362 if (VALID_EVTCHN(evtchn)) 1373 if (unlikely(irqd_is_setaffinity_pending(data))) {
1374 int masked = test_and_set_mask(evtchn);
1375
1376 clear_evtchn(evtchn);
1377
1378 irq_move_masked_irq(data);
1379
1380 if (!masked)
1381 unmask_evtchn(evtchn);
1382 } else
1363 clear_evtchn(evtchn); 1383 clear_evtchn(evtchn);
1364} 1384}
1365 1385
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e9e04376c52c..ac9225e86bf3 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
153 * If called with zero offset, we should release 153 * If called with zero offset, we should release
154 * the private state assocated with the page 154 * the private state assocated with the page
155 */ 155 */
156 if (offset == 0 && length == PAGE_CACHE_SIZE) 156 if (offset == 0 && length == PAGE_SIZE)
157 v9fs_fscache_invalidate_page(page); 157 v9fs_fscache_invalidate_page(page);
158} 158}
159 159
@@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page)
166 struct bio_vec bvec; 166 struct bio_vec bvec;
167 int err, len; 167 int err, len;
168 168
169 if (page->index == size >> PAGE_CACHE_SHIFT) 169 if (page->index == size >> PAGE_SHIFT)
170 len = size & ~PAGE_CACHE_MASK; 170 len = size & ~PAGE_MASK;
171 else 171 else
172 len = PAGE_CACHE_SIZE; 172 len = PAGE_SIZE;
173 173
174 bvec.bv_page = page; 174 bvec.bv_page = page;
175 bvec.bv_offset = 0; 175 bvec.bv_offset = 0;
@@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
271 int retval = 0; 271 int retval = 0;
272 struct page *page; 272 struct page *page;
273 struct v9fs_inode *v9inode; 273 struct v9fs_inode *v9inode;
274 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 274 pgoff_t index = pos >> PAGE_SHIFT;
275 struct inode *inode = mapping->host; 275 struct inode *inode = mapping->host;
276 276
277 277
@@ -288,11 +288,11 @@ start:
288 if (PageUptodate(page)) 288 if (PageUptodate(page))
289 goto out; 289 goto out;
290 290
291 if (len == PAGE_CACHE_SIZE) 291 if (len == PAGE_SIZE)
292 goto out; 292 goto out;
293 293
294 retval = v9fs_fid_readpage(v9inode->writeback_fid, page); 294 retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
295 page_cache_release(page); 295 put_page(page);
296 if (!retval) 296 if (!retval)
297 goto start; 297 goto start;
298out: 298out:
@@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
313 /* 313 /*
314 * zero out the rest of the area 314 * zero out the rest of the area
315 */ 315 */
316 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 316 unsigned from = pos & (PAGE_SIZE - 1);
317 317
318 zero_user(page, from + copied, len - copied); 318 zero_user(page, from + copied, len - copied);
319 flush_dcache_page(page); 319 flush_dcache_page(page);
@@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
331 } 331 }
332 set_page_dirty(page); 332 set_page_dirty(page);
333 unlock_page(page); 333 unlock_page(page);
334 page_cache_release(page); 334 put_page(page);
335 335
336 return copied; 336 return copied;
337} 337}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index eadc894faea2..b84c291ba1eb 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
421 struct inode *inode = file_inode(file); 421 struct inode *inode = file_inode(file);
422 loff_t i_size; 422 loff_t i_size;
423 unsigned long pg_start, pg_end; 423 unsigned long pg_start, pg_end;
424 pg_start = origin >> PAGE_CACHE_SHIFT; 424 pg_start = origin >> PAGE_SHIFT;
425 pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; 425 pg_end = (origin + retval - 1) >> PAGE_SHIFT;
426 if (inode->i_mapping && inode->i_mapping->nrpages) 426 if (inode->i_mapping && inode->i_mapping->nrpages)
427 invalidate_inode_pages2_range(inode->i_mapping, 427 invalidate_inode_pages2_range(inode->i_mapping,
428 pg_start, pg_end); 428 pg_start, pg_end);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf495cedec26..de3ed8629196 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
87 sb->s_op = &v9fs_super_ops; 87 sb->s_op = &v9fs_super_ops;
88 sb->s_bdi = &v9ses->bdi; 88 sb->s_bdi = &v9ses->bdi;
89 if (v9ses->cache) 89 if (v9ses->cache)
90 sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; 90 sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
91 91
92 sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; 92 sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
93 if (!v9ses->cache) 93 if (!v9ses->cache)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 22fc7c802d69..0cde550050e8 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
510 510
511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, 511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
512 page->index, to); 512 page->index, to);
513 BUG_ON(to > PAGE_CACHE_SIZE); 513 BUG_ON(to > PAGE_SIZE);
514 bsize = AFFS_SB(sb)->s_data_blksize; 514 bsize = AFFS_SB(sb)->s_data_blksize;
515 tmp = page->index << PAGE_CACHE_SHIFT; 515 tmp = page->index << PAGE_SHIFT;
516 bidx = tmp / bsize; 516 bidx = tmp / bsize;
517 boff = tmp % bsize; 517 boff = tmp % bsize;
518 518
@@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page)
613 int err; 613 int err;
614 614
615 pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index); 615 pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
616 to = PAGE_CACHE_SIZE; 616 to = PAGE_SIZE;
617 if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { 617 if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
618 to = inode->i_size & ~PAGE_CACHE_MASK; 618 to = inode->i_size & ~PAGE_MASK;
619 memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to); 619 memset(page_address(page) + to, 0, PAGE_SIZE - to);
620 } 620 }
621 621
622 err = affs_do_readpage_ofs(page, to); 622 err = affs_do_readpage_ofs(page, to);
@@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
646 return err; 646 return err;
647 } 647 }
648 648
649 index = pos >> PAGE_CACHE_SHIFT; 649 index = pos >> PAGE_SHIFT;
650 page = grab_cache_page_write_begin(mapping, index, flags); 650 page = grab_cache_page_write_begin(mapping, index, flags);
651 if (!page) 651 if (!page)
652 return -ENOMEM; 652 return -ENOMEM;
@@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
656 return 0; 656 return 0;
657 657
658 /* XXX: inefficient but safe in the face of short writes */ 658 /* XXX: inefficient but safe in the face of short writes */
659 err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE); 659 err = affs_do_readpage_ofs(page, PAGE_SIZE);
660 if (err) { 660 if (err) {
661 unlock_page(page); 661 unlock_page(page);
662 page_cache_release(page); 662 put_page(page);
663 } 663 }
664 return err; 664 return err;
665} 665}
@@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
677 u32 tmp; 677 u32 tmp;
678 int written; 678 int written;
679 679
680 from = pos & (PAGE_CACHE_SIZE - 1); 680 from = pos & (PAGE_SIZE - 1);
681 to = pos + len; 681 to = pos + len;
682 /* 682 /*
683 * XXX: not sure if this can handle short copies (len < copied), but 683 * XXX: not sure if this can handle short copies (len < copied), but
@@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
692 692
693 bh = NULL; 693 bh = NULL;
694 written = 0; 694 written = 0;
695 tmp = (page->index << PAGE_CACHE_SHIFT) + from; 695 tmp = (page->index << PAGE_SHIFT) + from;
696 bidx = tmp / bsize; 696 bidx = tmp / bsize;
697 boff = tmp % bsize; 697 boff = tmp % bsize;
698 if (boff) { 698 if (boff) {
@@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
788 788
789done: 789done:
790 affs_brelse(bh); 790 affs_brelse(bh);
791 tmp = (page->index << PAGE_CACHE_SHIFT) + from; 791 tmp = (page->index << PAGE_SHIFT) + from;
792 if (tmp > inode->i_size) 792 if (tmp > inode->i_size)
793 inode->i_size = AFFS_I(inode)->mmu_private = tmp; 793 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
794 794
795err_first_bh: 795err_first_bh:
796 unlock_page(page); 796 unlock_page(page);
797 page_cache_release(page); 797 put_page(page);
798 798
799 return written; 799 return written;
800 800
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e10e17788f06..5fda2bc53cd7 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -181,7 +181,7 @@ error:
181static inline void afs_dir_put_page(struct page *page) 181static inline void afs_dir_put_page(struct page *page)
182{ 182{
183 kunmap(page); 183 kunmap(page);
184 page_cache_release(page); 184 put_page(page);
185} 185}
186 186
187/* 187/*
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 999bc3caec92..6344aee4ac4b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page)
164 _debug("cache said ENOBUFS"); 164 _debug("cache said ENOBUFS");
165 default: 165 default:
166 go_on: 166 go_on:
167 offset = page->index << PAGE_CACHE_SHIFT; 167 offset = page->index << PAGE_SHIFT;
168 len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); 168 len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
169 169
170 /* read the contents of the file from the server into the 170 /* read the contents of the file from the server into the
@@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
319 BUG_ON(!PageLocked(page)); 319 BUG_ON(!PageLocked(page));
320 320
321 /* we clean up only if the entire page is being invalidated */ 321 /* we clean up only if the entire page is being invalidated */
322 if (offset == 0 && length == PAGE_CACHE_SIZE) { 322 if (offset == 0 && length == PAGE_SIZE) {
323#ifdef CONFIG_AFS_FSCACHE 323#ifdef CONFIG_AFS_FSCACHE
324 if (PageFsCache(page)) { 324 if (PageFsCache(page)) {
325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index ccd0b212e82a..81dd075356b9 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
93 93
94 kunmap(page); 94 kunmap(page);
95out_free: 95out_free:
96 page_cache_release(page); 96 put_page(page);
97out: 97out:
98 _leave(" = %d", ret); 98 _leave(" = %d", ret);
99 return ret; 99 return ret;
@@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
189 buf = kmap_atomic(page); 189 buf = kmap_atomic(page);
190 memcpy(devname, buf, size); 190 memcpy(devname, buf, size);
191 kunmap_atomic(buf); 191 kunmap_atomic(buf);
192 page_cache_release(page); 192 put_page(page);
193 page = NULL; 193 page = NULL;
194 } 194 }
195 195
@@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
211 return mnt; 211 return mnt;
212 212
213error: 213error:
214 page_cache_release(page); 214 put_page(page);
215error_no_page: 215error_no_page:
216 free_page((unsigned long) options); 216 free_page((unsigned long) options);
217error_no_options: 217error_no_options:
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 81afefe7d8a6..fbdb022b75a2 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb,
315 _enter(""); 315 _enter("");
316 316
317 /* fill in the superblock */ 317 /* fill in the superblock */
318 sb->s_blocksize = PAGE_CACHE_SIZE; 318 sb->s_blocksize = PAGE_SIZE;
319 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 319 sb->s_blocksize_bits = PAGE_SHIFT;
320 sb->s_magic = AFS_FS_MAGIC; 320 sb->s_magic = AFS_FS_MAGIC;
321 sb->s_op = &afs_super_ops; 321 sb->s_op = &afs_super_ops;
322 sb->s_bdi = &as->volume->bdi; 322 sb->s_bdi = &as->volume->bdi;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f70667..65de439bdc4f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
93 _enter(",,%llu", (unsigned long long)pos); 93 _enter(",,%llu", (unsigned long long)pos);
94 94
95 i_size = i_size_read(&vnode->vfs_inode); 95 i_size = i_size_read(&vnode->vfs_inode);
96 if (pos + PAGE_CACHE_SIZE > i_size) 96 if (pos + PAGE_SIZE > i_size)
97 len = i_size - pos; 97 len = i_size - pos;
98 else 98 else
99 len = PAGE_CACHE_SIZE; 99 len = PAGE_SIZE;
100 100
101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page); 101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
102 if (ret < 0) { 102 if (ret < 0) {
@@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
123 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 123 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
124 struct page *page; 124 struct page *page;
125 struct key *key = file->private_data; 125 struct key *key = file->private_data;
126 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 126 unsigned from = pos & (PAGE_SIZE - 1);
127 unsigned to = from + len; 127 unsigned to = from + len;
128 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 128 pgoff_t index = pos >> PAGE_SHIFT;
129 int ret; 129 int ret;
130 130
131 _enter("{%x:%u},{%lx},%u,%u", 131 _enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
151 *pagep = page; 151 *pagep = page;
152 /* page won't leak in error case: it eventually gets cleaned off LRU */ 152 /* page won't leak in error case: it eventually gets cleaned off LRU */
153 153
154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { 154 if (!PageUptodate(page) && len != PAGE_SIZE) {
155 ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); 155 ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
156 if (ret < 0) { 156 if (ret < 0) {
157 kfree(candidate); 157 kfree(candidate);
158 _leave(" = %d [prep]", ret); 158 _leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
266 if (PageDirty(page)) 266 if (PageDirty(page))
267 _debug("dirtied"); 267 _debug("dirtied");
268 unlock_page(page); 268 unlock_page(page);
269 page_cache_release(page); 269 put_page(page);
270 270
271 return copied; 271 return copied;
272} 272}
@@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping,
480 480
481 if (page->index > end) { 481 if (page->index > end) {
482 *_next = index; 482 *_next = index;
483 page_cache_release(page); 483 put_page(page);
484 _leave(" = 0 [%lx]", *_next); 484 _leave(" = 0 [%lx]", *_next);
485 return 0; 485 return 0;
486 } 486 }
@@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping,
494 494
495 if (page->mapping != mapping) { 495 if (page->mapping != mapping) {
496 unlock_page(page); 496 unlock_page(page);
497 page_cache_release(page); 497 put_page(page);
498 continue; 498 continue;
499 } 499 }
500 500
@@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping,
515 515
516 ret = afs_write_back_from_locked_page(wb, page); 516 ret = afs_write_back_from_locked_page(wb, page);
517 unlock_page(page); 517 unlock_page(page);
518 page_cache_release(page); 518 put_page(page);
519 if (ret < 0) { 519 if (ret < 0) {
520 _leave(" = %d", ret); 520 _leave(" = %d", ret);
521 return ret; 521 return ret;
@@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping,
551 &next); 551 &next);
552 mapping->writeback_index = next; 552 mapping->writeback_index = next;
553 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 553 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
554 end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT); 554 end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
555 ret = afs_writepages_region(mapping, wbc, 0, end, &next); 555 ret = afs_writepages_region(mapping, wbc, 0, end, &next);
556 if (wbc->nr_to_write > 0) 556 if (wbc->nr_to_write > 0)
557 mapping->writeback_index = next; 557 mapping->writeback_index = next;
558 } else { 558 } else {
559 start = wbc->range_start >> PAGE_CACHE_SHIFT; 559 start = wbc->range_start >> PAGE_SHIFT;
560 end = wbc->range_end >> PAGE_CACHE_SHIFT; 560 end = wbc->range_end >> PAGE_SHIFT;
561 ret = afs_writepages_region(mapping, wbc, start, end, &next); 561 ret = afs_writepages_region(mapping, wbc, start, end, &next);
562 } 562 }
563 563
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d914c67a9d0..81381cc0dd17 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2292 void *kaddr = kmap(page); 2292 void *kaddr = kmap(page);
2293 stop = !dump_emit(cprm, kaddr, PAGE_SIZE); 2293 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
2294 kunmap(page); 2294 kunmap(page);
2295 page_cache_release(page); 2295 put_page(page);
2296 } else 2296 } else
2297 stop = !dump_skip(cprm, PAGE_SIZE); 2297 stop = !dump_skip(cprm, PAGE_SIZE);
2298 if (stop) 2298 if (stop)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index b1adb92e69de..083ea2bc60ab 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
1533 void *kaddr = kmap(page); 1533 void *kaddr = kmap(page);
1534 res = dump_emit(cprm, kaddr, PAGE_SIZE); 1534 res = dump_emit(cprm, kaddr, PAGE_SIZE);
1535 kunmap(page); 1535 kunmap(page);
1536 page_cache_release(page); 1536 put_page(page);
1537 } else { 1537 } else {
1538 res = dump_skip(cprm, PAGE_SIZE); 1538 res = dump_skip(cprm, PAGE_SIZE);
1539 } 1539 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3172c4e2f502..20a2c02b77c4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
331 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); 331 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
332 332
333 unlock_page(page); 333 unlock_page(page);
334 page_cache_release(page); 334 put_page(page);
335 335
336 return ret; 336 return ret;
337} 337}
@@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size)
1149 inode_lock(bdev->bd_inode); 1149 inode_lock(bdev->bd_inode);
1150 i_size_write(bdev->bd_inode, size); 1150 i_size_write(bdev->bd_inode, size);
1151 inode_unlock(bdev->bd_inode); 1151 inode_unlock(bdev->bd_inode);
1152 while (bsize < PAGE_CACHE_SIZE) { 1152 while (bsize < PAGE_SIZE) {
1153 if (size & bsize) 1153 if (size & bsize)
1154 break; 1154 break;
1155 bsize <<= 1; 1155 bsize <<= 1;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e34a71b3e225..516e19d1d202 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
757 BUG_ON(NULL == l); 757 BUG_ON(NULL == l);
758 758
759 ret = btrfsic_read_block(state, &tmp_next_block_ctx); 759 ret = btrfsic_read_block(state, &tmp_next_block_ctx);
760 if (ret < (int)PAGE_CACHE_SIZE) { 760 if (ret < (int)PAGE_SIZE) {
761 printk(KERN_INFO 761 printk(KERN_INFO
762 "btrfsic: read @logical %llu failed!\n", 762 "btrfsic: read @logical %llu failed!\n",
763 tmp_next_block_ctx.start); 763 tmp_next_block_ctx.start);
@@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data(
1231 size_t offset_in_page; 1231 size_t offset_in_page;
1232 char *kaddr; 1232 char *kaddr;
1233 char *dst = (char *)dstv; 1233 char *dst = (char *)dstv;
1234 size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1); 1234 size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
1235 unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT; 1235 unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
1236 1236
1237 WARN_ON(offset + len > block_ctx->len); 1237 WARN_ON(offset + len > block_ctx->len);
1238 offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1); 1238 offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
1239 1239
1240 while (len > 0) { 1240 while (len > 0) {
1241 cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page)); 1241 cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
1242 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE)); 1242 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
1243 kaddr = block_ctx->datav[i]; 1243 kaddr = block_ctx->datav[i];
1244 memcpy(dst, kaddr + offset_in_page, cur); 1244 memcpy(dst, kaddr + offset_in_page, cur);
1245 1245
@@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
1605 1605
1606 BUG_ON(!block_ctx->datav); 1606 BUG_ON(!block_ctx->datav);
1607 BUG_ON(!block_ctx->pagev); 1607 BUG_ON(!block_ctx->pagev);
1608 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> 1608 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
1609 PAGE_CACHE_SHIFT; 1609 PAGE_SHIFT;
1610 while (num_pages > 0) { 1610 while (num_pages > 0) {
1611 num_pages--; 1611 num_pages--;
1612 if (block_ctx->datav[num_pages]) { 1612 if (block_ctx->datav[num_pages]) {
@@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1637 BUG_ON(block_ctx->datav); 1637 BUG_ON(block_ctx->datav);
1638 BUG_ON(block_ctx->pagev); 1638 BUG_ON(block_ctx->pagev);
1639 BUG_ON(block_ctx->mem_to_free); 1639 BUG_ON(block_ctx->mem_to_free);
1640 if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) { 1640 if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
1641 printk(KERN_INFO 1641 printk(KERN_INFO
1642 "btrfsic: read_block() with unaligned bytenr %llu\n", 1642 "btrfsic: read_block() with unaligned bytenr %llu\n",
1643 block_ctx->dev_bytenr); 1643 block_ctx->dev_bytenr);
1644 return -1; 1644 return -1;
1645 } 1645 }
1646 1646
1647 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> 1647 num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
1648 PAGE_CACHE_SHIFT; 1648 PAGE_SHIFT;
1649 block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) + 1649 block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
1650 sizeof(*block_ctx->pagev)) * 1650 sizeof(*block_ctx->pagev)) *
1651 num_pages, GFP_NOFS); 1651 num_pages, GFP_NOFS);
@@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1676 1676
1677 for (j = i; j < num_pages; j++) { 1677 for (j = i; j < num_pages; j++) {
1678 ret = bio_add_page(bio, block_ctx->pagev[j], 1678 ret = bio_add_page(bio, block_ctx->pagev[j],
1679 PAGE_CACHE_SIZE, 0); 1679 PAGE_SIZE, 0);
1680 if (PAGE_CACHE_SIZE != ret) 1680 if (PAGE_SIZE != ret)
1681 break; 1681 break;
1682 } 1682 }
1683 if (j == i) { 1683 if (j == i) {
@@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1693 return -1; 1693 return -1;
1694 } 1694 }
1695 bio_put(bio); 1695 bio_put(bio);
1696 dev_bytenr += (j - i) * PAGE_CACHE_SIZE; 1696 dev_bytenr += (j - i) * PAGE_SIZE;
1697 i = j; 1697 i = j;
1698 } 1698 }
1699 for (i = 0; i < num_pages; i++) { 1699 for (i = 0; i < num_pages; i++) {
@@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
1769 u32 crc = ~(u32)0; 1769 u32 crc = ~(u32)0;
1770 unsigned int i; 1770 unsigned int i;
1771 1771
1772 if (num_pages * PAGE_CACHE_SIZE < state->metablock_size) 1772 if (num_pages * PAGE_SIZE < state->metablock_size)
1773 return 1; /* not metadata */ 1773 return 1; /* not metadata */
1774 num_pages = state->metablock_size >> PAGE_CACHE_SHIFT; 1774 num_pages = state->metablock_size >> PAGE_SHIFT;
1775 h = (struct btrfs_header *)datav[0]; 1775 h = (struct btrfs_header *)datav[0];
1776 1776
1777 if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE)) 1777 if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
1779 1779
1780 for (i = 0; i < num_pages; i++) { 1780 for (i = 0; i < num_pages; i++) {
1781 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE); 1781 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
1782 size_t sublen = i ? PAGE_CACHE_SIZE : 1782 size_t sublen = i ? PAGE_SIZE :
1783 (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE); 1783 (PAGE_SIZE - BTRFS_CSUM_SIZE);
1784 1784
1785 crc = btrfs_crc32c(crc, data, sublen); 1785 crc = btrfs_crc32c(crc, data, sublen);
1786 } 1786 }
@@ -1826,14 +1826,14 @@ again:
1826 if (block->is_superblock) { 1826 if (block->is_superblock) {
1827 bytenr = btrfs_super_bytenr((struct btrfs_super_block *) 1827 bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
1828 mapped_datav[0]); 1828 mapped_datav[0]);
1829 if (num_pages * PAGE_CACHE_SIZE < 1829 if (num_pages * PAGE_SIZE <
1830 BTRFS_SUPER_INFO_SIZE) { 1830 BTRFS_SUPER_INFO_SIZE) {
1831 printk(KERN_INFO 1831 printk(KERN_INFO
1832 "btrfsic: cannot work with too short bios!\n"); 1832 "btrfsic: cannot work with too short bios!\n");
1833 return; 1833 return;
1834 } 1834 }
1835 is_metadata = 1; 1835 is_metadata = 1;
1836 BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1)); 1836 BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
1837 processed_len = BTRFS_SUPER_INFO_SIZE; 1837 processed_len = BTRFS_SUPER_INFO_SIZE;
1838 if (state->print_mask & 1838 if (state->print_mask &
1839 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { 1839 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -1844,7 +1844,7 @@ again:
1844 } 1844 }
1845 if (is_metadata) { 1845 if (is_metadata) {
1846 if (!block->is_superblock) { 1846 if (!block->is_superblock) {
1847 if (num_pages * PAGE_CACHE_SIZE < 1847 if (num_pages * PAGE_SIZE <
1848 state->metablock_size) { 1848 state->metablock_size) {
1849 printk(KERN_INFO 1849 printk(KERN_INFO
1850 "btrfsic: cannot work with too short bios!\n"); 1850 "btrfsic: cannot work with too short bios!\n");
@@ -1880,7 +1880,7 @@ again:
1880 } 1880 }
1881 block->logical_bytenr = bytenr; 1881 block->logical_bytenr = bytenr;
1882 } else { 1882 } else {
1883 if (num_pages * PAGE_CACHE_SIZE < 1883 if (num_pages * PAGE_SIZE <
1884 state->datablock_size) { 1884 state->datablock_size) {
1885 printk(KERN_INFO 1885 printk(KERN_INFO
1886 "btrfsic: cannot work with too short bios!\n"); 1886 "btrfsic: cannot work with too short bios!\n");
@@ -2013,7 +2013,7 @@ again:
2013 block->logical_bytenr = bytenr; 2013 block->logical_bytenr = bytenr;
2014 block->is_metadata = 1; 2014 block->is_metadata = 1;
2015 if (block->is_superblock) { 2015 if (block->is_superblock) {
2016 BUG_ON(PAGE_CACHE_SIZE != 2016 BUG_ON(PAGE_SIZE !=
2017 BTRFS_SUPER_INFO_SIZE); 2017 BTRFS_SUPER_INFO_SIZE);
2018 ret = btrfsic_process_written_superblock( 2018 ret = btrfsic_process_written_superblock(
2019 state, 2019 state,
@@ -2172,8 +2172,8 @@ again:
2172continue_loop: 2172continue_loop:
2173 BUG_ON(!processed_len); 2173 BUG_ON(!processed_len);
2174 dev_bytenr += processed_len; 2174 dev_bytenr += processed_len;
2175 mapped_datav += processed_len >> PAGE_CACHE_SHIFT; 2175 mapped_datav += processed_len >> PAGE_SHIFT;
2176 num_pages -= processed_len >> PAGE_CACHE_SHIFT; 2176 num_pages -= processed_len >> PAGE_SHIFT;
2177 goto again; 2177 goto again;
2178} 2178}
2179 2179
@@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
2954 goto leave; 2954 goto leave;
2955 cur_bytenr = dev_bytenr; 2955 cur_bytenr = dev_bytenr;
2956 for (i = 0; i < bio->bi_vcnt; i++) { 2956 for (i = 0; i < bio->bi_vcnt; i++) {
2957 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); 2957 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
2958 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); 2958 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
2959 if (!mapped_datav[i]) { 2959 if (!mapped_datav[i]) {
2960 while (i > 0) { 2960 while (i > 0) {
@@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root,
3037 struct list_head *dev_head = &fs_devices->devices; 3037 struct list_head *dev_head = &fs_devices->devices;
3038 struct btrfs_device *device; 3038 struct btrfs_device *device;
3039 3039
3040 if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) { 3040 if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
3041 printk(KERN_INFO 3041 printk(KERN_INFO
3042 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", 3042 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
3043 root->nodesize, PAGE_CACHE_SIZE); 3043 root->nodesize, PAGE_SIZE);
3044 return -1; 3044 return -1;
3045 } 3045 }
3046 if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) { 3046 if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
3047 printk(KERN_INFO 3047 printk(KERN_INFO
3048 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", 3048 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
3049 root->sectorsize, PAGE_CACHE_SIZE); 3049 root->sectorsize, PAGE_SIZE);
3050 return -1; 3050 return -1;
3051 } 3051 }
3052 state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 3052 state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3346cd8f9910..ff61a41ac90b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode,
119 csum = ~(u32)0; 119 csum = ~(u32)0;
120 120
121 kaddr = kmap_atomic(page); 121 kaddr = kmap_atomic(page);
122 csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE); 122 csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
123 btrfs_csum_final(csum, (char *)&csum); 123 btrfs_csum_final(csum, (char *)&csum);
124 kunmap_atomic(kaddr); 124 kunmap_atomic(kaddr);
125 125
@@ -190,7 +190,7 @@ csum_failed:
190 for (index = 0; index < cb->nr_pages; index++) { 190 for (index = 0; index < cb->nr_pages; index++) {
191 page = cb->compressed_pages[index]; 191 page = cb->compressed_pages[index];
192 page->mapping = NULL; 192 page->mapping = NULL;
193 page_cache_release(page); 193 put_page(page);
194 } 194 }
195 195
196 /* do io completion on the original bio */ 196 /* do io completion on the original bio */
@@ -224,8 +224,8 @@ out:
224static noinline void end_compressed_writeback(struct inode *inode, 224static noinline void end_compressed_writeback(struct inode *inode,
225 const struct compressed_bio *cb) 225 const struct compressed_bio *cb)
226{ 226{
227 unsigned long index = cb->start >> PAGE_CACHE_SHIFT; 227 unsigned long index = cb->start >> PAGE_SHIFT;
228 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT; 228 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
229 struct page *pages[16]; 229 struct page *pages[16];
230 unsigned long nr_pages = end_index - index + 1; 230 unsigned long nr_pages = end_index - index + 1;
231 int i; 231 int i;
@@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
247 if (cb->errors) 247 if (cb->errors)
248 SetPageError(pages[i]); 248 SetPageError(pages[i]);
249 end_page_writeback(pages[i]); 249 end_page_writeback(pages[i]);
250 page_cache_release(pages[i]); 250 put_page(pages[i]);
251 } 251 }
252 nr_pages -= ret; 252 nr_pages -= ret;
253 index += ret; 253 index += ret;
@@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio)
304 for (index = 0; index < cb->nr_pages; index++) { 304 for (index = 0; index < cb->nr_pages; index++) {
305 page = cb->compressed_pages[index]; 305 page = cb->compressed_pages[index];
306 page->mapping = NULL; 306 page->mapping = NULL;
307 page_cache_release(page); 307 put_page(page);
308 } 308 }
309 309
310 /* finally free the cb struct */ 310 /* finally free the cb struct */
@@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
341 int ret; 341 int ret;
342 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 342 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
343 343
344 WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); 344 WARN_ON(start & ((u64)PAGE_SIZE - 1));
345 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); 345 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
346 if (!cb) 346 if (!cb)
347 return -ENOMEM; 347 return -ENOMEM;
@@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
374 page->mapping = inode->i_mapping; 374 page->mapping = inode->i_mapping;
375 if (bio->bi_iter.bi_size) 375 if (bio->bi_iter.bi_size)
376 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, 376 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
377 PAGE_CACHE_SIZE, 377 PAGE_SIZE,
378 bio, 0); 378 bio, 0);
379 else 379 else
380 ret = 0; 380 ret = 0;
381 381
382 page->mapping = NULL; 382 page->mapping = NULL;
383 if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < 383 if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
384 PAGE_CACHE_SIZE) { 384 PAGE_SIZE) {
385 bio_get(bio); 385 bio_get(bio);
386 386
387 /* 387 /*
@@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
410 BUG_ON(!bio); 410 BUG_ON(!bio);
411 bio->bi_private = cb; 411 bio->bi_private = cb;
412 bio->bi_end_io = end_compressed_bio_write; 412 bio->bi_end_io = end_compressed_bio_write;
413 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 413 bio_add_page(bio, page, PAGE_SIZE, 0);
414 } 414 }
415 if (bytes_left < PAGE_CACHE_SIZE) { 415 if (bytes_left < PAGE_SIZE) {
416 btrfs_info(BTRFS_I(inode)->root->fs_info, 416 btrfs_info(BTRFS_I(inode)->root->fs_info,
417 "bytes left %lu compress len %lu nr %lu", 417 "bytes left %lu compress len %lu nr %lu",
418 bytes_left, cb->compressed_len, cb->nr_pages); 418 bytes_left, cb->compressed_len, cb->nr_pages);
419 } 419 }
420 bytes_left -= PAGE_CACHE_SIZE; 420 bytes_left -= PAGE_SIZE;
421 first_byte += PAGE_CACHE_SIZE; 421 first_byte += PAGE_SIZE;
422 cond_resched(); 422 cond_resched();
423 } 423 }
424 bio_get(bio); 424 bio_get(bio);
@@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
457 int misses = 0; 457 int misses = 0;
458 458
459 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; 459 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
460 last_offset = (page_offset(page) + PAGE_CACHE_SIZE); 460 last_offset = (page_offset(page) + PAGE_SIZE);
461 em_tree = &BTRFS_I(inode)->extent_tree; 461 em_tree = &BTRFS_I(inode)->extent_tree;
462 tree = &BTRFS_I(inode)->io_tree; 462 tree = &BTRFS_I(inode)->io_tree;
463 463
464 if (isize == 0) 464 if (isize == 0)
465 return 0; 465 return 0;
466 466
467 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 467 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
468 468
469 while (last_offset < compressed_end) { 469 while (last_offset < compressed_end) {
470 pg_index = last_offset >> PAGE_CACHE_SHIFT; 470 pg_index = last_offset >> PAGE_SHIFT;
471 471
472 if (pg_index > end_index) 472 if (pg_index > end_index)
473 break; 473 break;
@@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
488 break; 488 break;
489 489
490 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { 490 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
491 page_cache_release(page); 491 put_page(page);
492 goto next; 492 goto next;
493 } 493 }
494 494
495 end = last_offset + PAGE_CACHE_SIZE - 1; 495 end = last_offset + PAGE_SIZE - 1;
496 /* 496 /*
497 * at this point, we have a locked page in the page cache 497 * at this point, we have a locked page in the page cache
498 * for these bytes in the file. But, we have to make 498 * for these bytes in the file. But, we have to make
@@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode,
502 lock_extent(tree, last_offset, end); 502 lock_extent(tree, last_offset, end);
503 read_lock(&em_tree->lock); 503 read_lock(&em_tree->lock);
504 em = lookup_extent_mapping(em_tree, last_offset, 504 em = lookup_extent_mapping(em_tree, last_offset,
505 PAGE_CACHE_SIZE); 505 PAGE_SIZE);
506 read_unlock(&em_tree->lock); 506 read_unlock(&em_tree->lock);
507 507
508 if (!em || last_offset < em->start || 508 if (!em || last_offset < em->start ||
509 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || 509 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
510 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { 510 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
511 free_extent_map(em); 511 free_extent_map(em);
512 unlock_extent(tree, last_offset, end); 512 unlock_extent(tree, last_offset, end);
513 unlock_page(page); 513 unlock_page(page);
514 page_cache_release(page); 514 put_page(page);
515 break; 515 break;
516 } 516 }
517 free_extent_map(em); 517 free_extent_map(em);
518 518
519 if (page->index == end_index) { 519 if (page->index == end_index) {
520 char *userpage; 520 char *userpage;
521 size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); 521 size_t zero_offset = isize & (PAGE_SIZE - 1);
522 522
523 if (zero_offset) { 523 if (zero_offset) {
524 int zeros; 524 int zeros;
525 zeros = PAGE_CACHE_SIZE - zero_offset; 525 zeros = PAGE_SIZE - zero_offset;
526 userpage = kmap_atomic(page); 526 userpage = kmap_atomic(page);
527 memset(userpage + zero_offset, 0, zeros); 527 memset(userpage + zero_offset, 0, zeros);
528 flush_dcache_page(page); 528 flush_dcache_page(page);
@@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
531 } 531 }
532 532
533 ret = bio_add_page(cb->orig_bio, page, 533 ret = bio_add_page(cb->orig_bio, page,
534 PAGE_CACHE_SIZE, 0); 534 PAGE_SIZE, 0);
535 535
536 if (ret == PAGE_CACHE_SIZE) { 536 if (ret == PAGE_SIZE) {
537 nr_pages++; 537 nr_pages++;
538 page_cache_release(page); 538 put_page(page);
539 } else { 539 } else {
540 unlock_extent(tree, last_offset, end); 540 unlock_extent(tree, last_offset, end);
541 unlock_page(page); 541 unlock_page(page);
542 page_cache_release(page); 542 put_page(page);
543 break; 543 break;
544 } 544 }
545next: 545next:
546 last_offset += PAGE_CACHE_SIZE; 546 last_offset += PAGE_SIZE;
547 } 547 }
548 return 0; 548 return 0;
549} 549}
@@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
567 struct extent_map_tree *em_tree; 567 struct extent_map_tree *em_tree;
568 struct compressed_bio *cb; 568 struct compressed_bio *cb;
569 struct btrfs_root *root = BTRFS_I(inode)->root; 569 struct btrfs_root *root = BTRFS_I(inode)->root;
570 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; 570 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
571 unsigned long compressed_len; 571 unsigned long compressed_len;
572 unsigned long nr_pages; 572 unsigned long nr_pages;
573 unsigned long pg_index; 573 unsigned long pg_index;
@@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
589 read_lock(&em_tree->lock); 589 read_lock(&em_tree->lock);
590 em = lookup_extent_mapping(em_tree, 590 em = lookup_extent_mapping(em_tree,
591 page_offset(bio->bi_io_vec->bv_page), 591 page_offset(bio->bi_io_vec->bv_page),
592 PAGE_CACHE_SIZE); 592 PAGE_SIZE);
593 read_unlock(&em_tree->lock); 593 read_unlock(&em_tree->lock);
594 if (!em) 594 if (!em)
595 return -EIO; 595 return -EIO;
@@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
617 cb->compress_type = extent_compress_type(bio_flags); 617 cb->compress_type = extent_compress_type(bio_flags);
618 cb->orig_bio = bio; 618 cb->orig_bio = bio;
619 619
620 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE); 620 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
621 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), 621 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
622 GFP_NOFS); 622 GFP_NOFS);
623 if (!cb->compressed_pages) 623 if (!cb->compressed_pages)
@@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
640 add_ra_bio_pages(inode, em_start + em_len, cb); 640 add_ra_bio_pages(inode, em_start + em_len, cb);
641 641
642 /* include any pages we added in add_ra-bio_pages */ 642 /* include any pages we added in add_ra-bio_pages */
643 uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; 643 uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
644 cb->len = uncompressed_len; 644 cb->len = uncompressed_len;
645 645
646 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); 646 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
653 for (pg_index = 0; pg_index < nr_pages; pg_index++) { 653 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
654 page = cb->compressed_pages[pg_index]; 654 page = cb->compressed_pages[pg_index];
655 page->mapping = inode->i_mapping; 655 page->mapping = inode->i_mapping;
656 page->index = em_start >> PAGE_CACHE_SHIFT; 656 page->index = em_start >> PAGE_SHIFT;
657 657
658 if (comp_bio->bi_iter.bi_size) 658 if (comp_bio->bi_iter.bi_size)
659 ret = tree->ops->merge_bio_hook(READ, page, 0, 659 ret = tree->ops->merge_bio_hook(READ, page, 0,
660 PAGE_CACHE_SIZE, 660 PAGE_SIZE,
661 comp_bio, 0); 661 comp_bio, 0);
662 else 662 else
663 ret = 0; 663 ret = 0;
664 664
665 page->mapping = NULL; 665 page->mapping = NULL;
666 if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < 666 if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
667 PAGE_CACHE_SIZE) { 667 PAGE_SIZE) {
668 bio_get(comp_bio); 668 bio_get(comp_bio);
669 669
670 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 670 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
702 comp_bio->bi_private = cb; 702 comp_bio->bi_private = cb;
703 comp_bio->bi_end_io = end_compressed_bio_read; 703 comp_bio->bi_end_io = end_compressed_bio_read;
704 704
705 bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); 705 bio_add_page(comp_bio, page, PAGE_SIZE, 0);
706 } 706 }
707 cur_disk_byte += PAGE_CACHE_SIZE; 707 cur_disk_byte += PAGE_SIZE;
708 } 708 }
709 bio_get(comp_bio); 709 bio_get(comp_bio);
710 710
@@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1013 1013
1014 /* copy bytes from the working buffer into the pages */ 1014 /* copy bytes from the working buffer into the pages */
1015 while (working_bytes > 0) { 1015 while (working_bytes > 0) {
1016 bytes = min(PAGE_CACHE_SIZE - *pg_offset, 1016 bytes = min(PAGE_SIZE - *pg_offset,
1017 PAGE_CACHE_SIZE - buf_offset); 1017 PAGE_SIZE - buf_offset);
1018 bytes = min(bytes, working_bytes); 1018 bytes = min(bytes, working_bytes);
1019 kaddr = kmap_atomic(page_out); 1019 kaddr = kmap_atomic(page_out);
1020 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); 1020 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1027 current_buf_start += bytes; 1027 current_buf_start += bytes;
1028 1028
1029 /* check if we need to pick another page */ 1029 /* check if we need to pick another page */
1030 if (*pg_offset == PAGE_CACHE_SIZE) { 1030 if (*pg_offset == PAGE_SIZE) {
1031 (*pg_index)++; 1031 (*pg_index)++;
1032 if (*pg_index >= vcnt) 1032 if (*pg_index >= vcnt)
1033 return 0; 1033 return 0;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 77592931ab4f..ec7928a27aaa 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -19,6 +19,7 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/rbtree.h> 21#include <linux/rbtree.h>
22#include <linux/vmalloc.h>
22#include "ctree.h" 23#include "ctree.h"
23#include "disk-io.h" 24#include "disk-io.h"
24#include "transaction.h" 25#include "transaction.h"
@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5361 goto out; 5362 goto out;
5362 } 5363 }
5363 5364
5364 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL); 5365 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5365 if (!tmp_buf) { 5366 if (!tmp_buf) {
5366 ret = -ENOMEM; 5367 tmp_buf = vmalloc(left_root->nodesize);
5367 goto out; 5368 if (!tmp_buf) {
5369 ret = -ENOMEM;
5370 goto out;
5371 }
5368 } 5372 }
5369 5373
5370 left_path->search_commit_root = 1; 5374 left_path->search_commit_root = 1;
@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5565out: 5569out:
5566 btrfs_free_path(left_path); 5570 btrfs_free_path(left_path);
5567 btrfs_free_path(right_path); 5571 btrfs_free_path(right_path);
5568 kfree(tmp_buf); 5572 kvfree(tmp_buf);
5569 return ret; 5573 return ret;
5570} 5574}
5571 5575
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a1d6652e0c47..26bcb487f958 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
394 dev_replace->cursor_right = 0; 394 dev_replace->cursor_right = 0;
395 dev_replace->is_valid = 1; 395 dev_replace->is_valid = 1;
396 dev_replace->item_needs_writeback = 1; 396 dev_replace->item_needs_writeback = 1;
397 atomic64_set(&dev_replace->num_write_errors, 0);
398 atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
397 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; 399 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
398 btrfs_dev_replace_unlock(dev_replace, 1); 400 btrfs_dev_replace_unlock(dev_replace, 1);
399 401
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4b02591b0301..4e47849d7427 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -25,7 +25,6 @@
25#include <linux/buffer_head.h> 25#include <linux/buffer_head.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/freezer.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
30#include <linux/migrate.h> 29#include <linux/migrate.h>
31#include <linux/ratelimit.h> 30#include <linux/ratelimit.h>
@@ -303,7 +302,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
303 err = map_private_extent_buffer(buf, offset, 32, 302 err = map_private_extent_buffer(buf, offset, 32,
304 &kaddr, &map_start, &map_len); 303 &kaddr, &map_start, &map_len);
305 if (err) 304 if (err)
306 return 1; 305 return err;
307 cur_len = min(len, map_len - (offset - map_start)); 306 cur_len = min(len, map_len - (offset - map_start));
308 crc = btrfs_csum_data(kaddr + offset - map_start, 307 crc = btrfs_csum_data(kaddr + offset - map_start,
309 crc, cur_len); 308 crc, cur_len);
@@ -313,7 +312,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
313 if (csum_size > sizeof(inline_result)) { 312 if (csum_size > sizeof(inline_result)) {
314 result = kzalloc(csum_size, GFP_NOFS); 313 result = kzalloc(csum_size, GFP_NOFS);
315 if (!result) 314 if (!result)
316 return 1; 315 return -ENOMEM;
317 } else { 316 } else {
318 result = (char *)&inline_result; 317 result = (char *)&inline_result;
319 } 318 }
@@ -334,7 +333,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
334 val, found, btrfs_header_level(buf)); 333 val, found, btrfs_header_level(buf));
335 if (result != (char *)&inline_result) 334 if (result != (char *)&inline_result)
336 kfree(result); 335 kfree(result);
337 return 1; 336 return -EUCLEAN;
338 } 337 }
339 } else { 338 } else {
340 write_extent_buffer(buf, result, 0, csum_size); 339 write_extent_buffer(buf, result, 0, csum_size);
@@ -513,11 +512,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
513 eb = (struct extent_buffer *)page->private; 512 eb = (struct extent_buffer *)page->private;
514 if (page != eb->pages[0]) 513 if (page != eb->pages[0])
515 return 0; 514 return 0;
515
516 found_start = btrfs_header_bytenr(eb); 516 found_start = btrfs_header_bytenr(eb);
517 if (WARN_ON(found_start != start || !PageUptodate(page))) 517 /*
518 return 0; 518 * Please do not consolidate these warnings into a single if.
519 csum_tree_block(fs_info, eb, 0); 519 * It is useful to know what went wrong.
520 return 0; 520 */
521 if (WARN_ON(found_start != start))
522 return -EUCLEAN;
523 if (WARN_ON(!PageUptodate(page)))
524 return -EUCLEAN;
525
526 ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
527 btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
528
529 return csum_tree_block(fs_info, eb, 0);
521} 530}
522 531
523static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, 532static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
@@ -661,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
661 eb, found_level); 670 eb, found_level);
662 671
663 ret = csum_tree_block(fs_info, eb, 1); 672 ret = csum_tree_block(fs_info, eb, 1);
664 if (ret) { 673 if (ret)
665 ret = -EIO;
666 goto err; 674 goto err;
667 }
668 675
669 /* 676 /*
670 * If this is a leaf block and it is corrupt, set the corrupt bit so 677 * If this is a leaf block and it is corrupt, set the corrupt bit so
@@ -1055,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
1055 (unsigned long long)page_offset(page)); 1062 (unsigned long long)page_offset(page));
1056 ClearPagePrivate(page); 1063 ClearPagePrivate(page);
1057 set_page_private(page, 0); 1064 set_page_private(page, 0);
1058 page_cache_release(page); 1065 put_page(page);
1059 } 1066 }
1060} 1067}
1061 1068
@@ -1757,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1757 if (err) 1764 if (err)
1758 return err; 1765 return err;
1759 1766
1760 bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; 1767 bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
1761 bdi->congested_fn = btrfs_congested_fn; 1768 bdi->congested_fn = btrfs_congested_fn;
1762 bdi->congested_data = info; 1769 bdi->congested_data = info;
1763 bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; 1770 bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@ -1831,7 +1838,7 @@ static int cleaner_kthread(void *arg)
1831 */ 1838 */
1832 btrfs_delete_unused_bgs(root->fs_info); 1839 btrfs_delete_unused_bgs(root->fs_info);
1833sleep: 1840sleep:
1834 if (!try_to_freeze() && !again) { 1841 if (!again) {
1835 set_current_state(TASK_INTERRUPTIBLE); 1842 set_current_state(TASK_INTERRUPTIBLE);
1836 if (!kthread_should_stop()) 1843 if (!kthread_should_stop())
1837 schedule(); 1844 schedule();
@@ -1921,14 +1928,12 @@ sleep:
1921 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, 1928 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1922 &root->fs_info->fs_state))) 1929 &root->fs_info->fs_state)))
1923 btrfs_cleanup_transaction(root); 1930 btrfs_cleanup_transaction(root);
1924 if (!try_to_freeze()) { 1931 set_current_state(TASK_INTERRUPTIBLE);
1925 set_current_state(TASK_INTERRUPTIBLE); 1932 if (!kthread_should_stop() &&
1926 if (!kthread_should_stop() && 1933 (!btrfs_transaction_blocked(root->fs_info) ||
1927 (!btrfs_transaction_blocked(root->fs_info) || 1934 cannot_commit))
1928 cannot_commit)) 1935 schedule_timeout(delay);
1929 schedule_timeout(delay); 1936 __set_current_state(TASK_RUNNING);
1930 __set_current_state(TASK_RUNNING);
1931 }
1932 } while (!kthread_should_stop()); 1937 } while (!kthread_should_stop());
1933 return 0; 1938 return 0;
1934} 1939}
@@ -2537,7 +2542,7 @@ int open_ctree(struct super_block *sb,
2537 err = ret; 2542 err = ret;
2538 goto fail_bdi; 2543 goto fail_bdi;
2539 } 2544 }
2540 fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * 2545 fs_info->dirty_metadata_batch = PAGE_SIZE *
2541 (1 + ilog2(nr_cpu_ids)); 2546 (1 + ilog2(nr_cpu_ids));
2542 2547
2543 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); 2548 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
@@ -2782,7 +2787,7 @@ int open_ctree(struct super_block *sb,
2782 * flag our filesystem as having big metadata blocks if 2787 * flag our filesystem as having big metadata blocks if
2783 * they are bigger than the page size 2788 * they are bigger than the page size
2784 */ 2789 */
2785 if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) { 2790 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2786 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) 2791 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2787 printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); 2792 printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
2788 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; 2793 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
@@ -2832,7 +2837,7 @@ int open_ctree(struct super_block *sb,
2832 2837
2833 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); 2838 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2834 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 2839 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2835 SZ_4M / PAGE_CACHE_SIZE); 2840 SZ_4M / PAGE_SIZE);
2836 2841
2837 tree_root->nodesize = nodesize; 2842 tree_root->nodesize = nodesize;
2838 tree_root->sectorsize = sectorsize; 2843 tree_root->sectorsize = sectorsize;
@@ -4071,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4071 ret = -EINVAL; 4076 ret = -EINVAL;
4072 } 4077 }
4073 /* Only PAGE SIZE is supported yet */ 4078 /* Only PAGE SIZE is supported yet */
4074 if (sectorsize != PAGE_CACHE_SIZE) { 4079 if (sectorsize != PAGE_SIZE) {
4075 printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n", 4080 printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
4076 sectorsize, PAGE_CACHE_SIZE); 4081 sectorsize, PAGE_SIZE);
4077 ret = -EINVAL; 4082 ret = -EINVAL;
4078 } 4083 }
4079 if (!is_power_of_2(nodesize) || nodesize < sectorsize || 4084 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 53e12977bfd0..84e060eb0de8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3452,7 +3452,7 @@ again:
3452 num_pages = 1; 3452 num_pages = 1;
3453 3453
3454 num_pages *= 16; 3454 num_pages *= 16;
3455 num_pages *= PAGE_CACHE_SIZE; 3455 num_pages *= PAGE_SIZE;
3456 3456
3457 ret = btrfs_check_data_free_space(inode, 0, num_pages); 3457 ret = btrfs_check_data_free_space(inode, 0, num_pages);
3458 if (ret) 3458 if (ret)
@@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4639 loops = 0; 4639 loops = 0;
4640 while (delalloc_bytes && loops < 3) { 4640 while (delalloc_bytes && loops < 3) {
4641 max_reclaim = min(delalloc_bytes, to_reclaim); 4641 max_reclaim = min(delalloc_bytes, to_reclaim);
4642 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT; 4642 nr_pages = max_reclaim >> PAGE_SHIFT;
4643 btrfs_writeback_inodes_sb_nr(root, nr_pages, items); 4643 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4644 /* 4644 /*
4645 * We need to wait for the async pages to actually start before 4645 * We need to wait for the async pages to actually start before
@@ -9386,15 +9386,23 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9386 u64 dev_min = 1; 9386 u64 dev_min = 1;
9387 u64 dev_nr = 0; 9387 u64 dev_nr = 0;
9388 u64 target; 9388 u64 target;
9389 int debug;
9389 int index; 9390 int index;
9390 int full = 0; 9391 int full = 0;
9391 int ret = 0; 9392 int ret = 0;
9392 9393
9394 debug = btrfs_test_opt(root, ENOSPC_DEBUG);
9395
9393 block_group = btrfs_lookup_block_group(root->fs_info, bytenr); 9396 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9394 9397
9395 /* odd, couldn't find the block group, leave it alone */ 9398 /* odd, couldn't find the block group, leave it alone */
9396 if (!block_group) 9399 if (!block_group) {
9400 if (debug)
9401 btrfs_warn(root->fs_info,
9402 "can't find block group for bytenr %llu",
9403 bytenr);
9397 return -1; 9404 return -1;
9405 }
9398 9406
9399 min_free = btrfs_block_group_used(&block_group->item); 9407 min_free = btrfs_block_group_used(&block_group->item);
9400 9408
@@ -9448,8 +9456,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9448 * this is just a balance, so if we were marked as full 9456 * this is just a balance, so if we were marked as full
9449 * we know there is no space for a new chunk 9457 * we know there is no space for a new chunk
9450 */ 9458 */
9451 if (full) 9459 if (full) {
9460 if (debug)
9461 btrfs_warn(root->fs_info,
9462 "no space to alloc new chunk for block group %llu",
9463 block_group->key.objectid);
9452 goto out; 9464 goto out;
9465 }
9453 9466
9454 index = get_block_group_index(block_group); 9467 index = get_block_group_index(block_group);
9455 } 9468 }
@@ -9496,6 +9509,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9496 ret = -1; 9509 ret = -1;
9497 } 9510 }
9498 } 9511 }
9512 if (debug && ret == -1)
9513 btrfs_warn(root->fs_info,
9514 "no space to allocate a new chunk for block group %llu",
9515 block_group->key.objectid);
9499 mutex_unlock(&root->fs_info->chunk_mutex); 9516 mutex_unlock(&root->fs_info->chunk_mutex);
9500 btrfs_end_transaction(trans, root); 9517 btrfs_end_transaction(trans, root);
9501out: 9518out:
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c8597d98..d247fc0eea19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1363 1363
1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) 1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1365{ 1365{
1366 unsigned long index = start >> PAGE_CACHE_SHIFT; 1366 unsigned long index = start >> PAGE_SHIFT;
1367 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1367 unsigned long end_index = end >> PAGE_SHIFT;
1368 struct page *page; 1368 struct page *page;
1369 1369
1370 while (index <= end_index) { 1370 while (index <= end_index) {
1371 page = find_get_page(inode->i_mapping, index); 1371 page = find_get_page(inode->i_mapping, index);
1372 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1372 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1373 clear_page_dirty_for_io(page); 1373 clear_page_dirty_for_io(page);
1374 page_cache_release(page); 1374 put_page(page);
1375 index++; 1375 index++;
1376 } 1376 }
1377} 1377}
1378 1378
1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) 1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1380{ 1380{
1381 unsigned long index = start >> PAGE_CACHE_SHIFT; 1381 unsigned long index = start >> PAGE_SHIFT;
1382 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1382 unsigned long end_index = end >> PAGE_SHIFT;
1383 struct page *page; 1383 struct page *page;
1384 1384
1385 while (index <= end_index) { 1385 while (index <= end_index) {
@@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1387 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1387 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1388 __set_page_dirty_nobuffers(page); 1388 __set_page_dirty_nobuffers(page);
1389 account_page_redirty(page); 1389 account_page_redirty(page);
1390 page_cache_release(page); 1390 put_page(page);
1391 index++; 1391 index++;
1392 } 1392 }
1393} 1393}
@@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1397 */ 1397 */
1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) 1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1399{ 1399{
1400 unsigned long index = start >> PAGE_CACHE_SHIFT; 1400 unsigned long index = start >> PAGE_SHIFT;
1401 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1401 unsigned long end_index = end >> PAGE_SHIFT;
1402 struct page *page; 1402 struct page *page;
1403 1403
1404 while (index <= end_index) { 1404 while (index <= end_index) {
1405 page = find_get_page(tree->mapping, index); 1405 page = find_get_page(tree->mapping, index);
1406 BUG_ON(!page); /* Pages should be in the extent_io_tree */ 1406 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1407 set_page_writeback(page); 1407 set_page_writeback(page);
1408 page_cache_release(page); 1408 put_page(page);
1409 index++; 1409 index++;
1410 } 1410 }
1411} 1411}
@@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
1556{ 1556{
1557 int ret; 1557 int ret;
1558 struct page *pages[16]; 1558 struct page *pages[16];
1559 unsigned long index = start >> PAGE_CACHE_SHIFT; 1559 unsigned long index = start >> PAGE_SHIFT;
1560 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1560 unsigned long end_index = end >> PAGE_SHIFT;
1561 unsigned long nr_pages = end_index - index + 1; 1561 unsigned long nr_pages = end_index - index + 1;
1562 int i; 1562 int i;
1563 1563
@@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
1571 for (i = 0; i < ret; i++) { 1571 for (i = 0; i < ret; i++) {
1572 if (pages[i] != locked_page) 1572 if (pages[i] != locked_page)
1573 unlock_page(pages[i]); 1573 unlock_page(pages[i]);
1574 page_cache_release(pages[i]); 1574 put_page(pages[i]);
1575 } 1575 }
1576 nr_pages -= ret; 1576 nr_pages -= ret;
1577 index += ret; 1577 index += ret;
@@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode,
1584 u64 delalloc_start, 1584 u64 delalloc_start,
1585 u64 delalloc_end) 1585 u64 delalloc_end)
1586{ 1586{
1587 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; 1587 unsigned long index = delalloc_start >> PAGE_SHIFT;
1588 unsigned long start_index = index; 1588 unsigned long start_index = index;
1589 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; 1589 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1590 unsigned long pages_locked = 0; 1590 unsigned long pages_locked = 0;
1591 struct page *pages[16]; 1591 struct page *pages[16];
1592 unsigned long nrpages; 1592 unsigned long nrpages;
@@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode,
1619 pages[i]->mapping != inode->i_mapping) { 1619 pages[i]->mapping != inode->i_mapping) {
1620 ret = -EAGAIN; 1620 ret = -EAGAIN;
1621 unlock_page(pages[i]); 1621 unlock_page(pages[i]);
1622 page_cache_release(pages[i]); 1622 put_page(pages[i]);
1623 goto done; 1623 goto done;
1624 } 1624 }
1625 } 1625 }
1626 page_cache_release(pages[i]); 1626 put_page(pages[i]);
1627 pages_locked++; 1627 pages_locked++;
1628 } 1628 }
1629 nrpages -= ret; 1629 nrpages -= ret;
@@ -1636,7 +1636,7 @@ done:
1636 __unlock_for_delalloc(inode, locked_page, 1636 __unlock_for_delalloc(inode, locked_page,
1637 delalloc_start, 1637 delalloc_start,
1638 ((u64)(start_index + pages_locked - 1)) << 1638 ((u64)(start_index + pages_locked - 1)) <<
1639 PAGE_CACHE_SHIFT); 1639 PAGE_SHIFT);
1640 } 1640 }
1641 return ret; 1641 return ret;
1642} 1642}
@@ -1696,7 +1696,7 @@ again:
1696 free_extent_state(cached_state); 1696 free_extent_state(cached_state);
1697 cached_state = NULL; 1697 cached_state = NULL;
1698 if (!loops) { 1698 if (!loops) {
1699 max_bytes = PAGE_CACHE_SIZE; 1699 max_bytes = PAGE_SIZE;
1700 loops = 1; 1700 loops = 1;
1701 goto again; 1701 goto again;
1702 } else { 1702 } else {
@@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1735 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 1735 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1736 int ret; 1736 int ret;
1737 struct page *pages[16]; 1737 struct page *pages[16];
1738 unsigned long index = start >> PAGE_CACHE_SHIFT; 1738 unsigned long index = start >> PAGE_SHIFT;
1739 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 1739 unsigned long end_index = end >> PAGE_SHIFT;
1740 unsigned long nr_pages = end_index - index + 1; 1740 unsigned long nr_pages = end_index - index + 1;
1741 int i; 1741 int i;
1742 1742
@@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1757 SetPagePrivate2(pages[i]); 1757 SetPagePrivate2(pages[i]);
1758 1758
1759 if (pages[i] == locked_page) { 1759 if (pages[i] == locked_page) {
1760 page_cache_release(pages[i]); 1760 put_page(pages[i]);
1761 continue; 1761 continue;
1762 } 1762 }
1763 if (page_ops & PAGE_CLEAR_DIRTY) 1763 if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1770 end_page_writeback(pages[i]); 1770 end_page_writeback(pages[i]);
1771 if (page_ops & PAGE_UNLOCK) 1771 if (page_ops & PAGE_UNLOCK)
1772 unlock_page(pages[i]); 1772 unlock_page(pages[i]);
1773 page_cache_release(pages[i]); 1773 put_page(pages[i]);
1774 } 1774 }
1775 nr_pages -= ret; 1775 nr_pages -= ret;
1776 index += ret; 1776 index += ret;
@@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) 1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1962{ 1962{
1963 u64 start = page_offset(page); 1963 u64 start = page_offset(page);
1964 u64 end = start + PAGE_CACHE_SIZE - 1; 1964 u64 end = start + PAGE_SIZE - 1;
1965 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1965 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1966 SetPageUptodate(page); 1966 SetPageUptodate(page);
1967} 1967}
@@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2071 struct page *p = eb->pages[i]; 2071 struct page *p = eb->pages[i];
2072 2072
2073 ret = repair_io_failure(root->fs_info->btree_inode, start, 2073 ret = repair_io_failure(root->fs_info->btree_inode, start,
2074 PAGE_CACHE_SIZE, start, p, 2074 PAGE_SIZE, start, p,
2075 start - page_offset(p), mirror_num); 2075 start - page_offset(p), mirror_num);
2076 if (ret) 2076 if (ret)
2077 break; 2077 break;
2078 start += PAGE_CACHE_SIZE; 2078 start += PAGE_SIZE;
2079 } 2079 }
2080 2080
2081 return ret; 2081 return ret;
@@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio)
2466 * advance bv_offset and adjust bv_len to compensate. 2466 * advance bv_offset and adjust bv_len to compensate.
2467 * Print a warning for nonzero offsets, and an error 2467 * Print a warning for nonzero offsets, and an error
2468 * if they don't add up to a full page. */ 2468 * if they don't add up to a full page. */
2469 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { 2469 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2470 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) 2470 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2471 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2471 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2472 "partial page write in btrfs with offset %u and length %u", 2472 "partial page write in btrfs with offset %u and length %u",
2473 bvec->bv_offset, bvec->bv_len); 2473 bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio)
2541 * advance bv_offset and adjust bv_len to compensate. 2541 * advance bv_offset and adjust bv_len to compensate.
2542 * Print a warning for nonzero offsets, and an error 2542 * Print a warning for nonzero offsets, and an error
2543 * if they don't add up to a full page. */ 2543 * if they don't add up to a full page. */
2544 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { 2544 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2545 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) 2545 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2546 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2546 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2547 "partial page read in btrfs with offset %u and length %u", 2547 "partial page read in btrfs with offset %u and length %u",
2548 bvec->bv_offset, bvec->bv_len); 2548 bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio)
2598readpage_ok: 2598readpage_ok:
2599 if (likely(uptodate)) { 2599 if (likely(uptodate)) {
2600 loff_t i_size = i_size_read(inode); 2600 loff_t i_size = i_size_read(inode);
2601 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2601 pgoff_t end_index = i_size >> PAGE_SHIFT;
2602 unsigned off; 2602 unsigned off;
2603 2603
2604 /* Zero out the end if this page straddles i_size */ 2604 /* Zero out the end if this page straddles i_size */
2605 off = i_size & (PAGE_CACHE_SIZE-1); 2605 off = i_size & (PAGE_SIZE-1);
2606 if (page->index == end_index && off) 2606 if (page->index == end_index && off)
2607 zero_user_segment(page, off, PAGE_CACHE_SIZE); 2607 zero_user_segment(page, off, PAGE_SIZE);
2608 SetPageUptodate(page); 2608 SetPageUptodate(page);
2609 } else { 2609 } else {
2610 ClearPageUptodate(page); 2610 ClearPageUptodate(page);
@@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2768 struct bio *bio; 2768 struct bio *bio;
2769 int contig = 0; 2769 int contig = 0;
2770 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; 2770 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2771 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); 2771 size_t page_size = min_t(size_t, size, PAGE_SIZE);
2772 2772
2773 if (bio_ret && *bio_ret) { 2773 if (bio_ret && *bio_ret) {
2774 bio = *bio_ret; 2774 bio = *bio_ret;
@@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
2821{ 2821{
2822 if (!PagePrivate(page)) { 2822 if (!PagePrivate(page)) {
2823 SetPagePrivate(page); 2823 SetPagePrivate(page);
2824 page_cache_get(page); 2824 get_page(page);
2825 set_page_private(page, (unsigned long)eb); 2825 set_page_private(page, (unsigned long)eb);
2826 } else { 2826 } else {
2827 WARN_ON(page->private != (unsigned long)eb); 2827 WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page)
2832{ 2832{
2833 if (!PagePrivate(page)) { 2833 if (!PagePrivate(page)) {
2834 SetPagePrivate(page); 2834 SetPagePrivate(page);
2835 page_cache_get(page); 2835 get_page(page);
2836 set_page_private(page, EXTENT_PAGE_PRIVATE); 2836 set_page_private(page, EXTENT_PAGE_PRIVATE);
2837 } 2837 }
2838} 2838}
@@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree,
2880{ 2880{
2881 struct inode *inode = page->mapping->host; 2881 struct inode *inode = page->mapping->host;
2882 u64 start = page_offset(page); 2882 u64 start = page_offset(page);
2883 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2883 u64 page_end = start + PAGE_SIZE - 1;
2884 u64 end; 2884 u64 end;
2885 u64 cur = start; 2885 u64 cur = start;
2886 u64 extent_offset; 2886 u64 extent_offset;
@@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree,
2909 } 2909 }
2910 } 2910 }
2911 2911
2912 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { 2912 if (page->index == last_byte >> PAGE_SHIFT) {
2913 char *userpage; 2913 char *userpage;
2914 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); 2914 size_t zero_offset = last_byte & (PAGE_SIZE - 1);
2915 2915
2916 if (zero_offset) { 2916 if (zero_offset) {
2917 iosize = PAGE_CACHE_SIZE - zero_offset; 2917 iosize = PAGE_SIZE - zero_offset;
2918 userpage = kmap_atomic(page); 2918 userpage = kmap_atomic(page);
2919 memset(userpage + zero_offset, 0, iosize); 2919 memset(userpage + zero_offset, 0, iosize);
2920 flush_dcache_page(page); 2920 flush_dcache_page(page);
@@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree,
2922 } 2922 }
2923 } 2923 }
2924 while (cur <= end) { 2924 while (cur <= end) {
2925 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; 2925 unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
2926 bool force_bio_submit = false; 2926 bool force_bio_submit = false;
2927 2927
2928 if (cur >= last_byte) { 2928 if (cur >= last_byte) {
2929 char *userpage; 2929 char *userpage;
2930 struct extent_state *cached = NULL; 2930 struct extent_state *cached = NULL;
2931 2931
2932 iosize = PAGE_CACHE_SIZE - pg_offset; 2932 iosize = PAGE_SIZE - pg_offset;
2933 userpage = kmap_atomic(page); 2933 userpage = kmap_atomic(page);
2934 memset(userpage + pg_offset, 0, iosize); 2934 memset(userpage + pg_offset, 0, iosize);
2935 flush_dcache_page(page); 2935 flush_dcache_page(page);
@@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3112 for (index = 0; index < nr_pages; index++) { 3112 for (index = 0; index < nr_pages; index++) {
3113 __do_readpage(tree, pages[index], get_extent, em_cached, bio, 3113 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3114 mirror_num, bio_flags, rw, prev_em_start); 3114 mirror_num, bio_flags, rw, prev_em_start);
3115 page_cache_release(pages[index]); 3115 put_page(pages[index]);
3116 } 3116 }
3117} 3117}
3118 3118
@@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree,
3134 page_start = page_offset(pages[index]); 3134 page_start = page_offset(pages[index]);
3135 if (!end) { 3135 if (!end) {
3136 start = page_start; 3136 start = page_start;
3137 end = start + PAGE_CACHE_SIZE - 1; 3137 end = start + PAGE_SIZE - 1;
3138 first_index = index; 3138 first_index = index;
3139 } else if (end + 1 == page_start) { 3139 } else if (end + 1 == page_start) {
3140 end += PAGE_CACHE_SIZE; 3140 end += PAGE_SIZE;
3141 } else { 3141 } else {
3142 __do_contiguous_readpages(tree, &pages[first_index], 3142 __do_contiguous_readpages(tree, &pages[first_index],
3143 index - first_index, start, 3143 index - first_index, start,
@@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
3145 bio, mirror_num, bio_flags, 3145 bio, mirror_num, bio_flags,
3146 rw, prev_em_start); 3146 rw, prev_em_start);
3147 start = page_start; 3147 start = page_start;
3148 end = start + PAGE_CACHE_SIZE - 1; 3148 end = start + PAGE_SIZE - 1;
3149 first_index = index; 3149 first_index = index;
3150 } 3150 }
3151 } 3151 }
@@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
3167 struct inode *inode = page->mapping->host; 3167 struct inode *inode = page->mapping->host;
3168 struct btrfs_ordered_extent *ordered; 3168 struct btrfs_ordered_extent *ordered;
3169 u64 start = page_offset(page); 3169 u64 start = page_offset(page);
3170 u64 end = start + PAGE_CACHE_SIZE - 1; 3170 u64 end = start + PAGE_SIZE - 1;
3171 int ret; 3171 int ret;
3172 3172
3173 while (1) { 3173 while (1) {
3174 lock_extent(tree, start, end); 3174 lock_extent(tree, start, end);
3175 ordered = btrfs_lookup_ordered_range(inode, start, 3175 ordered = btrfs_lookup_ordered_range(inode, start,
3176 PAGE_CACHE_SIZE); 3176 PAGE_SIZE);
3177 if (!ordered) 3177 if (!ordered)
3178 break; 3178 break;
3179 unlock_extent(tree, start, end); 3179 unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
3227 unsigned long *nr_written) 3227 unsigned long *nr_written)
3228{ 3228{
3229 struct extent_io_tree *tree = epd->tree; 3229 struct extent_io_tree *tree = epd->tree;
3230 u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1; 3230 u64 page_end = delalloc_start + PAGE_SIZE - 1;
3231 u64 nr_delalloc; 3231 u64 nr_delalloc;
3232 u64 delalloc_to_write = 0; 3232 u64 delalloc_to_write = 0;
3233 u64 delalloc_end = 0; 3233 u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
3264 goto done; 3264 goto done;
3265 } 3265 }
3266 /* 3266 /*
3267 * delalloc_end is already one less than the total 3267 * delalloc_end is already one less than the total length, so
3268 * length, so we don't subtract one from 3268 * we don't subtract one from PAGE_SIZE
3269 * PAGE_CACHE_SIZE
3270 */ 3269 */
3271 delalloc_to_write += (delalloc_end - delalloc_start + 3270 delalloc_to_write += (delalloc_end - delalloc_start +
3272 PAGE_CACHE_SIZE) >> 3271 PAGE_SIZE) >> PAGE_SHIFT;
3273 PAGE_CACHE_SHIFT;
3274 delalloc_start = delalloc_end + 1; 3272 delalloc_start = delalloc_end + 1;
3275 } 3273 }
3276 if (wbc->nr_to_write < delalloc_to_write) { 3274 if (wbc->nr_to_write < delalloc_to_write) {
@@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3319{ 3317{
3320 struct extent_io_tree *tree = epd->tree; 3318 struct extent_io_tree *tree = epd->tree;
3321 u64 start = page_offset(page); 3319 u64 start = page_offset(page);
3322 u64 page_end = start + PAGE_CACHE_SIZE - 1; 3320 u64 page_end = start + PAGE_SIZE - 1;
3323 u64 end; 3321 u64 end;
3324 u64 cur = start; 3322 u64 cur = start;
3325 u64 extent_offset; 3323 u64 extent_offset;
@@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3434 if (ret) { 3432 if (ret) {
3435 SetPageError(page); 3433 SetPageError(page);
3436 } else { 3434 } else {
3437 unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1; 3435 unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
3438 3436
3439 set_range_writeback(tree, cur, cur + iosize - 1); 3437 set_range_writeback(tree, cur, cur + iosize - 1);
3440 if (!PageWriteback(page)) { 3438 if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3477 struct inode *inode = page->mapping->host; 3475 struct inode *inode = page->mapping->host;
3478 struct extent_page_data *epd = data; 3476 struct extent_page_data *epd = data;
3479 u64 start = page_offset(page); 3477 u64 start = page_offset(page);
3480 u64 page_end = start + PAGE_CACHE_SIZE - 1; 3478 u64 page_end = start + PAGE_SIZE - 1;
3481 int ret; 3479 int ret;
3482 int nr = 0; 3480 int nr = 0;
3483 size_t pg_offset = 0; 3481 size_t pg_offset = 0;
3484 loff_t i_size = i_size_read(inode); 3482 loff_t i_size = i_size_read(inode);
3485 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; 3483 unsigned long end_index = i_size >> PAGE_SHIFT;
3486 int write_flags; 3484 int write_flags;
3487 unsigned long nr_written = 0; 3485 unsigned long nr_written = 0;
3488 3486
@@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3497 3495
3498 ClearPageError(page); 3496 ClearPageError(page);
3499 3497
3500 pg_offset = i_size & (PAGE_CACHE_SIZE - 1); 3498 pg_offset = i_size & (PAGE_SIZE - 1);
3501 if (page->index > end_index || 3499 if (page->index > end_index ||
3502 (page->index == end_index && !pg_offset)) { 3500 (page->index == end_index && !pg_offset)) {
3503 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 3501 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3504 unlock_page(page); 3502 unlock_page(page);
3505 return 0; 3503 return 0;
3506 } 3504 }
@@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3510 3508
3511 userpage = kmap_atomic(page); 3509 userpage = kmap_atomic(page);
3512 memset(userpage + pg_offset, 0, 3510 memset(userpage + pg_offset, 0,
3513 PAGE_CACHE_SIZE - pg_offset); 3511 PAGE_SIZE - pg_offset);
3514 kunmap_atomic(userpage); 3512 kunmap_atomic(userpage);
3515 flush_dcache_page(page); 3513 flush_dcache_page(page);
3516 } 3514 }
@@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3748 clear_page_dirty_for_io(p); 3746 clear_page_dirty_for_io(p);
3749 set_page_writeback(p); 3747 set_page_writeback(p);
3750 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, 3748 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
3751 PAGE_CACHE_SIZE, 0, bdev, &epd->bio, 3749 PAGE_SIZE, 0, bdev, &epd->bio,
3752 -1, end_bio_extent_buffer_writepage, 3750 -1, end_bio_extent_buffer_writepage,
3753 0, epd->bio_flags, bio_flags, false); 3751 0, epd->bio_flags, bio_flags, false);
3754 epd->bio_flags = bio_flags; 3752 epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3760 ret = -EIO; 3758 ret = -EIO;
3761 break; 3759 break;
3762 } 3760 }
3763 offset += PAGE_CACHE_SIZE; 3761 offset += PAGE_SIZE;
3764 update_nr_written(p, wbc, 1); 3762 update_nr_written(p, wbc, 1);
3765 unlock_page(p); 3763 unlock_page(p);
3766 } 3764 }
@@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping,
3804 index = mapping->writeback_index; /* Start from prev offset */ 3802 index = mapping->writeback_index; /* Start from prev offset */
3805 end = -1; 3803 end = -1;
3806 } else { 3804 } else {
3807 index = wbc->range_start >> PAGE_CACHE_SHIFT; 3805 index = wbc->range_start >> PAGE_SHIFT;
3808 end = wbc->range_end >> PAGE_CACHE_SHIFT; 3806 end = wbc->range_end >> PAGE_SHIFT;
3809 scanned = 1; 3807 scanned = 1;
3810 } 3808 }
3811 if (wbc->sync_mode == WB_SYNC_ALL) 3809 if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
3948 index = mapping->writeback_index; /* Start from prev offset */ 3946 index = mapping->writeback_index; /* Start from prev offset */
3949 end = -1; 3947 end = -1;
3950 } else { 3948 } else {
3951 index = wbc->range_start >> PAGE_CACHE_SHIFT; 3949 index = wbc->range_start >> PAGE_SHIFT;
3952 end = wbc->range_end >> PAGE_CACHE_SHIFT; 3950 end = wbc->range_end >> PAGE_SHIFT;
3953 scanned = 1; 3951 scanned = 1;
3954 } 3952 }
3955 if (wbc->sync_mode == WB_SYNC_ALL) 3953 if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4083 int ret = 0; 4081 int ret = 0;
4084 struct address_space *mapping = inode->i_mapping; 4082 struct address_space *mapping = inode->i_mapping;
4085 struct page *page; 4083 struct page *page;
4086 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> 4084 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4087 PAGE_CACHE_SHIFT; 4085 PAGE_SHIFT;
4088 4086
4089 struct extent_page_data epd = { 4087 struct extent_page_data epd = {
4090 .bio = NULL, 4088 .bio = NULL,
@@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4102 }; 4100 };
4103 4101
4104 while (start <= end) { 4102 while (start <= end) {
4105 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 4103 page = find_get_page(mapping, start >> PAGE_SHIFT);
4106 if (clear_page_dirty_for_io(page)) 4104 if (clear_page_dirty_for_io(page))
4107 ret = __extent_writepage(page, &wbc_writepages, &epd); 4105 ret = __extent_writepage(page, &wbc_writepages, &epd);
4108 else { 4106 else {
4109 if (tree->ops && tree->ops->writepage_end_io_hook) 4107 if (tree->ops && tree->ops->writepage_end_io_hook)
4110 tree->ops->writepage_end_io_hook(page, start, 4108 tree->ops->writepage_end_io_hook(page, start,
4111 start + PAGE_CACHE_SIZE - 1, 4109 start + PAGE_SIZE - 1,
4112 NULL, 1); 4110 NULL, 1);
4113 unlock_page(page); 4111 unlock_page(page);
4114 } 4112 }
4115 page_cache_release(page); 4113 put_page(page);
4116 start += PAGE_CACHE_SIZE; 4114 start += PAGE_SIZE;
4117 } 4115 }
4118 4116
4119 flush_epd_write_bio(&epd); 4117 flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree,
4163 list_del(&page->lru); 4161 list_del(&page->lru);
4164 if (add_to_page_cache_lru(page, mapping, 4162 if (add_to_page_cache_lru(page, mapping,
4165 page->index, GFP_NOFS)) { 4163 page->index, GFP_NOFS)) {
4166 page_cache_release(page); 4164 put_page(page);
4167 continue; 4165 continue;
4168 } 4166 }
4169 4167
@@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
4197{ 4195{
4198 struct extent_state *cached_state = NULL; 4196 struct extent_state *cached_state = NULL;
4199 u64 start = page_offset(page); 4197 u64 start = page_offset(page);
4200 u64 end = start + PAGE_CACHE_SIZE - 1; 4198 u64 end = start + PAGE_SIZE - 1;
4201 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 4199 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4202 4200
4203 start += ALIGN(offset, blocksize); 4201 start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map,
4223 struct page *page, gfp_t mask) 4221 struct page *page, gfp_t mask)
4224{ 4222{
4225 u64 start = page_offset(page); 4223 u64 start = page_offset(page);
4226 u64 end = start + PAGE_CACHE_SIZE - 1; 4224 u64 end = start + PAGE_SIZE - 1;
4227 int ret = 1; 4225 int ret = 1;
4228 4226
4229 if (test_range_bit(tree, start, end, 4227 if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
4262{ 4260{
4263 struct extent_map *em; 4261 struct extent_map *em;
4264 u64 start = page_offset(page); 4262 u64 start = page_offset(page);
4265 u64 end = start + PAGE_CACHE_SIZE - 1; 4263 u64 end = start + PAGE_SIZE - 1;
4266 4264
4267 if (gfpflags_allow_blocking(mask) && 4265 if (gfpflags_allow_blocking(mask) &&
4268 page->mapping->host->i_size > SZ_16M) { 4266 page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4587 ClearPagePrivate(page); 4585 ClearPagePrivate(page);
4588 set_page_private(page, 0); 4586 set_page_private(page, 0);
4589 /* One for the page private */ 4587 /* One for the page private */
4590 page_cache_release(page); 4588 put_page(page);
4591 } 4589 }
4592 4590
4593 if (mapped) 4591 if (mapped)
4594 spin_unlock(&page->mapping->private_lock); 4592 spin_unlock(&page->mapping->private_lock);
4595 4593
4596 /* One for when we alloced the page */ 4594 /* One for when we alloced the page */
4597 page_cache_release(page); 4595 put_page(page);
4598 } while (index != 0); 4596 } while (index != 0);
4599} 4597}
4600 4598
@@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4779 4777
4780 rcu_read_lock(); 4778 rcu_read_lock();
4781 eb = radix_tree_lookup(&fs_info->buffer_radix, 4779 eb = radix_tree_lookup(&fs_info->buffer_radix,
4782 start >> PAGE_CACHE_SHIFT); 4780 start >> PAGE_SHIFT);
4783 if (eb && atomic_inc_not_zero(&eb->refs)) { 4781 if (eb && atomic_inc_not_zero(&eb->refs)) {
4784 rcu_read_unlock(); 4782 rcu_read_unlock();
4785 /* 4783 /*
@@ -4829,7 +4827,7 @@ again:
4829 goto free_eb; 4827 goto free_eb;
4830 spin_lock(&fs_info->buffer_lock); 4828 spin_lock(&fs_info->buffer_lock);
4831 ret = radix_tree_insert(&fs_info->buffer_radix, 4829 ret = radix_tree_insert(&fs_info->buffer_radix,
4832 start >> PAGE_CACHE_SHIFT, eb); 4830 start >> PAGE_SHIFT, eb);
4833 spin_unlock(&fs_info->buffer_lock); 4831 spin_unlock(&fs_info->buffer_lock);
4834 radix_tree_preload_end(); 4832 radix_tree_preload_end();
4835 if (ret == -EEXIST) { 4833 if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4862 unsigned long len = fs_info->tree_root->nodesize; 4860 unsigned long len = fs_info->tree_root->nodesize;
4863 unsigned long num_pages = num_extent_pages(start, len); 4861 unsigned long num_pages = num_extent_pages(start, len);
4864 unsigned long i; 4862 unsigned long i;
4865 unsigned long index = start >> PAGE_CACHE_SHIFT; 4863 unsigned long index = start >> PAGE_SHIFT;
4866 struct extent_buffer *eb; 4864 struct extent_buffer *eb;
4867 struct extent_buffer *exists = NULL; 4865 struct extent_buffer *exists = NULL;
4868 struct page *p; 4866 struct page *p;
@@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4896 if (atomic_inc_not_zero(&exists->refs)) { 4894 if (atomic_inc_not_zero(&exists->refs)) {
4897 spin_unlock(&mapping->private_lock); 4895 spin_unlock(&mapping->private_lock);
4898 unlock_page(p); 4896 unlock_page(p);
4899 page_cache_release(p); 4897 put_page(p);
4900 mark_extent_buffer_accessed(exists, p); 4898 mark_extent_buffer_accessed(exists, p);
4901 goto free_eb; 4899 goto free_eb;
4902 } 4900 }
@@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4908 */ 4906 */
4909 ClearPagePrivate(p); 4907 ClearPagePrivate(p);
4910 WARN_ON(PageDirty(p)); 4908 WARN_ON(PageDirty(p));
4911 page_cache_release(p); 4909 put_page(p);
4912 } 4910 }
4913 attach_extent_buffer_page(eb, p); 4911 attach_extent_buffer_page(eb, p);
4914 spin_unlock(&mapping->private_lock); 4912 spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@ again:
4931 4929
4932 spin_lock(&fs_info->buffer_lock); 4930 spin_lock(&fs_info->buffer_lock);
4933 ret = radix_tree_insert(&fs_info->buffer_radix, 4931 ret = radix_tree_insert(&fs_info->buffer_radix,
4934 start >> PAGE_CACHE_SHIFT, eb); 4932 start >> PAGE_SHIFT, eb);
4935 spin_unlock(&fs_info->buffer_lock); 4933 spin_unlock(&fs_info->buffer_lock);
4936 radix_tree_preload_end(); 4934 radix_tree_preload_end();
4937 if (ret == -EEXIST) { 4935 if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
4994 4992
4995 spin_lock(&fs_info->buffer_lock); 4993 spin_lock(&fs_info->buffer_lock);
4996 radix_tree_delete(&fs_info->buffer_radix, 4994 radix_tree_delete(&fs_info->buffer_radix,
4997 eb->start >> PAGE_CACHE_SHIFT); 4995 eb->start >> PAGE_SHIFT);
4998 spin_unlock(&fs_info->buffer_lock); 4996 spin_unlock(&fs_info->buffer_lock);
4999 } else { 4997 } else {
5000 spin_unlock(&eb->refs_lock); 4998 spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
5168 5166
5169 if (start) { 5167 if (start) {
5170 WARN_ON(start < eb->start); 5168 WARN_ON(start < eb->start);
5171 start_i = (start >> PAGE_CACHE_SHIFT) - 5169 start_i = (start >> PAGE_SHIFT) -
5172 (eb->start >> PAGE_CACHE_SHIFT); 5170 (eb->start >> PAGE_SHIFT);
5173 } else { 5171 } else {
5174 start_i = 0; 5172 start_i = 0;
5175 } 5173 }
@@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5252 struct page *page; 5250 struct page *page;
5253 char *kaddr; 5251 char *kaddr;
5254 char *dst = (char *)dstv; 5252 char *dst = (char *)dstv;
5255 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5253 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5256 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5254 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5257 5255
5258 WARN_ON(start > eb->len); 5256 WARN_ON(start > eb->len);
5259 WARN_ON(start + len > eb->start + eb->len); 5257 WARN_ON(start + len > eb->start + eb->len);
5260 5258
5261 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5259 offset = (start_offset + start) & (PAGE_SIZE - 1);
5262 5260
5263 while (len > 0) { 5261 while (len > 0) {
5264 page = eb->pages[i]; 5262 page = eb->pages[i];
5265 5263
5266 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5264 cur = min(len, (PAGE_SIZE - offset));
5267 kaddr = page_address(page); 5265 kaddr = page_address(page);
5268 memcpy(dst, kaddr + offset, cur); 5266 memcpy(dst, kaddr + offset, cur);
5269 5267
@@ -5283,19 +5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5283 struct page *page; 5281 struct page *page;
5284 char *kaddr; 5282 char *kaddr;
5285 char __user *dst = (char __user *)dstv; 5283 char __user *dst = (char __user *)dstv;
5286 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5284 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5287 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5285 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5288 int ret = 0; 5286 int ret = 0;
5289 5287
5290 WARN_ON(start > eb->len); 5288 WARN_ON(start > eb->len);
5291 WARN_ON(start + len > eb->start + eb->len); 5289 WARN_ON(start + len > eb->start + eb->len);
5292 5290
5293 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5291 offset = (start_offset + start) & (PAGE_SIZE - 1);
5294 5292
5295 while (len > 0) { 5293 while (len > 0) {
5296 page = eb->pages[i]; 5294 page = eb->pages[i];
5297 5295
5298 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5296 cur = min(len, (PAGE_SIZE - offset));
5299 kaddr = page_address(page); 5297 kaddr = page_address(page);
5300 if (copy_to_user(dst, kaddr + offset, cur)) { 5298 if (copy_to_user(dst, kaddr + offset, cur)) {
5301 ret = -EFAULT; 5299 ret = -EFAULT;
@@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5316 unsigned long *map_start, 5314 unsigned long *map_start,
5317 unsigned long *map_len) 5315 unsigned long *map_len)
5318{ 5316{
5319 size_t offset = start & (PAGE_CACHE_SIZE - 1); 5317 size_t offset = start & (PAGE_SIZE - 1);
5320 char *kaddr; 5318 char *kaddr;
5321 struct page *p; 5319 struct page *p;
5322 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5320 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5323 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5321 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5324 unsigned long end_i = (start_offset + start + min_len - 1) >> 5322 unsigned long end_i = (start_offset + start + min_len - 1) >>
5325 PAGE_CACHE_SHIFT; 5323 PAGE_SHIFT;
5326 5324
5327 if (i != end_i) 5325 if (i != end_i)
5328 return -EINVAL; 5326 return -EINVAL;
@@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5332 *map_start = 0; 5330 *map_start = 0;
5333 } else { 5331 } else {
5334 offset = 0; 5332 offset = 0;
5335 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; 5333 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5336 } 5334 }
5337 5335
5338 if (start + min_len > eb->len) { 5336 if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5345 p = eb->pages[i]; 5343 p = eb->pages[i];
5346 kaddr = page_address(p); 5344 kaddr = page_address(p);
5347 *map = kaddr + offset; 5345 *map = kaddr + offset;
5348 *map_len = PAGE_CACHE_SIZE - offset; 5346 *map_len = PAGE_SIZE - offset;
5349 return 0; 5347 return 0;
5350} 5348}
5351 5349
@@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5358 struct page *page; 5356 struct page *page;
5359 char *kaddr; 5357 char *kaddr;
5360 char *ptr = (char *)ptrv; 5358 char *ptr = (char *)ptrv;
5361 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5359 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5362 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5360 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5363 int ret = 0; 5361 int ret = 0;
5364 5362
5365 WARN_ON(start > eb->len); 5363 WARN_ON(start > eb->len);
5366 WARN_ON(start + len > eb->start + eb->len); 5364 WARN_ON(start + len > eb->start + eb->len);
5367 5365
5368 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5366 offset = (start_offset + start) & (PAGE_SIZE - 1);
5369 5367
5370 while (len > 0) { 5368 while (len > 0) {
5371 page = eb->pages[i]; 5369 page = eb->pages[i];
5372 5370
5373 cur = min(len, (PAGE_CACHE_SIZE - offset)); 5371 cur = min(len, (PAGE_SIZE - offset));
5374 5372
5375 kaddr = page_address(page); 5373 kaddr = page_address(page);
5376 ret = memcmp(ptr, kaddr + offset, cur); 5374 ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5393 struct page *page; 5391 struct page *page;
5394 char *kaddr; 5392 char *kaddr;
5395 char *src = (char *)srcv; 5393 char *src = (char *)srcv;
5396 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5394 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5397 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5395 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5398 5396
5399 WARN_ON(start > eb->len); 5397 WARN_ON(start > eb->len);
5400 WARN_ON(start + len > eb->start + eb->len); 5398 WARN_ON(start + len > eb->start + eb->len);
5401 5399
5402 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5400 offset = (start_offset + start) & (PAGE_SIZE - 1);
5403 5401
5404 while (len > 0) { 5402 while (len > 0) {
5405 page = eb->pages[i]; 5403 page = eb->pages[i];
5406 WARN_ON(!PageUptodate(page)); 5404 WARN_ON(!PageUptodate(page));
5407 5405
5408 cur = min(len, PAGE_CACHE_SIZE - offset); 5406 cur = min(len, PAGE_SIZE - offset);
5409 kaddr = page_address(page); 5407 kaddr = page_address(page);
5410 memcpy(kaddr + offset, src, cur); 5408 memcpy(kaddr + offset, src, cur);
5411 5409
@@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
5423 size_t offset; 5421 size_t offset;
5424 struct page *page; 5422 struct page *page;
5425 char *kaddr; 5423 char *kaddr;
5426 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5424 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5427 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 5425 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5428 5426
5429 WARN_ON(start > eb->len); 5427 WARN_ON(start > eb->len);
5430 WARN_ON(start + len > eb->start + eb->len); 5428 WARN_ON(start + len > eb->start + eb->len);
5431 5429
5432 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); 5430 offset = (start_offset + start) & (PAGE_SIZE - 1);
5433 5431
5434 while (len > 0) { 5432 while (len > 0) {
5435 page = eb->pages[i]; 5433 page = eb->pages[i];
5436 WARN_ON(!PageUptodate(page)); 5434 WARN_ON(!PageUptodate(page));
5437 5435
5438 cur = min(len, PAGE_CACHE_SIZE - offset); 5436 cur = min(len, PAGE_SIZE - offset);
5439 kaddr = page_address(page); 5437 kaddr = page_address(page);
5440 memset(kaddr + offset, c, cur); 5438 memset(kaddr + offset, c, cur);
5441 5439
@@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5454 size_t offset; 5452 size_t offset;
5455 struct page *page; 5453 struct page *page;
5456 char *kaddr; 5454 char *kaddr;
5457 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5455 size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5458 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 5456 unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5459 5457
5460 WARN_ON(src->len != dst_len); 5458 WARN_ON(src->len != dst_len);
5461 5459
5462 offset = (start_offset + dst_offset) & 5460 offset = (start_offset + dst_offset) &
5463 (PAGE_CACHE_SIZE - 1); 5461 (PAGE_SIZE - 1);
5464 5462
5465 while (len > 0) { 5463 while (len > 0) {
5466 page = dst->pages[i]; 5464 page = dst->pages[i];
5467 WARN_ON(!PageUptodate(page)); 5465 WARN_ON(!PageUptodate(page));
5468 5466
5469 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); 5467 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5470 5468
5471 kaddr = page_address(page); 5469 kaddr = page_address(page);
5472 read_extent_buffer(src, kaddr + offset, src_offset, cur); 5470 read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
5508 unsigned long *page_index, 5506 unsigned long *page_index,
5509 size_t *page_offset) 5507 size_t *page_offset)
5510{ 5508{
5511 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 5509 size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
5512 size_t byte_offset = BIT_BYTE(nr); 5510 size_t byte_offset = BIT_BYTE(nr);
5513 size_t offset; 5511 size_t offset;
5514 5512
@@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
5519 */ 5517 */
5520 offset = start_offset + start + byte_offset; 5518 offset = start_offset + start + byte_offset;
5521 5519
5522 *page_index = offset >> PAGE_CACHE_SHIFT; 5520 *page_index = offset >> PAGE_SHIFT;
5523 *page_offset = offset & (PAGE_CACHE_SIZE - 1); 5521 *page_offset = offset & (PAGE_SIZE - 1);
5524} 5522}
5525 5523
5526/** 5524/**
@@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5572 len -= bits_to_set; 5570 len -= bits_to_set;
5573 bits_to_set = BITS_PER_BYTE; 5571 bits_to_set = BITS_PER_BYTE;
5574 mask_to_set = ~0U; 5572 mask_to_set = ~0U;
5575 if (++offset >= PAGE_CACHE_SIZE && len > 0) { 5573 if (++offset >= PAGE_SIZE && len > 0) {
5576 offset = 0; 5574 offset = 0;
5577 page = eb->pages[++i]; 5575 page = eb->pages[++i];
5578 WARN_ON(!PageUptodate(page)); 5576 WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5614 len -= bits_to_clear; 5612 len -= bits_to_clear;
5615 bits_to_clear = BITS_PER_BYTE; 5613 bits_to_clear = BITS_PER_BYTE;
5616 mask_to_clear = ~0U; 5614 mask_to_clear = ~0U;
5617 if (++offset >= PAGE_CACHE_SIZE && len > 0) { 5615 if (++offset >= PAGE_SIZE && len > 0) {
5618 offset = 0; 5616 offset = 0;
5619 page = eb->pages[++i]; 5617 page = eb->pages[++i];
5620 WARN_ON(!PageUptodate(page)); 5618 WARN_ON(!PageUptodate(page));
@@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5661 size_t cur; 5659 size_t cur;
5662 size_t dst_off_in_page; 5660 size_t dst_off_in_page;
5663 size_t src_off_in_page; 5661 size_t src_off_in_page;
5664 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5662 size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5665 unsigned long dst_i; 5663 unsigned long dst_i;
5666 unsigned long src_i; 5664 unsigned long src_i;
5667 5665
@@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5680 5678
5681 while (len > 0) { 5679 while (len > 0) {
5682 dst_off_in_page = (start_offset + dst_offset) & 5680 dst_off_in_page = (start_offset + dst_offset) &
5683 (PAGE_CACHE_SIZE - 1); 5681 (PAGE_SIZE - 1);
5684 src_off_in_page = (start_offset + src_offset) & 5682 src_off_in_page = (start_offset + src_offset) &
5685 (PAGE_CACHE_SIZE - 1); 5683 (PAGE_SIZE - 1);
5686 5684
5687 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; 5685 dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5688 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT; 5686 src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5689 5687
5690 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - 5688 cur = min(len, (unsigned long)(PAGE_SIZE -
5691 src_off_in_page)); 5689 src_off_in_page));
5692 cur = min_t(unsigned long, cur, 5690 cur = min_t(unsigned long, cur,
5693 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page)); 5691 (unsigned long)(PAGE_SIZE - dst_off_in_page));
5694 5692
5695 copy_pages(dst->pages[dst_i], dst->pages[src_i], 5693 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5696 dst_off_in_page, src_off_in_page, cur); 5694 dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5709 size_t src_off_in_page; 5707 size_t src_off_in_page;
5710 unsigned long dst_end = dst_offset + len - 1; 5708 unsigned long dst_end = dst_offset + len - 1;
5711 unsigned long src_end = src_offset + len - 1; 5709 unsigned long src_end = src_offset + len - 1;
5712 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); 5710 size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
5713 unsigned long dst_i; 5711 unsigned long dst_i;
5714 unsigned long src_i; 5712 unsigned long src_i;
5715 5713
@@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5728 return; 5726 return;
5729 } 5727 }
5730 while (len > 0) { 5728 while (len > 0) {
5731 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; 5729 dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5732 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; 5730 src_i = (start_offset + src_end) >> PAGE_SHIFT;
5733 5731
5734 dst_off_in_page = (start_offset + dst_end) & 5732 dst_off_in_page = (start_offset + dst_end) &
5735 (PAGE_CACHE_SIZE - 1); 5733 (PAGE_SIZE - 1);
5736 src_off_in_page = (start_offset + src_end) & 5734 src_off_in_page = (start_offset + src_end) &
5737 (PAGE_CACHE_SIZE - 1); 5735 (PAGE_SIZE - 1);
5738 5736
5739 cur = min_t(unsigned long, len, src_off_in_page + 1); 5737 cur = min_t(unsigned long, len, src_off_in_page + 1);
5740 cur = min(cur, dst_off_in_page + 1); 5738 cur = min(cur, dst_off_in_page + 1);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 5dbf92e68fbd..b5e0ade90e88 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -120,7 +120,7 @@ struct extent_state {
120}; 120};
121 121
122#define INLINE_EXTENT_BUFFER_PAGES 16 122#define INLINE_EXTENT_BUFFER_PAGES 16
123#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE) 123#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
124struct extent_buffer { 124struct extent_buffer {
125 u64 start; 125 u64 start;
126 unsigned long len; 126 unsigned long len;
@@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
365 365
366static inline unsigned long num_extent_pages(u64 start, u64 len) 366static inline unsigned long num_extent_pages(u64 start, u64 len)
367{ 367{
368 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 368 return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
369 (start >> PAGE_CACHE_SHIFT); 369 (start >> PAGE_SHIFT);
370} 370}
371 371
372static inline void extent_buffer_get(struct extent_buffer *eb) 372static inline void extent_buffer_get(struct extent_buffer *eb)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b5baf5bdc8e1..7a7d6e253cfc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -32,7 +32,7 @@
32 size) - 1)) 32 size) - 1))
33 33
34#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ 34#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
35 PAGE_CACHE_SIZE)) 35 PAGE_SIZE))
36 36
37#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ 37#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
38 sizeof(struct btrfs_ordered_sum)) / \ 38 sizeof(struct btrfs_ordered_sum)) / \
@@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
203 csum = (u8 *)dst; 203 csum = (u8 *)dst;
204 } 204 }
205 205
206 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) 206 if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
207 path->reada = READA_FORWARD; 207 path->reada = READA_FORWARD;
208 208
209 WARN_ON(bio->bi_vcnt <= 0); 209 WARN_ON(bio->bi_vcnt <= 0);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15a09cb156ce..8d7b5a45c005 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
414 size_t copied = 0; 414 size_t copied = 0;
415 size_t total_copied = 0; 415 size_t total_copied = 0;
416 int pg = 0; 416 int pg = 0;
417 int offset = pos & (PAGE_CACHE_SIZE - 1); 417 int offset = pos & (PAGE_SIZE - 1);
418 418
419 while (write_bytes > 0) { 419 while (write_bytes > 0) {
420 size_t count = min_t(size_t, 420 size_t count = min_t(size_t,
421 PAGE_CACHE_SIZE - offset, write_bytes); 421 PAGE_SIZE - offset, write_bytes);
422 struct page *page = prepared_pages[pg]; 422 struct page *page = prepared_pages[pg];
423 /* 423 /*
424 * Copy data from userspace to the current page 424 * Copy data from userspace to the current page
@@ -448,7 +448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
448 if (unlikely(copied == 0)) 448 if (unlikely(copied == 0))
449 break; 449 break;
450 450
451 if (copied < PAGE_CACHE_SIZE - offset) { 451 if (copied < PAGE_SIZE - offset) {
452 offset += copied; 452 offset += copied;
453 } else { 453 } else {
454 pg++; 454 pg++;
@@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
473 */ 473 */
474 ClearPageChecked(pages[i]); 474 ClearPageChecked(pages[i]);
475 unlock_page(pages[i]); 475 unlock_page(pages[i]);
476 page_cache_release(pages[i]); 476 put_page(pages[i]);
477 } 477 }
478} 478}
479 479
@@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode,
1297{ 1297{
1298 int ret = 0; 1298 int ret = 0;
1299 1299
1300 if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && 1300 if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1301 !PageUptodate(page)) { 1301 !PageUptodate(page)) {
1302 ret = btrfs_readpage(NULL, page); 1302 ret = btrfs_readpage(NULL, page);
1303 if (ret) 1303 if (ret)
@@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1323 size_t write_bytes, bool force_uptodate) 1323 size_t write_bytes, bool force_uptodate)
1324{ 1324{
1325 int i; 1325 int i;
1326 unsigned long index = pos >> PAGE_CACHE_SHIFT; 1326 unsigned long index = pos >> PAGE_SHIFT;
1327 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 1327 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1328 int err = 0; 1328 int err = 0;
1329 int faili; 1329 int faili;
@@ -1345,7 +1345,7 @@ again:
1345 err = prepare_uptodate_page(inode, pages[i], 1345 err = prepare_uptodate_page(inode, pages[i],
1346 pos + write_bytes, false); 1346 pos + write_bytes, false);
1347 if (err) { 1347 if (err) {
1348 page_cache_release(pages[i]); 1348 put_page(pages[i]);
1349 if (err == -EAGAIN) { 1349 if (err == -EAGAIN) {
1350 err = 0; 1350 err = 0;
1351 goto again; 1351 goto again;
@@ -1360,7 +1360,7 @@ again:
1360fail: 1360fail:
1361 while (faili >= 0) { 1361 while (faili >= 0) {
1362 unlock_page(pages[faili]); 1362 unlock_page(pages[faili]);
1363 page_cache_release(pages[faili]); 1363 put_page(pages[faili]);
1364 faili--; 1364 faili--;
1365 } 1365 }
1366 return err; 1366 return err;
@@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1408 cached_state, GFP_NOFS); 1408 cached_state, GFP_NOFS);
1409 for (i = 0; i < num_pages; i++) { 1409 for (i = 0; i < num_pages; i++) {
1410 unlock_page(pages[i]); 1410 unlock_page(pages[i]);
1411 page_cache_release(pages[i]); 1411 put_page(pages[i]);
1412 } 1412 }
1413 btrfs_start_ordered_extent(inode, ordered, 1); 1413 btrfs_start_ordered_extent(inode, ordered, 1);
1414 btrfs_put_ordered_extent(ordered); 1414 btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1497 bool force_page_uptodate = false; 1497 bool force_page_uptodate = false;
1498 bool need_unlock; 1498 bool need_unlock;
1499 1499
1500 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE), 1500 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1501 PAGE_CACHE_SIZE / (sizeof(struct page *))); 1501 PAGE_SIZE / (sizeof(struct page *)));
1502 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); 1502 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1503 nrptrs = max(nrptrs, 8); 1503 nrptrs = max(nrptrs, 8);
1504 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL); 1504 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1506 return -ENOMEM; 1506 return -ENOMEM;
1507 1507
1508 while (iov_iter_count(i) > 0) { 1508 while (iov_iter_count(i) > 0) {
1509 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1509 size_t offset = pos & (PAGE_SIZE - 1);
1510 size_t sector_offset; 1510 size_t sector_offset;
1511 size_t write_bytes = min(iov_iter_count(i), 1511 size_t write_bytes = min(iov_iter_count(i),
1512 nrptrs * (size_t)PAGE_CACHE_SIZE - 1512 nrptrs * (size_t)PAGE_SIZE -
1513 offset); 1513 offset);
1514 size_t num_pages = DIV_ROUND_UP(write_bytes + offset, 1514 size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1515 PAGE_CACHE_SIZE); 1515 PAGE_SIZE);
1516 size_t reserve_bytes; 1516 size_t reserve_bytes;
1517 size_t dirty_pages; 1517 size_t dirty_pages;
1518 size_t copied; 1518 size_t copied;
@@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1547 * write_bytes, so scale down. 1547 * write_bytes, so scale down.
1548 */ 1548 */
1549 num_pages = DIV_ROUND_UP(write_bytes + offset, 1549 num_pages = DIV_ROUND_UP(write_bytes + offset,
1550 PAGE_CACHE_SIZE); 1550 PAGE_SIZE);
1551 reserve_bytes = round_up(write_bytes + sector_offset, 1551 reserve_bytes = round_up(write_bytes + sector_offset,
1552 root->sectorsize); 1552 root->sectorsize);
1553 goto reserve_metadata; 1553 goto reserve_metadata;
@@ -1609,7 +1609,7 @@ again:
1609 } else { 1609 } else {
1610 force_page_uptodate = false; 1610 force_page_uptodate = false;
1611 dirty_pages = DIV_ROUND_UP(copied + offset, 1611 dirty_pages = DIV_ROUND_UP(copied + offset,
1612 PAGE_CACHE_SIZE); 1612 PAGE_SIZE);
1613 } 1613 }
1614 1614
1615 /* 1615 /*
@@ -1641,7 +1641,7 @@ again:
1641 u64 __pos; 1641 u64 __pos;
1642 1642
1643 __pos = round_down(pos, root->sectorsize) + 1643 __pos = round_down(pos, root->sectorsize) +
1644 (dirty_pages << PAGE_CACHE_SHIFT); 1644 (dirty_pages << PAGE_SHIFT);
1645 btrfs_delalloc_release_space(inode, __pos, 1645 btrfs_delalloc_release_space(inode, __pos,
1646 release_bytes); 1646 release_bytes);
1647 } 1647 }
@@ -1682,7 +1682,7 @@ again:
1682 cond_resched(); 1682 cond_resched();
1683 1683
1684 balance_dirty_pages_ratelimited(inode->i_mapping); 1684 balance_dirty_pages_ratelimited(inode->i_mapping);
1685 if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1) 1685 if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
1686 btrfs_btree_balance_dirty(root); 1686 btrfs_btree_balance_dirty(root);
1687 1687
1688 pos += copied; 1688 pos += copied;
@@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1738 goto out; 1738 goto out;
1739 written += written_buffered; 1739 written += written_buffered;
1740 iocb->ki_pos = pos + written_buffered; 1740 iocb->ki_pos = pos + written_buffered;
1741 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, 1741 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1742 endbyte >> PAGE_CACHE_SHIFT); 1742 endbyte >> PAGE_SHIFT);
1743out: 1743out:
1744 return written ? written : err; 1744 return written ? written : err;
1745} 1745}
@@ -1905,7 +1905,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1905 */ 1905 */
1906int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 1906int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1907{ 1907{
1908 struct dentry *dentry = file->f_path.dentry; 1908 struct dentry *dentry = file_dentry(file);
1909 struct inode *inode = d_inode(dentry); 1909 struct inode *inode = d_inode(dentry);
1910 struct btrfs_root *root = BTRFS_I(inode)->root; 1910 struct btrfs_root *root = BTRFS_I(inode)->root;
1911 struct btrfs_trans_handle *trans; 1911 struct btrfs_trans_handle *trans;
@@ -2682,9 +2682,12 @@ static long btrfs_fallocate(struct file *file, int mode,
2682 return ret; 2682 return ret;
2683 2683
2684 inode_lock(inode); 2684 inode_lock(inode);
2685 ret = inode_newsize_ok(inode, alloc_end); 2685
2686 if (ret) 2686 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
2687 goto out; 2687 ret = inode_newsize_ok(inode, offset + len);
2688 if (ret)
2689 goto out;
2690 }
2688 2691
2689 /* 2692 /*
2690 * TODO: Move these two operations after we have checked 2693 * TODO: Move these two operations after we have checked
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8f835bfa1bdd..5e6062c26129 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
29#include "inode-map.h" 29#include "inode-map.h"
30#include "volumes.h" 30#include "volumes.h"
31 31
32#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 32#define BITS_PER_BITMAP (PAGE_SIZE * 8)
33#define MAX_CACHE_BYTES_PER_GIG SZ_32K 33#define MAX_CACHE_BYTES_PER_GIG SZ_32K
34 34
35struct btrfs_trim_range { 35struct btrfs_trim_range {
@@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode)
295 return -ENOMEM; 295 return -ENOMEM;
296 296
297 file_ra_state_init(ra, inode->i_mapping); 297 file_ra_state_init(ra, inode->i_mapping);
298 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 298 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
299 299
300 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); 300 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
301 301
@@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
310 int num_pages; 310 int num_pages;
311 int check_crcs = 0; 311 int check_crcs = 0;
312 312
313 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); 313 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
314 314
315 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) 315 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
316 check_crcs = 1; 316 check_crcs = 1;
317 317
318 /* Make sure we can fit our crcs into the first page */ 318 /* Make sure we can fit our crcs into the first page */
319 if (write && check_crcs && 319 if (write && check_crcs &&
320 (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) 320 (num_pages * sizeof(u32)) >= PAGE_SIZE)
321 return -ENOSPC; 321 return -ENOSPC;
322 322
323 memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); 323 memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
@@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
354 io_ctl->page = io_ctl->pages[io_ctl->index++]; 354 io_ctl->page = io_ctl->pages[io_ctl->index++];
355 io_ctl->cur = page_address(io_ctl->page); 355 io_ctl->cur = page_address(io_ctl->page);
356 io_ctl->orig = io_ctl->cur; 356 io_ctl->orig = io_ctl->cur;
357 io_ctl->size = PAGE_CACHE_SIZE; 357 io_ctl->size = PAGE_SIZE;
358 if (clear) 358 if (clear)
359 memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); 359 memset(io_ctl->cur, 0, PAGE_SIZE);
360} 360}
361 361
362static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) 362static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
@@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
369 if (io_ctl->pages[i]) { 369 if (io_ctl->pages[i]) {
370 ClearPageChecked(io_ctl->pages[i]); 370 ClearPageChecked(io_ctl->pages[i]);
371 unlock_page(io_ctl->pages[i]); 371 unlock_page(io_ctl->pages[i]);
372 page_cache_release(io_ctl->pages[i]); 372 put_page(io_ctl->pages[i]);
373 } 373 }
374 } 374 }
375} 375}
@@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
475 offset = sizeof(u32) * io_ctl->num_pages; 475 offset = sizeof(u32) * io_ctl->num_pages;
476 476
477 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 477 crc = btrfs_csum_data(io_ctl->orig + offset, crc,
478 PAGE_CACHE_SIZE - offset); 478 PAGE_SIZE - offset);
479 btrfs_csum_final(crc, (char *)&crc); 479 btrfs_csum_final(crc, (char *)&crc);
480 io_ctl_unmap_page(io_ctl); 480 io_ctl_unmap_page(io_ctl);
481 tmp = page_address(io_ctl->pages[0]); 481 tmp = page_address(io_ctl->pages[0]);
@@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
503 503
504 io_ctl_map_page(io_ctl, 0); 504 io_ctl_map_page(io_ctl, 0);
505 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 505 crc = btrfs_csum_data(io_ctl->orig + offset, crc,
506 PAGE_CACHE_SIZE - offset); 506 PAGE_SIZE - offset);
507 btrfs_csum_final(crc, (char *)&crc); 507 btrfs_csum_final(crc, (char *)&crc);
508 if (val != crc) { 508 if (val != crc) {
509 btrfs_err_rl(io_ctl->root->fs_info, 509 btrfs_err_rl(io_ctl->root->fs_info,
@@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
561 io_ctl_map_page(io_ctl, 0); 561 io_ctl_map_page(io_ctl, 0);
562 } 562 }
563 563
564 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); 564 memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
565 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 565 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
566 if (io_ctl->index < io_ctl->num_pages) 566 if (io_ctl->index < io_ctl->num_pages)
567 io_ctl_map_page(io_ctl, 0); 567 io_ctl_map_page(io_ctl, 0);
@@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
621 if (ret) 621 if (ret)
622 return ret; 622 return ret;
623 623
624 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); 624 memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
625 io_ctl_unmap_page(io_ctl); 625 io_ctl_unmap_page(io_ctl);
626 626
627 return 0; 627 return 0;
@@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
775 } else { 775 } else {
776 ASSERT(num_bitmaps); 776 ASSERT(num_bitmaps);
777 num_bitmaps--; 777 num_bitmaps--;
778 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 778 e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
779 if (!e->bitmap) { 779 if (!e->bitmap) {
780 kmem_cache_free( 780 kmem_cache_free(
781 btrfs_free_space_cachep, e); 781 btrfs_free_space_cachep, e);
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1661 * we add more bitmaps. 1661 * we add more bitmaps.
1662 */ 1662 */
1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; 1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
1664 1664
1665 if (bitmap_bytes >= max_bytes) { 1665 if (bitmap_bytes >= max_bytes) {
1666 ctl->extents_thresh = 0; 1666 ctl->extents_thresh = 0;
@@ -2111,7 +2111,7 @@ new_bitmap:
2111 } 2111 }
2112 2112
2113 /* allocate the bitmap */ 2113 /* allocate the bitmap */
2114 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 2114 info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
2115 spin_lock(&ctl->tree_lock); 2115 spin_lock(&ctl->tree_lock);
2116 if (!info->bitmap) { 2116 if (!info->bitmap) {
2117 ret = -ENOMEM; 2117 ret = -ENOMEM;
@@ -3580,7 +3580,7 @@ again:
3580 } 3580 }
3581 3581
3582 if (!map) { 3582 if (!map) {
3583 map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 3583 map = kzalloc(PAGE_SIZE, GFP_NOFS);
3584 if (!map) { 3584 if (!map) {
3585 kmem_cache_free(btrfs_free_space_cachep, info); 3585 kmem_cache_free(btrfs_free_space_cachep, info);
3586 return -ENOMEM; 3586 return -ENOMEM;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 1f0ec19b23f6..70107f7c9307 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
283} 283}
284 284
285#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space)) 285#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
286#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) 286#define INODES_PER_BITMAP (PAGE_SIZE * 8)
287 287
288/* 288/*
289 * The goal is to keep the memory used by the free_ino tree won't 289 * The goal is to keep the memory used by the free_ino tree won't
@@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
317 } 317 }
318 318
319 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * 319 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
320 PAGE_CACHE_SIZE / sizeof(*info); 320 PAGE_SIZE / sizeof(*info);
321} 321}
322 322
323/* 323/*
@@ -481,12 +481,12 @@ again:
481 481
482 spin_lock(&ctl->tree_lock); 482 spin_lock(&ctl->tree_lock);
483 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; 483 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
484 prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); 484 prealloc = ALIGN(prealloc, PAGE_SIZE);
485 prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; 485 prealloc += ctl->total_bitmaps * PAGE_SIZE;
486 spin_unlock(&ctl->tree_lock); 486 spin_unlock(&ctl->tree_lock);
487 487
488 /* Just to make sure we have enough space */ 488 /* Just to make sure we have enough space */
489 prealloc += 8 * PAGE_CACHE_SIZE; 489 prealloc += 8 * PAGE_SIZE;
490 490
491 ret = btrfs_delalloc_reserve_space(inode, 0, prealloc); 491 ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
492 if (ret) 492 if (ret)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a5688ffdfe..2aaba58b4856 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
194 while (compressed_size > 0) { 194 while (compressed_size > 0) {
195 cpage = compressed_pages[i]; 195 cpage = compressed_pages[i];
196 cur_size = min_t(unsigned long, compressed_size, 196 cur_size = min_t(unsigned long, compressed_size,
197 PAGE_CACHE_SIZE); 197 PAGE_SIZE);
198 198
199 kaddr = kmap_atomic(cpage); 199 kaddr = kmap_atomic(cpage);
200 write_extent_buffer(leaf, kaddr, ptr, cur_size); 200 write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
208 compress_type); 208 compress_type);
209 } else { 209 } else {
210 page = find_get_page(inode->i_mapping, 210 page = find_get_page(inode->i_mapping,
211 start >> PAGE_CACHE_SHIFT); 211 start >> PAGE_SHIFT);
212 btrfs_set_file_extent_compression(leaf, ei, 0); 212 btrfs_set_file_extent_compression(leaf, ei, 0);
213 kaddr = kmap_atomic(page); 213 kaddr = kmap_atomic(page);
214 offset = start & (PAGE_CACHE_SIZE - 1); 214 offset = start & (PAGE_SIZE - 1);
215 write_extent_buffer(leaf, kaddr + offset, ptr, size); 215 write_extent_buffer(leaf, kaddr + offset, ptr, size);
216 kunmap_atomic(kaddr); 216 kunmap_atomic(kaddr);
217 page_cache_release(page); 217 put_page(page);
218 } 218 }
219 btrfs_mark_buffer_dirty(leaf); 219 btrfs_mark_buffer_dirty(leaf);
220 btrfs_release_path(path); 220 btrfs_release_path(path);
@@ -322,7 +322,7 @@ out:
322 * And at reserve time, it's always aligned to page size, so 322 * And at reserve time, it's always aligned to page size, so
323 * just free one page here. 323 * just free one page here.
324 */ 324 */
325 btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); 325 btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
326 btrfs_free_path(path); 326 btrfs_free_path(path);
327 btrfs_end_transaction(trans, root); 327 btrfs_end_transaction(trans, root);
328 return ret; 328 return ret;
@@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode,
435 actual_end = min_t(u64, isize, end + 1); 435 actual_end = min_t(u64, isize, end + 1);
436again: 436again:
437 will_compress = 0; 437 will_compress = 0;
438 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 438 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
439 nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE); 439 nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
440 440
441 /* 441 /*
442 * we don't want to send crud past the end of i_size through 442 * we don't want to send crud past the end of i_size through
@@ -514,7 +514,7 @@ again:
514 514
515 if (!ret) { 515 if (!ret) {
516 unsigned long offset = total_compressed & 516 unsigned long offset = total_compressed &
517 (PAGE_CACHE_SIZE - 1); 517 (PAGE_SIZE - 1);
518 struct page *page = pages[nr_pages_ret - 1]; 518 struct page *page = pages[nr_pages_ret - 1];
519 char *kaddr; 519 char *kaddr;
520 520
@@ -524,7 +524,7 @@ again:
524 if (offset) { 524 if (offset) {
525 kaddr = kmap_atomic(page); 525 kaddr = kmap_atomic(page);
526 memset(kaddr + offset, 0, 526 memset(kaddr + offset, 0,
527 PAGE_CACHE_SIZE - offset); 527 PAGE_SIZE - offset);
528 kunmap_atomic(kaddr); 528 kunmap_atomic(kaddr);
529 } 529 }
530 will_compress = 1; 530 will_compress = 1;
@@ -580,7 +580,7 @@ cont:
580 * one last check to make sure the compression is really a 580 * one last check to make sure the compression is really a
581 * win, compare the page count read with the blocks on disk 581 * win, compare the page count read with the blocks on disk
582 */ 582 */
583 total_in = ALIGN(total_in, PAGE_CACHE_SIZE); 583 total_in = ALIGN(total_in, PAGE_SIZE);
584 if (total_compressed >= total_in) { 584 if (total_compressed >= total_in) {
585 will_compress = 0; 585 will_compress = 0;
586 } else { 586 } else {
@@ -594,7 +594,7 @@ cont:
594 */ 594 */
595 for (i = 0; i < nr_pages_ret; i++) { 595 for (i = 0; i < nr_pages_ret; i++) {
596 WARN_ON(pages[i]->mapping); 596 WARN_ON(pages[i]->mapping);
597 page_cache_release(pages[i]); 597 put_page(pages[i]);
598 } 598 }
599 kfree(pages); 599 kfree(pages);
600 pages = NULL; 600 pages = NULL;
@@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed:
650free_pages_out: 650free_pages_out:
651 for (i = 0; i < nr_pages_ret; i++) { 651 for (i = 0; i < nr_pages_ret; i++) {
652 WARN_ON(pages[i]->mapping); 652 WARN_ON(pages[i]->mapping);
653 page_cache_release(pages[i]); 653 put_page(pages[i]);
654 } 654 }
655 kfree(pages); 655 kfree(pages);
656} 656}
@@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
664 664
665 for (i = 0; i < async_extent->nr_pages; i++) { 665 for (i = 0; i < async_extent->nr_pages; i++) {
666 WARN_ON(async_extent->pages[i]->mapping); 666 WARN_ON(async_extent->pages[i]->mapping);
667 page_cache_release(async_extent->pages[i]); 667 put_page(async_extent->pages[i]);
668 } 668 }
669 kfree(async_extent->pages); 669 kfree(async_extent->pages);
670 async_extent->nr_pages = 0; 670 async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode,
966 PAGE_END_WRITEBACK); 966 PAGE_END_WRITEBACK);
967 967
968 *nr_written = *nr_written + 968 *nr_written = *nr_written +
969 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 969 (end - start + PAGE_SIZE) / PAGE_SIZE;
970 *page_started = 1; 970 *page_started = 1;
971 goto out; 971 goto out;
972 } else if (ret < 0) { 972 } else if (ret < 0) {
@@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
1106 async_cow = container_of(work, struct async_cow, work); 1106 async_cow = container_of(work, struct async_cow, work);
1107 1107
1108 root = async_cow->root; 1108 root = async_cow->root;
1109 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1109 nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1110 PAGE_CACHE_SHIFT; 1110 PAGE_SHIFT;
1111 1111
1112 /* 1112 /*
1113 * atomic_sub_return implies a barrier for waitqueue_active 1113 * atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1164 async_cow_start, async_cow_submit, 1164 async_cow_start, async_cow_submit,
1165 async_cow_free); 1165 async_cow_free);
1166 1166
1167 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> 1167 nr_pages = (cur_end - start + PAGE_SIZE) >>
1168 PAGE_CACHE_SHIFT; 1168 PAGE_SHIFT;
1169 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); 1169 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1170 1170
1171 btrfs_queue_work(root->fs_info->delalloc_workers, 1171 btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1960int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 1960int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1961 struct extent_state **cached_state) 1961 struct extent_state **cached_state)
1962{ 1962{
1963 WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); 1963 WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1964 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1964 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1965 cached_state, GFP_NOFS); 1965 cached_state, GFP_NOFS);
1966} 1966}
@@ -1993,7 +1993,7 @@ again:
1993 1993
1994 inode = page->mapping->host; 1994 inode = page->mapping->host;
1995 page_start = page_offset(page); 1995 page_start = page_offset(page);
1996 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1996 page_end = page_offset(page) + PAGE_SIZE - 1;
1997 1997
1998 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 1998 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
1999 &cached_state); 1999 &cached_state);
@@ -2003,7 +2003,7 @@ again:
2003 goto out; 2003 goto out;
2004 2004
2005 ordered = btrfs_lookup_ordered_range(inode, page_start, 2005 ordered = btrfs_lookup_ordered_range(inode, page_start,
2006 PAGE_CACHE_SIZE); 2006 PAGE_SIZE);
2007 if (ordered) { 2007 if (ordered) {
2008 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 2008 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2009 page_end, &cached_state, GFP_NOFS); 2009 page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@ again:
2014 } 2014 }
2015 2015
2016 ret = btrfs_delalloc_reserve_space(inode, page_start, 2016 ret = btrfs_delalloc_reserve_space(inode, page_start,
2017 PAGE_CACHE_SIZE); 2017 PAGE_SIZE);
2018 if (ret) { 2018 if (ret) {
2019 mapping_set_error(page->mapping, ret); 2019 mapping_set_error(page->mapping, ret);
2020 end_extent_writepage(page, ret, page_start, page_end); 2020 end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@ out:
2030 &cached_state, GFP_NOFS); 2030 &cached_state, GFP_NOFS);
2031out_page: 2031out_page:
2032 unlock_page(page); 2032 unlock_page(page);
2033 page_cache_release(page); 2033 put_page(page);
2034 kfree(fixup); 2034 kfree(fixup);
2035} 2035}
2036 2036
@@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2063 return -EAGAIN; 2063 return -EAGAIN;
2064 2064
2065 SetPageChecked(page); 2065 SetPageChecked(page);
2066 page_cache_get(page); 2066 get_page(page);
2067 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2067 btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2068 btrfs_writepage_fixup_worker, NULL, NULL); 2068 btrfs_writepage_fixup_worker, NULL, NULL);
2069 fixup->page = page; 2069 fixup->page = page;
@@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode,
4247 4247
4248 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { 4248 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4249 loff_t offset = new_size; 4249 loff_t offset = new_size;
4250 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); 4250 loff_t page_end = ALIGN(offset, PAGE_SIZE);
4251 4251
4252 /* 4252 /*
4253 * Zero out the remaining of the last page of our inline extent, 4253 * Zero out the remaining of the last page of our inline extent,
@@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4633 struct extent_state *cached_state = NULL; 4633 struct extent_state *cached_state = NULL;
4634 char *kaddr; 4634 char *kaddr;
4635 u32 blocksize = root->sectorsize; 4635 u32 blocksize = root->sectorsize;
4636 pgoff_t index = from >> PAGE_CACHE_SHIFT; 4636 pgoff_t index = from >> PAGE_SHIFT;
4637 unsigned offset = from & (blocksize - 1); 4637 unsigned offset = from & (blocksize - 1);
4638 struct page *page; 4638 struct page *page;
4639 gfp_t mask = btrfs_alloc_write_mask(mapping); 4639 gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@ again:
4668 lock_page(page); 4668 lock_page(page);
4669 if (page->mapping != mapping) { 4669 if (page->mapping != mapping) {
4670 unlock_page(page); 4670 unlock_page(page);
4671 page_cache_release(page); 4671 put_page(page);
4672 goto again; 4672 goto again;
4673 } 4673 }
4674 if (!PageUptodate(page)) { 4674 if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@ again:
4686 unlock_extent_cached(io_tree, block_start, block_end, 4686 unlock_extent_cached(io_tree, block_start, block_end,
4687 &cached_state, GFP_NOFS); 4687 &cached_state, GFP_NOFS);
4688 unlock_page(page); 4688 unlock_page(page);
4689 page_cache_release(page); 4689 put_page(page);
4690 btrfs_start_ordered_extent(inode, ordered, 1); 4690 btrfs_start_ordered_extent(inode, ordered, 1);
4691 btrfs_put_ordered_extent(ordered); 4691 btrfs_put_ordered_extent(ordered);
4692 goto again; 4692 goto again;
@@ -4728,7 +4728,7 @@ out_unlock:
4728 btrfs_delalloc_release_space(inode, block_start, 4728 btrfs_delalloc_release_space(inode, block_start,
4729 blocksize); 4729 blocksize);
4730 unlock_page(page); 4730 unlock_page(page);
4731 page_cache_release(page); 4731 put_page(page);
4732out: 4732out:
4733 return ret; 4733 return ret;
4734} 4734}
@@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
6717 6717
6718 read_extent_buffer(leaf, tmp, ptr, inline_size); 6718 read_extent_buffer(leaf, tmp, ptr, inline_size);
6719 6719
6720 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 6720 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6721 ret = btrfs_decompress(compress_type, tmp, page, 6721 ret = btrfs_decompress(compress_type, tmp, page,
6722 extent_offset, inline_size, max_size); 6722 extent_offset, inline_size, max_size);
6723 kfree(tmp); 6723 kfree(tmp);
@@ -6879,8 +6879,8 @@ next:
6879 6879
6880 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6880 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6881 extent_offset = page_offset(page) + pg_offset - extent_start; 6881 extent_offset = page_offset(page) + pg_offset - extent_start;
6882 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 6882 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6883 size - extent_offset); 6883 size - extent_offset);
6884 em->start = extent_start + extent_offset; 6884 em->start = extent_start + extent_offset;
6885 em->len = ALIGN(copy_size, root->sectorsize); 6885 em->len = ALIGN(copy_size, root->sectorsize);
6886 em->orig_block_len = em->len; 6886 em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@ next:
6899 map = kmap(page); 6899 map = kmap(page);
6900 read_extent_buffer(leaf, map + pg_offset, ptr, 6900 read_extent_buffer(leaf, map + pg_offset, ptr,
6901 copy_size); 6901 copy_size);
6902 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 6902 if (pg_offset + copy_size < PAGE_SIZE) {
6903 memset(map + pg_offset + copy_size, 0, 6903 memset(map + pg_offset + copy_size, 0,
6904 PAGE_CACHE_SIZE - pg_offset - 6904 PAGE_SIZE - pg_offset -
6905 copy_size); 6905 copy_size);
6906 } 6906 }
6907 kunmap(page); 6907 kunmap(page);
@@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7336 int start_idx; 7336 int start_idx;
7337 int end_idx; 7337 int end_idx;
7338 7338
7339 start_idx = start >> PAGE_CACHE_SHIFT; 7339 start_idx = start >> PAGE_SHIFT;
7340 7340
7341 /* 7341 /*
7342 * end is the last byte in the last page. end == start is legal 7342 * end is the last byte in the last page. end == start is legal
7343 */ 7343 */
7344 end_idx = end >> PAGE_CACHE_SHIFT; 7344 end_idx = end >> PAGE_SHIFT;
7345 7345
7346 rcu_read_lock(); 7346 rcu_read_lock();
7347 7347
@@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7382 * include/linux/pagemap.h for details. 7382 * include/linux/pagemap.h for details.
7383 */ 7383 */
7384 if (unlikely(page != *pagep)) { 7384 if (unlikely(page != *pagep)) {
7385 page_cache_release(page); 7385 put_page(page);
7386 page = NULL; 7386 page = NULL;
7387 } 7387 }
7388 } 7388 }
@@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7390 if (page) { 7390 if (page) {
7391 if (page->index <= end_idx) 7391 if (page->index <= end_idx)
7392 found = true; 7392 found = true;
7393 page_cache_release(page); 7393 put_page(page);
7394 } 7394 }
7395 7395
7396 rcu_read_unlock(); 7396 rcu_read_unlock();
@@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8719 if (ret == 1) { 8719 if (ret == 1) {
8720 ClearPagePrivate(page); 8720 ClearPagePrivate(page);
8721 set_page_private(page, 0); 8721 set_page_private(page, 0);
8722 page_cache_release(page); 8722 put_page(page);
8723 } 8723 }
8724 return ret; 8724 return ret;
8725} 8725}
@@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8739 struct btrfs_ordered_extent *ordered; 8739 struct btrfs_ordered_extent *ordered;
8740 struct extent_state *cached_state = NULL; 8740 struct extent_state *cached_state = NULL;
8741 u64 page_start = page_offset(page); 8741 u64 page_start = page_offset(page);
8742 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 8742 u64 page_end = page_start + PAGE_SIZE - 1;
8743 u64 start; 8743 u64 start;
8744 u64 end; 8744 u64 end;
8745 int inode_evicting = inode->i_state & I_FREEING; 8745 int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@ again:
8822 * 2) Not written to disk 8822 * 2) Not written to disk
8823 * This means the reserved space should be freed here. 8823 * This means the reserved space should be freed here.
8824 */ 8824 */
8825 btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); 8825 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8826 if (!inode_evicting) { 8826 if (!inode_evicting) {
8827 clear_extent_bit(tree, page_start, page_end, 8827 clear_extent_bit(tree, page_start, page_end,
8828 EXTENT_LOCKED | EXTENT_DIRTY | 8828 EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@ again:
8837 if (PagePrivate(page)) { 8837 if (PagePrivate(page)) {
8838 ClearPagePrivate(page); 8838 ClearPagePrivate(page);
8839 set_page_private(page, 0); 8839 set_page_private(page, 0);
8840 page_cache_release(page); 8840 put_page(page);
8841 } 8841 }
8842} 8842}
8843 8843
@@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8874 u64 page_end; 8874 u64 page_end;
8875 u64 end; 8875 u64 end;
8876 8876
8877 reserved_space = PAGE_CACHE_SIZE; 8877 reserved_space = PAGE_SIZE;
8878 8878
8879 sb_start_pagefault(inode->i_sb); 8879 sb_start_pagefault(inode->i_sb);
8880 page_start = page_offset(page); 8880 page_start = page_offset(page);
8881 page_end = page_start + PAGE_CACHE_SIZE - 1; 8881 page_end = page_start + PAGE_SIZE - 1;
8882 end = page_end; 8882 end = page_end;
8883 8883
8884 /* 8884 /*
@@ -8934,15 +8934,15 @@ again:
8934 goto again; 8934 goto again;
8935 } 8935 }
8936 8936
8937 if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) { 8937 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8938 reserved_space = round_up(size - page_start, root->sectorsize); 8938 reserved_space = round_up(size - page_start, root->sectorsize);
8939 if (reserved_space < PAGE_CACHE_SIZE) { 8939 if (reserved_space < PAGE_SIZE) {
8940 end = page_start + reserved_space - 1; 8940 end = page_start + reserved_space - 1;
8941 spin_lock(&BTRFS_I(inode)->lock); 8941 spin_lock(&BTRFS_I(inode)->lock);
8942 BTRFS_I(inode)->outstanding_extents++; 8942 BTRFS_I(inode)->outstanding_extents++;
8943 spin_unlock(&BTRFS_I(inode)->lock); 8943 spin_unlock(&BTRFS_I(inode)->lock);
8944 btrfs_delalloc_release_space(inode, page_start, 8944 btrfs_delalloc_release_space(inode, page_start,
8945 PAGE_CACHE_SIZE - reserved_space); 8945 PAGE_SIZE - reserved_space);
8946 } 8946 }
8947 } 8947 }
8948 8948
@@ -8969,14 +8969,14 @@ again:
8969 ret = 0; 8969 ret = 0;
8970 8970
8971 /* page is wholly or partially inside EOF */ 8971 /* page is wholly or partially inside EOF */
8972 if (page_start + PAGE_CACHE_SIZE > size) 8972 if (page_start + PAGE_SIZE > size)
8973 zero_start = size & ~PAGE_CACHE_MASK; 8973 zero_start = size & ~PAGE_MASK;
8974 else 8974 else
8975 zero_start = PAGE_CACHE_SIZE; 8975 zero_start = PAGE_SIZE;
8976 8976
8977 if (zero_start != PAGE_CACHE_SIZE) { 8977 if (zero_start != PAGE_SIZE) {
8978 kaddr = kmap(page); 8978 kaddr = kmap(page);
8979 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 8979 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
8980 flush_dcache_page(page); 8980 flush_dcache_page(page);
8981 kunmap(page); 8981 kunmap(page);
8982 } 8982 }
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 053e677839fe..5a23806ae418 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -898,7 +898,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
898 u64 end; 898 u64 end;
899 899
900 read_lock(&em_tree->lock); 900 read_lock(&em_tree->lock);
901 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); 901 em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
902 read_unlock(&em_tree->lock); 902 read_unlock(&em_tree->lock);
903 903
904 if (em) { 904 if (em) {
@@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
988 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 988 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
989 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 989 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
990 struct extent_map *em; 990 struct extent_map *em;
991 u64 len = PAGE_CACHE_SIZE; 991 u64 len = PAGE_SIZE;
992 992
993 /* 993 /*
994 * hopefully we have this extent in the tree already, try without 994 * hopefully we have this extent in the tree already, try without
@@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode,
1124 struct extent_io_tree *tree; 1124 struct extent_io_tree *tree;
1125 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 1125 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1126 1126
1127 file_end = (isize - 1) >> PAGE_CACHE_SHIFT; 1127 file_end = (isize - 1) >> PAGE_SHIFT;
1128 if (!isize || start_index > file_end) 1128 if (!isize || start_index > file_end)
1129 return 0; 1129 return 0;
1130 1130
1131 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); 1131 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
1132 1132
1133 ret = btrfs_delalloc_reserve_space(inode, 1133 ret = btrfs_delalloc_reserve_space(inode,
1134 start_index << PAGE_CACHE_SHIFT, 1134 start_index << PAGE_SHIFT,
1135 page_cnt << PAGE_CACHE_SHIFT); 1135 page_cnt << PAGE_SHIFT);
1136 if (ret) 1136 if (ret)
1137 return ret; 1137 return ret;
1138 i_done = 0; 1138 i_done = 0;
@@ -1148,7 +1148,7 @@ again:
1148 break; 1148 break;
1149 1149
1150 page_start = page_offset(page); 1150 page_start = page_offset(page);
1151 page_end = page_start + PAGE_CACHE_SIZE - 1; 1151 page_end = page_start + PAGE_SIZE - 1;
1152 while (1) { 1152 while (1) {
1153 lock_extent_bits(tree, page_start, page_end, 1153 lock_extent_bits(tree, page_start, page_end,
1154 &cached_state); 1154 &cached_state);
@@ -1169,7 +1169,7 @@ again:
1169 */ 1169 */
1170 if (page->mapping != inode->i_mapping) { 1170 if (page->mapping != inode->i_mapping) {
1171 unlock_page(page); 1171 unlock_page(page);
1172 page_cache_release(page); 1172 put_page(page);
1173 goto again; 1173 goto again;
1174 } 1174 }
1175 } 1175 }
@@ -1179,7 +1179,7 @@ again:
1179 lock_page(page); 1179 lock_page(page);
1180 if (!PageUptodate(page)) { 1180 if (!PageUptodate(page)) {
1181 unlock_page(page); 1181 unlock_page(page);
1182 page_cache_release(page); 1182 put_page(page);
1183 ret = -EIO; 1183 ret = -EIO;
1184 break; 1184 break;
1185 } 1185 }
@@ -1187,7 +1187,7 @@ again:
1187 1187
1188 if (page->mapping != inode->i_mapping) { 1188 if (page->mapping != inode->i_mapping) {
1189 unlock_page(page); 1189 unlock_page(page);
1190 page_cache_release(page); 1190 put_page(page);
1191 goto again; 1191 goto again;
1192 } 1192 }
1193 1193
@@ -1208,7 +1208,7 @@ again:
1208 wait_on_page_writeback(pages[i]); 1208 wait_on_page_writeback(pages[i]);
1209 1209
1210 page_start = page_offset(pages[0]); 1210 page_start = page_offset(pages[0]);
1211 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; 1211 page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
1212 1212
1213 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1213 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1214 page_start, page_end - 1, &cached_state); 1214 page_start, page_end - 1, &cached_state);
@@ -1222,8 +1222,8 @@ again:
1222 BTRFS_I(inode)->outstanding_extents++; 1222 BTRFS_I(inode)->outstanding_extents++;
1223 spin_unlock(&BTRFS_I(inode)->lock); 1223 spin_unlock(&BTRFS_I(inode)->lock);
1224 btrfs_delalloc_release_space(inode, 1224 btrfs_delalloc_release_space(inode,
1225 start_index << PAGE_CACHE_SHIFT, 1225 start_index << PAGE_SHIFT,
1226 (page_cnt - i_done) << PAGE_CACHE_SHIFT); 1226 (page_cnt - i_done) << PAGE_SHIFT);
1227 } 1227 }
1228 1228
1229 1229
@@ -1240,17 +1240,17 @@ again:
1240 set_page_extent_mapped(pages[i]); 1240 set_page_extent_mapped(pages[i]);
1241 set_page_dirty(pages[i]); 1241 set_page_dirty(pages[i]);
1242 unlock_page(pages[i]); 1242 unlock_page(pages[i]);
1243 page_cache_release(pages[i]); 1243 put_page(pages[i]);
1244 } 1244 }
1245 return i_done; 1245 return i_done;
1246out: 1246out:
1247 for (i = 0; i < i_done; i++) { 1247 for (i = 0; i < i_done; i++) {
1248 unlock_page(pages[i]); 1248 unlock_page(pages[i]);
1249 page_cache_release(pages[i]); 1249 put_page(pages[i]);
1250 } 1250 }
1251 btrfs_delalloc_release_space(inode, 1251 btrfs_delalloc_release_space(inode,
1252 start_index << PAGE_CACHE_SHIFT, 1252 start_index << PAGE_SHIFT,
1253 page_cnt << PAGE_CACHE_SHIFT); 1253 page_cnt << PAGE_SHIFT);
1254 return ret; 1254 return ret;
1255 1255
1256} 1256}
@@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1273 int defrag_count = 0; 1273 int defrag_count = 0;
1274 int compress_type = BTRFS_COMPRESS_ZLIB; 1274 int compress_type = BTRFS_COMPRESS_ZLIB;
1275 u32 extent_thresh = range->extent_thresh; 1275 u32 extent_thresh = range->extent_thresh;
1276 unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT; 1276 unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1277 unsigned long cluster = max_cluster; 1277 unsigned long cluster = max_cluster;
1278 u64 new_align = ~((u64)SZ_128K - 1); 1278 u64 new_align = ~((u64)SZ_128K - 1);
1279 struct page **pages = NULL; 1279 struct page **pages = NULL;
@@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1317 /* find the last page to defrag */ 1317 /* find the last page to defrag */
1318 if (range->start + range->len > range->start) { 1318 if (range->start + range->len > range->start) {
1319 last_index = min_t(u64, isize - 1, 1319 last_index = min_t(u64, isize - 1,
1320 range->start + range->len - 1) >> PAGE_CACHE_SHIFT; 1320 range->start + range->len - 1) >> PAGE_SHIFT;
1321 } else { 1321 } else {
1322 last_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1322 last_index = (isize - 1) >> PAGE_SHIFT;
1323 } 1323 }
1324 1324
1325 if (newer_than) { 1325 if (newer_than) {
@@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1331 * we always align our defrag to help keep 1331 * we always align our defrag to help keep
1332 * the extents in the file evenly spaced 1332 * the extents in the file evenly spaced
1333 */ 1333 */
1334 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; 1334 i = (newer_off & new_align) >> PAGE_SHIFT;
1335 } else 1335 } else
1336 goto out_ra; 1336 goto out_ra;
1337 } else { 1337 } else {
1338 i = range->start >> PAGE_CACHE_SHIFT; 1338 i = range->start >> PAGE_SHIFT;
1339 } 1339 }
1340 if (!max_to_defrag) 1340 if (!max_to_defrag)
1341 max_to_defrag = last_index - i + 1; 1341 max_to_defrag = last_index - i + 1;
@@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1348 inode->i_mapping->writeback_index = i; 1348 inode->i_mapping->writeback_index = i;
1349 1349
1350 while (i <= last_index && defrag_count < max_to_defrag && 1350 while (i <= last_index && defrag_count < max_to_defrag &&
1351 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) { 1351 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
1352 /* 1352 /*
1353 * make sure we stop running if someone unmounts 1353 * make sure we stop running if someone unmounts
1354 * the FS 1354 * the FS
@@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1362 break; 1362 break;
1363 } 1363 }
1364 1364
1365 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1365 if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
1366 extent_thresh, &last_len, &skip, 1366 extent_thresh, &last_len, &skip,
1367 &defrag_end, range->flags & 1367 &defrag_end, range->flags &
1368 BTRFS_DEFRAG_RANGE_COMPRESS)) { 1368 BTRFS_DEFRAG_RANGE_COMPRESS)) {
@@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1371 * the should_defrag function tells us how much to skip 1371 * the should_defrag function tells us how much to skip
1372 * bump our counter by the suggested amount 1372 * bump our counter by the suggested amount
1373 */ 1373 */
1374 next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE); 1374 next = DIV_ROUND_UP(skip, PAGE_SIZE);
1375 i = max(i + 1, next); 1375 i = max(i + 1, next);
1376 continue; 1376 continue;
1377 } 1377 }
1378 1378
1379 if (!newer_than) { 1379 if (!newer_than) {
1380 cluster = (PAGE_CACHE_ALIGN(defrag_end) >> 1380 cluster = (PAGE_ALIGN(defrag_end) >>
1381 PAGE_CACHE_SHIFT) - i; 1381 PAGE_SHIFT) - i;
1382 cluster = min(cluster, max_cluster); 1382 cluster = min(cluster, max_cluster);
1383 } else { 1383 } else {
1384 cluster = max_cluster; 1384 cluster = max_cluster;
@@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1412 i += ret; 1412 i += ret;
1413 1413
1414 newer_off = max(newer_off + 1, 1414 newer_off = max(newer_off + 1,
1415 (u64)i << PAGE_CACHE_SHIFT); 1415 (u64)i << PAGE_SHIFT);
1416 1416
1417 ret = find_new_extents(root, inode, newer_than, 1417 ret = find_new_extents(root, inode, newer_than,
1418 &newer_off, SZ_64K); 1418 &newer_off, SZ_64K);
1419 if (!ret) { 1419 if (!ret) {
1420 range->start = newer_off; 1420 range->start = newer_off;
1421 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; 1421 i = (newer_off & new_align) >> PAGE_SHIFT;
1422 } else { 1422 } else {
1423 break; 1423 break;
1424 } 1424 }
1425 } else { 1425 } else {
1426 if (ret > 0) { 1426 if (ret > 0) {
1427 i += ret; 1427 i += ret;
1428 last_len += ret << PAGE_CACHE_SHIFT; 1428 last_len += ret << PAGE_SHIFT;
1429 } else { 1429 } else {
1430 i++; 1430 i++;
1431 last_len = 0; 1431 last_len = 0;
@@ -1654,7 +1654,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1654 1654
1655 src_inode = file_inode(src.file); 1655 src_inode = file_inode(src.file);
1656 if (src_inode->i_sb != file_inode(file)->i_sb) { 1656 if (src_inode->i_sb != file_inode(file)->i_sb) {
1657 btrfs_info(BTRFS_I(src_inode)->root->fs_info, 1657 btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1658 "Snapshot src from another FS"); 1658 "Snapshot src from another FS");
1659 ret = -EXDEV; 1659 ret = -EXDEV;
1660 } else if (!inode_owner_or_capable(src_inode)) { 1660 } else if (!inode_owner_or_capable(src_inode)) {
@@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1722 if (vol_args->flags & BTRFS_SUBVOL_RDONLY) 1722 if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
1723 readonly = true; 1723 readonly = true;
1724 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { 1724 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1725 if (vol_args->size > PAGE_CACHE_SIZE) { 1725 if (vol_args->size > PAGE_SIZE) {
1726 ret = -EINVAL; 1726 ret = -EINVAL;
1727 goto free_args; 1727 goto free_args;
1728 } 1728 }
@@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2806 lock_page(page); 2806 lock_page(page);
2807 if (!PageUptodate(page)) { 2807 if (!PageUptodate(page)) {
2808 unlock_page(page); 2808 unlock_page(page);
2809 page_cache_release(page); 2809 put_page(page);
2810 return ERR_PTR(-EIO); 2810 return ERR_PTR(-EIO);
2811 } 2811 }
2812 if (page->mapping != inode->i_mapping) { 2812 if (page->mapping != inode->i_mapping) {
2813 unlock_page(page); 2813 unlock_page(page);
2814 page_cache_release(page); 2814 put_page(page);
2815 return ERR_PTR(-EAGAIN); 2815 return ERR_PTR(-EAGAIN);
2816 } 2816 }
2817 } 2817 }
@@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
2823 int num_pages, u64 off) 2823 int num_pages, u64 off)
2824{ 2824{
2825 int i; 2825 int i;
2826 pgoff_t index = off >> PAGE_CACHE_SHIFT; 2826 pgoff_t index = off >> PAGE_SHIFT;
2827 2827
2828 for (i = 0; i < num_pages; i++) { 2828 for (i = 0; i < num_pages; i++) {
2829again: 2829again:
@@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2932 pg = cmp->src_pages[i]; 2932 pg = cmp->src_pages[i];
2933 if (pg) { 2933 if (pg) {
2934 unlock_page(pg); 2934 unlock_page(pg);
2935 page_cache_release(pg); 2935 put_page(pg);
2936 } 2936 }
2937 pg = cmp->dst_pages[i]; 2937 pg = cmp->dst_pages[i];
2938 if (pg) { 2938 if (pg) {
2939 unlock_page(pg); 2939 unlock_page(pg);
2940 page_cache_release(pg); 2940 put_page(pg);
2941 } 2941 }
2942 } 2942 }
2943 kfree(cmp->src_pages); 2943 kfree(cmp->src_pages);
@@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2949 u64 len, struct cmp_pages *cmp) 2949 u64 len, struct cmp_pages *cmp)
2950{ 2950{
2951 int ret; 2951 int ret;
2952 int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; 2952 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
2953 struct page **src_pgarr, **dst_pgarr; 2953 struct page **src_pgarr, **dst_pgarr;
2954 2954
2955 /* 2955 /*
@@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2987 int ret = 0; 2987 int ret = 0;
2988 int i; 2988 int i;
2989 struct page *src_page, *dst_page; 2989 struct page *src_page, *dst_page;
2990 unsigned int cmp_len = PAGE_CACHE_SIZE; 2990 unsigned int cmp_len = PAGE_SIZE;
2991 void *addr, *dst_addr; 2991 void *addr, *dst_addr;
2992 2992
2993 i = 0; 2993 i = 0;
2994 while (len) { 2994 while (len) {
2995 if (len < PAGE_CACHE_SIZE) 2995 if (len < PAGE_SIZE)
2996 cmp_len = len; 2996 cmp_len = len;
2997 2997
2998 BUG_ON(i >= cmp->num_pages); 2998 BUG_ON(i >= cmp->num_pages);
@@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
3191 if (olen > BTRFS_MAX_DEDUPE_LEN) 3191 if (olen > BTRFS_MAX_DEDUPE_LEN)
3192 olen = BTRFS_MAX_DEDUPE_LEN; 3192 olen = BTRFS_MAX_DEDUPE_LEN;
3193 3193
3194 if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) { 3194 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3195 /* 3195 /*
3196 * Btrfs does not support blocksize < page_size. As a 3196 * Btrfs does not support blocksize < page_size. As a
3197 * result, btrfs_cmp_data() won't correctly handle 3197 * result, btrfs_cmp_data() won't correctly handle
@@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3891 * data immediately and not the previous data. 3891 * data immediately and not the previous data.
3892 */ 3892 */
3893 truncate_inode_pages_range(&inode->i_data, 3893 truncate_inode_pages_range(&inode->i_data,
3894 round_down(destoff, PAGE_CACHE_SIZE), 3894 round_down(destoff, PAGE_SIZE),
3895 round_up(destoff + len, PAGE_CACHE_SIZE) - 1); 3895 round_up(destoff + len, PAGE_SIZE) - 1);
3896out_unlock: 3896out_unlock:
3897 if (!same_inode) 3897 if (!same_inode)
3898 btrfs_double_inode_unlock(src, inode); 3898 btrfs_double_inode_unlock(src, inode);
@@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
4124 /* we generally have at most 6 or so space infos, one for each raid 4124 /* we generally have at most 6 or so space infos, one for each raid
4125 * level. So, a whole page should be more than enough for everyone 4125 * level. So, a whole page should be more than enough for everyone
4126 */ 4126 */
4127 if (alloc_size > PAGE_CACHE_SIZE) 4127 if (alloc_size > PAGE_SIZE)
4128 return -ENOMEM; 4128 return -ENOMEM;
4129 4129
4130 space_args.total_spaces = 0; 4130 space_args.total_spaces = 0;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a2f051347731..1adfbe7be6b8 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void)
55 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
56 56
57 workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); 57 workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); 58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); 59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
60 if (!workspace->mem || !workspace->buf || !workspace->cbuf) 60 if (!workspace->mem || !workspace->buf || !workspace->cbuf)
61 goto fail; 61 goto fail;
62 62
@@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws,
116 *total_out = 0; 116 *total_out = 0;
117 *total_in = 0; 117 *total_in = 0;
118 118
119 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 119 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
120 data_in = kmap(in_page); 120 data_in = kmap(in_page);
121 121
122 /* 122 /*
@@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws,
133 tot_out = LZO_LEN; 133 tot_out = LZO_LEN;
134 pages[0] = out_page; 134 pages[0] = out_page;
135 nr_pages = 1; 135 nr_pages = 1;
136 pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; 136 pg_bytes_left = PAGE_SIZE - LZO_LEN;
137 137
138 /* compress at most one page of data each time */ 138 /* compress at most one page of data each time */
139 in_len = min(len, PAGE_CACHE_SIZE); 139 in_len = min(len, PAGE_SIZE);
140 while (tot_in < len) { 140 while (tot_in < len) {
141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, 141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
142 &out_len, workspace->mem); 142 &out_len, workspace->mem);
@@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws,
201 cpage_out = kmap(out_page); 201 cpage_out = kmap(out_page);
202 pages[nr_pages++] = out_page; 202 pages[nr_pages++] = out_page;
203 203
204 pg_bytes_left = PAGE_CACHE_SIZE; 204 pg_bytes_left = PAGE_SIZE;
205 out_offset = 0; 205 out_offset = 0;
206 } 206 }
207 } 207 }
@@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws,
221 221
222 bytes_left = len - tot_in; 222 bytes_left = len - tot_in;
223 kunmap(in_page); 223 kunmap(in_page);
224 page_cache_release(in_page); 224 put_page(in_page);
225 225
226 start += PAGE_CACHE_SIZE; 226 start += PAGE_SIZE;
227 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 227 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
228 data_in = kmap(in_page); 228 data_in = kmap(in_page);
229 in_len = min(bytes_left, PAGE_CACHE_SIZE); 229 in_len = min(bytes_left, PAGE_SIZE);
230 } 230 }
231 231
232 if (tot_out > tot_in) 232 if (tot_out > tot_in)
@@ -248,7 +248,7 @@ out:
248 248
249 if (in_page) { 249 if (in_page) {
250 kunmap(in_page); 250 kunmap(in_page);
251 page_cache_release(in_page); 251 put_page(in_page);
252 } 252 }
253 253
254 return ret; 254 return ret;
@@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
266 char *data_in; 266 char *data_in;
267 unsigned long page_in_index = 0; 267 unsigned long page_in_index = 0;
268 unsigned long page_out_index = 0; 268 unsigned long page_out_index = 0;
269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); 269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
270 unsigned long buf_start; 270 unsigned long buf_start;
271 unsigned long buf_offset = 0; 271 unsigned long buf_offset = 0;
272 unsigned long bytes; 272 unsigned long bytes;
@@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
289 tot_in = LZO_LEN; 289 tot_in = LZO_LEN;
290 in_offset = LZO_LEN; 290 in_offset = LZO_LEN;
291 tot_len = min_t(size_t, srclen, tot_len); 291 tot_len = min_t(size_t, srclen, tot_len);
292 in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; 292 in_page_bytes_left = PAGE_SIZE - LZO_LEN;
293 293
294 tot_out = 0; 294 tot_out = 0;
295 pg_offset = 0; 295 pg_offset = 0;
@@ -345,12 +345,12 @@ cont:
345 345
346 data_in = kmap(pages_in[++page_in_index]); 346 data_in = kmap(pages_in[++page_in_index]);
347 347
348 in_page_bytes_left = PAGE_CACHE_SIZE; 348 in_page_bytes_left = PAGE_SIZE;
349 in_offset = 0; 349 in_offset = 0;
350 } 350 }
351 } 351 }
352 352
353 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); 353 out_len = lzo1x_worst_compress(PAGE_SIZE);
354 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, 354 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
355 &out_len); 355 &out_len);
356 if (need_unmap) 356 if (need_unmap)
@@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
399 in_len = read_compress_length(data_in); 399 in_len = read_compress_length(data_in);
400 data_in += LZO_LEN; 400 data_in += LZO_LEN;
401 401
402 out_len = PAGE_CACHE_SIZE; 402 out_len = PAGE_SIZE;
403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); 403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
404 if (ret != LZO_E_OK) { 404 if (ret != LZO_E_OK) {
405 printk(KERN_WARNING "BTRFS: decompress failed!\n"); 405 printk(KERN_WARNING "BTRFS: decompress failed!\n");
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5279fdae7142..9e119552ed32 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1463,6 +1463,7 @@ struct btrfs_qgroup_extent_record
1463 u64 bytenr = record->bytenr; 1463 u64 bytenr = record->bytenr;
1464 1464
1465 assert_spin_locked(&delayed_refs->lock); 1465 assert_spin_locked(&delayed_refs->lock);
1466 trace_btrfs_qgroup_insert_dirty_extent(record);
1466 1467
1467 while (*p) { 1468 while (*p) {
1468 parent_node = *p; 1469 parent_node = *p;
@@ -1594,6 +1595,9 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1594 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 1595 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1595 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 1596 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1596 1597
1598 trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
1599 cur_new_count);
1600
1597 /* Rfer update part */ 1601 /* Rfer update part */
1598 if (cur_old_count == 0 && cur_new_count > 0) { 1602 if (cur_old_count == 0 && cur_new_count > 0) {
1599 qg->rfer += num_bytes; 1603 qg->rfer += num_bytes;
@@ -1683,6 +1687,9 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1683 goto out_free; 1687 goto out_free;
1684 BUG_ON(!fs_info->quota_root); 1688 BUG_ON(!fs_info->quota_root);
1685 1689
1690 trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
1691 nr_new_roots);
1692
1686 qgroups = ulist_alloc(GFP_NOFS); 1693 qgroups = ulist_alloc(GFP_NOFS);
1687 if (!qgroups) { 1694 if (!qgroups) {
1688 ret = -ENOMEM; 1695 ret = -ENOMEM;
@@ -1752,6 +1759,8 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
1752 record = rb_entry(node, struct btrfs_qgroup_extent_record, 1759 record = rb_entry(node, struct btrfs_qgroup_extent_record,
1753 node); 1760 node);
1754 1761
1762 trace_btrfs_qgroup_account_extents(record);
1763
1755 if (!ret) { 1764 if (!ret) {
1756 /* 1765 /*
1757 * Use (u64)-1 as time_seq to do special search, which 1766 * Use (u64)-1 as time_seq to do special search, which
@@ -1842,8 +1851,10 @@ out:
1842} 1851}
1843 1852
1844/* 1853/*
1845 * copy the acounting information between qgroups. This is necessary when a 1854 * Copy the acounting information between qgroups. This is necessary
1846 * snapshot or a subvolume is created 1855 * when a snapshot or a subvolume is created. Throwing an error will
1856 * cause a transaction abort so we take extra care here to only error
1857 * when a readonly fs is a reasonable outcome.
1847 */ 1858 */
1848int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, 1859int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1849 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, 1860 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
@@ -1873,15 +1884,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1873 2 * inherit->num_excl_copies; 1884 2 * inherit->num_excl_copies;
1874 for (i = 0; i < nums; ++i) { 1885 for (i = 0; i < nums; ++i) {
1875 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 1886 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1876 if (!srcgroup) {
1877 ret = -EINVAL;
1878 goto out;
1879 }
1880 1887
1881 if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) { 1888 /*
1882 ret = -EINVAL; 1889 * Zero out invalid groups so we can ignore
1883 goto out; 1890 * them later.
1884 } 1891 */
1892 if (!srcgroup ||
1893 ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
1894 *i_qgroups = 0ULL;
1895
1885 ++i_qgroups; 1896 ++i_qgroups;
1886 } 1897 }
1887 } 1898 }
@@ -1916,17 +1927,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1916 */ 1927 */
1917 if (inherit) { 1928 if (inherit) {
1918 i_qgroups = (u64 *)(inherit + 1); 1929 i_qgroups = (u64 *)(inherit + 1);
1919 for (i = 0; i < inherit->num_qgroups; ++i) { 1930 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
1931 if (*i_qgroups == 0)
1932 continue;
1920 ret = add_qgroup_relation_item(trans, quota_root, 1933 ret = add_qgroup_relation_item(trans, quota_root,
1921 objectid, *i_qgroups); 1934 objectid, *i_qgroups);
1922 if (ret) 1935 if (ret && ret != -EEXIST)
1923 goto out; 1936 goto out;
1924 ret = add_qgroup_relation_item(trans, quota_root, 1937 ret = add_qgroup_relation_item(trans, quota_root,
1925 *i_qgroups, objectid); 1938 *i_qgroups, objectid);
1926 if (ret) 1939 if (ret && ret != -EEXIST)
1927 goto out; 1940 goto out;
1928 ++i_qgroups;
1929 } 1941 }
1942 ret = 0;
1930 } 1943 }
1931 1944
1932 1945
@@ -1987,17 +2000,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1987 2000
1988 i_qgroups = (u64 *)(inherit + 1); 2001 i_qgroups = (u64 *)(inherit + 1);
1989 for (i = 0; i < inherit->num_qgroups; ++i) { 2002 for (i = 0; i < inherit->num_qgroups; ++i) {
1990 ret = add_relation_rb(quota_root->fs_info, objectid, 2003 if (*i_qgroups) {
1991 *i_qgroups); 2004 ret = add_relation_rb(quota_root->fs_info, objectid,
1992 if (ret) 2005 *i_qgroups);
1993 goto unlock; 2006 if (ret)
2007 goto unlock;
2008 }
1994 ++i_qgroups; 2009 ++i_qgroups;
1995 } 2010 }
1996 2011
1997 for (i = 0; i < inherit->num_ref_copies; ++i) { 2012 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
1998 struct btrfs_qgroup *src; 2013 struct btrfs_qgroup *src;
1999 struct btrfs_qgroup *dst; 2014 struct btrfs_qgroup *dst;
2000 2015
2016 if (!i_qgroups[0] || !i_qgroups[1])
2017 continue;
2018
2001 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2019 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2002 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2020 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2003 2021
@@ -2008,12 +2026,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2008 2026
2009 dst->rfer = src->rfer - level_size; 2027 dst->rfer = src->rfer - level_size;
2010 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2028 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2011 i_qgroups += 2;
2012 } 2029 }
2013 for (i = 0; i < inherit->num_excl_copies; ++i) { 2030 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2014 struct btrfs_qgroup *src; 2031 struct btrfs_qgroup *src;
2015 struct btrfs_qgroup *dst; 2032 struct btrfs_qgroup *dst;
2016 2033
2034 if (!i_qgroups[0] || !i_qgroups[1])
2035 continue;
2036
2017 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2037 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2018 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2038 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2019 2039
@@ -2024,7 +2044,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2024 2044
2025 dst->excl = src->excl + level_size; 2045 dst->excl = src->excl + level_size;
2026 dst->excl_cmpr = src->excl_cmpr + level_size; 2046 dst->excl_cmpr = src->excl_cmpr + level_size;
2027 i_qgroups += 2;
2028 } 2047 }
2029 2048
2030unlock: 2049unlock:
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 55161369fab1..0b7792e02dd5 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
270 s = kmap(rbio->bio_pages[i]); 270 s = kmap(rbio->bio_pages[i]);
271 d = kmap(rbio->stripe_pages[i]); 271 d = kmap(rbio->stripe_pages[i]);
272 272
273 memcpy(d, s, PAGE_CACHE_SIZE); 273 memcpy(d, s, PAGE_SIZE);
274 274
275 kunmap(rbio->bio_pages[i]); 275 kunmap(rbio->bio_pages[i]);
276 kunmap(rbio->stripe_pages[i]); 276 kunmap(rbio->stripe_pages[i]);
@@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
962 */ 962 */
963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) 963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
964{ 964{
965 return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes; 965 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
966} 966}
967 967
968/* 968/*
@@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1078 u64 disk_start; 1078 u64 disk_start;
1079 1079
1080 stripe = &rbio->bbio->stripes[stripe_nr]; 1080 stripe = &rbio->bbio->stripes[stripe_nr];
1081 disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); 1081 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1082 1082
1083 /* if the device is missing, just fail this stripe */ 1083 /* if the device is missing, just fail this stripe */
1084 if (!stripe->dev->bdev) 1084 if (!stripe->dev->bdev)
@@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1096 if (last_end == disk_start && stripe->dev->bdev && 1096 if (last_end == disk_start && stripe->dev->bdev &&
1097 !last->bi_error && 1097 !last->bi_error &&
1098 last->bi_bdev == stripe->dev->bdev) { 1098 last->bi_bdev == stripe->dev->bdev) {
1099 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); 1099 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1100 if (ret == PAGE_CACHE_SIZE) 1100 if (ret == PAGE_SIZE)
1101 return 0; 1101 return 0;
1102 } 1102 }
1103 } 1103 }
@@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1111 bio->bi_bdev = stripe->dev->bdev; 1111 bio->bi_bdev = stripe->dev->bdev;
1112 bio->bi_iter.bi_sector = disk_start >> 9; 1112 bio->bi_iter.bi_sector = disk_start >> 9;
1113 1113
1114 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 1114 bio_add_page(bio, page, PAGE_SIZE, 0);
1115 bio_list_add(bio_list, bio); 1115 bio_list_add(bio_list, bio);
1116 return 0; 1116 return 0;
1117} 1117}
@@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1154 bio_list_for_each(bio, &rbio->bio_list) { 1154 bio_list_for_each(bio, &rbio->bio_list) {
1155 start = (u64)bio->bi_iter.bi_sector << 9; 1155 start = (u64)bio->bi_iter.bi_sector << 9;
1156 stripe_offset = start - rbio->bbio->raid_map[0]; 1156 stripe_offset = start - rbio->bbio->raid_map[0];
1157 page_index = stripe_offset >> PAGE_CACHE_SHIFT; 1157 page_index = stripe_offset >> PAGE_SHIFT;
1158 1158
1159 for (i = 0; i < bio->bi_vcnt; i++) { 1159 for (i = 0; i < bio->bi_vcnt; i++) {
1160 p = bio->bi_io_vec[i].bv_page; 1160 p = bio->bi_io_vec[i].bv_page;
@@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1253 } else { 1253 } else {
1254 /* raid5 */ 1254 /* raid5 */
1255 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 1255 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1256 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 1256 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1257 } 1257 }
1258 1258
1259 1259
@@ -1914,7 +1914,7 @@ pstripe:
1914 /* Copy parity block into failed block to start with */ 1914 /* Copy parity block into failed block to start with */
1915 memcpy(pointers[faila], 1915 memcpy(pointers[faila],
1916 pointers[rbio->nr_data], 1916 pointers[rbio->nr_data],
1917 PAGE_CACHE_SIZE); 1917 PAGE_SIZE);
1918 1918
1919 /* rearrange the pointer array */ 1919 /* rearrange the pointer array */
1920 p = pointers[faila]; 1920 p = pointers[faila];
@@ -1923,7 +1923,7 @@ pstripe:
1923 pointers[rbio->nr_data - 1] = p; 1923 pointers[rbio->nr_data - 1] = p;
1924 1924
1925 /* xor in the rest */ 1925 /* xor in the rest */
1926 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); 1926 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1927 } 1927 }
1928 /* if we're doing this rebuild as part of an rmw, go through 1928 /* if we're doing this rebuild as part of an rmw, go through
1929 * and set all of our private rbio pages in the 1929 * and set all of our private rbio pages in the
@@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + 2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2251 rbio->stripe_len * rbio->nr_data); 2251 rbio->stripe_len * rbio->nr_data);
2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); 2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2253 index = stripe_offset >> PAGE_CACHE_SHIFT; 2253 index = stripe_offset >> PAGE_SHIFT;
2254 rbio->bio_pages[index] = page; 2254 rbio->bio_pages[index] = page;
2255} 2255}
2256 2256
@@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2365 } else { 2365 } else {
2366 /* raid5 */ 2366 /* raid5 */
2367 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 2367 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2368 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 2368 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2369 } 2369 }
2370 2370
2371 /* Check scrubbing pairty and repair it */ 2371 /* Check scrubbing pairty and repair it */
2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); 2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2373 parity = kmap(p); 2373 parity = kmap(p);
2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) 2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2375 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); 2375 memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2376 else 2376 else
2377 /* Parity is right, needn't writeback */ 2377 /* Parity is right, needn't writeback */
2378 bitmap_clear(rbio->dbitmap, pagenr, 1); 2378 bitmap_clear(rbio->dbitmap, pagenr, 1);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index b892914968c1..298631eaee78 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
226 /* find extent */ 226 /* find extent */
227 spin_lock(&fs_info->reada_lock); 227 spin_lock(&fs_info->reada_lock);
228 re = radix_tree_lookup(&fs_info->reada_tree, 228 re = radix_tree_lookup(&fs_info->reada_tree,
229 start >> PAGE_CACHE_SHIFT); 229 start >> PAGE_SHIFT);
230 if (re) 230 if (re)
231 re->refcnt++; 231 re->refcnt++;
232 spin_unlock(&fs_info->reada_lock); 232 spin_unlock(&fs_info->reada_lock);
@@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
257 zone = NULL; 257 zone = NULL;
258 spin_lock(&fs_info->reada_lock); 258 spin_lock(&fs_info->reada_lock);
259 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 259 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
260 logical >> PAGE_CACHE_SHIFT, 1); 260 logical >> PAGE_SHIFT, 1);
261 if (ret == 1 && logical >= zone->start && logical <= zone->end) { 261 if (ret == 1 && logical >= zone->start && logical <= zone->end) {
262 kref_get(&zone->refcnt); 262 kref_get(&zone->refcnt);
263 spin_unlock(&fs_info->reada_lock); 263 spin_unlock(&fs_info->reada_lock);
@@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
294 294
295 spin_lock(&fs_info->reada_lock); 295 spin_lock(&fs_info->reada_lock);
296 ret = radix_tree_insert(&dev->reada_zones, 296 ret = radix_tree_insert(&dev->reada_zones,
297 (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), 297 (unsigned long)(zone->end >> PAGE_SHIFT),
298 zone); 298 zone);
299 299
300 if (ret == -EEXIST) { 300 if (ret == -EEXIST) {
301 kfree(zone); 301 kfree(zone);
302 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, 302 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
303 logical >> PAGE_CACHE_SHIFT, 1); 303 logical >> PAGE_SHIFT, 1);
304 if (ret == 1 && logical >= zone->start && logical <= zone->end) 304 if (ret == 1 && logical >= zone->start && logical <= zone->end)
305 kref_get(&zone->refcnt); 305 kref_get(&zone->refcnt);
306 else 306 else
@@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
326 u64 length; 326 u64 length;
327 int real_stripes; 327 int real_stripes;
328 int nzones = 0; 328 int nzones = 0;
329 unsigned long index = logical >> PAGE_CACHE_SHIFT; 329 unsigned long index = logical >> PAGE_SHIFT;
330 int dev_replace_is_ongoing; 330 int dev_replace_is_ongoing;
331 int have_zone = 0; 331 int have_zone = 0;
332 332
@@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
495 struct reada_extent *re) 495 struct reada_extent *re)
496{ 496{
497 int i; 497 int i;
498 unsigned long index = re->logical >> PAGE_CACHE_SHIFT; 498 unsigned long index = re->logical >> PAGE_SHIFT;
499 499
500 spin_lock(&fs_info->reada_lock); 500 spin_lock(&fs_info->reada_lock);
501 if (--re->refcnt) { 501 if (--re->refcnt) {
@@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref)
538 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt); 538 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
539 539
540 radix_tree_delete(&zone->device->reada_zones, 540 radix_tree_delete(&zone->device->reada_zones,
541 zone->end >> PAGE_CACHE_SHIFT); 541 zone->end >> PAGE_SHIFT);
542 542
543 kfree(zone); 543 kfree(zone);
544} 544}
@@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
587static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock) 587static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
588{ 588{
589 int i; 589 int i;
590 unsigned long index = zone->end >> PAGE_CACHE_SHIFT; 590 unsigned long index = zone->end >> PAGE_SHIFT;
591 591
592 for (i = 0; i < zone->ndevs; ++i) { 592 for (i = 0; i < zone->ndevs; ++i) {
593 struct reada_zone *peer; 593 struct reada_zone *peer;
@@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
622 (void **)&zone, index, 1); 622 (void **)&zone, index, 1);
623 if (ret == 0) 623 if (ret == 0)
624 break; 624 break;
625 index = (zone->end >> PAGE_CACHE_SHIFT) + 1; 625 index = (zone->end >> PAGE_SHIFT) + 1;
626 if (zone->locked) { 626 if (zone->locked) {
627 if (zone->elems > top_locked_elems) { 627 if (zone->elems > top_locked_elems) {
628 top_locked_elems = zone->elems; 628 top_locked_elems = zone->elems;
@@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
673 * plugging to speed things up 673 * plugging to speed things up
674 */ 674 */
675 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 675 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
676 dev->reada_next >> PAGE_CACHE_SHIFT, 1); 676 dev->reada_next >> PAGE_SHIFT, 1);
677 if (ret == 0 || re->logical > dev->reada_curr_zone->end) { 677 if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
678 ret = reada_pick_zone(dev); 678 ret = reada_pick_zone(dev);
679 if (!ret) { 679 if (!ret) {
@@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
682 } 682 }
683 re = NULL; 683 re = NULL;
684 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, 684 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
685 dev->reada_next >> PAGE_CACHE_SHIFT, 1); 685 dev->reada_next >> PAGE_SHIFT, 1);
686 } 686 }
687 if (ret == 0) { 687 if (ret == 0) {
688 spin_unlock(&fs_info->reada_lock); 688 spin_unlock(&fs_info->reada_lock);
@@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
838 printk(KERN_CONT " curr off %llu", 838 printk(KERN_CONT " curr off %llu",
839 device->reada_next - zone->start); 839 device->reada_next - zone->start);
840 printk(KERN_CONT "\n"); 840 printk(KERN_CONT "\n");
841 index = (zone->end >> PAGE_CACHE_SHIFT) + 1; 841 index = (zone->end >> PAGE_SHIFT) + 1;
842 } 842 }
843 cnt = 0; 843 cnt = 0;
844 index = 0; 844 index = 0;
@@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
864 } 864 }
865 } 865 }
866 printk(KERN_CONT "\n"); 866 printk(KERN_CONT "\n");
867 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 867 index = (re->logical >> PAGE_SHIFT) + 1;
868 if (++cnt > 15) 868 if (++cnt > 15)
869 break; 869 break;
870 } 870 }
@@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
880 if (ret == 0) 880 if (ret == 0)
881 break; 881 break;
882 if (!re->scheduled) { 882 if (!re->scheduled) {
883 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 883 index = (re->logical >> PAGE_SHIFT) + 1;
884 continue; 884 continue;
885 } 885 }
886 printk(KERN_DEBUG 886 printk(KERN_DEBUG
@@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
897 } 897 }
898 } 898 }
899 printk(KERN_CONT "\n"); 899 printk(KERN_CONT "\n");
900 index = (re->logical >> PAGE_CACHE_SHIFT) + 1; 900 index = (re->logical >> PAGE_SHIFT) + 1;
901 } 901 }
902 spin_unlock(&fs_info->reada_lock); 902 spin_unlock(&fs_info->reada_lock);
903} 903}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2bd0011450df..08ef890deca6 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1850,6 +1850,7 @@ again:
1850 eb = read_tree_block(dest, old_bytenr, old_ptr_gen); 1850 eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
1851 if (IS_ERR(eb)) { 1851 if (IS_ERR(eb)) {
1852 ret = PTR_ERR(eb); 1852 ret = PTR_ERR(eb);
1853 break;
1853 } else if (!extent_buffer_uptodate(eb)) { 1854 } else if (!extent_buffer_uptodate(eb)) {
1854 ret = -EIO; 1855 ret = -EIO;
1855 free_extent_buffer(eb); 1856 free_extent_buffer(eb);
@@ -3129,10 +3130,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
3129 if (ret) 3130 if (ret)
3130 goto out; 3131 goto out;
3131 3132
3132 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 3133 index = (cluster->start - offset) >> PAGE_SHIFT;
3133 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 3134 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3134 while (index <= last_index) { 3135 while (index <= last_index) {
3135 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 3136 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
3136 if (ret) 3137 if (ret)
3137 goto out; 3138 goto out;
3138 3139
@@ -3145,7 +3146,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3145 mask); 3146 mask);
3146 if (!page) { 3147 if (!page) {
3147 btrfs_delalloc_release_metadata(inode, 3148 btrfs_delalloc_release_metadata(inode,
3148 PAGE_CACHE_SIZE); 3149 PAGE_SIZE);
3149 ret = -ENOMEM; 3150 ret = -ENOMEM;
3150 goto out; 3151 goto out;
3151 } 3152 }
@@ -3162,16 +3163,16 @@ static int relocate_file_extent_cluster(struct inode *inode,
3162 lock_page(page); 3163 lock_page(page);
3163 if (!PageUptodate(page)) { 3164 if (!PageUptodate(page)) {
3164 unlock_page(page); 3165 unlock_page(page);
3165 page_cache_release(page); 3166 put_page(page);
3166 btrfs_delalloc_release_metadata(inode, 3167 btrfs_delalloc_release_metadata(inode,
3167 PAGE_CACHE_SIZE); 3168 PAGE_SIZE);
3168 ret = -EIO; 3169 ret = -EIO;
3169 goto out; 3170 goto out;
3170 } 3171 }
3171 } 3172 }
3172 3173
3173 page_start = page_offset(page); 3174 page_start = page_offset(page);
3174 page_end = page_start + PAGE_CACHE_SIZE - 1; 3175 page_end = page_start + PAGE_SIZE - 1;
3175 3176
3176 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3177 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3177 3178
@@ -3191,7 +3192,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3191 unlock_extent(&BTRFS_I(inode)->io_tree, 3192 unlock_extent(&BTRFS_I(inode)->io_tree,
3192 page_start, page_end); 3193 page_start, page_end);
3193 unlock_page(page); 3194 unlock_page(page);
3194 page_cache_release(page); 3195 put_page(page);
3195 3196
3196 index++; 3197 index++;
3197 balance_dirty_pages_ratelimited(inode->i_mapping); 3198 balance_dirty_pages_ratelimited(inode->i_mapping);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 39dbdcbf4d13..4678f03e878e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -703,7 +703,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
703 if (IS_ERR(inode)) 703 if (IS_ERR(inode))
704 return PTR_ERR(inode); 704 return PTR_ERR(inode);
705 705
706 index = offset >> PAGE_CACHE_SHIFT; 706 index = offset >> PAGE_SHIFT;
707 707
708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
709 if (!page) { 709 if (!page) {
@@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1636 if (spage->io_error) { 1636 if (spage->io_error) {
1637 void *mapped_buffer = kmap_atomic(spage->page); 1637 void *mapped_buffer = kmap_atomic(spage->page);
1638 1638
1639 memset(mapped_buffer, 0, PAGE_CACHE_SIZE); 1639 memset(mapped_buffer, 0, PAGE_SIZE);
1640 flush_dcache_page(spage->page); 1640 flush_dcache_page(spage->page);
1641 kunmap_atomic(mapped_buffer); 1641 kunmap_atomic(mapped_buffer);
1642 } 1642 }
@@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4294 goto out; 4294 goto out;
4295 } 4295 }
4296 4296
4297 while (len >= PAGE_CACHE_SIZE) { 4297 while (len >= PAGE_SIZE) {
4298 index = offset >> PAGE_CACHE_SHIFT; 4298 index = offset >> PAGE_SHIFT;
4299again: 4299again:
4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4301 if (!page) { 4301 if (!page) {
@@ -4326,7 +4326,7 @@ again:
4326 */ 4326 */
4327 if (page->mapping != inode->i_mapping) { 4327 if (page->mapping != inode->i_mapping) {
4328 unlock_page(page); 4328 unlock_page(page);
4329 page_cache_release(page); 4329 put_page(page);
4330 goto again; 4330 goto again;
4331 } 4331 }
4332 if (!PageUptodate(page)) { 4332 if (!PageUptodate(page)) {
@@ -4348,15 +4348,15 @@ again:
4348 ret = err; 4348 ret = err;
4349next_page: 4349next_page:
4350 unlock_page(page); 4350 unlock_page(page);
4351 page_cache_release(page); 4351 put_page(page);
4352 4352
4353 if (ret) 4353 if (ret)
4354 break; 4354 break;
4355 4355
4356 offset += PAGE_CACHE_SIZE; 4356 offset += PAGE_SIZE;
4357 physical_for_dev_replace += PAGE_CACHE_SIZE; 4357 physical_for_dev_replace += PAGE_SIZE;
4358 nocow_ctx_logical += PAGE_CACHE_SIZE; 4358 nocow_ctx_logical += PAGE_SIZE;
4359 len -= PAGE_CACHE_SIZE; 4359 len -= PAGE_SIZE;
4360 } 4360 }
4361 ret = COPY_COMPLETE; 4361 ret = COPY_COMPLETE;
4362out: 4362out:
@@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
4390 bio->bi_iter.bi_size = 0; 4390 bio->bi_iter.bi_size = 0;
4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; 4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4392 bio->bi_bdev = dev->bdev; 4392 bio->bi_bdev = dev->bdev;
4393 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 4393 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4394 if (ret != PAGE_CACHE_SIZE) { 4394 if (ret != PAGE_SIZE) {
4395leave_with_eio: 4395leave_with_eio:
4396 bio_put(bio); 4396 bio_put(bio);
4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 19b7bf4284ee..8d358c547c59 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4449 struct page *page; 4449 struct page *page;
4450 char *addr; 4450 char *addr;
4451 struct btrfs_key key; 4451 struct btrfs_key key;
4452 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 4452 pgoff_t index = offset >> PAGE_SHIFT;
4453 pgoff_t last_index; 4453 pgoff_t last_index;
4454 unsigned pg_offset = offset & ~PAGE_CACHE_MASK; 4454 unsigned pg_offset = offset & ~PAGE_MASK;
4455 ssize_t ret = 0; 4455 ssize_t ret = 0;
4456 4456
4457 key.objectid = sctx->cur_ino; 4457 key.objectid = sctx->cur_ino;
@@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4471 if (len == 0) 4471 if (len == 0)
4472 goto out; 4472 goto out;
4473 4473
4474 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; 4474 last_index = (offset + len - 1) >> PAGE_SHIFT;
4475 4475
4476 /* initial readahead */ 4476 /* initial readahead */
4477 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); 4477 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4481 4481
4482 while (index <= last_index) { 4482 while (index <= last_index) {
4483 unsigned cur_len = min_t(unsigned, len, 4483 unsigned cur_len = min_t(unsigned, len,
4484 PAGE_CACHE_SIZE - pg_offset); 4484 PAGE_SIZE - pg_offset);
4485 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); 4485 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
4486 if (!page) { 4486 if (!page) {
4487 ret = -ENOMEM; 4487 ret = -ENOMEM;
@@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4493 lock_page(page); 4493 lock_page(page);
4494 if (!PageUptodate(page)) { 4494 if (!PageUptodate(page)) {
4495 unlock_page(page); 4495 unlock_page(page);
4496 page_cache_release(page); 4496 put_page(page);
4497 ret = -EIO; 4497 ret = -EIO;
4498 break; 4498 break;
4499 } 4499 }
@@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4503 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); 4503 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4504 kunmap(page); 4504 kunmap(page);
4505 unlock_page(page); 4505 unlock_page(page);
4506 page_cache_release(page); 4506 put_page(page);
4507 index++; 4507 index++;
4508 pg_offset = 0; 4508 pg_offset = 0;
4509 len -= cur_len; 4509 len -= cur_len;
@@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx,
4804 type = btrfs_file_extent_type(leaf, ei); 4804 type = btrfs_file_extent_type(leaf, ei);
4805 if (type == BTRFS_FILE_EXTENT_INLINE) { 4805 if (type == BTRFS_FILE_EXTENT_INLINE) {
4806 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei); 4806 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
4807 ext_len = PAGE_CACHE_ALIGN(ext_len); 4807 ext_len = PAGE_ALIGN(ext_len);
4808 } else { 4808 } else {
4809 ext_len = btrfs_file_extent_num_bytes(leaf, ei); 4809 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
4810 } 4810 }
@@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
4886 * but there may be items after this page. Make 4886 * but there may be items after this page. Make
4887 * sure to send the whole thing 4887 * sure to send the whole thing
4888 */ 4888 */
4889 len = PAGE_CACHE_ALIGN(len); 4889 len = PAGE_ALIGN(len);
4890 } else { 4890 } else {
4891 len = btrfs_file_extent_num_bytes(path->nodes[0], ei); 4891 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
4892 } 4892 }
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b0721..e05619f241be 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
66 \ 66 \
67 if (token && token->kaddr && token->offset <= offset && \ 67 if (token && token->kaddr && token->offset <= offset && \
68 token->eb == eb && \ 68 token->eb == eb && \
69 (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ 69 (token->offset + PAGE_SIZE >= offset + size)) { \
70 kaddr = token->kaddr; \ 70 kaddr = token->kaddr; \
71 p = kaddr + part_offset - token->offset; \ 71 p = kaddr + part_offset - token->offset; \
72 res = get_unaligned_le##bits(p + off); \ 72 res = get_unaligned_le##bits(p + off); \
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
104 \ 104 \
105 if (token && token->kaddr && token->offset <= offset && \ 105 if (token && token->kaddr && token->offset <= offset && \
106 token->eb == eb && \ 106 token->eb == eb && \
107 (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ 107 (token->offset + PAGE_SIZE >= offset + size)) { \
108 kaddr = token->kaddr; \ 108 kaddr = token->kaddr; \
109 p = kaddr + part_offset - token->offset; \ 109 p = kaddr + part_offset - token->offset; \
110 put_unaligned_le##bits(val, p + off); \ 110 put_unaligned_le##bits(val, p + off); \
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 669b58201e36..70948b13bc81 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
32{ 32{
33 int ret; 33 int ret;
34 struct page *pages[16]; 34 struct page *pages[16];
35 unsigned long index = start >> PAGE_CACHE_SHIFT; 35 unsigned long index = start >> PAGE_SHIFT;
36 unsigned long end_index = end >> PAGE_CACHE_SHIFT; 36 unsigned long end_index = end >> PAGE_SHIFT;
37 unsigned long nr_pages = end_index - index + 1; 37 unsigned long nr_pages = end_index - index + 1;
38 int i; 38 int i;
39 int count = 0; 39 int count = 0;
@@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
49 count++; 49 count++;
50 if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) 50 if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
51 unlock_page(pages[i]); 51 unlock_page(pages[i]);
52 page_cache_release(pages[i]); 52 put_page(pages[i]);
53 if (flags & PROCESS_RELEASE) 53 if (flags & PROCESS_RELEASE)
54 page_cache_release(pages[i]); 54 put_page(pages[i]);
55 } 55 }
56 nr_pages -= ret; 56 nr_pages -= ret;
57 index += ret; 57 index += ret;
@@ -93,7 +93,7 @@ static int test_find_delalloc(void)
93 * everything to make sure our pages don't get evicted and screw up our 93 * everything to make sure our pages don't get evicted and screw up our
94 * test. 94 * test.
95 */ 95 */
96 for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) { 96 for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
97 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); 97 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
98 if (!page) { 98 if (!page) {
99 test_msg("Failed to allocate test page\n"); 99 test_msg("Failed to allocate test page\n");
@@ -104,7 +104,7 @@ static int test_find_delalloc(void)
104 if (index) { 104 if (index) {
105 unlock_page(page); 105 unlock_page(page);
106 } else { 106 } else {
107 page_cache_get(page); 107 get_page(page);
108 locked_page = page; 108 locked_page = page;
109 } 109 }
110 } 110 }
@@ -129,7 +129,7 @@ static int test_find_delalloc(void)
129 } 129 }
130 unlock_extent(&tmp, start, end); 130 unlock_extent(&tmp, start, end);
131 unlock_page(locked_page); 131 unlock_page(locked_page);
132 page_cache_release(locked_page); 132 put_page(locked_page);
133 133
134 /* 134 /*
135 * Test this scenario 135 * Test this scenario
@@ -139,7 +139,7 @@ static int test_find_delalloc(void)
139 */ 139 */
140 test_start = SZ_64M; 140 test_start = SZ_64M;
141 locked_page = find_lock_page(inode->i_mapping, 141 locked_page = find_lock_page(inode->i_mapping,
142 test_start >> PAGE_CACHE_SHIFT); 142 test_start >> PAGE_SHIFT);
143 if (!locked_page) { 143 if (!locked_page) {
144 test_msg("Couldn't find the locked page\n"); 144 test_msg("Couldn't find the locked page\n");
145 goto out_bits; 145 goto out_bits;
@@ -165,7 +165,7 @@ static int test_find_delalloc(void)
165 } 165 }
166 unlock_extent(&tmp, start, end); 166 unlock_extent(&tmp, start, end);
167 /* locked_page was unlocked above */ 167 /* locked_page was unlocked above */
168 page_cache_release(locked_page); 168 put_page(locked_page);
169 169
170 /* 170 /*
171 * Test this scenario 171 * Test this scenario
@@ -174,7 +174,7 @@ static int test_find_delalloc(void)
174 */ 174 */
175 test_start = max_bytes + 4096; 175 test_start = max_bytes + 4096;
176 locked_page = find_lock_page(inode->i_mapping, test_start >> 176 locked_page = find_lock_page(inode->i_mapping, test_start >>
177 PAGE_CACHE_SHIFT); 177 PAGE_SHIFT);
178 if (!locked_page) { 178 if (!locked_page) {
179 test_msg("Could'nt find the locked page\n"); 179 test_msg("Could'nt find the locked page\n");
180 goto out_bits; 180 goto out_bits;
@@ -225,13 +225,13 @@ static int test_find_delalloc(void)
225 * range we want to find. 225 * range we want to find.
226 */ 226 */
227 page = find_get_page(inode->i_mapping, 227 page = find_get_page(inode->i_mapping,
228 (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT); 228 (max_bytes + SZ_1M) >> PAGE_SHIFT);
229 if (!page) { 229 if (!page) {
230 test_msg("Couldn't find our page\n"); 230 test_msg("Couldn't find our page\n");
231 goto out_bits; 231 goto out_bits;
232 } 232 }
233 ClearPageDirty(page); 233 ClearPageDirty(page);
234 page_cache_release(page); 234 put_page(page);
235 235
236 /* We unlocked it in the previous test */ 236 /* We unlocked it in the previous test */
237 lock_page(locked_page); 237 lock_page(locked_page);
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
239 end = 0; 239 end = 0;
240 /* 240 /*
241 * Currently if we fail to find dirty pages in the delalloc range we 241 * Currently if we fail to find dirty pages in the delalloc range we
242 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If 242 * will adjust max_bytes down to PAGE_SIZE and then re-search. If
243 * this changes at any point in the future we will need to fix this 243 * this changes at any point in the future we will need to fix this
244 * tests expected behavior. 244 * tests expected behavior.
245 */ 245 */
@@ -249,9 +249,9 @@ static int test_find_delalloc(void)
249 test_msg("Didn't find our range\n"); 249 test_msg("Didn't find our range\n");
250 goto out_bits; 250 goto out_bits;
251 } 251 }
252 if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) { 252 if (start != test_start && end != test_start + PAGE_SIZE - 1) {
253 test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n", 253 test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
254 test_start, test_start + PAGE_CACHE_SIZE - 1, start, 254 test_start, test_start + PAGE_SIZE - 1, start,
255 end); 255 end);
256 goto out_bits; 256 goto out_bits;
257 } 257 }
@@ -265,7 +265,7 @@ out_bits:
265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); 265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
266out: 266out:
267 if (locked_page) 267 if (locked_page)
268 page_cache_release(locked_page); 268 put_page(locked_page);
269 process_page_range(inode, 0, total_dirty - 1, 269 process_page_range(inode, 0, total_dirty - 1,
270 PROCESS_UNLOCK | PROCESS_RELEASE); 270 PROCESS_UNLOCK | PROCESS_RELEASE);
271 iput(inode); 271 iput(inode);
@@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
298 return -EINVAL; 298 return -EINVAL;
299 } 299 }
300 300
301 bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, 301 bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
302 sizeof(long) * BITS_PER_BYTE); 302 sizeof(long) * BITS_PER_BYTE);
303 extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, 303 extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
304 sizeof(long) * BITS_PER_BYTE); 304 sizeof(long) * BITS_PER_BYTE);
305 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { 305 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
306 test_msg("Setting straddling pages failed\n"); 306 test_msg("Setting straddling pages failed\n");
@@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
309 309
310 bitmap_set(bitmap, 0, len * BITS_PER_BYTE); 310 bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
311 bitmap_clear(bitmap, 311 bitmap_clear(bitmap,
312 (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, 312 (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
313 sizeof(long) * BITS_PER_BYTE); 313 sizeof(long) * BITS_PER_BYTE);
314 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); 314 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
315 extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, 315 extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
316 sizeof(long) * BITS_PER_BYTE); 316 sizeof(long) * BITS_PER_BYTE);
317 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { 317 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
318 test_msg("Clearing straddling pages failed\n"); 318 test_msg("Clearing straddling pages failed\n");
@@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
353 353
354static int test_eb_bitmaps(void) 354static int test_eb_bitmaps(void)
355{ 355{
356 unsigned long len = PAGE_CACHE_SIZE * 4; 356 unsigned long len = PAGE_SIZE * 4;
357 unsigned long *bitmap; 357 unsigned long *bitmap;
358 struct extent_buffer *eb; 358 struct extent_buffer *eb;
359 int ret; 359 int ret;
@@ -379,7 +379,7 @@ static int test_eb_bitmaps(void)
379 379
380 /* Do it over again with an extent buffer which isn't page-aligned. */ 380 /* Do it over again with an extent buffer which isn't page-aligned. */
381 free_extent_buffer(eb); 381 free_extent_buffer(eb);
382 eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len); 382 eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
383 if (!eb) { 383 if (!eb) {
384 test_msg("Couldn't allocate test extent buffer\n"); 384 test_msg("Couldn't allocate test extent buffer\n");
385 kfree(bitmap); 385 kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c9ad97b1e690..514247515312 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
22#include "../disk-io.h" 22#include "../disk-io.h"
23#include "../free-space-cache.h" 23#include "../free-space-cache.h"
24 24
25#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 25#define BITS_PER_BITMAP (PAGE_SIZE * 8)
26 26
27/* 27/*
28 * This test just does basic sanity checking, making sure we can add an exten 28 * This test just does basic sanity checking, making sure we can add an exten
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 24d03c751149..517d0ccb351e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4415,6 +4415,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4415 return ret; 4415 return ret;
4416} 4416}
4417 4417
4418/*
4419 * When we are logging a new inode X, check if it doesn't have a reference that
4420 * matches the reference from some other inode Y created in a past transaction
4421 * and that was renamed in the current transaction. If we don't do this, then at
4422 * log replay time we can lose inode Y (and all its files if it's a directory):
4423 *
4424 * mkdir /mnt/x
4425 * echo "hello world" > /mnt/x/foobar
4426 * sync
4427 * mv /mnt/x /mnt/y
4428 * mkdir /mnt/x # or touch /mnt/x
4429 * xfs_io -c fsync /mnt/x
4430 * <power fail>
4431 * mount fs, trigger log replay
4432 *
4433 * After the log replay procedure, we would lose the first directory and all its
4434 * files (file foobar).
4435 * For the case where inode Y is not a directory we simply end up losing it:
4436 *
4437 * echo "123" > /mnt/foo
4438 * sync
4439 * mv /mnt/foo /mnt/bar
4440 * echo "abc" > /mnt/foo
4441 * xfs_io -c fsync /mnt/foo
4442 * <power fail>
4443 *
4444 * We also need this for cases where a snapshot entry is replaced by some other
4445 * entry (file or directory) otherwise we end up with an unreplayable log due to
4446 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4447 * if it were a regular entry:
4448 *
4449 * mkdir /mnt/x
4450 * btrfs subvolume snapshot /mnt /mnt/x/snap
4451 * btrfs subvolume delete /mnt/x/snap
4452 * rmdir /mnt/x
4453 * mkdir /mnt/x
4454 * fsync /mnt/x or fsync some new file inside it
4455 * <power fail>
4456 *
4457 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4458 * the same transaction.
4459 */
4460static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4461 const int slot,
4462 const struct btrfs_key *key,
4463 struct inode *inode)
4464{
4465 int ret;
4466 struct btrfs_path *search_path;
4467 char *name = NULL;
4468 u32 name_len = 0;
4469 u32 item_size = btrfs_item_size_nr(eb, slot);
4470 u32 cur_offset = 0;
4471 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4472
4473 search_path = btrfs_alloc_path();
4474 if (!search_path)
4475 return -ENOMEM;
4476 search_path->search_commit_root = 1;
4477 search_path->skip_locking = 1;
4478
4479 while (cur_offset < item_size) {
4480 u64 parent;
4481 u32 this_name_len;
4482 u32 this_len;
4483 unsigned long name_ptr;
4484 struct btrfs_dir_item *di;
4485
4486 if (key->type == BTRFS_INODE_REF_KEY) {
4487 struct btrfs_inode_ref *iref;
4488
4489 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4490 parent = key->offset;
4491 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4492 name_ptr = (unsigned long)(iref + 1);
4493 this_len = sizeof(*iref) + this_name_len;
4494 } else {
4495 struct btrfs_inode_extref *extref;
4496
4497 extref = (struct btrfs_inode_extref *)(ptr +
4498 cur_offset);
4499 parent = btrfs_inode_extref_parent(eb, extref);
4500 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4501 name_ptr = (unsigned long)&extref->name;
4502 this_len = sizeof(*extref) + this_name_len;
4503 }
4504
4505 if (this_name_len > name_len) {
4506 char *new_name;
4507
4508 new_name = krealloc(name, this_name_len, GFP_NOFS);
4509 if (!new_name) {
4510 ret = -ENOMEM;
4511 goto out;
4512 }
4513 name_len = this_name_len;
4514 name = new_name;
4515 }
4516
4517 read_extent_buffer(eb, name, name_ptr, this_name_len);
4518 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4519 search_path, parent,
4520 name, this_name_len, 0);
4521 if (di && !IS_ERR(di)) {
4522 ret = 1;
4523 goto out;
4524 } else if (IS_ERR(di)) {
4525 ret = PTR_ERR(di);
4526 goto out;
4527 }
4528 btrfs_release_path(search_path);
4529
4530 cur_offset += this_len;
4531 }
4532 ret = 0;
4533out:
4534 btrfs_free_path(search_path);
4535 kfree(name);
4536 return ret;
4537}
4538
4418/* log a single inode in the tree log. 4539/* log a single inode in the tree log.
4419 * At least one parent directory for this inode must exist in the tree 4540 * At least one parent directory for this inode must exist in the tree
4420 * or be logged already. 4541 * or be logged already.
@@ -4602,6 +4723,22 @@ again:
4602 if (min_key.type == BTRFS_INODE_ITEM_KEY) 4723 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4603 need_log_inode_item = false; 4724 need_log_inode_item = false;
4604 4725
4726 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4727 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4728 BTRFS_I(inode)->generation == trans->transid) {
4729 ret = btrfs_check_ref_name_override(path->nodes[0],
4730 path->slots[0],
4731 &min_key, inode);
4732 if (ret < 0) {
4733 err = ret;
4734 goto out_unlock;
4735 } else if (ret > 0) {
4736 err = 1;
4737 btrfs_set_log_full_commit(root->fs_info, trans);
4738 goto out_unlock;
4739 }
4740 }
4741
4605 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 4742 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4606 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 4743 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4607 if (ins_nr == 0) 4744 if (ins_nr == 0)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e2b54d546b7c..bd0f45fb38c4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1025 } 1025 }
1026 1026
1027 /* make sure our super fits in the device */ 1027 /* make sure our super fits in the device */
1028 if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode)) 1028 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1029 goto error_bdev_put; 1029 goto error_bdev_put;
1030 1030
1031 /* make sure our super fits in the page */ 1031 /* make sure our super fits in the page */
1032 if (sizeof(*disk_super) > PAGE_CACHE_SIZE) 1032 if (sizeof(*disk_super) > PAGE_SIZE)
1033 goto error_bdev_put; 1033 goto error_bdev_put;
1034 1034
1035 /* make sure our super doesn't straddle pages on disk */ 1035 /* make sure our super doesn't straddle pages on disk */
1036 index = bytenr >> PAGE_CACHE_SHIFT; 1036 index = bytenr >> PAGE_SHIFT;
1037 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index) 1037 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1038 goto error_bdev_put; 1038 goto error_bdev_put;
1039 1039
1040 /* pull in the page with our super */ 1040 /* pull in the page with our super */
@@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1047 p = kmap(page); 1047 p = kmap(page);
1048 1048
1049 /* align our pointer to the offset of the super block */ 1049 /* align our pointer to the offset of the super block */
1050 disk_super = p + (bytenr & ~PAGE_CACHE_MASK); 1050 disk_super = p + (bytenr & ~PAGE_MASK);
1051 1051
1052 if (btrfs_super_bytenr(disk_super) != bytenr || 1052 if (btrfs_super_bytenr(disk_super) != bytenr ||
1053 btrfs_super_magic(disk_super) != BTRFS_MAGIC) 1053 btrfs_super_magic(disk_super) != BTRFS_MAGIC)
@@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1075 1075
1076error_unmap: 1076error_unmap:
1077 kunmap(page); 1077 kunmap(page);
1078 page_cache_release(page); 1078 put_page(page);
1079 1079
1080error_bdev_put: 1080error_bdev_put:
1081 blkdev_put(bdev, flags); 1081 blkdev_put(bdev, flags);
@@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6527 * but sb spans only this function. Add an explicit SetPageUptodate call 6527 * but sb spans only this function. Add an explicit SetPageUptodate call
6528 * to silence the warning eg. on PowerPC 64. 6528 * to silence the warning eg. on PowerPC 64.
6529 */ 6529 */
6530 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) 6530 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6531 SetPageUptodate(sb->pages[0]); 6531 SetPageUptodate(sb->pages[0]);
6532 6532
6533 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 6533 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 82990b8f872b..88d274e8ecf2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void)
59 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), 59 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
60 zlib_inflate_workspacesize()); 60 zlib_inflate_workspacesize());
61 workspace->strm.workspace = vmalloc(workspacesize); 61 workspace->strm.workspace = vmalloc(workspacesize);
62 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); 62 workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
63 if (!workspace->strm.workspace || !workspace->buf) 63 if (!workspace->strm.workspace || !workspace->buf)
64 goto fail; 64 goto fail;
65 65
@@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws,
103 workspace->strm.total_in = 0; 103 workspace->strm.total_in = 0;
104 workspace->strm.total_out = 0; 104 workspace->strm.total_out = 0;
105 105
106 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 106 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
107 data_in = kmap(in_page); 107 data_in = kmap(in_page);
108 108
109 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 109 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws,
117 117
118 workspace->strm.next_in = data_in; 118 workspace->strm.next_in = data_in;
119 workspace->strm.next_out = cpage_out; 119 workspace->strm.next_out = cpage_out;
120 workspace->strm.avail_out = PAGE_CACHE_SIZE; 120 workspace->strm.avail_out = PAGE_SIZE;
121 workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE); 121 workspace->strm.avail_in = min(len, PAGE_SIZE);
122 122
123 while (workspace->strm.total_in < len) { 123 while (workspace->strm.total_in < len) {
124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH); 124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws,
156 cpage_out = kmap(out_page); 156 cpage_out = kmap(out_page);
157 pages[nr_pages] = out_page; 157 pages[nr_pages] = out_page;
158 nr_pages++; 158 nr_pages++;
159 workspace->strm.avail_out = PAGE_CACHE_SIZE; 159 workspace->strm.avail_out = PAGE_SIZE;
160 workspace->strm.next_out = cpage_out; 160 workspace->strm.next_out = cpage_out;
161 } 161 }
162 /* we're all done */ 162 /* we're all done */
@@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws,
170 170
171 bytes_left = len - workspace->strm.total_in; 171 bytes_left = len - workspace->strm.total_in;
172 kunmap(in_page); 172 kunmap(in_page);
173 page_cache_release(in_page); 173 put_page(in_page);
174 174
175 start += PAGE_CACHE_SIZE; 175 start += PAGE_SIZE;
176 in_page = find_get_page(mapping, 176 in_page = find_get_page(mapping,
177 start >> PAGE_CACHE_SHIFT); 177 start >> PAGE_SHIFT);
178 data_in = kmap(in_page); 178 data_in = kmap(in_page);
179 workspace->strm.avail_in = min(bytes_left, 179 workspace->strm.avail_in = min(bytes_left,
180 PAGE_CACHE_SIZE); 180 PAGE_SIZE);
181 workspace->strm.next_in = data_in; 181 workspace->strm.next_in = data_in;
182 } 182 }
183 } 183 }
@@ -205,7 +205,7 @@ out:
205 205
206 if (in_page) { 206 if (in_page) {
207 kunmap(in_page); 207 kunmap(in_page);
208 page_cache_release(in_page); 208 put_page(in_page);
209 } 209 }
210 return ret; 210 return ret;
211} 211}
@@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
223 size_t total_out = 0; 223 size_t total_out = 0;
224 unsigned long page_in_index = 0; 224 unsigned long page_in_index = 0;
225 unsigned long page_out_index = 0; 225 unsigned long page_out_index = 0;
226 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); 226 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
227 unsigned long buf_start; 227 unsigned long buf_start;
228 unsigned long pg_offset; 228 unsigned long pg_offset;
229 229
230 data_in = kmap(pages_in[page_in_index]); 230 data_in = kmap(pages_in[page_in_index]);
231 workspace->strm.next_in = data_in; 231 workspace->strm.next_in = data_in;
232 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); 232 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
233 workspace->strm.total_in = 0; 233 workspace->strm.total_in = 0;
234 234
235 workspace->strm.total_out = 0; 235 workspace->strm.total_out = 0;
236 workspace->strm.next_out = workspace->buf; 236 workspace->strm.next_out = workspace->buf;
237 workspace->strm.avail_out = PAGE_CACHE_SIZE; 237 workspace->strm.avail_out = PAGE_SIZE;
238 pg_offset = 0; 238 pg_offset = 0;
239 239
240 /* If it's deflate, and it's got no preset dictionary, then 240 /* If it's deflate, and it's got no preset dictionary, then
@@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
274 } 274 }
275 275
276 workspace->strm.next_out = workspace->buf; 276 workspace->strm.next_out = workspace->buf;
277 workspace->strm.avail_out = PAGE_CACHE_SIZE; 277 workspace->strm.avail_out = PAGE_SIZE;
278 278
279 if (workspace->strm.avail_in == 0) { 279 if (workspace->strm.avail_in == 0) {
280 unsigned long tmp; 280 unsigned long tmp;
@@ -288,7 +288,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
288 workspace->strm.next_in = data_in; 288 workspace->strm.next_in = data_in;
289 tmp = srclen - workspace->strm.total_in; 289 tmp = srclen - workspace->strm.total_in;
290 workspace->strm.avail_in = min(tmp, 290 workspace->strm.avail_in = min(tmp,
291 PAGE_CACHE_SIZE); 291 PAGE_SIZE);
292 } 292 }
293 } 293 }
294 if (ret != Z_STREAM_END) 294 if (ret != Z_STREAM_END)
@@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
325 workspace->strm.total_in = 0; 325 workspace->strm.total_in = 0;
326 326
327 workspace->strm.next_out = workspace->buf; 327 workspace->strm.next_out = workspace->buf;
328 workspace->strm.avail_out = PAGE_CACHE_SIZE; 328 workspace->strm.avail_out = PAGE_SIZE;
329 workspace->strm.total_out = 0; 329 workspace->strm.total_out = 0;
330 /* If it's deflate, and it's got no preset dictionary, then 330 /* If it's deflate, and it's got no preset dictionary, then
331 we can tell zlib to skip the adler32 check. */ 331 we can tell zlib to skip the adler32 check. */
@@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
368 else 368 else
369 buf_offset = 0; 369 buf_offset = 0;
370 370
371 bytes = min(PAGE_CACHE_SIZE - pg_offset, 371 bytes = min(PAGE_SIZE - pg_offset,
372 PAGE_CACHE_SIZE - buf_offset); 372 PAGE_SIZE - buf_offset);
373 bytes = min(bytes, bytes_left); 373 bytes = min(bytes, bytes_left);
374 374
375 kaddr = kmap_atomic(dest_page); 375 kaddr = kmap_atomic(dest_page);
@@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
380 bytes_left -= bytes; 380 bytes_left -= bytes;
381next: 381next:
382 workspace->strm.next_out = workspace->buf; 382 workspace->strm.next_out = workspace->buf;
383 workspace->strm.avail_out = PAGE_CACHE_SIZE; 383 workspace->strm.avail_out = PAGE_SIZE;
384 } 384 }
385 385
386 if (ret != Z_STREAM_END && bytes_left != 0) 386 if (ret != Z_STREAM_END && bytes_left != 0)
diff --git a/fs/buffer.c b/fs/buffer.c
index 33be29675358..af0d9a82a8ed 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page)
129{ 129{
130 ClearPagePrivate(page); 130 ClearPagePrivate(page);
131 set_page_private(page, 0); 131 set_page_private(page, 0);
132 page_cache_release(page); 132 put_page(page);
133} 133}
134 134
135static void buffer_io_error(struct buffer_head *bh, char *msg) 135static void buffer_io_error(struct buffer_head *bh, char *msg)
@@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
207 struct page *page; 207 struct page *page;
208 int all_mapped = 1; 208 int all_mapped = 1;
209 209
210 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); 210 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
211 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); 211 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
212 if (!page) 212 if (!page)
213 goto out; 213 goto out;
@@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
245 } 245 }
246out_unlock: 246out_unlock:
247 spin_unlock(&bd_mapping->private_lock); 247 spin_unlock(&bd_mapping->private_lock);
248 page_cache_release(page); 248 put_page(page);
249out: 249out:
250 return ret; 250 return ret;
251} 251}
@@ -1040,7 +1040,7 @@ done:
1040 ret = (block < end_block) ? 1 : -ENXIO; 1040 ret = (block < end_block) ? 1 : -ENXIO;
1041failed: 1041failed:
1042 unlock_page(page); 1042 unlock_page(page);
1043 page_cache_release(page); 1043 put_page(page);
1044 return ret; 1044 return ret;
1045} 1045}
1046 1046
@@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
1533 /* 1533 /*
1534 * Check for overflow 1534 * Check for overflow
1535 */ 1535 */
1536 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 1536 BUG_ON(stop > PAGE_SIZE || stop < length);
1537 1537
1538 head = page_buffers(page); 1538 head = page_buffers(page);
1539 bh = head; 1539 bh = head;
@@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1716 blocksize = bh->b_size; 1716 blocksize = bh->b_size;
1717 bbits = block_size_bits(blocksize); 1717 bbits = block_size_bits(blocksize);
1718 1718
1719 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1719 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1720 last_block = (i_size_read(inode) - 1) >> bbits; 1720 last_block = (i_size_read(inode) - 1) >> bbits;
1721 1721
1722 /* 1722 /*
@@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);
1894int __block_write_begin(struct page *page, loff_t pos, unsigned len, 1894int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1895 get_block_t *get_block) 1895 get_block_t *get_block)
1896{ 1896{
1897 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1897 unsigned from = pos & (PAGE_SIZE - 1);
1898 unsigned to = from + len; 1898 unsigned to = from + len;
1899 struct inode *inode = page->mapping->host; 1899 struct inode *inode = page->mapping->host;
1900 unsigned block_start, block_end; 1900 unsigned block_start, block_end;
@@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1904 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 1904 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1905 1905
1906 BUG_ON(!PageLocked(page)); 1906 BUG_ON(!PageLocked(page));
1907 BUG_ON(from > PAGE_CACHE_SIZE); 1907 BUG_ON(from > PAGE_SIZE);
1908 BUG_ON(to > PAGE_CACHE_SIZE); 1908 BUG_ON(to > PAGE_SIZE);
1909 BUG_ON(from > to); 1909 BUG_ON(from > to);
1910 1910
1911 head = create_page_buffers(page, inode, 0); 1911 head = create_page_buffers(page, inode, 0);
1912 blocksize = head->b_size; 1912 blocksize = head->b_size;
1913 bbits = block_size_bits(blocksize); 1913 bbits = block_size_bits(blocksize);
1914 1914
1915 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1915 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1916 1916
1917 for(bh = head, block_start = 0; bh != head || !block_start; 1917 for(bh = head, block_start = 0; bh != head || !block_start;
1918 block++, block_start=block_end, bh = bh->b_this_page) { 1918 block++, block_start=block_end, bh = bh->b_this_page) {
@@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
2020int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2020int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2021 unsigned flags, struct page **pagep, get_block_t *get_block) 2021 unsigned flags, struct page **pagep, get_block_t *get_block)
2022{ 2022{
2023 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 2023 pgoff_t index = pos >> PAGE_SHIFT;
2024 struct page *page; 2024 struct page *page;
2025 int status; 2025 int status;
2026 2026
@@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2031 status = __block_write_begin(page, pos, len, get_block); 2031 status = __block_write_begin(page, pos, len, get_block);
2032 if (unlikely(status)) { 2032 if (unlikely(status)) {
2033 unlock_page(page); 2033 unlock_page(page);
2034 page_cache_release(page); 2034 put_page(page);
2035 page = NULL; 2035 page = NULL;
2036 } 2036 }
2037 2037
@@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
2047 struct inode *inode = mapping->host; 2047 struct inode *inode = mapping->host;
2048 unsigned start; 2048 unsigned start;
2049 2049
2050 start = pos & (PAGE_CACHE_SIZE - 1); 2050 start = pos & (PAGE_SIZE - 1);
2051 2051
2052 if (unlikely(copied < len)) { 2052 if (unlikely(copied < len)) {
2053 /* 2053 /*
@@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
2099 } 2099 }
2100 2100
2101 unlock_page(page); 2101 unlock_page(page);
2102 page_cache_release(page); 2102 put_page(page);
2103 2103
2104 if (old_size < pos) 2104 if (old_size < pos)
2105 pagecache_isize_extended(inode, old_size, pos); 2105 pagecache_isize_extended(inode, old_size, pos);
@@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
2136 2136
2137 head = page_buffers(page); 2137 head = page_buffers(page);
2138 blocksize = head->b_size; 2138 blocksize = head->b_size;
2139 to = min_t(unsigned, PAGE_CACHE_SIZE - from, count); 2139 to = min_t(unsigned, PAGE_SIZE - from, count);
2140 to = from + to; 2140 to = from + to;
2141 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) 2141 if (from < blocksize && to > PAGE_SIZE - blocksize)
2142 return 0; 2142 return 0;
2143 2143
2144 bh = head; 2144 bh = head;
@@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
2181 blocksize = head->b_size; 2181 blocksize = head->b_size;
2182 bbits = block_size_bits(blocksize); 2182 bbits = block_size_bits(blocksize);
2183 2183
2184 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 2184 iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
2185 lblock = (i_size_read(inode)+blocksize-1) >> bbits; 2185 lblock = (i_size_read(inode)+blocksize-1) >> bbits;
2186 bh = head; 2186 bh = head;
2187 nr = 0; 2187 nr = 0;
@@ -2295,16 +2295,16 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
2295 unsigned zerofrom, offset, len; 2295 unsigned zerofrom, offset, len;
2296 int err = 0; 2296 int err = 0;
2297 2297
2298 index = pos >> PAGE_CACHE_SHIFT; 2298 index = pos >> PAGE_SHIFT;
2299 offset = pos & ~PAGE_CACHE_MASK; 2299 offset = pos & ~PAGE_MASK;
2300 2300
2301 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { 2301 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2302 zerofrom = curpos & ~PAGE_CACHE_MASK; 2302 zerofrom = curpos & ~PAGE_MASK;
2303 if (zerofrom & (blocksize-1)) { 2303 if (zerofrom & (blocksize-1)) {
2304 *bytes |= (blocksize-1); 2304 *bytes |= (blocksize-1);
2305 (*bytes)++; 2305 (*bytes)++;
2306 } 2306 }
2307 len = PAGE_CACHE_SIZE - zerofrom; 2307 len = PAGE_SIZE - zerofrom;
2308 2308
2309 err = pagecache_write_begin(file, mapping, curpos, len, 2309 err = pagecache_write_begin(file, mapping, curpos, len,
2310 AOP_FLAG_UNINTERRUPTIBLE, 2310 AOP_FLAG_UNINTERRUPTIBLE,
@@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
2329 2329
2330 /* page covers the boundary, find the boundary offset */ 2330 /* page covers the boundary, find the boundary offset */
2331 if (index == curidx) { 2331 if (index == curidx) {
2332 zerofrom = curpos & ~PAGE_CACHE_MASK; 2332 zerofrom = curpos & ~PAGE_MASK;
2333 /* if we will expand the thing last block will be filled */ 2333 /* if we will expand the thing last block will be filled */
2334 if (offset <= zerofrom) { 2334 if (offset <= zerofrom) {
2335 goto out; 2335 goto out;
@@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
2375 if (err) 2375 if (err)
2376 return err; 2376 return err;
2377 2377
2378 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2378 zerofrom = *bytes & ~PAGE_MASK;
2379 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2379 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2380 *bytes |= (blocksize-1); 2380 *bytes |= (blocksize-1);
2381 (*bytes)++; 2381 (*bytes)++;
@@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2430 } 2430 }
2431 2431
2432 /* page is wholly or partially inside EOF */ 2432 /* page is wholly or partially inside EOF */
2433 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) 2433 if (((page->index + 1) << PAGE_SHIFT) > size)
2434 end = size & ~PAGE_CACHE_MASK; 2434 end = size & ~PAGE_MASK;
2435 else 2435 else
2436 end = PAGE_CACHE_SIZE; 2436 end = PAGE_SIZE;
2437 2437
2438 ret = __block_write_begin(page, 0, end, get_block); 2438 ret = __block_write_begin(page, 0, end, get_block);
2439 if (!ret) 2439 if (!ret)
@@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping,
2508 int ret = 0; 2508 int ret = 0;
2509 int is_mapped_to_disk = 1; 2509 int is_mapped_to_disk = 1;
2510 2510
2511 index = pos >> PAGE_CACHE_SHIFT; 2511 index = pos >> PAGE_SHIFT;
2512 from = pos & (PAGE_CACHE_SIZE - 1); 2512 from = pos & (PAGE_SIZE - 1);
2513 to = from + len; 2513 to = from + len;
2514 2514
2515 page = grab_cache_page_write_begin(mapping, index, flags); 2515 page = grab_cache_page_write_begin(mapping, index, flags);
@@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping,
2543 goto out_release; 2543 goto out_release;
2544 } 2544 }
2545 2545
2546 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 2546 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
2547 2547
2548 /* 2548 /*
2549 * We loop across all blocks in the page, whether or not they are 2549 * We loop across all blocks in the page, whether or not they are
@@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping,
2551 * page is fully mapped-to-disk. 2551 * page is fully mapped-to-disk.
2552 */ 2552 */
2553 for (block_start = 0, block_in_page = 0, bh = head; 2553 for (block_start = 0, block_in_page = 0, bh = head;
2554 block_start < PAGE_CACHE_SIZE; 2554 block_start < PAGE_SIZE;
2555 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { 2555 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2556 int create; 2556 int create;
2557 2557
@@ -2623,7 +2623,7 @@ failed:
2623 2623
2624out_release: 2624out_release:
2625 unlock_page(page); 2625 unlock_page(page);
2626 page_cache_release(page); 2626 put_page(page);
2627 *pagep = NULL; 2627 *pagep = NULL;
2628 2628
2629 return ret; 2629 return ret;
@@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
2653 } 2653 }
2654 2654
2655 unlock_page(page); 2655 unlock_page(page);
2656 page_cache_release(page); 2656 put_page(page);
2657 2657
2658 while (head) { 2658 while (head) {
2659 bh = head; 2659 bh = head;
@@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
2675{ 2675{
2676 struct inode * const inode = page->mapping->host; 2676 struct inode * const inode = page->mapping->host;
2677 loff_t i_size = i_size_read(inode); 2677 loff_t i_size = i_size_read(inode);
2678 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2678 const pgoff_t end_index = i_size >> PAGE_SHIFT;
2679 unsigned offset; 2679 unsigned offset;
2680 int ret; 2680 int ret;
2681 2681
@@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
2684 goto out; 2684 goto out;
2685 2685
2686 /* Is the page fully outside i_size? (truncate in progress) */ 2686 /* Is the page fully outside i_size? (truncate in progress) */
2687 offset = i_size & (PAGE_CACHE_SIZE-1); 2687 offset = i_size & (PAGE_SIZE-1);
2688 if (page->index >= end_index+1 || !offset) { 2688 if (page->index >= end_index+1 || !offset) {
2689 /* 2689 /*
2690 * The page may have dirty, unmapped buffers. For example, 2690 * The page may have dirty, unmapped buffers. For example,
@@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
2707 * the page size, the remaining memory is zeroed when mapped, and 2707 * the page size, the remaining memory is zeroed when mapped, and
2708 * writes to that region are not written out to the file." 2708 * writes to that region are not written out to the file."
2709 */ 2709 */
2710 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2710 zero_user_segment(page, offset, PAGE_SIZE);
2711out: 2711out:
2712 ret = mpage_writepage(page, get_block, wbc); 2712 ret = mpage_writepage(page, get_block, wbc);
2713 if (ret == -EAGAIN) 2713 if (ret == -EAGAIN)
@@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage);
2720int nobh_truncate_page(struct address_space *mapping, 2720int nobh_truncate_page(struct address_space *mapping,
2721 loff_t from, get_block_t *get_block) 2721 loff_t from, get_block_t *get_block)
2722{ 2722{
2723 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2723 pgoff_t index = from >> PAGE_SHIFT;
2724 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2724 unsigned offset = from & (PAGE_SIZE-1);
2725 unsigned blocksize; 2725 unsigned blocksize;
2726 sector_t iblock; 2726 sector_t iblock;
2727 unsigned length, pos; 2727 unsigned length, pos;
@@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping,
2738 return 0; 2738 return 0;
2739 2739
2740 length = blocksize - length; 2740 length = blocksize - length;
2741 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2741 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2742 2742
2743 page = grab_cache_page(mapping, index); 2743 page = grab_cache_page(mapping, index);
2744 err = -ENOMEM; 2744 err = -ENOMEM;
@@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping,
2748 if (page_has_buffers(page)) { 2748 if (page_has_buffers(page)) {
2749has_buffers: 2749has_buffers:
2750 unlock_page(page); 2750 unlock_page(page);
2751 page_cache_release(page); 2751 put_page(page);
2752 return block_truncate_page(mapping, from, get_block); 2752 return block_truncate_page(mapping, from, get_block);
2753 } 2753 }
2754 2754
@@ -2772,7 +2772,7 @@ has_buffers:
2772 if (!PageUptodate(page)) { 2772 if (!PageUptodate(page)) {
2773 err = mapping->a_ops->readpage(NULL, page); 2773 err = mapping->a_ops->readpage(NULL, page);
2774 if (err) { 2774 if (err) {
2775 page_cache_release(page); 2775 put_page(page);
2776 goto out; 2776 goto out;
2777 } 2777 }
2778 lock_page(page); 2778 lock_page(page);
@@ -2789,7 +2789,7 @@ has_buffers:
2789 2789
2790unlock: 2790unlock:
2791 unlock_page(page); 2791 unlock_page(page);
2792 page_cache_release(page); 2792 put_page(page);
2793out: 2793out:
2794 return err; 2794 return err;
2795} 2795}
@@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page);
2798int block_truncate_page(struct address_space *mapping, 2798int block_truncate_page(struct address_space *mapping,
2799 loff_t from, get_block_t *get_block) 2799 loff_t from, get_block_t *get_block)
2800{ 2800{
2801 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2801 pgoff_t index = from >> PAGE_SHIFT;
2802 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2802 unsigned offset = from & (PAGE_SIZE-1);
2803 unsigned blocksize; 2803 unsigned blocksize;
2804 sector_t iblock; 2804 sector_t iblock;
2805 unsigned length, pos; 2805 unsigned length, pos;
@@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping,
2816 return 0; 2816 return 0;
2817 2817
2818 length = blocksize - length; 2818 length = blocksize - length;
2819 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2819 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2820 2820
2821 page = grab_cache_page(mapping, index); 2821 page = grab_cache_page(mapping, index);
2822 err = -ENOMEM; 2822 err = -ENOMEM;
@@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping,
2865 2865
2866unlock: 2866unlock:
2867 unlock_page(page); 2867 unlock_page(page);
2868 page_cache_release(page); 2868 put_page(page);
2869out: 2869out:
2870 return err; 2870 return err;
2871} 2871}
@@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2879{ 2879{
2880 struct inode * const inode = page->mapping->host; 2880 struct inode * const inode = page->mapping->host;
2881 loff_t i_size = i_size_read(inode); 2881 loff_t i_size = i_size_read(inode);
2882 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2882 const pgoff_t end_index = i_size >> PAGE_SHIFT;
2883 unsigned offset; 2883 unsigned offset;
2884 2884
2885 /* Is the page fully inside i_size? */ 2885 /* Is the page fully inside i_size? */
@@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2888 end_buffer_async_write); 2888 end_buffer_async_write);
2889 2889
2890 /* Is the page fully outside i_size? (truncate in progress) */ 2890 /* Is the page fully outside i_size? (truncate in progress) */
2891 offset = i_size & (PAGE_CACHE_SIZE-1); 2891 offset = i_size & (PAGE_SIZE-1);
2892 if (page->index >= end_index+1 || !offset) { 2892 if (page->index >= end_index+1 || !offset) {
2893 /* 2893 /*
2894 * The page may have dirty, unmapped buffers. For example, 2894 * The page may have dirty, unmapped buffers. For example,
2895 * they may have been added in ext3_writepage(). Make them 2895 * they may have been added in ext3_writepage(). Make them
2896 * freeable here, so the page does not leak. 2896 * freeable here, so the page does not leak.
2897 */ 2897 */
2898 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 2898 do_invalidatepage(page, 0, PAGE_SIZE);
2899 unlock_page(page); 2899 unlock_page(page);
2900 return 0; /* don't care */ 2900 return 0; /* don't care */
2901 } 2901 }
@@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2907 * the page size, the remaining memory is zeroed when mapped, and 2907 * the page size, the remaining memory is zeroed when mapped, and
2908 * writes to that region are not written out to the file." 2908 * writes to that region are not written out to the file."
2909 */ 2909 */
2910 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2910 zero_user_segment(page, offset, PAGE_SIZE);
2911 return __block_write_full_page(inode, page, get_block, wbc, 2911 return __block_write_full_page(inode, page, get_block, wbc,
2912 end_buffer_async_write); 2912 end_buffer_async_write);
2913} 2913}
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0..afbdc418966d 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
194 error = -EIO; 194 error = -EIO;
195 } 195 }
196 196
197 page_cache_release(monitor->back_page); 197 put_page(monitor->back_page);
198 198
199 fscache_end_io(op, monitor->netfs_page, error); 199 fscache_end_io(op, monitor->netfs_page, error);
200 page_cache_release(monitor->netfs_page); 200 put_page(monitor->netfs_page);
201 fscache_retrieval_complete(op, 1); 201 fscache_retrieval_complete(op, 1);
202 fscache_put_retrieval(op); 202 fscache_put_retrieval(op);
203 kfree(monitor); 203 kfree(monitor);
@@ -288,8 +288,8 @@ monitor_backing_page:
288 _debug("- monitor add"); 288 _debug("- monitor add");
289 289
290 /* install the monitor */ 290 /* install the monitor */
291 page_cache_get(monitor->netfs_page); 291 get_page(monitor->netfs_page);
292 page_cache_get(backpage); 292 get_page(backpage);
293 monitor->back_page = backpage; 293 monitor->back_page = backpage;
294 monitor->monitor.private = backpage; 294 monitor->monitor.private = backpage;
295 add_page_wait_queue(backpage, &monitor->monitor); 295 add_page_wait_queue(backpage, &monitor->monitor);
@@ -310,7 +310,7 @@ backing_page_already_present:
310 _debug("- present"); 310 _debug("- present");
311 311
312 if (newpage) { 312 if (newpage) {
313 page_cache_release(newpage); 313 put_page(newpage);
314 newpage = NULL; 314 newpage = NULL;
315 } 315 }
316 316
@@ -342,7 +342,7 @@ success:
342 342
343out: 343out:
344 if (backpage) 344 if (backpage)
345 page_cache_release(backpage); 345 put_page(backpage);
346 if (monitor) { 346 if (monitor) {
347 fscache_put_retrieval(monitor->op); 347 fscache_put_retrieval(monitor->op);
348 kfree(monitor); 348 kfree(monitor);
@@ -363,7 +363,7 @@ io_error:
363 goto out; 363 goto out;
364 364
365nomem_page: 365nomem_page:
366 page_cache_release(newpage); 366 put_page(newpage);
367nomem_monitor: 367nomem_monitor:
368 fscache_put_retrieval(monitor->op); 368 fscache_put_retrieval(monitor->op);
369 kfree(monitor); 369 kfree(monitor);
@@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
530 netpage->index, cachefiles_gfp); 530 netpage->index, cachefiles_gfp);
531 if (ret < 0) { 531 if (ret < 0) {
532 if (ret == -EEXIST) { 532 if (ret == -EEXIST) {
533 page_cache_release(netpage); 533 put_page(netpage);
534 fscache_retrieval_complete(op, 1); 534 fscache_retrieval_complete(op, 1);
535 continue; 535 continue;
536 } 536 }
@@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
538 } 538 }
539 539
540 /* install a monitor */ 540 /* install a monitor */
541 page_cache_get(netpage); 541 get_page(netpage);
542 monitor->netfs_page = netpage; 542 monitor->netfs_page = netpage;
543 543
544 page_cache_get(backpage); 544 get_page(backpage);
545 monitor->back_page = backpage; 545 monitor->back_page = backpage;
546 monitor->monitor.private = backpage; 546 monitor->monitor.private = backpage;
547 add_page_wait_queue(backpage, &monitor->monitor); 547 add_page_wait_queue(backpage, &monitor->monitor);
@@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
555 unlock_page(backpage); 555 unlock_page(backpage);
556 } 556 }
557 557
558 page_cache_release(backpage); 558 put_page(backpage);
559 backpage = NULL; 559 backpage = NULL;
560 560
561 page_cache_release(netpage); 561 put_page(netpage);
562 netpage = NULL; 562 netpage = NULL;
563 continue; 563 continue;
564 564
@@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
603 netpage->index, cachefiles_gfp); 603 netpage->index, cachefiles_gfp);
604 if (ret < 0) { 604 if (ret < 0) {
605 if (ret == -EEXIST) { 605 if (ret == -EEXIST) {
606 page_cache_release(netpage); 606 put_page(netpage);
607 fscache_retrieval_complete(op, 1); 607 fscache_retrieval_complete(op, 1);
608 continue; 608 continue;
609 } 609 }
@@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
612 612
613 copy_highpage(netpage, backpage); 613 copy_highpage(netpage, backpage);
614 614
615 page_cache_release(backpage); 615 put_page(backpage);
616 backpage = NULL; 616 backpage = NULL;
617 617
618 fscache_mark_page_cached(op, netpage); 618 fscache_mark_page_cached(op, netpage);
619 619
620 /* the netpage is unlocked and marked up to date here */ 620 /* the netpage is unlocked and marked up to date here */
621 fscache_end_io(op, netpage, 0); 621 fscache_end_io(op, netpage, 0);
622 page_cache_release(netpage); 622 put_page(netpage);
623 netpage = NULL; 623 netpage = NULL;
624 fscache_retrieval_complete(op, 1); 624 fscache_retrieval_complete(op, 1);
625 continue; 625 continue;
@@ -632,11 +632,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
632out: 632out:
633 /* tidy up */ 633 /* tidy up */
634 if (newpage) 634 if (newpage)
635 page_cache_release(newpage); 635 put_page(newpage);
636 if (netpage) 636 if (netpage)
637 page_cache_release(netpage); 637 put_page(netpage);
638 if (backpage) 638 if (backpage)
639 page_cache_release(backpage); 639 put_page(backpage);
640 if (monitor) { 640 if (monitor) {
641 fscache_put_retrieval(op); 641 fscache_put_retrieval(op);
642 kfree(monitor); 642 kfree(monitor);
@@ -644,7 +644,7 @@ out:
644 644
645 list_for_each_entry_safe(netpage, _n, list, lru) { 645 list_for_each_entry_safe(netpage, _n, list, lru) {
646 list_del(&netpage->lru); 646 list_del(&netpage->lru);
647 page_cache_release(netpage); 647 put_page(netpage);
648 fscache_retrieval_complete(op, 1); 648 fscache_retrieval_complete(op, 1);
649 } 649 }
650 650
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2a0db2..4801571f51cb 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
143 inode = page->mapping->host; 143 inode = page->mapping->host;
144 ci = ceph_inode(inode); 144 ci = ceph_inode(inode);
145 145
146 if (offset != 0 || length != PAGE_CACHE_SIZE) { 146 if (offset != 0 || length != PAGE_SIZE) {
147 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", 147 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
148 inode, page, page->index, offset, length); 148 inode, page, page->index, offset, length);
149 return; 149 return;
@@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page)
197 &ceph_inode_to_client(inode)->client->osdc; 197 &ceph_inode_to_client(inode)->client->osdc;
198 int err = 0; 198 int err = 0;
199 u64 off = page_offset(page); 199 u64 off = page_offset(page);
200 u64 len = PAGE_CACHE_SIZE; 200 u64 len = PAGE_SIZE;
201 201
202 if (off >= i_size_read(inode)) { 202 if (off >= i_size_read(inode)) {
203 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 203 zero_user_segment(page, 0, PAGE_SIZE);
204 SetPageUptodate(page); 204 SetPageUptodate(page);
205 return 0; 205 return 0;
206 } 206 }
@@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
212 */ 212 */
213 if (off == 0) 213 if (off == 0)
214 return -EINVAL; 214 return -EINVAL;
215 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 215 zero_user_segment(page, 0, PAGE_SIZE);
216 SetPageUptodate(page); 216 SetPageUptodate(page);
217 return 0; 217 return 0;
218 } 218 }
@@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page)
234 ceph_fscache_readpage_cancel(inode, page); 234 ceph_fscache_readpage_cancel(inode, page);
235 goto out; 235 goto out;
236 } 236 }
237 if (err < PAGE_CACHE_SIZE) 237 if (err < PAGE_SIZE)
238 /* zero fill remainder of page */ 238 /* zero fill remainder of page */
239 zero_user_segment(page, err, PAGE_CACHE_SIZE); 239 zero_user_segment(page, err, PAGE_SIZE);
240 else 240 else
241 flush_dcache_page(page); 241 flush_dcache_page(page);
242 242
@@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
278 278
279 if (rc < 0 && rc != -ENOENT) 279 if (rc < 0 && rc != -ENOENT)
280 goto unlock; 280 goto unlock;
281 if (bytes < (int)PAGE_CACHE_SIZE) { 281 if (bytes < (int)PAGE_SIZE) {
282 /* zero (remainder of) page */ 282 /* zero (remainder of) page */
283 int s = bytes < 0 ? 0 : bytes; 283 int s = bytes < 0 ? 0 : bytes;
284 zero_user_segment(page, s, PAGE_CACHE_SIZE); 284 zero_user_segment(page, s, PAGE_SIZE);
285 } 285 }
286 dout("finish_read %p uptodate %p idx %lu\n", inode, page, 286 dout("finish_read %p uptodate %p idx %lu\n", inode, page,
287 page->index); 287 page->index);
@@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
290 ceph_readpage_to_fscache(inode, page); 290 ceph_readpage_to_fscache(inode, page);
291unlock: 291unlock:
292 unlock_page(page); 292 unlock_page(page);
293 page_cache_release(page); 293 put_page(page);
294 bytes -= PAGE_CACHE_SIZE; 294 bytes -= PAGE_SIZE;
295 } 295 }
296 kfree(osd_data->pages); 296 kfree(osd_data->pages);
297} 297}
@@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
336 if (max && nr_pages == max) 336 if (max && nr_pages == max)
337 break; 337 break;
338 } 338 }
339 len = nr_pages << PAGE_CACHE_SHIFT; 339 len = nr_pages << PAGE_SHIFT;
340 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 340 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
341 off, len); 341 off, len);
342 vino = ceph_vino(inode); 342 vino = ceph_vino(inode);
@@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
364 if (add_to_page_cache_lru(page, &inode->i_data, page->index, 364 if (add_to_page_cache_lru(page, &inode->i_data, page->index,
365 GFP_KERNEL)) { 365 GFP_KERNEL)) {
366 ceph_fscache_uncache_page(inode, page); 366 ceph_fscache_uncache_page(inode, page);
367 page_cache_release(page); 367 put_page(page);
368 dout("start_read %p add_to_page_cache failed %p\n", 368 dout("start_read %p add_to_page_cache failed %p\n",
369 inode, page); 369 inode, page);
370 nr_pages = i; 370 nr_pages = i;
@@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
415 if (rc == 0) 415 if (rc == 0)
416 goto out; 416 goto out;
417 417
418 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) 418 if (fsc->mount_options->rsize >= PAGE_SIZE)
419 max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) 419 max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
420 >> PAGE_SHIFT; 420 >> PAGE_SHIFT;
421 421
422 dout("readpages %p file %p nr_pages %d max %d\n", inode, 422 dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
484 long writeback_stat; 484 long writeback_stat;
485 u64 truncate_size; 485 u64 truncate_size;
486 u32 truncate_seq; 486 u32 truncate_seq;
487 int err = 0, len = PAGE_CACHE_SIZE; 487 int err = 0, len = PAGE_SIZE;
488 488
489 dout("writepage %p idx %lu\n", page, page->index); 489 dout("writepage %p idx %lu\n", page, page->index);
490 490
@@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping,
725 } 725 }
726 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) 726 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
727 wsize = fsc->mount_options->wsize; 727 wsize = fsc->mount_options->wsize;
728 if (wsize < PAGE_CACHE_SIZE) 728 if (wsize < PAGE_SIZE)
729 wsize = PAGE_CACHE_SIZE; 729 wsize = PAGE_SIZE;
730 max_pages_ever = wsize >> PAGE_CACHE_SHIFT; 730 max_pages_ever = wsize >> PAGE_SHIFT;
731 731
732 pagevec_init(&pvec, 0); 732 pagevec_init(&pvec, 0);
733 733
@@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping,
737 end = -1; 737 end = -1;
738 dout(" cyclic, start at %lu\n", start); 738 dout(" cyclic, start at %lu\n", start);
739 } else { 739 } else {
740 start = wbc->range_start >> PAGE_CACHE_SHIFT; 740 start = wbc->range_start >> PAGE_SHIFT;
741 end = wbc->range_end >> PAGE_CACHE_SHIFT; 741 end = wbc->range_end >> PAGE_SHIFT;
742 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 742 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
743 range_whole = 1; 743 range_whole = 1;
744 should_loop = 0; 744 should_loop = 0;
@@ -887,7 +887,7 @@ get_more_pages:
887 887
888 num_ops = 1 + do_sync; 888 num_ops = 1 + do_sync;
889 strip_unit_end = page->index + 889 strip_unit_end = page->index +
890 ((len - 1) >> PAGE_CACHE_SHIFT); 890 ((len - 1) >> PAGE_SHIFT);
891 891
892 BUG_ON(pages); 892 BUG_ON(pages);
893 max_pages = calc_pages_for(0, (u64)len); 893 max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@ get_more_pages:
901 901
902 len = 0; 902 len = 0;
903 } else if (page->index != 903 } else if (page->index !=
904 (offset + len) >> PAGE_CACHE_SHIFT) { 904 (offset + len) >> PAGE_SHIFT) {
905 if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : 905 if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
906 CEPH_OSD_MAX_OPS)) { 906 CEPH_OSD_MAX_OPS)) {
907 redirty_page_for_writepage(wbc, page); 907 redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@ get_more_pages:
929 929
930 pages[locked_pages] = page; 930 pages[locked_pages] = page;
931 locked_pages++; 931 locked_pages++;
932 len += PAGE_CACHE_SIZE; 932 len += PAGE_SIZE;
933 } 933 }
934 934
935 /* did we get anything? */ 935 /* did we get anything? */
@@ -981,7 +981,7 @@ new_request:
981 BUG_ON(IS_ERR(req)); 981 BUG_ON(IS_ERR(req));
982 } 982 }
983 BUG_ON(len < page_offset(pages[locked_pages - 1]) + 983 BUG_ON(len < page_offset(pages[locked_pages - 1]) +
984 PAGE_CACHE_SIZE - offset); 984 PAGE_SIZE - offset);
985 985
986 req->r_callback = writepages_finish; 986 req->r_callback = writepages_finish;
987 req->r_inode = inode; 987 req->r_inode = inode;
@@ -1011,7 +1011,7 @@ new_request:
1011 } 1011 }
1012 1012
1013 set_page_writeback(pages[i]); 1013 set_page_writeback(pages[i]);
1014 len += PAGE_CACHE_SIZE; 1014 len += PAGE_SIZE;
1015 } 1015 }
1016 1016
1017 if (snap_size != -1) { 1017 if (snap_size != -1) {
@@ -1020,7 +1020,7 @@ new_request:
1020 /* writepages_finish() clears writeback pages 1020 /* writepages_finish() clears writeback pages
1021 * according to the data length, so make sure 1021 * according to the data length, so make sure
1022 * data length covers all locked pages */ 1022 * data length covers all locked pages */
1023 u64 min_len = len + 1 - PAGE_CACHE_SIZE; 1023 u64 min_len = len + 1 - PAGE_SIZE;
1024 len = min(len, (u64)i_size_read(inode) - offset); 1024 len = min(len, (u64)i_size_read(inode) - offset);
1025 len = max(len, min_len); 1025 len = max(len, min_len);
1026 } 1026 }
@@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file,
1135{ 1135{
1136 struct inode *inode = file_inode(file); 1136 struct inode *inode = file_inode(file);
1137 struct ceph_inode_info *ci = ceph_inode(inode); 1137 struct ceph_inode_info *ci = ceph_inode(inode);
1138 loff_t page_off = pos & PAGE_CACHE_MASK; 1138 loff_t page_off = pos & PAGE_MASK;
1139 int pos_in_page = pos & ~PAGE_CACHE_MASK; 1139 int pos_in_page = pos & ~PAGE_MASK;
1140 int end_in_page = pos_in_page + len; 1140 int end_in_page = pos_in_page + len;
1141 loff_t i_size; 1141 loff_t i_size;
1142 int r; 1142 int r;
@@ -1191,7 +1191,7 @@ retry_locked:
1191 } 1191 }
1192 1192
1193 /* full page? */ 1193 /* full page? */
1194 if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) 1194 if (pos_in_page == 0 && len == PAGE_SIZE)
1195 return 0; 1195 return 0;
1196 1196
1197 /* past end of file? */ 1197 /* past end of file? */
@@ -1199,12 +1199,12 @@ retry_locked:
1199 1199
1200 if (page_off >= i_size || 1200 if (page_off >= i_size ||
1201 (pos_in_page == 0 && (pos+len) >= i_size && 1201 (pos_in_page == 0 && (pos+len) >= i_size &&
1202 end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { 1202 end_in_page - pos_in_page != PAGE_SIZE)) {
1203 dout(" zeroing %p 0 - %d and %d - %d\n", 1203 dout(" zeroing %p 0 - %d and %d - %d\n",
1204 page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); 1204 page, pos_in_page, end_in_page, (int)PAGE_SIZE);
1205 zero_user_segments(page, 1205 zero_user_segments(page,
1206 0, pos_in_page, 1206 0, pos_in_page,
1207 end_in_page, PAGE_CACHE_SIZE); 1207 end_in_page, PAGE_SIZE);
1208 return 0; 1208 return 0;
1209 } 1209 }
1210 1210
@@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
1228{ 1228{
1229 struct inode *inode = file_inode(file); 1229 struct inode *inode = file_inode(file);
1230 struct page *page; 1230 struct page *page;
1231 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1231 pgoff_t index = pos >> PAGE_SHIFT;
1232 int r; 1232 int r;
1233 1233
1234 do { 1234 do {
@@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
1242 1242
1243 r = ceph_update_writeable_page(file, pos, len, page); 1243 r = ceph_update_writeable_page(file, pos, len, page);
1244 if (r < 0) 1244 if (r < 0)
1245 page_cache_release(page); 1245 put_page(page);
1246 else 1246 else
1247 *pagep = page; 1247 *pagep = page;
1248 } while (r == -EAGAIN); 1248 } while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
1259 struct page *page, void *fsdata) 1259 struct page *page, void *fsdata)
1260{ 1260{
1261 struct inode *inode = file_inode(file); 1261 struct inode *inode = file_inode(file);
1262 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1262 unsigned from = pos & (PAGE_SIZE - 1);
1263 int check_cap = 0; 1263 int check_cap = 0;
1264 1264
1265 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1265 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
1279 set_page_dirty(page); 1279 set_page_dirty(page);
1280 1280
1281 unlock_page(page); 1281 unlock_page(page);
1282 page_cache_release(page); 1282 put_page(page);
1283 1283
1284 if (check_cap) 1284 if (check_cap)
1285 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1285 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1322 struct ceph_inode_info *ci = ceph_inode(inode); 1322 struct ceph_inode_info *ci = ceph_inode(inode);
1323 struct ceph_file_info *fi = vma->vm_file->private_data; 1323 struct ceph_file_info *fi = vma->vm_file->private_data;
1324 struct page *pinned_page = NULL; 1324 struct page *pinned_page = NULL;
1325 loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT; 1325 loff_t off = vmf->pgoff << PAGE_SHIFT;
1326 int want, got, ret; 1326 int want, got, ret;
1327 1327
1328 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", 1328 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
1329 inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE); 1329 inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
1330 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1330 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1331 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1331 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1332 else 1332 else
@@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1343 } 1343 }
1344 } 1344 }
1345 dout("filemap_fault %p %llu~%zd got cap refs on %s\n", 1345 dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
1346 inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got)); 1346 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
1347 1347
1348 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1348 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
1349 ci->i_inline_version == CEPH_INLINE_NONE) 1349 ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1352 ret = -EAGAIN; 1352 ret = -EAGAIN;
1353 1353
1354 dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", 1354 dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
1355 inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret); 1355 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
1356 if (pinned_page) 1356 if (pinned_page)
1357 page_cache_release(pinned_page); 1357 put_page(pinned_page);
1358 ceph_put_cap_refs(ci, got); 1358 ceph_put_cap_refs(ci, got);
1359 1359
1360 if (ret != -EAGAIN) 1360 if (ret != -EAGAIN)
1361 return ret; 1361 return ret;
1362 1362
1363 /* read inline data */ 1363 /* read inline data */
1364 if (off >= PAGE_CACHE_SIZE) { 1364 if (off >= PAGE_SIZE) {
1365 /* does not support inline data > PAGE_SIZE */ 1365 /* does not support inline data > PAGE_SIZE */
1366 ret = VM_FAULT_SIGBUS; 1366 ret = VM_FAULT_SIGBUS;
1367 } else { 1367 } else {
@@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1378 CEPH_STAT_CAP_INLINE_DATA, true); 1378 CEPH_STAT_CAP_INLINE_DATA, true);
1379 if (ret1 < 0 || off >= i_size_read(inode)) { 1379 if (ret1 < 0 || off >= i_size_read(inode)) {
1380 unlock_page(page); 1380 unlock_page(page);
1381 page_cache_release(page); 1381 put_page(page);
1382 ret = VM_FAULT_SIGBUS; 1382 ret = VM_FAULT_SIGBUS;
1383 goto out; 1383 goto out;
1384 } 1384 }
1385 if (ret1 < PAGE_CACHE_SIZE) 1385 if (ret1 < PAGE_SIZE)
1386 zero_user_segment(page, ret1, PAGE_CACHE_SIZE); 1386 zero_user_segment(page, ret1, PAGE_SIZE);
1387 else 1387 else
1388 flush_dcache_page(page); 1388 flush_dcache_page(page);
1389 SetPageUptodate(page); 1389 SetPageUptodate(page);
@@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1392 } 1392 }
1393out: 1393out:
1394 dout("filemap_fault %p %llu~%zd read inline data ret %d\n", 1394 dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
1395 inode, off, (size_t)PAGE_CACHE_SIZE, ret); 1395 inode, off, (size_t)PAGE_SIZE, ret);
1396 return ret; 1396 return ret;
1397} 1397}
1398 1398
@@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1430 } 1430 }
1431 } 1431 }
1432 1432
1433 if (off + PAGE_CACHE_SIZE <= size) 1433 if (off + PAGE_SIZE <= size)
1434 len = PAGE_CACHE_SIZE; 1434 len = PAGE_SIZE;
1435 else 1435 else
1436 len = size & ~PAGE_CACHE_MASK; 1436 len = size & ~PAGE_MASK;
1437 1437
1438 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1438 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
1439 inode, ceph_vinop(inode), off, len, size); 1439 inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1519 return; 1519 return;
1520 if (PageUptodate(page)) { 1520 if (PageUptodate(page)) {
1521 unlock_page(page); 1521 unlock_page(page);
1522 page_cache_release(page); 1522 put_page(page);
1523 return; 1523 return;
1524 } 1524 }
1525 } 1525 }
@@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1534 } 1534 }
1535 1535
1536 if (page != locked_page) { 1536 if (page != locked_page) {
1537 if (len < PAGE_CACHE_SIZE) 1537 if (len < PAGE_SIZE)
1538 zero_user_segment(page, len, PAGE_CACHE_SIZE); 1538 zero_user_segment(page, len, PAGE_SIZE);
1539 else 1539 else
1540 flush_dcache_page(page); 1540 flush_dcache_page(page);
1541 1541
1542 SetPageUptodate(page); 1542 SetPageUptodate(page);
1543 unlock_page(page); 1543 unlock_page(page);
1544 page_cache_release(page); 1544 put_page(page);
1545 } 1545 }
1546} 1546}
1547 1547
@@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
1578 from_pagecache = true; 1578 from_pagecache = true;
1579 lock_page(page); 1579 lock_page(page);
1580 } else { 1580 } else {
1581 page_cache_release(page); 1581 put_page(page);
1582 page = NULL; 1582 page = NULL;
1583 } 1583 }
1584 } 1584 }
@@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
1586 1586
1587 if (page) { 1587 if (page) {
1588 len = i_size_read(inode); 1588 len = i_size_read(inode);
1589 if (len > PAGE_CACHE_SIZE) 1589 if (len > PAGE_SIZE)
1590 len = PAGE_CACHE_SIZE; 1590 len = PAGE_SIZE;
1591 } else { 1591 } else {
1592 page = __page_cache_alloc(GFP_NOFS); 1592 page = __page_cache_alloc(GFP_NOFS);
1593 if (!page) { 1593 if (!page) {
@@ -1670,7 +1670,7 @@ out:
1670 if (page && page != locked_page) { 1670 if (page && page != locked_page) {
1671 if (from_pagecache) { 1671 if (from_pagecache) {
1672 unlock_page(page); 1672 unlock_page(page);
1673 page_cache_release(page); 1673 put_page(page);
1674 } else 1674 } else
1675 __free_pages(page, 0); 1675 __free_pages(page, 0);
1676 } 1676 }
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index de17bb232ff8..cfaeef18cbca 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2510,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2510 *pinned_page = page; 2510 *pinned_page = page;
2511 break; 2511 break;
2512 } 2512 }
2513 page_cache_release(page); 2513 put_page(page);
2514 } 2514 }
2515 /* 2515 /*
2516 * drop cap refs first because getattr while 2516 * drop cap refs first because getattr while
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fadc243dfb28..4fb2bbc2a272 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -129,7 +129,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
129 struct inode *dir = d_inode(parent); 129 struct inode *dir = d_inode(parent);
130 struct dentry *dentry, *last = NULL; 130 struct dentry *dentry, *last = NULL;
131 struct ceph_dentry_info *di; 131 struct ceph_dentry_info *di;
132 unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *); 132 unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
133 int err = 0; 133 int err = 0;
134 loff_t ptr_pos = 0; 134 loff_t ptr_pos = 0;
135 struct ceph_readdir_cache_control cache_ctl = {}; 135 struct ceph_readdir_cache_control cache_ctl = {};
@@ -154,7 +154,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
154 } 154 }
155 155
156 err = -EAGAIN; 156 err = -EAGAIN;
157 pgoff = ptr_pos >> PAGE_CACHE_SHIFT; 157 pgoff = ptr_pos >> PAGE_SHIFT;
158 if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) { 158 if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
159 ceph_readdir_cache_release(&cache_ctl); 159 ceph_readdir_cache_release(&cache_ctl);
160 cache_ctl.page = find_lock_page(&dir->i_data, pgoff); 160 cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ef38f01c1795..a79f9269831e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -466,7 +466,7 @@ more:
466 ret += zlen; 466 ret += zlen;
467 } 467 }
468 468
469 didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; 469 didpages = (page_align + ret) >> PAGE_SHIFT;
470 pos += ret; 470 pos += ret;
471 read = pos - off; 471 read = pos - off;
472 left -= ret; 472 left -= ret;
@@ -806,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
806 806
807 if (write) { 807 if (write) {
808 ret = invalidate_inode_pages2_range(inode->i_mapping, 808 ret = invalidate_inode_pages2_range(inode->i_mapping,
809 pos >> PAGE_CACHE_SHIFT, 809 pos >> PAGE_SHIFT,
810 (pos + count) >> PAGE_CACHE_SHIFT); 810 (pos + count) >> PAGE_SHIFT);
811 if (ret < 0) 811 if (ret < 0)
812 dout("invalidate_inode_pages2_range returned %d\n", ret); 812 dout("invalidate_inode_pages2_range returned %d\n", ret);
813 813
@@ -872,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
872 * may block. 872 * may block.
873 */ 873 */
874 truncate_inode_pages_range(inode->i_mapping, pos, 874 truncate_inode_pages_range(inode->i_mapping, pos,
875 (pos+len) | (PAGE_CACHE_SIZE - 1)); 875 (pos+len) | (PAGE_SIZE - 1));
876 876
877 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 877 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
878 } 878 }
@@ -1006,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1006 return ret; 1006 return ret;
1007 1007
1008 ret = invalidate_inode_pages2_range(inode->i_mapping, 1008 ret = invalidate_inode_pages2_range(inode->i_mapping,
1009 pos >> PAGE_CACHE_SHIFT, 1009 pos >> PAGE_SHIFT,
1010 (pos + count) >> PAGE_CACHE_SHIFT); 1010 (pos + count) >> PAGE_SHIFT);
1011 if (ret < 0) 1011 if (ret < 0)
1012 dout("invalidate_inode_pages2_range returned %d\n", ret); 1012 dout("invalidate_inode_pages2_range returned %d\n", ret);
1013 1013
@@ -1036,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1036 * write from beginning of first page, 1036 * write from beginning of first page,
1037 * regardless of io alignment 1037 * regardless of io alignment
1038 */ 1038 */
1039 num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1039 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1040 1040
1041 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1041 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1042 if (IS_ERR(pages)) { 1042 if (IS_ERR(pages)) {
@@ -1159,7 +1159,7 @@ again:
1159 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1159 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1160 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1160 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1161 if (pinned_page) { 1161 if (pinned_page) {
1162 page_cache_release(pinned_page); 1162 put_page(pinned_page);
1163 pinned_page = NULL; 1163 pinned_page = NULL;
1164 } 1164 }
1165 ceph_put_cap_refs(ci, got); 1165 ceph_put_cap_refs(ci, got);
@@ -1188,10 +1188,10 @@ again:
1188 if (retry_op == READ_INLINE) { 1188 if (retry_op == READ_INLINE) {
1189 BUG_ON(ret > 0 || read > 0); 1189 BUG_ON(ret > 0 || read > 0);
1190 if (iocb->ki_pos < i_size && 1190 if (iocb->ki_pos < i_size &&
1191 iocb->ki_pos < PAGE_CACHE_SIZE) { 1191 iocb->ki_pos < PAGE_SIZE) {
1192 loff_t end = min_t(loff_t, i_size, 1192 loff_t end = min_t(loff_t, i_size,
1193 iocb->ki_pos + len); 1193 iocb->ki_pos + len);
1194 end = min_t(loff_t, end, PAGE_CACHE_SIZE); 1194 end = min_t(loff_t, end, PAGE_SIZE);
1195 if (statret < end) 1195 if (statret < end)
1196 zero_user_segment(page, statret, end); 1196 zero_user_segment(page, statret, end);
1197 ret = copy_page_to_iter(page, 1197 ret = copy_page_to_iter(page,
@@ -1463,21 +1463,21 @@ static inline void ceph_zero_partial_page(
1463 struct inode *inode, loff_t offset, unsigned size) 1463 struct inode *inode, loff_t offset, unsigned size)
1464{ 1464{
1465 struct page *page; 1465 struct page *page;
1466 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 1466 pgoff_t index = offset >> PAGE_SHIFT;
1467 1467
1468 page = find_lock_page(inode->i_mapping, index); 1468 page = find_lock_page(inode->i_mapping, index);
1469 if (page) { 1469 if (page) {
1470 wait_on_page_writeback(page); 1470 wait_on_page_writeback(page);
1471 zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size); 1471 zero_user(page, offset & (PAGE_SIZE - 1), size);
1472 unlock_page(page); 1472 unlock_page(page);
1473 page_cache_release(page); 1473 put_page(page);
1474 } 1474 }
1475} 1475}
1476 1476
1477static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1477static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1478 loff_t length) 1478 loff_t length)
1479{ 1479{
1480 loff_t nearly = round_up(offset, PAGE_CACHE_SIZE); 1480 loff_t nearly = round_up(offset, PAGE_SIZE);
1481 if (offset < nearly) { 1481 if (offset < nearly) {
1482 loff_t size = nearly - offset; 1482 loff_t size = nearly - offset;
1483 if (length < size) 1483 if (length < size)
@@ -1486,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1486 offset += size; 1486 offset += size;
1487 length -= size; 1487 length -= size;
1488 } 1488 }
1489 if (length >= PAGE_CACHE_SIZE) { 1489 if (length >= PAGE_SIZE) {
1490 loff_t size = round_down(length, PAGE_CACHE_SIZE); 1490 loff_t size = round_down(length, PAGE_SIZE);
1491 truncate_pagecache_range(inode, offset, offset + size - 1); 1491 truncate_pagecache_range(inode, offset, offset + size - 1);
1492 offset += size; 1492 offset += size;
1493 length -= size; 1493 length -= size;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ed58b168904a..edfade037738 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1338,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1338{ 1338{
1339 if (ctl->page) { 1339 if (ctl->page) {
1340 kunmap(ctl->page); 1340 kunmap(ctl->page);
1341 page_cache_release(ctl->page); 1341 put_page(ctl->page);
1342 ctl->page = NULL; 1342 ctl->page = NULL;
1343 } 1343 }
1344} 1344}
@@ -1348,7 +1348,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1348 struct ceph_mds_request *req) 1348 struct ceph_mds_request *req)
1349{ 1349{
1350 struct ceph_inode_info *ci = ceph_inode(dir); 1350 struct ceph_inode_info *ci = ceph_inode(dir);
1351 unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*); 1351 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1352 unsigned idx = ctl->index % nsize; 1352 unsigned idx = ctl->index % nsize;
1353 pgoff_t pgoff = ctl->index / nsize; 1353 pgoff_t pgoff = ctl->index / nsize;
1354 1354
@@ -1367,7 +1367,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1367 unlock_page(ctl->page); 1367 unlock_page(ctl->page);
1368 ctl->dentries = kmap(ctl->page); 1368 ctl->dentries = kmap(ctl->page);
1369 if (idx == 0) 1369 if (idx == 0)
1370 memset(ctl->dentries, 0, PAGE_CACHE_SIZE); 1370 memset(ctl->dentries, 0, PAGE_SIZE);
1371 } 1371 }
1372 1372
1373 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && 1373 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 44852c3ae531..541ead4d8965 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1610,7 +1610,7 @@ again:
1610 while (!list_empty(&tmp_list)) { 1610 while (!list_empty(&tmp_list)) {
1611 if (!msg) { 1611 if (!msg) {
1612 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, 1612 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1613 PAGE_CACHE_SIZE, GFP_NOFS, false); 1613 PAGE_SIZE, GFP_NOFS, false);
1614 if (!msg) 1614 if (!msg)
1615 goto out_err; 1615 goto out_err;
1616 head = msg->front.iov_base; 1616 head = msg->front.iov_base;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 37712ccffcc6..ee69a537dba5 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed {
97/* 97/*
98 * cap releases are batched and sent to the MDS en masse. 98 * cap releases are batched and sent to the MDS en masse.
99 */ 99 */
100#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \ 100#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - \
101 sizeof(struct ceph_mds_cap_release)) / \ 101 sizeof(struct ceph_mds_cap_release)) / \
102 sizeof(struct ceph_mds_cap_item)) 102 sizeof(struct ceph_mds_cap_item))
103 103
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c973043deb0e..f12d5e2955c2 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
560 560
561 /* set up mempools */ 561 /* set up mempools */
562 err = -ENOMEM; 562 err = -ENOMEM;
563 page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; 563 page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
564 size = sizeof (struct page *) * (page_count ? page_count : 1); 564 size = sizeof (struct page *) * (page_count ? page_count : 1);
565 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); 565 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
566 if (!fsc->wb_pagevec_pool) 566 if (!fsc->wb_pagevec_pool)
@@ -912,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb,
912 int err; 912 int err;
913 913
914 /* set ra_pages based on rasize mount option? */ 914 /* set ra_pages based on rasize mount option? */
915 if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE) 915 if (fsc->mount_options->rasize >= PAGE_SIZE)
916 fsc->backing_dev_info.ra_pages = 916 fsc->backing_dev_info.ra_pages =
917 (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1) 917 (fsc->mount_options->rasize + PAGE_SIZE - 1)
918 >> PAGE_SHIFT; 918 >> PAGE_SHIFT;
919 else 919 else
920 fsc->backing_dev_info.ra_pages = 920 fsc->backing_dev_info.ra_pages =
921 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; 921 VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
922 922
923 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", 923 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
924 atomic_long_inc_return(&bdi_seq)); 924 atomic_long_inc_return(&bdi_seq));
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 1d86fc620e5c..89201564c346 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
962 cifs_dbg(FYI, "about to flush pages\n"); 962 cifs_dbg(FYI, "about to flush pages\n");
963 /* should we flush first and last page first */ 963 /* should we flush first and last page first */
964 truncate_inode_pages_range(&target_inode->i_data, destoff, 964 truncate_inode_pages_range(&target_inode->i_data, destoff,
965 PAGE_CACHE_ALIGN(destoff + len)-1); 965 PAGE_ALIGN(destoff + len)-1);
966 966
967 if (target_tcon->ses->server->ops->duplicate_extents) 967 if (target_tcon->ses->server->ops->duplicate_extents)
968 rc = target_tcon->ses->server->ops->duplicate_extents(xid, 968 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f05bae..f2cc0b3d1af7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
714 * 714 *
715 * Note that this might make for "interesting" allocation problems during 715 * Note that this might make for "interesting" allocation problems during
716 * writeback however as we have to allocate an array of pointers for the 716 * writeback however as we have to allocate an array of pointers for the
717 * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. 717 * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
718 * 718 *
719 * For reads, there is a similar problem as we need to allocate an array 719 * For reads, there is a similar problem as we need to allocate an array
720 * of kvecs to handle the receive, though that should only need to be done 720 * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
733 733
734/* 734/*
735 * The default wsize is 1M. find_get_pages seems to return a maximum of 256 735 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
736 * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill 736 * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
737 * a single wsize request with a single call. 737 * a single wsize request with a single call.
738 */ 738 */
739#define CIFS_DEFAULT_IOSIZE (1024 * 1024) 739#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 76fcb50295a3..a894bf809ff7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1929 1929
1930 wsize = server->ops->wp_retry_size(inode); 1930 wsize = server->ops->wp_retry_size(inode);
1931 if (wsize < rest_len) { 1931 if (wsize < rest_len) {
1932 nr_pages = wsize / PAGE_CACHE_SIZE; 1932 nr_pages = wsize / PAGE_SIZE;
1933 if (!nr_pages) { 1933 if (!nr_pages) {
1934 rc = -ENOTSUPP; 1934 rc = -ENOTSUPP;
1935 break; 1935 break;
1936 } 1936 }
1937 cur_len = nr_pages * PAGE_CACHE_SIZE; 1937 cur_len = nr_pages * PAGE_SIZE;
1938 tailsz = PAGE_CACHE_SIZE; 1938 tailsz = PAGE_SIZE;
1939 } else { 1939 } else {
1940 nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE); 1940 nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
1941 cur_len = rest_len; 1941 cur_len = rest_len;
1942 tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE; 1942 tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
1943 } 1943 }
1944 1944
1945 wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); 1945 wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
@@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1957 wdata2->sync_mode = wdata->sync_mode; 1957 wdata2->sync_mode = wdata->sync_mode;
1958 wdata2->nr_pages = nr_pages; 1958 wdata2->nr_pages = nr_pages;
1959 wdata2->offset = page_offset(wdata2->pages[0]); 1959 wdata2->offset = page_offset(wdata2->pages[0]);
1960 wdata2->pagesz = PAGE_CACHE_SIZE; 1960 wdata2->pagesz = PAGE_SIZE;
1961 wdata2->tailsz = tailsz; 1961 wdata2->tailsz = tailsz;
1962 wdata2->bytes = cur_len; 1962 wdata2->bytes = cur_len;
1963 1963
@@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1975 if (rc != 0 && rc != -EAGAIN) { 1975 if (rc != 0 && rc != -EAGAIN) {
1976 SetPageError(wdata2->pages[j]); 1976 SetPageError(wdata2->pages[j]);
1977 end_page_writeback(wdata2->pages[j]); 1977 end_page_writeback(wdata2->pages[j]);
1978 page_cache_release(wdata2->pages[j]); 1978 put_page(wdata2->pages[j]);
1979 } 1979 }
1980 } 1980 }
1981 1981
@@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work)
2018 else if (wdata->result < 0) 2018 else if (wdata->result < 0)
2019 SetPageError(page); 2019 SetPageError(page);
2020 end_page_writeback(page); 2020 end_page_writeback(page);
2021 page_cache_release(page); 2021 put_page(page);
2022 } 2022 }
2023 if (wdata->result != -EAGAIN) 2023 if (wdata->result != -EAGAIN)
2024 mapping_set_error(inode->i_mapping, wdata->result); 2024 mapping_set_error(inode->i_mapping, wdata->result);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a763cd3d9e7c..6f62ac821a84 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3630,7 +3630,7 @@ try_mount_again:
3630 cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info); 3630 cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
3631 3631
3632 /* tune readahead according to rsize */ 3632 /* tune readahead according to rsize */
3633 cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; 3633 cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;
3634 3634
3635remote_path_check: 3635remote_path_check:
3636#ifdef CONFIG_CIFS_DFS_UPCALL 3636#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ff882aeaccc6..c03d0744648b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1833,7 +1833,7 @@ refind_writable:
1833static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) 1833static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1834{ 1834{
1835 struct address_space *mapping = page->mapping; 1835 struct address_space *mapping = page->mapping;
1836 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 1836 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1837 char *write_data; 1837 char *write_data;
1838 int rc = -EFAULT; 1838 int rc = -EFAULT;
1839 int bytes_written = 0; 1839 int bytes_written = 0;
@@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1849 write_data = kmap(page); 1849 write_data = kmap(page);
1850 write_data += from; 1850 write_data += from;
1851 1851
1852 if ((to > PAGE_CACHE_SIZE) || (from > to)) { 1852 if ((to > PAGE_SIZE) || (from > to)) {
1853 kunmap(page); 1853 kunmap(page);
1854 return -EIO; 1854 return -EIO;
1855 } 1855 }
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1902 * find_get_pages_tag seems to return a max of 256 on each 1902 * find_get_pages_tag seems to return a max of 256 on each
1903 * iteration, so we must call it several times in order to 1903 * iteration, so we must call it several times in order to
1904 * fill the array or the wsize is effectively limited to 1904 * fill the array or the wsize is effectively limited to
1905 * 256 * PAGE_CACHE_SIZE. 1905 * 256 * PAGE_SIZE.
1906 */ 1906 */
1907 *found_pages = 0; 1907 *found_pages = 0;
1908 pages = wdata->pages; 1908 pages = wdata->pages;
@@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1991 1991
1992 /* put any pages we aren't going to use */ 1992 /* put any pages we aren't going to use */
1993 for (i = nr_pages; i < found_pages; i++) { 1993 for (i = nr_pages; i < found_pages; i++) {
1994 page_cache_release(wdata->pages[i]); 1994 put_page(wdata->pages[i]);
1995 wdata->pages[i] = NULL; 1995 wdata->pages[i] = NULL;
1996 } 1996 }
1997 1997
@@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2009 wdata->sync_mode = wbc->sync_mode; 2009 wdata->sync_mode = wbc->sync_mode;
2010 wdata->nr_pages = nr_pages; 2010 wdata->nr_pages = nr_pages;
2011 wdata->offset = page_offset(wdata->pages[0]); 2011 wdata->offset = page_offset(wdata->pages[0]);
2012 wdata->pagesz = PAGE_CACHE_SIZE; 2012 wdata->pagesz = PAGE_SIZE;
2013 wdata->tailsz = min(i_size_read(mapping->host) - 2013 wdata->tailsz = min(i_size_read(mapping->host) -
2014 page_offset(wdata->pages[nr_pages - 1]), 2014 page_offset(wdata->pages[nr_pages - 1]),
2015 (loff_t)PAGE_CACHE_SIZE); 2015 (loff_t)PAGE_SIZE);
2016 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; 2016 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2017 2017
2018 if (wdata->cfile != NULL) 2018 if (wdata->cfile != NULL)
2019 cifsFileInfo_put(wdata->cfile); 2019 cifsFileInfo_put(wdata->cfile);
@@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping,
2047 * If wsize is smaller than the page cache size, default to writing 2047 * If wsize is smaller than the page cache size, default to writing
2048 * one page at a time via cifs_writepage 2048 * one page at a time via cifs_writepage
2049 */ 2049 */
2050 if (cifs_sb->wsize < PAGE_CACHE_SIZE) 2050 if (cifs_sb->wsize < PAGE_SIZE)
2051 return generic_writepages(mapping, wbc); 2051 return generic_writepages(mapping, wbc);
2052 2052
2053 if (wbc->range_cyclic) { 2053 if (wbc->range_cyclic) {
2054 index = mapping->writeback_index; /* Start from prev offset */ 2054 index = mapping->writeback_index; /* Start from prev offset */
2055 end = -1; 2055 end = -1;
2056 } else { 2056 } else {
2057 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2057 index = wbc->range_start >> PAGE_SHIFT;
2058 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2058 end = wbc->range_end >> PAGE_SHIFT;
2059 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2059 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2060 range_whole = true; 2060 range_whole = true;
2061 scanned = true; 2061 scanned = true;
@@ -2071,7 +2071,7 @@ retry:
2071 if (rc) 2071 if (rc)
2072 break; 2072 break;
2073 2073
2074 tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; 2074 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2075 2075
2076 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index, 2076 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2077 &found_pages); 2077 &found_pages);
@@ -2111,7 +2111,7 @@ retry:
2111 else 2111 else
2112 SetPageError(wdata->pages[i]); 2112 SetPageError(wdata->pages[i]);
2113 end_page_writeback(wdata->pages[i]); 2113 end_page_writeback(wdata->pages[i]);
2114 page_cache_release(wdata->pages[i]); 2114 put_page(wdata->pages[i]);
2115 } 2115 }
2116 if (rc != -EAGAIN) 2116 if (rc != -EAGAIN)
2117 mapping_set_error(mapping, rc); 2117 mapping_set_error(mapping, rc);
@@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2154 2154
2155 xid = get_xid(); 2155 xid = get_xid();
2156/* BB add check for wbc flags */ 2156/* BB add check for wbc flags */
2157 page_cache_get(page); 2157 get_page(page);
2158 if (!PageUptodate(page)) 2158 if (!PageUptodate(page))
2159 cifs_dbg(FYI, "ppw - page not up to date\n"); 2159 cifs_dbg(FYI, "ppw - page not up to date\n");
2160 2160
@@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2170 */ 2170 */
2171 set_page_writeback(page); 2171 set_page_writeback(page);
2172retry_write: 2172retry_write:
2173 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); 2173 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2174 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL) 2174 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2175 goto retry_write; 2175 goto retry_write;
2176 else if (rc == -EAGAIN) 2176 else if (rc == -EAGAIN)
@@ -2180,7 +2180,7 @@ retry_write:
2180 else 2180 else
2181 SetPageUptodate(page); 2181 SetPageUptodate(page);
2182 end_page_writeback(page); 2182 end_page_writeback(page);
2183 page_cache_release(page); 2183 put_page(page);
2184 free_xid(xid); 2184 free_xid(xid);
2185 return rc; 2185 return rc;
2186} 2186}
@@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
2214 if (copied == len) 2214 if (copied == len)
2215 SetPageUptodate(page); 2215 SetPageUptodate(page);
2216 ClearPageChecked(page); 2216 ClearPageChecked(page);
2217 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) 2217 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2218 SetPageUptodate(page); 2218 SetPageUptodate(page);
2219 2219
2220 if (!PageUptodate(page)) { 2220 if (!PageUptodate(page)) {
2221 char *page_data; 2221 char *page_data;
2222 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 2222 unsigned offset = pos & (PAGE_SIZE - 1);
2223 unsigned int xid; 2223 unsigned int xid;
2224 2224
2225 xid = get_xid(); 2225 xid = get_xid();
@@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
2248 } 2248 }
2249 2249
2250 unlock_page(page); 2250 unlock_page(page);
2251 page_cache_release(page); 2251 put_page(page);
2252 2252
2253 return rc; 2253 return rc;
2254} 2254}
@@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work)
3286 (rdata->result == -EAGAIN && got_bytes)) 3286 (rdata->result == -EAGAIN && got_bytes))
3287 cifs_readpage_to_fscache(rdata->mapping->host, page); 3287 cifs_readpage_to_fscache(rdata->mapping->host, page);
3288 3288
3289 got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes); 3289 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3290 3290
3291 page_cache_release(page); 3291 put_page(page);
3292 rdata->pages[i] = NULL; 3292 rdata->pages[i] = NULL;
3293 } 3293 }
3294 kref_put(&rdata->refcount, cifs_readdata_release); 3294 kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3307 3307
3308 /* determine the eof that the server (probably) has */ 3308 /* determine the eof that the server (probably) has */
3309 eof = CIFS_I(rdata->mapping->host)->server_eof; 3309 eof = CIFS_I(rdata->mapping->host)->server_eof;
3310 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; 3310 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
3311 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); 3311 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3312 3312
3313 rdata->got_bytes = 0; 3313 rdata->got_bytes = 0;
3314 rdata->tailsz = PAGE_CACHE_SIZE; 3314 rdata->tailsz = PAGE_SIZE;
3315 for (i = 0; i < nr_pages; i++) { 3315 for (i = 0; i < nr_pages; i++) {
3316 struct page *page = rdata->pages[i]; 3316 struct page *page = rdata->pages[i];
3317 3317
3318 if (len >= PAGE_CACHE_SIZE) { 3318 if (len >= PAGE_SIZE) {
3319 /* enough data to fill the page */ 3319 /* enough data to fill the page */
3320 iov.iov_base = kmap(page); 3320 iov.iov_base = kmap(page);
3321 iov.iov_len = PAGE_CACHE_SIZE; 3321 iov.iov_len = PAGE_SIZE;
3322 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", 3322 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3323 i, page->index, iov.iov_base, iov.iov_len); 3323 i, page->index, iov.iov_base, iov.iov_len);
3324 len -= PAGE_CACHE_SIZE; 3324 len -= PAGE_SIZE;
3325 } else if (len > 0) { 3325 } else if (len > 0) {
3326 /* enough for partial page, fill and zero the rest */ 3326 /* enough for partial page, fill and zero the rest */
3327 iov.iov_base = kmap(page); 3327 iov.iov_base = kmap(page);
@@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3329 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", 3329 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3330 i, page->index, iov.iov_base, iov.iov_len); 3330 i, page->index, iov.iov_base, iov.iov_len);
3331 memset(iov.iov_base + len, 3331 memset(iov.iov_base + len,
3332 '\0', PAGE_CACHE_SIZE - len); 3332 '\0', PAGE_SIZE - len);
3333 rdata->tailsz = len; 3333 rdata->tailsz = len;
3334 len = 0; 3334 len = 0;
3335 } else if (page->index > eof_index) { 3335 } else if (page->index > eof_index) {
@@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3341 * to prevent the VFS from repeatedly attempting to 3341 * to prevent the VFS from repeatedly attempting to
3342 * fill them until the writes are flushed. 3342 * fill them until the writes are flushed.
3343 */ 3343 */
3344 zero_user(page, 0, PAGE_CACHE_SIZE); 3344 zero_user(page, 0, PAGE_SIZE);
3345 lru_cache_add_file(page); 3345 lru_cache_add_file(page);
3346 flush_dcache_page(page); 3346 flush_dcache_page(page);
3347 SetPageUptodate(page); 3347 SetPageUptodate(page);
3348 unlock_page(page); 3348 unlock_page(page);
3349 page_cache_release(page); 3349 put_page(page);
3350 rdata->pages[i] = NULL; 3350 rdata->pages[i] = NULL;
3351 rdata->nr_pages--; 3351 rdata->nr_pages--;
3352 continue; 3352 continue;
@@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3354 /* no need to hold page hostage */ 3354 /* no need to hold page hostage */
3355 lru_cache_add_file(page); 3355 lru_cache_add_file(page);
3356 unlock_page(page); 3356 unlock_page(page);
3357 page_cache_release(page); 3357 put_page(page);
3358 rdata->pages[i] = NULL; 3358 rdata->pages[i] = NULL;
3359 rdata->nr_pages--; 3359 rdata->nr_pages--;
3360 continue; 3360 continue;
@@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3402 } 3402 }
3403 3403
3404 /* move first page to the tmplist */ 3404 /* move first page to the tmplist */
3405 *offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 3405 *offset = (loff_t)page->index << PAGE_SHIFT;
3406 *bytes = PAGE_CACHE_SIZE; 3406 *bytes = PAGE_SIZE;
3407 *nr_pages = 1; 3407 *nr_pages = 1;
3408 list_move_tail(&page->lru, tmplist); 3408 list_move_tail(&page->lru, tmplist);
3409 3409
@@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3415 break; 3415 break;
3416 3416
3417 /* would this page push the read over the rsize? */ 3417 /* would this page push the read over the rsize? */
3418 if (*bytes + PAGE_CACHE_SIZE > rsize) 3418 if (*bytes + PAGE_SIZE > rsize)
3419 break; 3419 break;
3420 3420
3421 __SetPageLocked(page); 3421 __SetPageLocked(page);
@@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3424 break; 3424 break;
3425 } 3425 }
3426 list_move_tail(&page->lru, tmplist); 3426 list_move_tail(&page->lru, tmplist);
3427 (*bytes) += PAGE_CACHE_SIZE; 3427 (*bytes) += PAGE_SIZE;
3428 expected_index++; 3428 expected_index++;
3429 (*nr_pages)++; 3429 (*nr_pages)++;
3430 } 3430 }
@@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3493 * reach this point however since we set ra_pages to 0 when the 3493 * reach this point however since we set ra_pages to 0 when the
3494 * rsize is smaller than a cache page. 3494 * rsize is smaller than a cache page.
3495 */ 3495 */
3496 if (unlikely(rsize < PAGE_CACHE_SIZE)) { 3496 if (unlikely(rsize < PAGE_SIZE)) {
3497 add_credits_and_wake_if(server, credits, 0); 3497 add_credits_and_wake_if(server, credits, 0);
3498 return 0; 3498 return 0;
3499 } 3499 }
@@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3512 list_del(&page->lru); 3512 list_del(&page->lru);
3513 lru_cache_add_file(page); 3513 lru_cache_add_file(page);
3514 unlock_page(page); 3514 unlock_page(page);
3515 page_cache_release(page); 3515 put_page(page);
3516 } 3516 }
3517 rc = -ENOMEM; 3517 rc = -ENOMEM;
3518 add_credits_and_wake_if(server, credits, 0); 3518 add_credits_and_wake_if(server, credits, 0);
@@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3524 rdata->offset = offset; 3524 rdata->offset = offset;
3525 rdata->bytes = bytes; 3525 rdata->bytes = bytes;
3526 rdata->pid = pid; 3526 rdata->pid = pid;
3527 rdata->pagesz = PAGE_CACHE_SIZE; 3527 rdata->pagesz = PAGE_SIZE;
3528 rdata->read_into_pages = cifs_readpages_read_into_pages; 3528 rdata->read_into_pages = cifs_readpages_read_into_pages;
3529 rdata->credits = credits; 3529 rdata->credits = credits;
3530 3530
@@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3542 page = rdata->pages[i]; 3542 page = rdata->pages[i];
3543 lru_cache_add_file(page); 3543 lru_cache_add_file(page);
3544 unlock_page(page); 3544 unlock_page(page);
3545 page_cache_release(page); 3545 put_page(page);
3546 } 3546 }
3547 /* Fallback to the readpage in error/reconnect cases */ 3547 /* Fallback to the readpage in error/reconnect cases */
3548 kref_put(&rdata->refcount, cifs_readdata_release); 3548 kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
3577 read_data = kmap(page); 3577 read_data = kmap(page);
3578 /* for reads over a certain size could initiate async read ahead */ 3578 /* for reads over a certain size could initiate async read ahead */
3579 3579
3580 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset); 3580 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
3581 3581
3582 if (rc < 0) 3582 if (rc < 0)
3583 goto io_error; 3583 goto io_error;
@@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
3587 file_inode(file)->i_atime = 3587 file_inode(file)->i_atime =
3588 current_fs_time(file_inode(file)->i_sb); 3588 current_fs_time(file_inode(file)->i_sb);
3589 3589
3590 if (PAGE_CACHE_SIZE > rc) 3590 if (PAGE_SIZE > rc)
3591 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc); 3591 memset(read_data + rc, 0, PAGE_SIZE - rc);
3592 3592
3593 flush_dcache_page(page); 3593 flush_dcache_page(page);
3594 SetPageUptodate(page); 3594 SetPageUptodate(page);
@@ -3608,7 +3608,7 @@ read_complete:
3608 3608
3609static int cifs_readpage(struct file *file, struct page *page) 3609static int cifs_readpage(struct file *file, struct page *page)
3610{ 3610{
3611 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 3611 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
3612 int rc = -EACCES; 3612 int rc = -EACCES;
3613 unsigned int xid; 3613 unsigned int xid;
3614 3614
@@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
3679 struct page **pagep, void **fsdata) 3679 struct page **pagep, void **fsdata)
3680{ 3680{
3681 int oncethru = 0; 3681 int oncethru = 0;
3682 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 3682 pgoff_t index = pos >> PAGE_SHIFT;
3683 loff_t offset = pos & (PAGE_CACHE_SIZE - 1); 3683 loff_t offset = pos & (PAGE_SIZE - 1);
3684 loff_t page_start = pos & PAGE_MASK; 3684 loff_t page_start = pos & PAGE_MASK;
3685 loff_t i_size; 3685 loff_t i_size;
3686 struct page *page; 3686 struct page *page;
@@ -3703,7 +3703,7 @@ start:
3703 * the server. If the write is short, we'll end up doing a sync write 3703 * the server. If the write is short, we'll end up doing a sync write
3704 * instead. 3704 * instead.
3705 */ 3705 */
3706 if (len == PAGE_CACHE_SIZE) 3706 if (len == PAGE_SIZE)
3707 goto out; 3707 goto out;
3708 3708
3709 /* 3709 /*
@@ -3718,7 +3718,7 @@ start:
3718 (offset == 0 && (pos + len) >= i_size)) { 3718 (offset == 0 && (pos + len) >= i_size)) {
3719 zero_user_segments(page, 0, offset, 3719 zero_user_segments(page, 0, offset,
3720 offset + len, 3720 offset + len,
3721 PAGE_CACHE_SIZE); 3721 PAGE_SIZE);
3722 /* 3722 /*
3723 * PageChecked means that the parts of the page 3723 * PageChecked means that the parts of the page
3724 * to which we're not writing are considered up 3724 * to which we're not writing are considered up
@@ -3737,7 +3737,7 @@ start:
3737 * do a sync write instead since PG_uptodate isn't set. 3737 * do a sync write instead since PG_uptodate isn't set.
3738 */ 3738 */
3739 cifs_readpage_worker(file, page, &page_start); 3739 cifs_readpage_worker(file, page, &page_start);
3740 page_cache_release(page); 3740 put_page(page);
3741 oncethru = 1; 3741 oncethru = 1;
3742 goto start; 3742 goto start;
3743 } else { 3743 } else {
@@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset,
3764{ 3764{
3765 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); 3765 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3766 3766
3767 if (offset == 0 && length == PAGE_CACHE_SIZE) 3767 if (offset == 0 && length == PAGE_SIZE)
3768 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); 3768 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3769} 3769}
3770 3770
@@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page)
3772{ 3772{
3773 int rc = 0; 3773 int rc = 0;
3774 loff_t range_start = page_offset(page); 3774 loff_t range_start = page_offset(page);
3775 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 3775 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
3776 struct writeback_control wbc = { 3776 struct writeback_control wbc = {
3777 .sync_mode = WB_SYNC_ALL, 3777 .sync_mode = WB_SYNC_ALL,
3778 .nr_to_write = 0, 3778 .nr_to_write = 0,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index aeb26dbfa1bf..5f9ad5c42180 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode)
59 59
60 /* check if server can support readpages */ 60 /* check if server can support readpages */
61 if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf < 61 if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
62 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) 62 PAGE_SIZE + MAX_CIFS_HDR_SIZE)
63 inode->i_data.a_ops = &cifs_addr_ops_smallbuf; 63 inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
64 else 64 else
65 inode->i_data.a_ops = &cifs_addr_ops; 65 inode->i_data.a_ops = &cifs_addr_ops;
@@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
2019 2019
2020static int cifs_truncate_page(struct address_space *mapping, loff_t from) 2020static int cifs_truncate_page(struct address_space *mapping, loff_t from)
2021{ 2021{
2022 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2022 pgoff_t index = from >> PAGE_SHIFT;
2023 unsigned offset = from & (PAGE_CACHE_SIZE - 1); 2023 unsigned offset = from & (PAGE_SIZE - 1);
2024 struct page *page; 2024 struct page *page;
2025 int rc = 0; 2025 int rc = 0;
2026 2026
@@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
2028 if (!page) 2028 if (!page)
2029 return -ENOMEM; 2029 return -ENOMEM;
2030 2030
2031 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2031 zero_user_segment(page, offset, PAGE_SIZE);
2032 unlock_page(page); 2032 unlock_page(page);
2033 page_cache_release(page); 2033 put_page(page);
2034 return rc; 2034 return rc;
2035} 2035}
2036 2036
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index a8f3b589a2df..cfd91320e869 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
71 struct inode *inode; 71 struct inode *inode;
72 struct dentry *root; 72 struct dentry *root;
73 73
74 sb->s_blocksize = PAGE_CACHE_SIZE; 74 sb->s_blocksize = PAGE_SIZE;
75 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 75 sb->s_blocksize_bits = PAGE_SHIFT;
76 sb->s_magic = CONFIGFS_MAGIC; 76 sb->s_magic = CONFIGFS_MAGIC;
77 sb->s_op = &configfs_ops; 77 sb->s_op = &configfs_ops;
78 sb->s_time_gran = 1; 78 sb->s_time_gran = 1;
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2d7646..9d4e7ea311f4 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@ Block Size
86 86
87(Block size in cramfs refers to the size of input data that is 87(Block size in cramfs refers to the size of input data that is
88compressed at a time. It's intended to be somewhere around 88compressed at a time. It's intended to be somewhere around
89PAGE_CACHE_SIZE for cramfs_readpage's convenience.) 89PAGE_SIZE for cramfs_readpage's convenience.)
90 90
91The superblock ought to indicate the block size that the fs was 91The superblock ought to indicate the block size that the fs was
92written for, since comments in <linux/pagemap.h> indicate that 92written for, since comments in <linux/pagemap.h> indicate that
93PAGE_CACHE_SIZE may grow in future (if I interpret the comment 93PAGE_SIZE may grow in future (if I interpret the comment
94correctly). 94correctly).
95 95
96Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that 96Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
97for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in 97for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
98turn is defined as PAGE_SIZE (which can be as large as 32KB on arm). 98turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
99This discrepancy is a bug, though it's not clear which should be 99This discrepancy is a bug, though it's not clear which should be
100changed. 100changed.
101 101
102One option is to change mkcramfs to take its PAGE_CACHE_SIZE from 102One option is to change mkcramfs to take its PAGE_SIZE from
103<asm/page.h>. Personally I don't like this option, but it does 103<asm/page.h>. Personally I don't like this option, but it does
104require the least amount of change: just change `#define 104require the least amount of change: just change `#define
105PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage 105PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
106is that the generated cramfs cannot always be shared between different 106is that the generated cramfs cannot always be shared between different
107kernels, not even necessarily kernels of the same architecture if 107kernels, not even necessarily kernels of the same architecture if
108PAGE_CACHE_SIZE is subject to change between kernel versions 108PAGE_SIZE is subject to change between kernel versions
109(currently possible with arm and ia64). 109(currently possible with arm and ia64).
110 110
111The remaining options try to make cramfs more sharable. 111The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size. The options are:
126 1. Always 4096 bytes. 126 1. Always 4096 bytes.
127 127
128 2. Writer chooses blocksize; kernel adapts but rejects blocksize > 128 2. Writer chooses blocksize; kernel adapts but rejects blocksize >
129 PAGE_CACHE_SIZE. 129 PAGE_SIZE.
130 130
131 3. Writer chooses blocksize; kernel adapts even to blocksize > 131 3. Writer chooses blocksize; kernel adapts even to blocksize >
132 PAGE_CACHE_SIZE. 132 PAGE_SIZE.
133 133
134It's easy enough to change the kernel to use a smaller value than 134It's easy enough to change the kernel to use a smaller value than
135PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks. 135PAGE_SIZE: just make cramfs_readpage read multiple blocks.
136 136
137The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE 137The cost of option 1 is that kernels with a larger PAGE_SIZE
138value don't get as good compression as they can. 138value don't get as good compression as they can.
139 139
140The cost of option 2 relative to option 1 is that the code uses 140The cost of option 2 relative to option 1 is that the code uses
141variables instead of #define'd constants. The gain is that people 141variables instead of #define'd constants. The gain is that people
142with kernels having larger PAGE_CACHE_SIZE can make use of that if 142with kernels having larger PAGE_SIZE can make use of that if
143they don't mind their cramfs being inaccessible to kernels with 143they don't mind their cramfs being inaccessible to kernels with
144smaller PAGE_CACHE_SIZE values. 144smaller PAGE_SIZE values.
145 145
146Option 3 is easy to implement if we don't mind being CPU-inefficient: 146Option 3 is easy to implement if we don't mind being CPU-inefficient:
147e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which 147e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b862bc219cd7..3a32ddf98095 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
137 * page cache and dentry tree anyway.. 137 * page cache and dentry tree anyway..
138 * 138 *
139 * This also acts as a way to guarantee contiguous areas of up to 139 * This also acts as a way to guarantee contiguous areas of up to
140 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to 140 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
141 * worry about end-of-buffer issues even when decompressing a full 141 * worry about end-of-buffer issues even when decompressing a full
142 * page cache. 142 * page cache.
143 */ 143 */
@@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
152 */ 152 */
153#define BLKS_PER_BUF_SHIFT (2) 153#define BLKS_PER_BUF_SHIFT (2)
154#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT) 154#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
155#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE) 155#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
156 156
157static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE]; 157static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
158static unsigned buffer_blocknr[READ_BUFFERS]; 158static unsigned buffer_blocknr[READ_BUFFERS];
@@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
173 173
174 if (!len) 174 if (!len)
175 return NULL; 175 return NULL;
176 blocknr = offset >> PAGE_CACHE_SHIFT; 176 blocknr = offset >> PAGE_SHIFT;
177 offset &= PAGE_CACHE_SIZE - 1; 177 offset &= PAGE_SIZE - 1;
178 178
179 /* Check if an existing buffer already has the data.. */ 179 /* Check if an existing buffer already has the data.. */
180 for (i = 0; i < READ_BUFFERS; i++) { 180 for (i = 0; i < READ_BUFFERS; i++) {
@@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
184 continue; 184 continue;
185 if (blocknr < buffer_blocknr[i]) 185 if (blocknr < buffer_blocknr[i])
186 continue; 186 continue;
187 blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT; 187 blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
188 blk_offset += offset; 188 blk_offset += offset;
189 if (blk_offset + len > BUFFER_SIZE) 189 if (blk_offset + len > BUFFER_SIZE)
190 continue; 190 continue;
191 return read_buffers[i] + blk_offset; 191 return read_buffers[i] + blk_offset;
192 } 192 }
193 193
194 devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT; 194 devsize = mapping->host->i_size >> PAGE_SHIFT;
195 195
196 /* Ok, read in BLKS_PER_BUF pages completely first. */ 196 /* Ok, read in BLKS_PER_BUF pages completely first. */
197 for (i = 0; i < BLKS_PER_BUF; i++) { 197 for (i = 0; i < BLKS_PER_BUF; i++) {
@@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
213 wait_on_page_locked(page); 213 wait_on_page_locked(page);
214 if (!PageUptodate(page)) { 214 if (!PageUptodate(page)) {
215 /* asynchronous error */ 215 /* asynchronous error */
216 page_cache_release(page); 216 put_page(page);
217 pages[i] = NULL; 217 pages[i] = NULL;
218 } 218 }
219 } 219 }
@@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
229 struct page *page = pages[i]; 229 struct page *page = pages[i];
230 230
231 if (page) { 231 if (page) {
232 memcpy(data, kmap(page), PAGE_CACHE_SIZE); 232 memcpy(data, kmap(page), PAGE_SIZE);
233 kunmap(page); 233 kunmap(page);
234 page_cache_release(page); 234 put_page(page);
235 } else 235 } else
236 memset(data, 0, PAGE_CACHE_SIZE); 236 memset(data, 0, PAGE_SIZE);
237 data += PAGE_CACHE_SIZE; 237 data += PAGE_SIZE;
238 } 238 }
239 return read_buffers[buffer] + offset; 239 return read_buffers[buffer] + offset;
240} 240}
@@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
353 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 353 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
354 354
355 buf->f_type = CRAMFS_MAGIC; 355 buf->f_type = CRAMFS_MAGIC;
356 buf->f_bsize = PAGE_CACHE_SIZE; 356 buf->f_bsize = PAGE_SIZE;
357 buf->f_blocks = CRAMFS_SB(sb)->blocks; 357 buf->f_blocks = CRAMFS_SB(sb)->blocks;
358 buf->f_bfree = 0; 358 buf->f_bfree = 0;
359 buf->f_bavail = 0; 359 buf->f_bavail = 0;
@@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
496 int bytes_filled; 496 int bytes_filled;
497 void *pgdata; 497 void *pgdata;
498 498
499 maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 499 maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
500 bytes_filled = 0; 500 bytes_filled = 0;
501 pgdata = kmap(page); 501 pgdata = kmap(page);
502 502
@@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page)
516 516
517 if (compr_len == 0) 517 if (compr_len == 0)
518 ; /* hole */ 518 ; /* hole */
519 else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) { 519 else if (unlikely(compr_len > (PAGE_SIZE << 1))) {
520 pr_err("bad compressed blocksize %u\n", 520 pr_err("bad compressed blocksize %u\n",
521 compr_len); 521 compr_len);
522 goto err; 522 goto err;
523 } else { 523 } else {
524 mutex_lock(&read_mutex); 524 mutex_lock(&read_mutex);
525 bytes_filled = cramfs_uncompress_block(pgdata, 525 bytes_filled = cramfs_uncompress_block(pgdata,
526 PAGE_CACHE_SIZE, 526 PAGE_SIZE,
527 cramfs_read(sb, start_offset, compr_len), 527 cramfs_read(sb, start_offset, compr_len),
528 compr_len); 528 compr_len);
529 mutex_unlock(&read_mutex); 529 mutex_unlock(&read_mutex);
@@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
532 } 532 }
533 } 533 }
534 534
535 memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled); 535 memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
536 flush_dcache_page(page); 536 flush_dcache_page(page);
537 kunmap(page); 537 kunmap(page);
538 SetPageUptodate(page); 538 SetPageUptodate(page);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 06cd1a22240b..7f5804537d30 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -175,10 +175,10 @@ static int do_page_crypto(struct inode *inode,
175 FS_XTS_TWEAK_SIZE - sizeof(index)); 175 FS_XTS_TWEAK_SIZE - sizeof(index));
176 176
177 sg_init_table(&dst, 1); 177 sg_init_table(&dst, 1);
178 sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); 178 sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
179 sg_init_table(&src, 1); 179 sg_init_table(&src, 1);
180 sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); 180 sg_set_page(&src, src_page, PAGE_SIZE, 0);
181 skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, 181 skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
182 xts_tweak); 182 xts_tweak);
183 if (rw == FS_DECRYPT) 183 if (rw == FS_DECRYPT)
184 res = crypto_skcipher_decrypt(req); 184 res = crypto_skcipher_decrypt(req);
@@ -287,7 +287,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
287 struct bio *bio; 287 struct bio *bio;
288 int ret, err = 0; 288 int ret, err = 0;
289 289
290 BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); 290 BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
291 291
292 ctx = fscrypt_get_ctx(inode); 292 ctx = fscrypt_get_ctx(inode);
293 if (IS_ERR(ctx)) 293 if (IS_ERR(ctx))
diff --git a/fs/dax.c b/fs/dax.c
index 90322eb7498c..75ba46d82a76 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
323 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; 323 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
324 if (vmf->pgoff >= size) { 324 if (vmf->pgoff >= size) {
325 unlock_page(page); 325 unlock_page(page);
326 page_cache_release(page); 326 put_page(page);
327 return VM_FAULT_SIGBUS; 327 return VM_FAULT_SIGBUS;
328 } 328 }
329 329
@@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode,
351} 351}
352 352
353#define NO_SECTOR -1 353#define NO_SECTOR -1
354#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT)) 354#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
355 355
356static int dax_radix_entry(struct address_space *mapping, pgoff_t index, 356static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
357 sector_t sector, bool pmd_entry, bool dirty) 357 sector_t sector, bool pmd_entry, bool dirty)
@@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
506 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) 506 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
507 return 0; 507 return 0;
508 508
509 start_index = wbc->range_start >> PAGE_CACHE_SHIFT; 509 start_index = wbc->range_start >> PAGE_SHIFT;
510 end_index = wbc->range_end >> PAGE_CACHE_SHIFT; 510 end_index = wbc->range_end >> PAGE_SHIFT;
511 pmd_index = DAX_PMD_INDEX(start_index); 511 pmd_index = DAX_PMD_INDEX(start_index);
512 512
513 rcu_read_lock(); 513 rcu_read_lock();
@@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
642 page = find_get_page(mapping, vmf->pgoff); 642 page = find_get_page(mapping, vmf->pgoff);
643 if (page) { 643 if (page) {
644 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { 644 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
645 page_cache_release(page); 645 put_page(page);
646 return VM_FAULT_RETRY; 646 return VM_FAULT_RETRY;
647 } 647 }
648 if (unlikely(page->mapping != mapping)) { 648 if (unlikely(page->mapping != mapping)) {
649 unlock_page(page); 649 unlock_page(page);
650 page_cache_release(page); 650 put_page(page);
651 goto repeat; 651 goto repeat;
652 } 652 }
653 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; 653 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
711 711
712 if (page) { 712 if (page) {
713 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, 713 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
714 PAGE_CACHE_SIZE, 0); 714 PAGE_SIZE, 0);
715 delete_from_page_cache(page); 715 delete_from_page_cache(page);
716 unlock_page(page); 716 unlock_page(page);
717 page_cache_release(page); 717 put_page(page);
718 page = NULL; 718 page = NULL;
719 } 719 }
720 720
@@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
747 unlock_page: 747 unlock_page:
748 if (page) { 748 if (page) {
749 unlock_page(page); 749 unlock_page(page);
750 page_cache_release(page); 750 put_page(page);
751 } 751 }
752 goto out; 752 goto out;
753} 753}
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
1094 * you are truncating a file, the helper function dax_truncate_page() may be 1094 * you are truncating a file, the helper function dax_truncate_page() may be
1095 * more convenient. 1095 * more convenient.
1096 * 1096 *
1097 * We work in terms of PAGE_CACHE_SIZE here for commonality with 1097 * We work in terms of PAGE_SIZE here for commonality with
1098 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem 1098 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
1099 * took care of disposing of the unnecessary blocks. Even if the filesystem 1099 * took care of disposing of the unnecessary blocks. Even if the filesystem
1100 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page 1100 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
1104 get_block_t get_block) 1104 get_block_t get_block)
1105{ 1105{
1106 struct buffer_head bh; 1106 struct buffer_head bh;
1107 pgoff_t index = from >> PAGE_CACHE_SHIFT; 1107 pgoff_t index = from >> PAGE_SHIFT;
1108 unsigned offset = from & (PAGE_CACHE_SIZE-1); 1108 unsigned offset = from & (PAGE_SIZE-1);
1109 int err; 1109 int err;
1110 1110
1111 /* Block boundary? Nothing to do */ 1111 /* Block boundary? Nothing to do */
1112 if (!length) 1112 if (!length)
1113 return 0; 1113 return 0;
1114 BUG_ON((offset + length) > PAGE_CACHE_SIZE); 1114 BUG_ON((offset + length) > PAGE_SIZE);
1115 1115
1116 memset(&bh, 0, sizeof(bh)); 1116 memset(&bh, 0, sizeof(bh));
1117 bh.b_bdev = inode->i_sb->s_bdev; 1117 bh.b_bdev = inode->i_sb->s_bdev;
1118 bh.b_size = PAGE_CACHE_SIZE; 1118 bh.b_size = PAGE_SIZE;
1119 err = get_block(inode, index, &bh, 0); 1119 err = get_block(inode, index, &bh, 0);
1120 if (err < 0) 1120 if (err < 0)
1121 return err; 1121 return err;
@@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
1123 struct block_device *bdev = bh.b_bdev; 1123 struct block_device *bdev = bh.b_bdev;
1124 struct blk_dax_ctl dax = { 1124 struct blk_dax_ctl dax = {
1125 .sector = to_sector(&bh, inode), 1125 .sector = to_sector(&bh, inode),
1126 .size = PAGE_CACHE_SIZE, 1126 .size = PAGE_SIZE,
1127 }; 1127 };
1128 1128
1129 if (dax_map_atomic(bdev, &dax) < 0) 1129 if (dax_map_atomic(bdev, &dax) < 0)
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
1146 * Similar to block_truncate_page(), this function can be called by a 1146 * Similar to block_truncate_page(), this function can be called by a
1147 * filesystem when it is truncating a DAX file to handle the partial page. 1147 * filesystem when it is truncating a DAX file to handle the partial page.
1148 * 1148 *
1149 * We work in terms of PAGE_CACHE_SIZE here for commonality with 1149 * We work in terms of PAGE_SIZE here for commonality with
1150 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem 1150 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
1151 * took care of disposing of the unnecessary blocks. Even if the filesystem 1151 * took care of disposing of the unnecessary blocks. Even if the filesystem
1152 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page 1152 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
1154 */ 1154 */
1155int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) 1155int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
1156{ 1156{
1157 unsigned length = PAGE_CACHE_ALIGN(from) - from; 1157 unsigned length = PAGE_ALIGN(from) - from;
1158 return dax_zero_page_range(inode, from, length, get_block); 1158 return dax_zero_page_range(inode, from, length, get_block);
1159} 1159}
1160EXPORT_SYMBOL_GPL(dax_truncate_page); 1160EXPORT_SYMBOL_GPL(dax_truncate_page);
diff --git a/fs/dcache.c b/fs/dcache.c
index 32ceae3e6112..d5ecc6e477da 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1667 DCACHE_OP_REVALIDATE | 1667 DCACHE_OP_REVALIDATE |
1668 DCACHE_OP_WEAK_REVALIDATE | 1668 DCACHE_OP_WEAK_REVALIDATE |
1669 DCACHE_OP_DELETE | 1669 DCACHE_OP_DELETE |
1670 DCACHE_OP_SELECT_INODE)); 1670 DCACHE_OP_SELECT_INODE |
1671 DCACHE_OP_REAL));
1671 dentry->d_op = op; 1672 dentry->d_op = op;
1672 if (!op) 1673 if (!op)
1673 return; 1674 return;
@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1685 dentry->d_flags |= DCACHE_OP_PRUNE; 1686 dentry->d_flags |= DCACHE_OP_PRUNE;
1686 if (op->d_select_inode) 1687 if (op->d_select_inode)
1687 dentry->d_flags |= DCACHE_OP_SELECT_INODE; 1688 dentry->d_flags |= DCACHE_OP_SELECT_INODE;
1689 if (op->d_real)
1690 dentry->d_flags |= DCACHE_OP_REAL;
1688 1691
1689} 1692}
1690EXPORT_SYMBOL(d_set_d_op); 1693EXPORT_SYMBOL(d_set_d_op);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 476f1ecbd1f0..472037732daf 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
172 */ 172 */
173 if (dio->page_errors == 0) 173 if (dio->page_errors == 0)
174 dio->page_errors = ret; 174 dio->page_errors = ret;
175 page_cache_get(page); 175 get_page(page);
176 dio->pages[0] = page; 176 dio->pages[0] = page;
177 sdio->head = 0; 177 sdio->head = 0;
178 sdio->tail = 1; 178 sdio->tail = 1;
@@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
424static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) 424static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
425{ 425{
426 while (sdio->head < sdio->tail) 426 while (sdio->head < sdio->tail)
427 page_cache_release(dio->pages[sdio->head++]); 427 put_page(dio->pages[sdio->head++]);
428} 428}
429 429
430/* 430/*
@@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
487 if (dio->rw == READ && !PageCompound(page) && 487 if (dio->rw == READ && !PageCompound(page) &&
488 dio->should_dirty) 488 dio->should_dirty)
489 set_page_dirty_lock(page); 489 set_page_dirty_lock(page);
490 page_cache_release(page); 490 put_page(page);
491 } 491 }
492 err = bio->bi_error; 492 err = bio->bi_error;
493 bio_put(bio); 493 bio_put(bio);
@@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio)
696 */ 696 */
697 if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) 697 if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
698 sdio->pages_in_io--; 698 sdio->pages_in_io--;
699 page_cache_get(sdio->cur_page); 699 get_page(sdio->cur_page);
700 sdio->final_block_in_bio = sdio->cur_page_block + 700 sdio->final_block_in_bio = sdio->cur_page_block +
701 (sdio->cur_page_len >> sdio->blkbits); 701 (sdio->cur_page_len >> sdio->blkbits);
702 ret = 0; 702 ret = 0;
@@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
810 */ 810 */
811 if (sdio->cur_page) { 811 if (sdio->cur_page) {
812 ret = dio_send_cur_page(dio, sdio, map_bh); 812 ret = dio_send_cur_page(dio, sdio, map_bh);
813 page_cache_release(sdio->cur_page); 813 put_page(sdio->cur_page);
814 sdio->cur_page = NULL; 814 sdio->cur_page = NULL;
815 if (ret) 815 if (ret)
816 return ret; 816 return ret;
817 } 817 }
818 818
819 page_cache_get(page); /* It is in dio */ 819 get_page(page); /* It is in dio */
820 sdio->cur_page = page; 820 sdio->cur_page = page;
821 sdio->cur_page_offset = offset; 821 sdio->cur_page_offset = offset;
822 sdio->cur_page_len = len; 822 sdio->cur_page_len = len;
@@ -830,7 +830,7 @@ out:
830 if (sdio->boundary) { 830 if (sdio->boundary) {
831 ret = dio_send_cur_page(dio, sdio, map_bh); 831 ret = dio_send_cur_page(dio, sdio, map_bh);
832 dio_bio_submit(dio, sdio); 832 dio_bio_submit(dio, sdio);
833 page_cache_release(sdio->cur_page); 833 put_page(sdio->cur_page);
834 sdio->cur_page = NULL; 834 sdio->cur_page = NULL;
835 } 835 }
836 return ret; 836 return ret;
@@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
947 947
948 ret = get_more_blocks(dio, sdio, map_bh); 948 ret = get_more_blocks(dio, sdio, map_bh);
949 if (ret) { 949 if (ret) {
950 page_cache_release(page); 950 put_page(page);
951 goto out; 951 goto out;
952 } 952 }
953 if (!buffer_mapped(map_bh)) 953 if (!buffer_mapped(map_bh))
@@ -988,7 +988,7 @@ do_holes:
988 988
989 /* AKPM: eargh, -ENOTBLK is a hack */ 989 /* AKPM: eargh, -ENOTBLK is a hack */
990 if (dio->rw & WRITE) { 990 if (dio->rw & WRITE) {
991 page_cache_release(page); 991 put_page(page);
992 return -ENOTBLK; 992 return -ENOTBLK;
993 } 993 }
994 994
@@ -1001,7 +1001,7 @@ do_holes:
1001 if (sdio->block_in_file >= 1001 if (sdio->block_in_file >=
1002 i_size_aligned >> blkbits) { 1002 i_size_aligned >> blkbits) {
1003 /* We hit eof */ 1003 /* We hit eof */
1004 page_cache_release(page); 1004 put_page(page);
1005 goto out; 1005 goto out;
1006 } 1006 }
1007 zero_user(page, from, 1 << blkbits); 1007 zero_user(page, from, 1 << blkbits);
@@ -1041,7 +1041,7 @@ do_holes:
1041 sdio->next_block_for_io, 1041 sdio->next_block_for_io,
1042 map_bh); 1042 map_bh);
1043 if (ret) { 1043 if (ret) {
1044 page_cache_release(page); 1044 put_page(page);
1045 goto out; 1045 goto out;
1046 } 1046 }
1047 sdio->next_block_for_io += this_chunk_blocks; 1047 sdio->next_block_for_io += this_chunk_blocks;
@@ -1057,7 +1057,7 @@ next_block:
1057 } 1057 }
1058 1058
1059 /* Drop the ref which was taken in get_user_pages() */ 1059 /* Drop the ref which was taken in get_user_pages() */
1060 page_cache_release(page); 1060 put_page(page);
1061 } 1061 }
1062out: 1062out:
1063 return ret; 1063 return ret;
@@ -1281,7 +1281,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1281 ret2 = dio_send_cur_page(dio, &sdio, &map_bh); 1281 ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1282 if (retval == 0) 1282 if (retval == 0)
1283 retval = ret2; 1283 retval = ret2;
1284 page_cache_release(sdio.cur_page); 1284 put_page(sdio.cur_page);
1285 sdio.cur_page = NULL; 1285 sdio.cur_page = NULL;
1286 } 1286 }
1287 if (sdio.bio) 1287 if (sdio.bio)
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 519112168a9e..1669f6291c95 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g,
343 struct dlm_cluster *cl = NULL; 343 struct dlm_cluster *cl = NULL;
344 struct dlm_spaces *sps = NULL; 344 struct dlm_spaces *sps = NULL;
345 struct dlm_comms *cms = NULL; 345 struct dlm_comms *cms = NULL;
346 void *gps = NULL;
347 346
348 cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS); 347 cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
349 sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS); 348 sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
350 cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS); 349 cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
351 350
352 if (!cl || !gps || !sps || !cms) 351 if (!cl || !sps || !cms)
353 goto fail; 352 goto fail;
354 353
355 config_group_init_type_name(&cl->group, name, &cluster_type); 354 config_group_init_type_name(&cl->group, name, &cluster_type);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 00640e70ed7a..1ab012a27d9f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con)
640 con->rx_page = alloc_page(GFP_ATOMIC); 640 con->rx_page = alloc_page(GFP_ATOMIC);
641 if (con->rx_page == NULL) 641 if (con->rx_page == NULL)
642 goto out_resched; 642 goto out_resched;
643 cbuf_init(&con->cb, PAGE_CACHE_SIZE); 643 cbuf_init(&con->cb, PAGE_SIZE);
644 } 644 }
645 645
646 /* 646 /*
@@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con)
657 * buffer and the start of the currently used section (cb.base) 657 * buffer and the start of the currently used section (cb.base)
658 */ 658 */
659 if (cbuf_data(&con->cb) >= con->cb.base) { 659 if (cbuf_data(&con->cb) >= con->cb.base) {
660 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); 660 iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
661 iov[1].iov_len = con->cb.base; 661 iov[1].iov_len = con->cb.base;
662 iov[1].iov_base = page_address(con->rx_page); 662 iov[1].iov_base = page_address(con->rx_page);
663 nvec = 2; 663 nvec = 2;
@@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con)
675 ret = dlm_process_incoming_buffer(con->nodeid, 675 ret = dlm_process_incoming_buffer(con->nodeid,
676 page_address(con->rx_page), 676 page_address(con->rx_page),
677 con->cb.base, con->cb.len, 677 con->cb.base, con->cb.len,
678 PAGE_CACHE_SIZE); 678 PAGE_SIZE);
679 if (ret == -EBADMSG) { 679 if (ret == -EBADMSG) {
680 log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d", 680 log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
681 page_address(con->rx_page), con->cb.base, 681 page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
1416 spin_lock(&con->writequeue_lock); 1416 spin_lock(&con->writequeue_lock);
1417 e = list_entry(con->writequeue.prev, struct writequeue_entry, list); 1417 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1418 if ((&e->list == &con->writequeue) || 1418 if ((&e->list == &con->writequeue) ||
1419 (PAGE_CACHE_SIZE - e->end < len)) { 1419 (PAGE_SIZE - e->end < len)) {
1420 e = NULL; 1420 e = NULL;
1421 } else { 1421 } else {
1422 offset = e->end; 1422 offset = e->end;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 64026e53722a..d09cb4cdd09f 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
286 pg = virt_to_page(addr); 286 pg = virt_to_page(addr);
287 offset = offset_in_page(addr); 287 offset = offset_in_page(addr);
288 sg_set_page(&sg[i], pg, 0, offset); 288 sg_set_page(&sg[i], pg, 0, offset);
289 remainder_of_page = PAGE_CACHE_SIZE - offset; 289 remainder_of_page = PAGE_SIZE - offset;
290 if (size >= remainder_of_page) { 290 if (size >= remainder_of_page) {
291 sg[i].length = remainder_of_page; 291 sg[i].length = remainder_of_page;
292 addr += remainder_of_page; 292 addr += remainder_of_page;
@@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
400 struct page *page) 400 struct page *page)
401{ 401{
402 return ecryptfs_lower_header_size(crypt_stat) + 402 return ecryptfs_lower_header_size(crypt_stat) +
403 ((loff_t)page->index << PAGE_CACHE_SHIFT); 403 ((loff_t)page->index << PAGE_SHIFT);
404} 404}
405 405
406/** 406/**
@@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
428 size_t extent_size = crypt_stat->extent_size; 428 size_t extent_size = crypt_stat->extent_size;
429 int rc; 429 int rc;
430 430
431 extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size)); 431 extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
432 rc = ecryptfs_derive_iv(extent_iv, crypt_stat, 432 rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
433 (extent_base + extent_offset)); 433 (extent_base + extent_offset));
434 if (rc) { 434 if (rc) {
@@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page)
498 } 498 }
499 499
500 for (extent_offset = 0; 500 for (extent_offset = 0;
501 extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); 501 extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
502 extent_offset++) { 502 extent_offset++) {
503 rc = crypt_extent(crypt_stat, enc_extent_page, page, 503 rc = crypt_extent(crypt_stat, enc_extent_page, page,
504 extent_offset, ENCRYPT); 504 extent_offset, ENCRYPT);
@@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page)
512 lower_offset = lower_offset_for_page(crypt_stat, page); 512 lower_offset = lower_offset_for_page(crypt_stat, page);
513 enc_extent_virt = kmap(enc_extent_page); 513 enc_extent_virt = kmap(enc_extent_page);
514 rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset, 514 rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
515 PAGE_CACHE_SIZE); 515 PAGE_SIZE);
516 kunmap(enc_extent_page); 516 kunmap(enc_extent_page);
517 if (rc < 0) { 517 if (rc < 0) {
518 ecryptfs_printk(KERN_ERR, 518 ecryptfs_printk(KERN_ERR,
@@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page)
560 560
561 lower_offset = lower_offset_for_page(crypt_stat, page); 561 lower_offset = lower_offset_for_page(crypt_stat, page);
562 page_virt = kmap(page); 562 page_virt = kmap(page);
563 rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE, 563 rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
564 ecryptfs_inode); 564 ecryptfs_inode);
565 kunmap(page); 565 kunmap(page);
566 if (rc < 0) { 566 if (rc < 0) {
@@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page)
571 } 571 }
572 572
573 for (extent_offset = 0; 573 for (extent_offset = 0;
574 extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); 574 extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
575 extent_offset++) { 575 extent_offset++) {
576 rc = crypt_extent(crypt_stat, page, page, 576 rc = crypt_extent(crypt_stat, page, page,
577 extent_offset, DECRYPT); 577 extent_offset, DECRYPT);
@@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
659 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 659 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
660 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; 660 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
661 else { 661 else {
662 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) 662 if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
663 crypt_stat->metadata_size = 663 crypt_stat->metadata_size =
664 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; 664 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
665 else 665 else
666 crypt_stat->metadata_size = PAGE_CACHE_SIZE; 666 crypt_stat->metadata_size = PAGE_SIZE;
667 } 667 }
668} 668}
669 669
@@ -1442,7 +1442,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
1442 ECRYPTFS_VALIDATE_HEADER_SIZE); 1442 ECRYPTFS_VALIDATE_HEADER_SIZE);
1443 if (rc) { 1443 if (rc) {
1444 /* metadata is not in the file header, so try xattrs */ 1444 /* metadata is not in the file header, so try xattrs */
1445 memset(page_virt, 0, PAGE_CACHE_SIZE); 1445 memset(page_virt, 0, PAGE_SIZE);
1446 rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); 1446 rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
1447 if (rc) { 1447 if (rc) {
1448 printk(KERN_DEBUG "Valid eCryptfs headers not found in " 1448 printk(KERN_DEBUG "Valid eCryptfs headers not found in "
@@ -1475,7 +1475,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
1475 } 1475 }
1476out: 1476out:
1477 if (page_virt) { 1477 if (page_virt) {
1478 memset(page_virt, 0, PAGE_CACHE_SIZE); 1478 memset(page_virt, 0, PAGE_SIZE);
1479 kmem_cache_free(ecryptfs_header_cache, page_virt); 1479 kmem_cache_free(ecryptfs_header_cache, page_virt);
1480 } 1480 }
1481 return rc; 1481 return rc;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 121114e9a464..224b49e71aa4 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
763 } else { /* ia->ia_size < i_size_read(inode) */ 763 } else { /* ia->ia_size < i_size_read(inode) */
764 /* We're chopping off all the pages down to the page 764 /* We're chopping off all the pages down to the page
765 * in which ia->ia_size is located. Fill in the end of 765 * in which ia->ia_size is located. Fill in the end of
766 * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to 766 * that page from (ia->ia_size & ~PAGE_MASK) to
767 * PAGE_CACHE_SIZE with zeros. */ 767 * PAGE_SIZE with zeros. */
768 size_t num_zeros = (PAGE_CACHE_SIZE 768 size_t num_zeros = (PAGE_SIZE
769 - (ia->ia_size & ~PAGE_CACHE_MASK)); 769 - (ia->ia_size & ~PAGE_MASK));
770 770
771 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 771 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
772 truncate_setsize(inode, ia->ia_size); 772 truncate_setsize(inode, ia->ia_size);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 9893d1538122..3cf1546dca82 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1798,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
1798 * added the our &auth_tok_list */ 1798 * added the our &auth_tok_list */
1799 next_packet_is_auth_tok_packet = 1; 1799 next_packet_is_auth_tok_packet = 1;
1800 while (next_packet_is_auth_tok_packet) { 1800 while (next_packet_is_auth_tok_packet) {
1801 size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i); 1801 size_t max_packet_size = ((PAGE_SIZE - 8) - i);
1802 1802
1803 switch (src[i]) { 1803 switch (src[i]) {
1804 case ECRYPTFS_TAG_3_PACKET_TYPE: 1804 case ECRYPTFS_TAG_3_PACKET_TYPE:
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8b0b4a73116d..1698132d0e57 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -695,12 +695,12 @@ static struct ecryptfs_cache_info {
695 { 695 {
696 .cache = &ecryptfs_header_cache, 696 .cache = &ecryptfs_header_cache,
697 .name = "ecryptfs_headers", 697 .name = "ecryptfs_headers",
698 .size = PAGE_CACHE_SIZE, 698 .size = PAGE_SIZE,
699 }, 699 },
700 { 700 {
701 .cache = &ecryptfs_xattr_cache, 701 .cache = &ecryptfs_xattr_cache,
702 .name = "ecryptfs_xattr_cache", 702 .name = "ecryptfs_xattr_cache",
703 .size = PAGE_CACHE_SIZE, 703 .size = PAGE_SIZE,
704 }, 704 },
705 { 705 {
706 .cache = &ecryptfs_key_record_cache, 706 .cache = &ecryptfs_key_record_cache,
@@ -818,7 +818,7 @@ static int __init ecryptfs_init(void)
818{ 818{
819 int rc; 819 int rc;
820 820
821 if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { 821 if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
822 rc = -EINVAL; 822 rc = -EINVAL;
823 ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " 823 ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
824 "larger than the host's page size, and so " 824 "larger than the host's page size, and so "
@@ -826,7 +826,7 @@ static int __init ecryptfs_init(void)
826 "default eCryptfs extent size is [%u] bytes; " 826 "default eCryptfs extent size is [%u] bytes; "
827 "the page size is [%lu] bytes.\n", 827 "the page size is [%lu] bytes.\n",
828 ECRYPTFS_DEFAULT_EXTENT_SIZE, 828 ECRYPTFS_DEFAULT_EXTENT_SIZE,
829 (unsigned long)PAGE_CACHE_SIZE); 829 (unsigned long)PAGE_SIZE);
830 goto out; 830 goto out;
831 } 831 }
832 rc = ecryptfs_init_kmem_caches(); 832 rc = ecryptfs_init_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 1f5865263b3e..e6b1d80952b9 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
122 struct ecryptfs_crypt_stat *crypt_stat) 122 struct ecryptfs_crypt_stat *crypt_stat)
123{ 123{
124 loff_t extent_num_in_page = 0; 124 loff_t extent_num_in_page = 0;
125 loff_t num_extents_per_page = (PAGE_CACHE_SIZE 125 loff_t num_extents_per_page = (PAGE_SIZE
126 / crypt_stat->extent_size); 126 / crypt_stat->extent_size);
127 int rc = 0; 127 int rc = 0;
128 128
@@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
138 char *page_virt; 138 char *page_virt;
139 139
140 page_virt = kmap_atomic(page); 140 page_virt = kmap_atomic(page);
141 memset(page_virt, 0, PAGE_CACHE_SIZE); 141 memset(page_virt, 0, PAGE_SIZE);
142 /* TODO: Support more than one header extent */ 142 /* TODO: Support more than one header extent */
143 if (view_extent_num == 0) { 143 if (view_extent_num == 0) {
144 size_t written; 144 size_t written;
@@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
164 - crypt_stat->metadata_size); 164 - crypt_stat->metadata_size);
165 165
166 rc = ecryptfs_read_lower_page_segment( 166 rc = ecryptfs_read_lower_page_segment(
167 page, (lower_offset >> PAGE_CACHE_SHIFT), 167 page, (lower_offset >> PAGE_SHIFT),
168 (lower_offset & ~PAGE_CACHE_MASK), 168 (lower_offset & ~PAGE_MASK),
169 crypt_stat->extent_size, page->mapping->host); 169 crypt_stat->extent_size, page->mapping->host);
170 if (rc) { 170 if (rc) {
171 printk(KERN_ERR "%s: Error attempting to read " 171 printk(KERN_ERR "%s: Error attempting to read "
@@ -198,7 +198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
198 198
199 if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 199 if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
200 rc = ecryptfs_read_lower_page_segment(page, page->index, 0, 200 rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
201 PAGE_CACHE_SIZE, 201 PAGE_SIZE,
202 page->mapping->host); 202 page->mapping->host);
203 } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) { 203 } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
204 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { 204 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
@@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
215 215
216 } else { 216 } else {
217 rc = ecryptfs_read_lower_page_segment( 217 rc = ecryptfs_read_lower_page_segment(
218 page, page->index, 0, PAGE_CACHE_SIZE, 218 page, page->index, 0, PAGE_SIZE,
219 page->mapping->host); 219 page->mapping->host);
220 if (rc) { 220 if (rc) {
221 printk(KERN_ERR "Error reading page; rc = " 221 printk(KERN_ERR "Error reading page; rc = "
@@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
250 struct inode *inode = page->mapping->host; 250 struct inode *inode = page->mapping->host;
251 int end_byte_in_page; 251 int end_byte_in_page;
252 252
253 if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) 253 if ((i_size_read(inode) / PAGE_SIZE) != page->index)
254 goto out; 254 goto out;
255 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; 255 end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
256 if (to > end_byte_in_page) 256 if (to > end_byte_in_page)
257 end_byte_in_page = to; 257 end_byte_in_page = to;
258 zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE); 258 zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
259out: 259out:
260 return 0; 260 return 0;
261} 261}
@@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file,
279 loff_t pos, unsigned len, unsigned flags, 279 loff_t pos, unsigned len, unsigned flags,
280 struct page **pagep, void **fsdata) 280 struct page **pagep, void **fsdata)
281{ 281{
282 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 282 pgoff_t index = pos >> PAGE_SHIFT;
283 struct page *page; 283 struct page *page;
284 loff_t prev_page_end_size; 284 loff_t prev_page_end_size;
285 int rc = 0; 285 int rc = 0;
@@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file,
289 return -ENOMEM; 289 return -ENOMEM;
290 *pagep = page; 290 *pagep = page;
291 291
292 prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT); 292 prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
293 if (!PageUptodate(page)) { 293 if (!PageUptodate(page)) {
294 struct ecryptfs_crypt_stat *crypt_stat = 294 struct ecryptfs_crypt_stat *crypt_stat =
295 &ecryptfs_inode_to_private(mapping->host)->crypt_stat; 295 &ecryptfs_inode_to_private(mapping->host)->crypt_stat;
296 296
297 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 297 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
298 rc = ecryptfs_read_lower_page_segment( 298 rc = ecryptfs_read_lower_page_segment(
299 page, index, 0, PAGE_CACHE_SIZE, mapping->host); 299 page, index, 0, PAGE_SIZE, mapping->host);
300 if (rc) { 300 if (rc) {
301 printk(KERN_ERR "%s: Error attempting to read " 301 printk(KERN_ERR "%s: Error attempting to read "
302 "lower page segment; rc = [%d]\n", 302 "lower page segment; rc = [%d]\n",
@@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file,
322 SetPageUptodate(page); 322 SetPageUptodate(page);
323 } else { 323 } else {
324 rc = ecryptfs_read_lower_page_segment( 324 rc = ecryptfs_read_lower_page_segment(
325 page, index, 0, PAGE_CACHE_SIZE, 325 page, index, 0, PAGE_SIZE,
326 mapping->host); 326 mapping->host);
327 if (rc) { 327 if (rc) {
328 printk(KERN_ERR "%s: Error reading " 328 printk(KERN_ERR "%s: Error reading "
@@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file,
336 } else { 336 } else {
337 if (prev_page_end_size 337 if (prev_page_end_size
338 >= i_size_read(page->mapping->host)) { 338 >= i_size_read(page->mapping->host)) {
339 zero_user(page, 0, PAGE_CACHE_SIZE); 339 zero_user(page, 0, PAGE_SIZE);
340 SetPageUptodate(page); 340 SetPageUptodate(page);
341 } else if (len < PAGE_CACHE_SIZE) { 341 } else if (len < PAGE_SIZE) {
342 rc = ecryptfs_decrypt_page(page); 342 rc = ecryptfs_decrypt_page(page);
343 if (rc) { 343 if (rc) {
344 printk(KERN_ERR "%s: Error decrypting " 344 printk(KERN_ERR "%s: Error decrypting "
@@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file,
371 * of page? Zero it out. */ 371 * of page? Zero it out. */
372 if ((i_size_read(mapping->host) == prev_page_end_size) 372 if ((i_size_read(mapping->host) == prev_page_end_size)
373 && (pos != 0)) 373 && (pos != 0))
374 zero_user(page, 0, PAGE_CACHE_SIZE); 374 zero_user(page, 0, PAGE_SIZE);
375out: 375out:
376 if (unlikely(rc)) { 376 if (unlikely(rc)) {
377 unlock_page(page); 377 unlock_page(page);
378 page_cache_release(page); 378 put_page(page);
379 *pagep = NULL; 379 *pagep = NULL;
380 } 380 }
381 return rc; 381 return rc;
@@ -437,7 +437,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
437 } 437 }
438 inode_lock(lower_inode); 438 inode_lock(lower_inode);
439 size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME, 439 size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
440 xattr_virt, PAGE_CACHE_SIZE); 440 xattr_virt, PAGE_SIZE);
441 if (size < 0) 441 if (size < 0)
442 size = 8; 442 size = 8;
443 put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt); 443 put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
@@ -479,8 +479,8 @@ static int ecryptfs_write_end(struct file *file,
479 loff_t pos, unsigned len, unsigned copied, 479 loff_t pos, unsigned len, unsigned copied,
480 struct page *page, void *fsdata) 480 struct page *page, void *fsdata)
481{ 481{
482 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 482 pgoff_t index = pos >> PAGE_SHIFT;
483 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 483 unsigned from = pos & (PAGE_SIZE - 1);
484 unsigned to = from + copied; 484 unsigned to = from + copied;
485 struct inode *ecryptfs_inode = mapping->host; 485 struct inode *ecryptfs_inode = mapping->host;
486 struct ecryptfs_crypt_stat *crypt_stat = 486 struct ecryptfs_crypt_stat *crypt_stat =
@@ -500,7 +500,7 @@ static int ecryptfs_write_end(struct file *file,
500 goto out; 500 goto out;
501 } 501 }
502 if (!PageUptodate(page)) { 502 if (!PageUptodate(page)) {
503 if (copied < PAGE_CACHE_SIZE) { 503 if (copied < PAGE_SIZE) {
504 rc = 0; 504 rc = 0;
505 goto out; 505 goto out;
506 } 506 }
@@ -533,7 +533,7 @@ static int ecryptfs_write_end(struct file *file,
533 rc = copied; 533 rc = copied;
534out: 534out:
535 unlock_page(page); 535 unlock_page(page);
536 page_cache_release(page); 536 put_page(page);
537 return rc; 537 return rc;
538} 538}
539 539
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 09fe622274e4..158a3a39f82d 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
74 loff_t offset; 74 loff_t offset;
75 int rc; 75 int rc;
76 76
77 offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT) 77 offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
78 + offset_in_page); 78 + offset_in_page);
79 virt = kmap(page_for_lower); 79 virt = kmap(page_for_lower);
80 rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size); 80 rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
@@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
123 else 123 else
124 pos = offset; 124 pos = offset;
125 while (pos < (offset + size)) { 125 while (pos < (offset + size)) {
126 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); 126 pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
127 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); 127 size_t start_offset_in_page = (pos & ~PAGE_MASK);
128 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); 128 size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
129 loff_t total_remaining_bytes = ((offset + size) - pos); 129 loff_t total_remaining_bytes = ((offset + size) - pos);
130 130
131 if (fatal_signal_pending(current)) { 131 if (fatal_signal_pending(current)) {
@@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
165 * Fill in zero values to the end of the page */ 165 * Fill in zero values to the end of the page */
166 memset(((char *)ecryptfs_page_virt 166 memset(((char *)ecryptfs_page_virt
167 + start_offset_in_page), 0, 167 + start_offset_in_page), 0,
168 PAGE_CACHE_SIZE - start_offset_in_page); 168 PAGE_SIZE - start_offset_in_page);
169 } 169 }
170 170
171 /* pos >= offset, we are now writing the data request */ 171 /* pos >= offset, we are now writing the data request */
@@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
186 ecryptfs_page, 186 ecryptfs_page,
187 start_offset_in_page, 187 start_offset_in_page,
188 data_offset); 188 data_offset);
189 page_cache_release(ecryptfs_page); 189 put_page(ecryptfs_page);
190 if (rc) { 190 if (rc) {
191 printk(KERN_ERR "%s: Error encrypting " 191 printk(KERN_ERR "%s: Error encrypting "
192 "page; rc = [%d]\n", __func__, rc); 192 "page; rc = [%d]\n", __func__, rc);
@@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
262 loff_t offset; 262 loff_t offset;
263 int rc; 263 int rc;
264 264
265 offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page); 265 offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
266 virt = kmap(page_for_ecryptfs); 266 virt = kmap(page_for_ecryptfs);
267 rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode); 267 rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
268 if (rc > 0) 268 if (rc > 0)
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index dd029d13ea61..553c5d2db4a4 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
197 efivarfs_sb = sb; 197 efivarfs_sb = sb;
198 198
199 sb->s_maxbytes = MAX_LFS_FILESIZE; 199 sb->s_maxbytes = MAX_LFS_FILESIZE;
200 sb->s_blocksize = PAGE_CACHE_SIZE; 200 sb->s_blocksize = PAGE_SIZE;
201 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 201 sb->s_blocksize_bits = PAGE_SHIFT;
202 sb->s_magic = EFIVARFS_MAGIC; 202 sb->s_magic = EFIVARFS_MAGIC;
203 sb->s_op = &efivarfs_ops; 203 sb->s_op = &efivarfs_ops;
204 sb->s_d_op = &efivarfs_d_ops; 204 sb->s_d_op = &efivarfs_d_ops;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index e5bb2abf77f9..547b93cbea63 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode)
41static inline void exofs_put_page(struct page *page) 41static inline void exofs_put_page(struct page *page)
42{ 42{
43 kunmap(page); 43 kunmap(page);
44 page_cache_release(page); 44 put_page(page);
45} 45}
46 46
47static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr) 47static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
48{ 48{
49 loff_t last_byte = inode->i_size; 49 loff_t last_byte = inode->i_size;
50 50
51 last_byte -= page_nr << PAGE_CACHE_SHIFT; 51 last_byte -= page_nr << PAGE_SHIFT;
52 if (last_byte > PAGE_CACHE_SIZE) 52 if (last_byte > PAGE_SIZE)
53 last_byte = PAGE_CACHE_SIZE; 53 last_byte = PAGE_SIZE;
54 return last_byte; 54 return last_byte;
55} 55}
56 56
@@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page)
85 unsigned chunk_size = exofs_chunk_size(dir); 85 unsigned chunk_size = exofs_chunk_size(dir);
86 char *kaddr = page_address(page); 86 char *kaddr = page_address(page);
87 unsigned offs, rec_len; 87 unsigned offs, rec_len;
88 unsigned limit = PAGE_CACHE_SIZE; 88 unsigned limit = PAGE_SIZE;
89 struct exofs_dir_entry *p; 89 struct exofs_dir_entry *p;
90 char *error; 90 char *error;
91 91
92 /* if the page is the last one in the directory */ 92 /* if the page is the last one in the directory */
93 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { 93 if ((dir->i_size >> PAGE_SHIFT) == page->index) {
94 limit = dir->i_size & ~PAGE_CACHE_MASK; 94 limit = dir->i_size & ~PAGE_MASK;
95 if (limit & (chunk_size - 1)) 95 if (limit & (chunk_size - 1))
96 goto Ebadsize; 96 goto Ebadsize;
97 if (!limit) 97 if (!limit)
@@ -138,7 +138,7 @@ bad_entry:
138 EXOFS_ERR( 138 EXOFS_ERR(
139 "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - " 139 "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
140 "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n", 140 "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
141 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, 141 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
142 _LLU(le64_to_cpu(p->inode_no)), 142 _LLU(le64_to_cpu(p->inode_no)),
143 rec_len, p->name_len); 143 rec_len, p->name_len);
144 goto fail; 144 goto fail;
@@ -147,7 +147,7 @@ Eend:
147 EXOFS_ERR("ERROR [exofs_check_page]: " 147 EXOFS_ERR("ERROR [exofs_check_page]: "
148 "entry in directory(0x%lx) spans the page boundary" 148 "entry in directory(0x%lx) spans the page boundary"
149 "offset=%lu, inode=0x%llx\n", 149 "offset=%lu, inode=0x%llx\n",
150 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, 150 dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
151 _LLU(le64_to_cpu(p->inode_no))); 151 _LLU(le64_to_cpu(p->inode_no)));
152fail: 152fail:
153 SetPageChecked(page); 153 SetPageChecked(page);
@@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
237{ 237{
238 loff_t pos = ctx->pos; 238 loff_t pos = ctx->pos;
239 struct inode *inode = file_inode(file); 239 struct inode *inode = file_inode(file);
240 unsigned int offset = pos & ~PAGE_CACHE_MASK; 240 unsigned int offset = pos & ~PAGE_MASK;
241 unsigned long n = pos >> PAGE_CACHE_SHIFT; 241 unsigned long n = pos >> PAGE_SHIFT;
242 unsigned long npages = dir_pages(inode); 242 unsigned long npages = dir_pages(inode);
243 unsigned chunk_mask = ~(exofs_chunk_size(inode)-1); 243 unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
244 int need_revalidate = (file->f_version != inode->i_version); 244 int need_revalidate = (file->f_version != inode->i_version);
@@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
254 if (IS_ERR(page)) { 254 if (IS_ERR(page)) {
255 EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", 255 EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
256 inode->i_ino); 256 inode->i_ino);
257 ctx->pos += PAGE_CACHE_SIZE - offset; 257 ctx->pos += PAGE_SIZE - offset;
258 return PTR_ERR(page); 258 return PTR_ERR(page);
259 } 259 }
260 kaddr = page_address(page); 260 kaddr = page_address(page);
@@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
262 if (offset) { 262 if (offset) {
263 offset = exofs_validate_entry(kaddr, offset, 263 offset = exofs_validate_entry(kaddr, offset,
264 chunk_mask); 264 chunk_mask);
265 ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; 265 ctx->pos = (n<<PAGE_SHIFT) + offset;
266 } 266 }
267 file->f_version = inode->i_version; 267 file->f_version = inode->i_version;
268 need_revalidate = 0; 268 need_revalidate = 0;
@@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
449 kaddr = page_address(page); 449 kaddr = page_address(page);
450 dir_end = kaddr + exofs_last_byte(dir, n); 450 dir_end = kaddr + exofs_last_byte(dir, n);
451 de = (struct exofs_dir_entry *)kaddr; 451 de = (struct exofs_dir_entry *)kaddr;
452 kaddr += PAGE_CACHE_SIZE - reclen; 452 kaddr += PAGE_SIZE - reclen;
453 while ((char *)de <= kaddr) { 453 while ((char *)de <= kaddr) {
454 if ((char *)de == dir_end) { 454 if ((char *)de == dir_end) {
455 name_len = 0; 455 name_len = 0;
@@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
602 kunmap_atomic(kaddr); 602 kunmap_atomic(kaddr);
603 err = exofs_commit_chunk(page, 0, chunk_size); 603 err = exofs_commit_chunk(page, 0, chunk_size);
604fail: 604fail:
605 page_cache_release(page); 605 put_page(page);
606 return err; 606 return err;
607} 607}
608 608
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 9eaf595aeaf8..49e1bd00b4ec 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol)
317 317
318 if (!pcol->ios) { 318 if (!pcol->ios) {
319 int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true, 319 int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
320 pcol->pg_first << PAGE_CACHE_SHIFT, 320 pcol->pg_first << PAGE_SHIFT,
321 pcol->length, &pcol->ios); 321 pcol->length, &pcol->ios);
322 322
323 if (ret) 323 if (ret)
@@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page)
383 struct inode *inode = pcol->inode; 383 struct inode *inode = pcol->inode;
384 struct exofs_i_info *oi = exofs_i(inode); 384 struct exofs_i_info *oi = exofs_i(inode);
385 loff_t i_size = i_size_read(inode); 385 loff_t i_size = i_size_read(inode);
386 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 386 pgoff_t end_index = i_size >> PAGE_SHIFT;
387 size_t len; 387 size_t len;
388 int ret; 388 int ret;
389 389
@@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page)
397 pcol->that_locked_page = page; 397 pcol->that_locked_page = page;
398 398
399 if (page->index < end_index) 399 if (page->index < end_index)
400 len = PAGE_CACHE_SIZE; 400 len = PAGE_SIZE;
401 else if (page->index == end_index) 401 else if (page->index == end_index)
402 len = i_size & ~PAGE_CACHE_MASK; 402 len = i_size & ~PAGE_MASK;
403 else 403 else
404 len = 0; 404 len = 0;
405 405
@@ -442,8 +442,8 @@ try_again:
442 goto fail; 442 goto fail;
443 } 443 }
444 444
445 if (len != PAGE_CACHE_SIZE) 445 if (len != PAGE_SIZE)
446 zero_user(page, len, PAGE_CACHE_SIZE - len); 446 zero_user(page, len, PAGE_SIZE - len);
447 447
448 EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n", 448 EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
449 inode->i_ino, page->index, len); 449 inode->i_ino, page->index, len);
@@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page)
609 609
610 if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) { 610 if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
611 EXOFS_DBGMSG2("index=0x%lx\n", page->index); 611 EXOFS_DBGMSG2("index=0x%lx\n", page->index);
612 page_cache_release(page); 612 put_page(page);
613 return; 613 return;
614 } 614 }
615 EXOFS_DBGMSG2("that_locked_page index=0x%lx\n", 615 EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
@@ -633,7 +633,7 @@ static int write_exec(struct page_collect *pcol)
633 633
634 BUG_ON(pcol->ios); 634 BUG_ON(pcol->ios);
635 ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false, 635 ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
636 pcol->pg_first << PAGE_CACHE_SHIFT, 636 pcol->pg_first << PAGE_SHIFT,
637 pcol->length, &pcol->ios); 637 pcol->length, &pcol->ios);
638 if (unlikely(ret)) 638 if (unlikely(ret))
639 goto err; 639 goto err;
@@ -696,7 +696,7 @@ static int writepage_strip(struct page *page,
696 struct inode *inode = pcol->inode; 696 struct inode *inode = pcol->inode;
697 struct exofs_i_info *oi = exofs_i(inode); 697 struct exofs_i_info *oi = exofs_i(inode);
698 loff_t i_size = i_size_read(inode); 698 loff_t i_size = i_size_read(inode);
699 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 699 pgoff_t end_index = i_size >> PAGE_SHIFT;
700 size_t len; 700 size_t len;
701 int ret; 701 int ret;
702 702
@@ -708,9 +708,9 @@ static int writepage_strip(struct page *page,
708 708
709 if (page->index < end_index) 709 if (page->index < end_index)
710 /* in this case, the page is within the limits of the file */ 710 /* in this case, the page is within the limits of the file */
711 len = PAGE_CACHE_SIZE; 711 len = PAGE_SIZE;
712 else { 712 else {
713 len = i_size & ~PAGE_CACHE_MASK; 713 len = i_size & ~PAGE_MASK;
714 714
715 if (page->index > end_index || !len) { 715 if (page->index > end_index || !len) {
716 /* in this case, the page is outside the limits 716 /* in this case, the page is outside the limits
@@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping,
790 long start, end, expected_pages; 790 long start, end, expected_pages;
791 int ret; 791 int ret;
792 792
793 start = wbc->range_start >> PAGE_CACHE_SHIFT; 793 start = wbc->range_start >> PAGE_SHIFT;
794 end = (wbc->range_end == LLONG_MAX) ? 794 end = (wbc->range_end == LLONG_MAX) ?
795 start + mapping->nrpages : 795 start + mapping->nrpages :
796 wbc->range_end >> PAGE_CACHE_SHIFT; 796 wbc->range_end >> PAGE_SHIFT;
797 797
798 if (start || end) 798 if (start || end)
799 expected_pages = end - start + 1; 799 expected_pages = end - start + 1;
@@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
881 } 881 }
882 882
883 /* read modify write */ 883 /* read modify write */
884 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { 884 if (!PageUptodate(page) && (len != PAGE_SIZE)) {
885 loff_t i_size = i_size_read(mapping->host); 885 loff_t i_size = i_size_read(mapping->host);
886 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 886 pgoff_t end_index = i_size >> PAGE_SHIFT;
887 size_t rlen; 887 size_t rlen;
888 888
889 if (page->index < end_index) 889 if (page->index < end_index)
890 rlen = PAGE_CACHE_SIZE; 890 rlen = PAGE_SIZE;
891 else if (page->index == end_index) 891 else if (page->index == end_index)
892 rlen = i_size & ~PAGE_CACHE_MASK; 892 rlen = i_size & ~PAGE_MASK;
893 else 893 else
894 rlen = 0; 894 rlen = 0;
895 895
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index c20d77df2679..622a686bb08b 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
292out_dir: 292out_dir:
293 if (dir_de) { 293 if (dir_de) {
294 kunmap(dir_page); 294 kunmap(dir_page);
295 page_cache_release(dir_page); 295 put_page(dir_page);
296 } 296 }
297out_old: 297out_old:
298 kunmap(old_page); 298 kunmap(old_page);
299 page_cache_release(old_page); 299 put_page(old_page);
300out: 300out:
301 return err; 301 return err;
302} 302}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 0c6638b40f21..7ff6fcfa685d 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
37{ 37{
38 unsigned len = le16_to_cpu(dlen); 38 unsigned len = le16_to_cpu(dlen);
39 39
40#if (PAGE_CACHE_SIZE >= 65536) 40#if (PAGE_SIZE >= 65536)
41 if (len == EXT2_MAX_REC_LEN) 41 if (len == EXT2_MAX_REC_LEN)
42 return 1 << 16; 42 return 1 << 16;
43#endif 43#endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
46 46
47static inline __le16 ext2_rec_len_to_disk(unsigned len) 47static inline __le16 ext2_rec_len_to_disk(unsigned len)
48{ 48{
49#if (PAGE_CACHE_SIZE >= 65536) 49#if (PAGE_SIZE >= 65536)
50 if (len == (1 << 16)) 50 if (len == (1 << 16))
51 return cpu_to_le16(EXT2_MAX_REC_LEN); 51 return cpu_to_le16(EXT2_MAX_REC_LEN);
52 else 52 else
@@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode)
67static inline void ext2_put_page(struct page *page) 67static inline void ext2_put_page(struct page *page)
68{ 68{
69 kunmap(page); 69 kunmap(page);
70 page_cache_release(page); 70 put_page(page);
71} 71}
72 72
73/* 73/*
@@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
79{ 79{
80 unsigned last_byte = inode->i_size; 80 unsigned last_byte = inode->i_size;
81 81
82 last_byte -= page_nr << PAGE_CACHE_SHIFT; 82 last_byte -= page_nr << PAGE_SHIFT;
83 if (last_byte > PAGE_CACHE_SIZE) 83 if (last_byte > PAGE_SIZE)
84 last_byte = PAGE_CACHE_SIZE; 84 last_byte = PAGE_SIZE;
85 return last_byte; 85 return last_byte;
86} 86}
87 87
@@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet)
118 char *kaddr = page_address(page); 118 char *kaddr = page_address(page);
119 u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); 119 u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
120 unsigned offs, rec_len; 120 unsigned offs, rec_len;
121 unsigned limit = PAGE_CACHE_SIZE; 121 unsigned limit = PAGE_SIZE;
122 ext2_dirent *p; 122 ext2_dirent *p;
123 char *error; 123 char *error;
124 124
125 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { 125 if ((dir->i_size >> PAGE_SHIFT) == page->index) {
126 limit = dir->i_size & ~PAGE_CACHE_MASK; 126 limit = dir->i_size & ~PAGE_MASK;
127 if (limit & (chunk_size - 1)) 127 if (limit & (chunk_size - 1))
128 goto Ebadsize; 128 goto Ebadsize;
129 if (!limit) 129 if (!limit)
@@ -176,7 +176,7 @@ bad_entry:
176 if (!quiet) 176 if (!quiet)
177 ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " 177 ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
178 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", 178 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
179 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, 179 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
180 (unsigned long) le32_to_cpu(p->inode), 180 (unsigned long) le32_to_cpu(p->inode),
181 rec_len, p->name_len); 181 rec_len, p->name_len);
182 goto fail; 182 goto fail;
@@ -186,7 +186,7 @@ Eend:
186 ext2_error(sb, "ext2_check_page", 186 ext2_error(sb, "ext2_check_page",
187 "entry in directory #%lu spans the page boundary" 187 "entry in directory #%lu spans the page boundary"
188 "offset=%lu, inode=%lu", 188 "offset=%lu, inode=%lu",
189 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, 189 dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
190 (unsigned long) le32_to_cpu(p->inode)); 190 (unsigned long) le32_to_cpu(p->inode));
191 } 191 }
192fail: 192fail:
@@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
287 loff_t pos = ctx->pos; 287 loff_t pos = ctx->pos;
288 struct inode *inode = file_inode(file); 288 struct inode *inode = file_inode(file);
289 struct super_block *sb = inode->i_sb; 289 struct super_block *sb = inode->i_sb;
290 unsigned int offset = pos & ~PAGE_CACHE_MASK; 290 unsigned int offset = pos & ~PAGE_MASK;
291 unsigned long n = pos >> PAGE_CACHE_SHIFT; 291 unsigned long n = pos >> PAGE_SHIFT;
292 unsigned long npages = dir_pages(inode); 292 unsigned long npages = dir_pages(inode);
293 unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); 293 unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
294 unsigned char *types = NULL; 294 unsigned char *types = NULL;
@@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
309 ext2_error(sb, __func__, 309 ext2_error(sb, __func__,
310 "bad page in #%lu", 310 "bad page in #%lu",
311 inode->i_ino); 311 inode->i_ino);
312 ctx->pos += PAGE_CACHE_SIZE - offset; 312 ctx->pos += PAGE_SIZE - offset;
313 return PTR_ERR(page); 313 return PTR_ERR(page);
314 } 314 }
315 kaddr = page_address(page); 315 kaddr = page_address(page);
316 if (unlikely(need_revalidate)) { 316 if (unlikely(need_revalidate)) {
317 if (offset) { 317 if (offset) {
318 offset = ext2_validate_entry(kaddr, offset, chunk_mask); 318 offset = ext2_validate_entry(kaddr, offset, chunk_mask);
319 ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; 319 ctx->pos = (n<<PAGE_SHIFT) + offset;
320 } 320 }
321 file->f_version = inode->i_version; 321 file->f_version = inode->i_version;
322 need_revalidate = 0; 322 need_revalidate = 0;
@@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
406 if (++n >= npages) 406 if (++n >= npages)
407 n = 0; 407 n = 0;
408 /* next page is past the blocks we've got */ 408 /* next page is past the blocks we've got */
409 if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { 409 if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
410 ext2_error(dir->i_sb, __func__, 410 ext2_error(dir->i_sb, __func__,
411 "dir %lu size %lld exceeds block count %llu", 411 "dir %lu size %lld exceeds block count %llu",
412 dir->i_ino, dir->i_size, 412 dir->i_ino, dir->i_size,
@@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
511 kaddr = page_address(page); 511 kaddr = page_address(page);
512 dir_end = kaddr + ext2_last_byte(dir, n); 512 dir_end = kaddr + ext2_last_byte(dir, n);
513 de = (ext2_dirent *)kaddr; 513 de = (ext2_dirent *)kaddr;
514 kaddr += PAGE_CACHE_SIZE - reclen; 514 kaddr += PAGE_SIZE - reclen;
515 while ((char *)de <= kaddr) { 515 while ((char *)de <= kaddr) {
516 if ((char *)de == dir_end) { 516 if ((char *)de == dir_end) {
517 /* We hit i_size */ 517 /* We hit i_size */
@@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
655 kunmap_atomic(kaddr); 655 kunmap_atomic(kaddr);
656 err = ext2_commit_chunk(page, 0, chunk_size); 656 err = ext2_commit_chunk(page, 0, chunk_size);
657fail: 657fail:
658 page_cache_release(page); 658 put_page(page);
659 return err; 659 return err;
660} 660}
661 661
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 7a2be8f7f3c3..d34843925b23 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
398 ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); 398 ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
399 else { 399 else {
400 kunmap(dir_page); 400 kunmap(dir_page);
401 page_cache_release(dir_page); 401 put_page(dir_page);
402 } 402 }
403 inode_dec_link_count(old_dir); 403 inode_dec_link_count(old_dir);
404 } 404 }
@@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
408out_dir: 408out_dir:
409 if (dir_de) { 409 if (dir_de) {
410 kunmap(dir_page); 410 kunmap(dir_page);
411 page_cache_release(dir_page); 411 put_page(dir_page);
412 } 412 }
413out_old: 413out_old:
414 kunmap(old_page); 414 kunmap(old_page);
415 page_cache_release(old_page); 415 put_page(old_page);
416out: 416out:
417 return err; 417 return err;
418} 418}
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index edc053a81914..db9ae6e18154 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -91,7 +91,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
91 * Return: An allocated and initialized encryption context on success; error 91 * Return: An allocated and initialized encryption context on success; error
92 * value or NULL otherwise. 92 * value or NULL otherwise.
93 */ 93 */
94struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) 94struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
95 gfp_t gfp_flags)
95{ 96{
96 struct ext4_crypto_ctx *ctx = NULL; 97 struct ext4_crypto_ctx *ctx = NULL;
97 int res = 0; 98 int res = 0;
@@ -118,7 +119,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
118 list_del(&ctx->free_list); 119 list_del(&ctx->free_list);
119 spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); 120 spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
120 if (!ctx) { 121 if (!ctx) {
121 ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS); 122 ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
122 if (!ctx) { 123 if (!ctx) {
123 res = -ENOMEM; 124 res = -ENOMEM;
124 goto out; 125 goto out;
@@ -255,7 +256,8 @@ static int ext4_page_crypto(struct inode *inode,
255 ext4_direction_t rw, 256 ext4_direction_t rw,
256 pgoff_t index, 257 pgoff_t index,
257 struct page *src_page, 258 struct page *src_page,
258 struct page *dest_page) 259 struct page *dest_page,
260 gfp_t gfp_flags)
259 261
260{ 262{
261 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; 263 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +268,7 @@ static int ext4_page_crypto(struct inode *inode,
266 struct crypto_skcipher *tfm = ci->ci_ctfm; 268 struct crypto_skcipher *tfm = ci->ci_ctfm;
267 int res = 0; 269 int res = 0;
268 270
269 req = skcipher_request_alloc(tfm, GFP_NOFS); 271 req = skcipher_request_alloc(tfm, gfp_flags);
270 if (!req) { 272 if (!req) {
271 printk_ratelimited(KERN_ERR 273 printk_ratelimited(KERN_ERR
272 "%s: crypto_request_alloc() failed\n", 274 "%s: crypto_request_alloc() failed\n",
@@ -283,10 +285,10 @@ static int ext4_page_crypto(struct inode *inode,
283 EXT4_XTS_TWEAK_SIZE - sizeof(index)); 285 EXT4_XTS_TWEAK_SIZE - sizeof(index));
284 286
285 sg_init_table(&dst, 1); 287 sg_init_table(&dst, 1);
286 sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); 288 sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
287 sg_init_table(&src, 1); 289 sg_init_table(&src, 1);
288 sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); 290 sg_set_page(&src, src_page, PAGE_SIZE, 0);
289 skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, 291 skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
290 xts_tweak); 292 xts_tweak);
291 if (rw == EXT4_DECRYPT) 293 if (rw == EXT4_DECRYPT)
292 res = crypto_skcipher_decrypt(req); 294 res = crypto_skcipher_decrypt(req);
@@ -307,9 +309,10 @@ static int ext4_page_crypto(struct inode *inode,
307 return 0; 309 return 0;
308} 310}
309 311
310static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx) 312static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
313 gfp_t gfp_flags)
311{ 314{
312 ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT); 315 ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
313 if (ctx->w.bounce_page == NULL) 316 if (ctx->w.bounce_page == NULL)
314 return ERR_PTR(-ENOMEM); 317 return ERR_PTR(-ENOMEM);
315 ctx->flags |= EXT4_WRITE_PATH_FL; 318 ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +335,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
332 * error value or NULL. 335 * error value or NULL.
333 */ 336 */
334struct page *ext4_encrypt(struct inode *inode, 337struct page *ext4_encrypt(struct inode *inode,
335 struct page *plaintext_page) 338 struct page *plaintext_page,
339 gfp_t gfp_flags)
336{ 340{
337 struct ext4_crypto_ctx *ctx; 341 struct ext4_crypto_ctx *ctx;
338 struct page *ciphertext_page = NULL; 342 struct page *ciphertext_page = NULL;
@@ -340,17 +344,17 @@ struct page *ext4_encrypt(struct inode *inode,
340 344
341 BUG_ON(!PageLocked(plaintext_page)); 345 BUG_ON(!PageLocked(plaintext_page));
342 346
343 ctx = ext4_get_crypto_ctx(inode); 347 ctx = ext4_get_crypto_ctx(inode, gfp_flags);
344 if (IS_ERR(ctx)) 348 if (IS_ERR(ctx))
345 return (struct page *) ctx; 349 return (struct page *) ctx;
346 350
347 /* The encryption operation will require a bounce page. */ 351 /* The encryption operation will require a bounce page. */
348 ciphertext_page = alloc_bounce_page(ctx); 352 ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
349 if (IS_ERR(ciphertext_page)) 353 if (IS_ERR(ciphertext_page))
350 goto errout; 354 goto errout;
351 ctx->w.control_page = plaintext_page; 355 ctx->w.control_page = plaintext_page;
352 err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index, 356 err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
353 plaintext_page, ciphertext_page); 357 plaintext_page, ciphertext_page, gfp_flags);
354 if (err) { 358 if (err) {
355 ciphertext_page = ERR_PTR(err); 359 ciphertext_page = ERR_PTR(err);
356 errout: 360 errout:
@@ -378,8 +382,8 @@ int ext4_decrypt(struct page *page)
378{ 382{
379 BUG_ON(!PageLocked(page)); 383 BUG_ON(!PageLocked(page));
380 384
381 return ext4_page_crypto(page->mapping->host, 385 return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
382 EXT4_DECRYPT, page->index, page, page); 386 page->index, page, page, GFP_NOFS);
383} 387}
384 388
385int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 389int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -396,13 +400,13 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
396 (unsigned long) inode->i_ino, lblk, len); 400 (unsigned long) inode->i_ino, lblk, len);
397#endif 401#endif
398 402
399 BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); 403 BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
400 404
401 ctx = ext4_get_crypto_ctx(inode); 405 ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
402 if (IS_ERR(ctx)) 406 if (IS_ERR(ctx))
403 return PTR_ERR(ctx); 407 return PTR_ERR(ctx);
404 408
405 ciphertext_page = alloc_bounce_page(ctx); 409 ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
406 if (IS_ERR(ciphertext_page)) { 410 if (IS_ERR(ciphertext_page)) {
407 err = PTR_ERR(ciphertext_page); 411 err = PTR_ERR(ciphertext_page);
408 goto errout; 412 goto errout;
@@ -410,11 +414,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
410 414
411 while (len--) { 415 while (len--) {
412 err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk, 416 err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
413 ZERO_PAGE(0), ciphertext_page); 417 ZERO_PAGE(0), ciphertext_page,
418 GFP_NOFS);
414 if (err) 419 if (err)
415 goto errout; 420 goto errout;
416 421
417 bio = bio_alloc(GFP_KERNEL, 1); 422 bio = bio_alloc(GFP_NOWAIT, 1);
418 if (!bio) { 423 if (!bio) {
419 err = -ENOMEM; 424 err = -ENOMEM;
420 goto errout; 425 goto errout;
@@ -473,13 +478,16 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
473 */ 478 */
474static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags) 479static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
475{ 480{
476 struct inode *dir = d_inode(dentry->d_parent); 481 struct dentry *dir;
477 struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info; 482 struct ext4_crypt_info *ci;
478 int dir_has_key, cached_with_key; 483 int dir_has_key, cached_with_key;
479 484
480 if (!ext4_encrypted_inode(dir)) 485 dir = dget_parent(dentry);
486 if (!ext4_encrypted_inode(d_inode(dir))) {
487 dput(dir);
481 return 0; 488 return 0;
482 489 }
490 ci = EXT4_I(d_inode(dir))->i_crypt_info;
483 if (ci && ci->ci_keyring_key && 491 if (ci && ci->ci_keyring_key &&
484 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | 492 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
485 (1 << KEY_FLAG_REVOKED) | 493 (1 << KEY_FLAG_REVOKED) |
@@ -489,6 +497,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
489 /* this should eventually be an flag in d_flags */ 497 /* this should eventually be an flag in d_flags */
490 cached_with_key = dentry->d_fsdata != NULL; 498 cached_with_key = dentry->d_fsdata != NULL;
491 dir_has_key = (ci != NULL); 499 dir_has_key = (ci != NULL);
500 dput(dir);
492 501
493 /* 502 /*
494 * If the dentry was cached without the key, and it is a 503 * If the dentry was cached without the key, and it is a
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 50ba27cbed03..561d7308b393 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -155,13 +155,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
155 err = ext4_map_blocks(NULL, inode, &map, 0); 155 err = ext4_map_blocks(NULL, inode, &map, 0);
156 if (err > 0) { 156 if (err > 0) {
157 pgoff_t index = map.m_pblk >> 157 pgoff_t index = map.m_pblk >>
158 (PAGE_CACHE_SHIFT - inode->i_blkbits); 158 (PAGE_SHIFT - inode->i_blkbits);
159 if (!ra_has_index(&file->f_ra, index)) 159 if (!ra_has_index(&file->f_ra, index))
160 page_cache_sync_readahead( 160 page_cache_sync_readahead(
161 sb->s_bdev->bd_inode->i_mapping, 161 sb->s_bdev->bd_inode->i_mapping,
162 &file->f_ra, file, 162 &file->f_ra, file,
163 index, 1); 163 index, 1);
164 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 164 file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
165 bh = ext4_bread(NULL, inode, map.m_lblk, 0); 165 bh = ext4_bread(NULL, inode, map.m_lblk, 0);
166 if (IS_ERR(bh)) { 166 if (IS_ERR(bh)) {
167 err = PTR_ERR(bh); 167 err = PTR_ERR(bh);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c04743519865..349afebe21ee 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -912,6 +912,29 @@ do { \
912#include "extents_status.h" 912#include "extents_status.h"
913 913
914/* 914/*
915 * Lock subclasses for i_data_sem in the ext4_inode_info structure.
916 *
917 * These are needed to avoid lockdep false positives when we need to
918 * allocate blocks to the quota inode during ext4_map_blocks(), while
919 * holding i_data_sem for a normal (non-quota) inode. Since we don't
920 * do quota tracking for the quota inode, this avoids deadlock (as
921 * well as infinite recursion, since it isn't turtles all the way
922 * down...)
923 *
924 * I_DATA_SEM_NORMAL - Used for most inodes
925 * I_DATA_SEM_OTHER - Used by move_inode.c for the second normal inode
926 * where the second inode has larger inode number
927 * than the first
928 * I_DATA_SEM_QUOTA - Used for quota inodes only
929 */
930enum {
931 I_DATA_SEM_NORMAL = 0,
932 I_DATA_SEM_OTHER,
933 I_DATA_SEM_QUOTA,
934};
935
936
937/*
915 * fourth extended file system inode data in memory 938 * fourth extended file system inode data in memory
916 */ 939 */
917struct ext4_inode_info { 940struct ext4_inode_info {
@@ -1961,7 +1984,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
1961{ 1984{
1962 unsigned len = le16_to_cpu(dlen); 1985 unsigned len = le16_to_cpu(dlen);
1963 1986
1964#if (PAGE_CACHE_SIZE >= 65536) 1987#if (PAGE_SIZE >= 65536)
1965 if (len == EXT4_MAX_REC_LEN || len == 0) 1988 if (len == EXT4_MAX_REC_LEN || len == 0)
1966 return blocksize; 1989 return blocksize;
1967 return (len & 65532) | ((len & 3) << 16); 1990 return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1997,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
1974{ 1997{
1975 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) 1998 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
1976 BUG(); 1999 BUG();
1977#if (PAGE_CACHE_SIZE >= 65536) 2000#if (PAGE_SIZE >= 65536)
1978 if (len < 65536) 2001 if (len < 65536)
1979 return cpu_to_le16(len); 2002 return cpu_to_le16(len);
1980 if (len == blocksize) { 2003 if (len == blocksize) {
@@ -2282,11 +2305,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep;
2282bool ext4_valid_contents_enc_mode(uint32_t mode); 2305bool ext4_valid_contents_enc_mode(uint32_t mode);
2283uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); 2306uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
2284extern struct workqueue_struct *ext4_read_workqueue; 2307extern struct workqueue_struct *ext4_read_workqueue;
2285struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode); 2308struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
2309 gfp_t gfp_flags);
2286void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); 2310void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
2287void ext4_restore_control_page(struct page *data_page); 2311void ext4_restore_control_page(struct page *data_page);
2288struct page *ext4_encrypt(struct inode *inode, 2312struct page *ext4_encrypt(struct inode *inode,
2289 struct page *plaintext_page); 2313 struct page *plaintext_page,
2314 gfp_t gfp_flags);
2290int ext4_decrypt(struct page *page); 2315int ext4_decrypt(struct page *page);
2291int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 2316int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
2292 ext4_fsblk_t pblk, ext4_lblk_t len); 2317 ext4_fsblk_t pblk, ext4_lblk_t len);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6659e216385e..fa2208bae2e1 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -329,7 +329,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
329 struct super_block *sb = inode->i_sb; 329 struct super_block *sb = inode->i_sb;
330 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 330 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
331 struct vfsmount *mnt = filp->f_path.mnt; 331 struct vfsmount *mnt = filp->f_path.mnt;
332 struct inode *dir = filp->f_path.dentry->d_parent->d_inode; 332 struct dentry *dir;
333 struct path path; 333 struct path path;
334 char buf[64], *cp; 334 char buf[64], *cp;
335 int ret; 335 int ret;
@@ -373,14 +373,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
373 if (ext4_encryption_info(inode) == NULL) 373 if (ext4_encryption_info(inode) == NULL)
374 return -ENOKEY; 374 return -ENOKEY;
375 } 375 }
376 if (ext4_encrypted_inode(dir) && 376
377 !ext4_is_child_context_consistent_with_parent(dir, inode)) { 377 dir = dget_parent(file_dentry(filp));
378 if (ext4_encrypted_inode(d_inode(dir)) &&
379 !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
378 ext4_warning(inode->i_sb, 380 ext4_warning(inode->i_sb,
379 "Inconsistent encryption contexts: %lu/%lu\n", 381 "Inconsistent encryption contexts: %lu/%lu\n",
380 (unsigned long) dir->i_ino, 382 (unsigned long) d_inode(dir)->i_ino,
381 (unsigned long) inode->i_ino); 383 (unsigned long) inode->i_ino);
384 dput(dir);
382 return -EPERM; 385 return -EPERM;
383 } 386 }
387 dput(dir);
384 /* 388 /*
385 * Set up the jbd2_inode if we are opening the inode for 389 * Set up the jbd2_inode if we are opening the inode for
386 * writing and the journal is present 390 * writing and the journal is present
@@ -428,8 +432,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
428 lastoff = startoff; 432 lastoff = startoff;
429 endoff = (loff_t)end_blk << blkbits; 433 endoff = (loff_t)end_blk << blkbits;
430 434
431 index = startoff >> PAGE_CACHE_SHIFT; 435 index = startoff >> PAGE_SHIFT;
432 end = endoff >> PAGE_CACHE_SHIFT; 436 end = endoff >> PAGE_SHIFT;
433 437
434 pagevec_init(&pvec, 0); 438 pagevec_init(&pvec, 0);
435 do { 439 do {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7cbdd3752ba5..7bc6c855cc18 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page)
482 ret = ext4_read_inline_data(inode, kaddr, len, &iloc); 482 ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
483 flush_dcache_page(page); 483 flush_dcache_page(page);
484 kunmap_atomic(kaddr); 484 kunmap_atomic(kaddr);
485 zero_user_segment(page, len, PAGE_CACHE_SIZE); 485 zero_user_segment(page, len, PAGE_SIZE);
486 SetPageUptodate(page); 486 SetPageUptodate(page);
487 brelse(iloc.bh); 487 brelse(iloc.bh);
488 488
@@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
507 if (!page->index) 507 if (!page->index)
508 ret = ext4_read_inline_page(inode, page); 508 ret = ext4_read_inline_page(inode, page);
509 else if (!PageUptodate(page)) { 509 else if (!PageUptodate(page)) {
510 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 510 zero_user_segment(page, 0, PAGE_SIZE);
511 SetPageUptodate(page); 511 SetPageUptodate(page);
512 } 512 }
513 513
@@ -595,7 +595,7 @@ retry:
595 595
596 if (ret) { 596 if (ret) {
597 unlock_page(page); 597 unlock_page(page);
598 page_cache_release(page); 598 put_page(page);
599 page = NULL; 599 page = NULL;
600 ext4_orphan_add(handle, inode); 600 ext4_orphan_add(handle, inode);
601 up_write(&EXT4_I(inode)->xattr_sem); 601 up_write(&EXT4_I(inode)->xattr_sem);
@@ -621,7 +621,7 @@ retry:
621out: 621out:
622 if (page) { 622 if (page) {
623 unlock_page(page); 623 unlock_page(page);
624 page_cache_release(page); 624 put_page(page);
625 } 625 }
626 if (sem_held) 626 if (sem_held)
627 up_write(&EXT4_I(inode)->xattr_sem); 627 up_write(&EXT4_I(inode)->xattr_sem);
@@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
690 if (!ext4_has_inline_data(inode)) { 690 if (!ext4_has_inline_data(inode)) {
691 ret = 0; 691 ret = 0;
692 unlock_page(page); 692 unlock_page(page);
693 page_cache_release(page); 693 put_page(page);
694 goto out_up_read; 694 goto out_up_read;
695 } 695 }
696 696
@@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
815 if (ret) { 815 if (ret) {
816 up_read(&EXT4_I(inode)->xattr_sem); 816 up_read(&EXT4_I(inode)->xattr_sem);
817 unlock_page(page); 817 unlock_page(page);
818 page_cache_release(page); 818 put_page(page);
819 ext4_truncate_failed_write(inode); 819 ext4_truncate_failed_write(inode);
820 return ret; 820 return ret;
821 } 821 }
@@ -829,7 +829,7 @@ out:
829 up_read(&EXT4_I(inode)->xattr_sem); 829 up_read(&EXT4_I(inode)->xattr_sem);
830 if (page) { 830 if (page) {
831 unlock_page(page); 831 unlock_page(page);
832 page_cache_release(page); 832 put_page(page);
833 } 833 }
834 return ret; 834 return ret;
835} 835}
@@ -919,7 +919,7 @@ retry_journal:
919out_release_page: 919out_release_page:
920 up_read(&EXT4_I(inode)->xattr_sem); 920 up_read(&EXT4_I(inode)->xattr_sem);
921 unlock_page(page); 921 unlock_page(page);
922 page_cache_release(page); 922 put_page(page);
923out_journal: 923out_journal:
924 ext4_journal_stop(handle); 924 ext4_journal_stop(handle);
925out: 925out:
@@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
947 i_size_changed = 1; 947 i_size_changed = 1;
948 } 948 }
949 unlock_page(page); 949 unlock_page(page);
950 page_cache_release(page); 950 put_page(page);
951 951
952 /* 952 /*
953 * Don't mark the inode dirty under page lock. First, it unnecessarily 953 * Don't mark the inode dirty under page lock. First, it unnecessarily
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dab84a2530ff..981a1fc30eaa 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -763,39 +763,47 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
763/* Maximum number of blocks we map for direct IO at once. */ 763/* Maximum number of blocks we map for direct IO at once. */
764#define DIO_MAX_BLOCKS 4096 764#define DIO_MAX_BLOCKS 4096
765 765
766static handle_t *start_dio_trans(struct inode *inode, 766/*
767 struct buffer_head *bh_result) 767 * Get blocks function for the cases that need to start a transaction -
768 * generally difference cases of direct IO and DAX IO. It also handles retries
769 * in case of ENOSPC.
770 */
771static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
772 struct buffer_head *bh_result, int flags)
768{ 773{
769 int dio_credits; 774 int dio_credits;
775 handle_t *handle;
776 int retries = 0;
777 int ret;
770 778
771 /* Trim mapping request to maximum we can map at once for DIO */ 779 /* Trim mapping request to maximum we can map at once for DIO */
772 if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS) 780 if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
773 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits; 781 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
774 dio_credits = ext4_chunk_trans_blocks(inode, 782 dio_credits = ext4_chunk_trans_blocks(inode,
775 bh_result->b_size >> inode->i_blkbits); 783 bh_result->b_size >> inode->i_blkbits);
776 return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); 784retry:
785 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
786 if (IS_ERR(handle))
787 return PTR_ERR(handle);
788
789 ret = _ext4_get_block(inode, iblock, bh_result, flags);
790 ext4_journal_stop(handle);
791
792 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
793 goto retry;
794 return ret;
777} 795}
778 796
779/* Get block function for DIO reads and writes to inodes without extents */ 797/* Get block function for DIO reads and writes to inodes without extents */
780int ext4_dio_get_block(struct inode *inode, sector_t iblock, 798int ext4_dio_get_block(struct inode *inode, sector_t iblock,
781 struct buffer_head *bh, int create) 799 struct buffer_head *bh, int create)
782{ 800{
783 handle_t *handle;
784 int ret;
785
786 /* We don't expect handle for direct IO */ 801 /* We don't expect handle for direct IO */
787 WARN_ON_ONCE(ext4_journal_current_handle()); 802 WARN_ON_ONCE(ext4_journal_current_handle());
788 803
789 if (create) { 804 if (!create)
790 handle = start_dio_trans(inode, bh); 805 return _ext4_get_block(inode, iblock, bh, 0);
791 if (IS_ERR(handle)) 806 return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
792 return PTR_ERR(handle);
793 }
794 ret = _ext4_get_block(inode, iblock, bh,
795 create ? EXT4_GET_BLOCKS_CREATE : 0);
796 if (create)
797 ext4_journal_stop(handle);
798 return ret;
799} 807}
800 808
801/* 809/*
@@ -806,18 +814,13 @@ int ext4_dio_get_block(struct inode *inode, sector_t iblock,
806static int ext4_dio_get_block_unwritten_async(struct inode *inode, 814static int ext4_dio_get_block_unwritten_async(struct inode *inode,
807 sector_t iblock, struct buffer_head *bh_result, int create) 815 sector_t iblock, struct buffer_head *bh_result, int create)
808{ 816{
809 handle_t *handle;
810 int ret; 817 int ret;
811 818
812 /* We don't expect handle for direct IO */ 819 /* We don't expect handle for direct IO */
813 WARN_ON_ONCE(ext4_journal_current_handle()); 820 WARN_ON_ONCE(ext4_journal_current_handle());
814 821
815 handle = start_dio_trans(inode, bh_result); 822 ret = ext4_get_block_trans(inode, iblock, bh_result,
816 if (IS_ERR(handle)) 823 EXT4_GET_BLOCKS_IO_CREATE_EXT);
817 return PTR_ERR(handle);
818 ret = _ext4_get_block(inode, iblock, bh_result,
819 EXT4_GET_BLOCKS_IO_CREATE_EXT);
820 ext4_journal_stop(handle);
821 824
822 /* 825 /*
823 * When doing DIO using unwritten extents, we need io_end to convert 826 * When doing DIO using unwritten extents, we need io_end to convert
@@ -850,18 +853,13 @@ static int ext4_dio_get_block_unwritten_async(struct inode *inode,
850static int ext4_dio_get_block_unwritten_sync(struct inode *inode, 853static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
851 sector_t iblock, struct buffer_head *bh_result, int create) 854 sector_t iblock, struct buffer_head *bh_result, int create)
852{ 855{
853 handle_t *handle;
854 int ret; 856 int ret;
855 857
856 /* We don't expect handle for direct IO */ 858 /* We don't expect handle for direct IO */
857 WARN_ON_ONCE(ext4_journal_current_handle()); 859 WARN_ON_ONCE(ext4_journal_current_handle());
858 860
859 handle = start_dio_trans(inode, bh_result); 861 ret = ext4_get_block_trans(inode, iblock, bh_result,
860 if (IS_ERR(handle)) 862 EXT4_GET_BLOCKS_IO_CREATE_EXT);
861 return PTR_ERR(handle);
862 ret = _ext4_get_block(inode, iblock, bh_result,
863 EXT4_GET_BLOCKS_IO_CREATE_EXT);
864 ext4_journal_stop(handle);
865 863
866 /* 864 /*
867 * Mark inode as having pending DIO writes to unwritten extents. 865 * Mark inode as having pending DIO writes to unwritten extents.
@@ -1057,7 +1055,7 @@ int do_journal_get_write_access(handle_t *handle,
1057static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, 1055static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1058 get_block_t *get_block) 1056 get_block_t *get_block)
1059{ 1057{
1060 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1058 unsigned from = pos & (PAGE_SIZE - 1);
1061 unsigned to = from + len; 1059 unsigned to = from + len;
1062 struct inode *inode = page->mapping->host; 1060 struct inode *inode = page->mapping->host;
1063 unsigned block_start, block_end; 1061 unsigned block_start, block_end;
@@ -1069,15 +1067,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1069 bool decrypt = false; 1067 bool decrypt = false;
1070 1068
1071 BUG_ON(!PageLocked(page)); 1069 BUG_ON(!PageLocked(page));
1072 BUG_ON(from > PAGE_CACHE_SIZE); 1070 BUG_ON(from > PAGE_SIZE);
1073 BUG_ON(to > PAGE_CACHE_SIZE); 1071 BUG_ON(to > PAGE_SIZE);
1074 BUG_ON(from > to); 1072 BUG_ON(from > to);
1075 1073
1076 if (!page_has_buffers(page)) 1074 if (!page_has_buffers(page))
1077 create_empty_buffers(page, blocksize, 0); 1075 create_empty_buffers(page, blocksize, 0);
1078 head = page_buffers(page); 1076 head = page_buffers(page);
1079 bbits = ilog2(blocksize); 1077 bbits = ilog2(blocksize);
1080 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1078 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1081 1079
1082 for (bh = head, block_start = 0; bh != head || !block_start; 1080 for (bh = head, block_start = 0; bh != head || !block_start;
1083 block++, block_start = block_end, bh = bh->b_this_page) { 1081 block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1157,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
1159 * we allocate blocks but write fails for some reason 1157 * we allocate blocks but write fails for some reason
1160 */ 1158 */
1161 needed_blocks = ext4_writepage_trans_blocks(inode) + 1; 1159 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1162 index = pos >> PAGE_CACHE_SHIFT; 1160 index = pos >> PAGE_SHIFT;
1163 from = pos & (PAGE_CACHE_SIZE - 1); 1161 from = pos & (PAGE_SIZE - 1);
1164 to = from + len; 1162 to = from + len;
1165 1163
1166 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 1164 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1186,7 @@ retry_grab:
1188retry_journal: 1186retry_journal:
1189 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); 1187 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1190 if (IS_ERR(handle)) { 1188 if (IS_ERR(handle)) {
1191 page_cache_release(page); 1189 put_page(page);
1192 return PTR_ERR(handle); 1190 return PTR_ERR(handle);
1193 } 1191 }
1194 1192
@@ -1196,7 +1194,7 @@ retry_journal:
1196 if (page->mapping != mapping) { 1194 if (page->mapping != mapping) {
1197 /* The page got truncated from under us */ 1195 /* The page got truncated from under us */
1198 unlock_page(page); 1196 unlock_page(page);
1199 page_cache_release(page); 1197 put_page(page);
1200 ext4_journal_stop(handle); 1198 ext4_journal_stop(handle);
1201 goto retry_grab; 1199 goto retry_grab;
1202 } 1200 }
@@ -1252,7 +1250,7 @@ retry_journal:
1252 if (ret == -ENOSPC && 1250 if (ret == -ENOSPC &&
1253 ext4_should_retry_alloc(inode->i_sb, &retries)) 1251 ext4_should_retry_alloc(inode->i_sb, &retries))
1254 goto retry_journal; 1252 goto retry_journal;
1255 page_cache_release(page); 1253 put_page(page);
1256 return ret; 1254 return ret;
1257 } 1255 }
1258 *pagep = page; 1256 *pagep = page;
@@ -1295,7 +1293,7 @@ static int ext4_write_end(struct file *file,
1295 ret = ext4_jbd2_file_inode(handle, inode); 1293 ret = ext4_jbd2_file_inode(handle, inode);
1296 if (ret) { 1294 if (ret) {
1297 unlock_page(page); 1295 unlock_page(page);
1298 page_cache_release(page); 1296 put_page(page);
1299 goto errout; 1297 goto errout;
1300 } 1298 }
1301 } 1299 }
@@ -1315,7 +1313,7 @@ static int ext4_write_end(struct file *file,
1315 */ 1313 */
1316 i_size_changed = ext4_update_inode_size(inode, pos + copied); 1314 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1317 unlock_page(page); 1315 unlock_page(page);
1318 page_cache_release(page); 1316 put_page(page);
1319 1317
1320 if (old_size < pos) 1318 if (old_size < pos)
1321 pagecache_isize_extended(inode, old_size, pos); 1319 pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1397,7 @@ static int ext4_journalled_write_end(struct file *file,
1399 int size_changed = 0; 1397 int size_changed = 0;
1400 1398
1401 trace_ext4_journalled_write_end(inode, pos, len, copied); 1399 trace_ext4_journalled_write_end(inode, pos, len, copied);
1402 from = pos & (PAGE_CACHE_SIZE - 1); 1400 from = pos & (PAGE_SIZE - 1);
1403 to = from + len; 1401 to = from + len;
1404 1402
1405 BUG_ON(!ext4_handle_valid(handle)); 1403 BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
1423 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1421 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1424 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1422 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1425 unlock_page(page); 1423 unlock_page(page);
1426 page_cache_release(page); 1424 put_page(page);
1427 1425
1428 if (old_size < pos) 1426 if (old_size < pos)
1429 pagecache_isize_extended(inode, old_size, pos); 1427 pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1535,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1537 int num_clusters; 1535 int num_clusters;
1538 ext4_fsblk_t lblk; 1536 ext4_fsblk_t lblk;
1539 1537
1540 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 1538 BUG_ON(stop > PAGE_SIZE || stop < length);
1541 1539
1542 head = page_buffers(page); 1540 head = page_buffers(page);
1543 bh = head; 1541 bh = head;
@@ -1553,7 +1551,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1553 clear_buffer_delay(bh); 1551 clear_buffer_delay(bh);
1554 } else if (contiguous_blks) { 1552 } else if (contiguous_blks) {
1555 lblk = page->index << 1553 lblk = page->index <<
1556 (PAGE_CACHE_SHIFT - inode->i_blkbits); 1554 (PAGE_SHIFT - inode->i_blkbits);
1557 lblk += (curr_off >> inode->i_blkbits) - 1555 lblk += (curr_off >> inode->i_blkbits) -
1558 contiguous_blks; 1556 contiguous_blks;
1559 ext4_es_remove_extent(inode, lblk, contiguous_blks); 1557 ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1561,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1563 } while ((bh = bh->b_this_page) != head); 1561 } while ((bh = bh->b_this_page) != head);
1564 1562
1565 if (contiguous_blks) { 1563 if (contiguous_blks) {
1566 lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1564 lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
1567 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; 1565 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
1568 ext4_es_remove_extent(inode, lblk, contiguous_blks); 1566 ext4_es_remove_extent(inode, lblk, contiguous_blks);
1569 } 1567 }
@@ -1572,7 +1570,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1572 * need to release the reserved space for that cluster. */ 1570 * need to release the reserved space for that cluster. */
1573 num_clusters = EXT4_NUM_B2C(sbi, to_release); 1571 num_clusters = EXT4_NUM_B2C(sbi, to_release);
1574 while (num_clusters > 0) { 1572 while (num_clusters > 0) {
1575 lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 1573 lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
1576 ((num_clusters - 1) << sbi->s_cluster_bits); 1574 ((num_clusters - 1) << sbi->s_cluster_bits);
1577 if (sbi->s_cluster_ratio == 1 || 1575 if (sbi->s_cluster_ratio == 1 ||
1578 !ext4_find_delalloc_cluster(inode, lblk)) 1576 !ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1617,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1619 end = mpd->next_page - 1; 1617 end = mpd->next_page - 1;
1620 if (invalidate) { 1618 if (invalidate) {
1621 ext4_lblk_t start, last; 1619 ext4_lblk_t start, last;
1622 start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1620 start = index << (PAGE_SHIFT - inode->i_blkbits);
1623 last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1621 last = end << (PAGE_SHIFT - inode->i_blkbits);
1624 ext4_es_remove_extent(inode, start, last - start + 1); 1622 ext4_es_remove_extent(inode, start, last - start + 1);
1625 } 1623 }
1626 1624
@@ -1636,7 +1634,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1636 BUG_ON(!PageLocked(page)); 1634 BUG_ON(!PageLocked(page));
1637 BUG_ON(PageWriteback(page)); 1635 BUG_ON(PageWriteback(page));
1638 if (invalidate) { 1636 if (invalidate) {
1639 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1637 block_invalidatepage(page, 0, PAGE_SIZE);
1640 ClearPageUptodate(page); 1638 ClearPageUptodate(page);
1641 } 1639 }
1642 unlock_page(page); 1640 unlock_page(page);
@@ -2007,10 +2005,10 @@ static int ext4_writepage(struct page *page,
2007 2005
2008 trace_ext4_writepage(page); 2006 trace_ext4_writepage(page);
2009 size = i_size_read(inode); 2007 size = i_size_read(inode);
2010 if (page->index == size >> PAGE_CACHE_SHIFT) 2008 if (page->index == size >> PAGE_SHIFT)
2011 len = size & ~PAGE_CACHE_MASK; 2009 len = size & ~PAGE_MASK;
2012 else 2010 else
2013 len = PAGE_CACHE_SIZE; 2011 len = PAGE_SIZE;
2014 2012
2015 page_bufs = page_buffers(page); 2013 page_bufs = page_buffers(page);
2016 /* 2014 /*
@@ -2034,7 +2032,7 @@ static int ext4_writepage(struct page *page,
2034 ext4_bh_delay_or_unwritten)) { 2032 ext4_bh_delay_or_unwritten)) {
2035 redirty_page_for_writepage(wbc, page); 2033 redirty_page_for_writepage(wbc, page);
2036 if ((current->flags & PF_MEMALLOC) || 2034 if ((current->flags & PF_MEMALLOC) ||
2037 (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) { 2035 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2038 /* 2036 /*
2039 * For memory cleaning there's no point in writing only 2037 * For memory cleaning there's no point in writing only
2040 * some buffers. So just bail out. Warn if we came here 2038 * some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2074,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2076 int err; 2074 int err;
2077 2075
2078 BUG_ON(page->index != mpd->first_page); 2076 BUG_ON(page->index != mpd->first_page);
2079 if (page->index == size >> PAGE_CACHE_SHIFT) 2077 if (page->index == size >> PAGE_SHIFT)
2080 len = size & ~PAGE_CACHE_MASK; 2078 len = size & ~PAGE_MASK;
2081 else 2079 else
2082 len = PAGE_CACHE_SIZE; 2080 len = PAGE_SIZE;
2083 clear_page_dirty_for_io(page); 2081 clear_page_dirty_for_io(page);
2084 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2082 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2085 if (!err) 2083 if (!err)
@@ -2213,7 +2211,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2213 int nr_pages, i; 2211 int nr_pages, i;
2214 struct inode *inode = mpd->inode; 2212 struct inode *inode = mpd->inode;
2215 struct buffer_head *head, *bh; 2213 struct buffer_head *head, *bh;
2216 int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; 2214 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2217 pgoff_t start, end; 2215 pgoff_t start, end;
2218 ext4_lblk_t lblk; 2216 ext4_lblk_t lblk;
2219 sector_t pblock; 2217 sector_t pblock;
@@ -2274,7 +2272,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2274 * supports blocksize < pagesize as we will try to 2272 * supports blocksize < pagesize as we will try to
2275 * convert potentially unmapped parts of inode. 2273 * convert potentially unmapped parts of inode.
2276 */ 2274 */
2277 mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; 2275 mpd->io_submit.io_end->size += PAGE_SIZE;
2278 /* Page fully mapped - let IO run! */ 2276 /* Page fully mapped - let IO run! */
2279 err = mpage_submit_page(mpd, page); 2277 err = mpage_submit_page(mpd, page);
2280 if (err < 0) { 2278 if (err < 0) {
@@ -2426,7 +2424,7 @@ update_disksize:
2426 * Update on-disk size after IO is submitted. Races with 2424 * Update on-disk size after IO is submitted. Races with
2427 * truncate are avoided by checking i_size under i_data_sem. 2425 * truncate are avoided by checking i_size under i_data_sem.
2428 */ 2426 */
2429 disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; 2427 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2430 if (disksize > EXT4_I(inode)->i_disksize) { 2428 if (disksize > EXT4_I(inode)->i_disksize) {
2431 int err2; 2429 int err2;
2432 loff_t i_size; 2430 loff_t i_size;
@@ -2562,7 +2560,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2562 mpd->next_page = page->index + 1; 2560 mpd->next_page = page->index + 1;
2563 /* Add all dirty buffers to mpd */ 2561 /* Add all dirty buffers to mpd */
2564 lblk = ((ext4_lblk_t)page->index) << 2562 lblk = ((ext4_lblk_t)page->index) <<
2565 (PAGE_CACHE_SHIFT - blkbits); 2563 (PAGE_SHIFT - blkbits);
2566 head = page_buffers(page); 2564 head = page_buffers(page);
2567 err = mpage_process_page_bufs(mpd, head, head, lblk); 2565 err = mpage_process_page_bufs(mpd, head, head, lblk);
2568 if (err <= 0) 2566 if (err <= 0)
@@ -2647,7 +2645,7 @@ static int ext4_writepages(struct address_space *mapping,
2647 * We may need to convert up to one extent per block in 2645 * We may need to convert up to one extent per block in
2648 * the page and we may dirty the inode. 2646 * the page and we may dirty the inode.
2649 */ 2647 */
2650 rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); 2648 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
2651 } 2649 }
2652 2650
2653 /* 2651 /*
@@ -2678,8 +2676,8 @@ static int ext4_writepages(struct address_space *mapping,
2678 mpd.first_page = writeback_index; 2676 mpd.first_page = writeback_index;
2679 mpd.last_page = -1; 2677 mpd.last_page = -1;
2680 } else { 2678 } else {
2681 mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; 2679 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2682 mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; 2680 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2683 } 2681 }
2684 2682
2685 mpd.inode = inode; 2683 mpd.inode = inode;
@@ -2838,7 +2836,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2838 struct inode *inode = mapping->host; 2836 struct inode *inode = mapping->host;
2839 handle_t *handle; 2837 handle_t *handle;
2840 2838
2841 index = pos >> PAGE_CACHE_SHIFT; 2839 index = pos >> PAGE_SHIFT;
2842 2840
2843 if (ext4_nonda_switch(inode->i_sb)) { 2841 if (ext4_nonda_switch(inode->i_sb)) {
2844 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2842 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2879,7 @@ retry_journal:
2881 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2879 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2882 ext4_da_write_credits(inode, pos, len)); 2880 ext4_da_write_credits(inode, pos, len));
2883 if (IS_ERR(handle)) { 2881 if (IS_ERR(handle)) {
2884 page_cache_release(page); 2882 put_page(page);
2885 return PTR_ERR(handle); 2883 return PTR_ERR(handle);
2886 } 2884 }
2887 2885
@@ -2889,7 +2887,7 @@ retry_journal:
2889 if (page->mapping != mapping) { 2887 if (page->mapping != mapping) {
2890 /* The page got truncated from under us */ 2888 /* The page got truncated from under us */
2891 unlock_page(page); 2889 unlock_page(page);
2892 page_cache_release(page); 2890 put_page(page);
2893 ext4_journal_stop(handle); 2891 ext4_journal_stop(handle);
2894 goto retry_grab; 2892 goto retry_grab;
2895 } 2893 }
@@ -2917,7 +2915,7 @@ retry_journal:
2917 ext4_should_retry_alloc(inode->i_sb, &retries)) 2915 ext4_should_retry_alloc(inode->i_sb, &retries))
2918 goto retry_journal; 2916 goto retry_journal;
2919 2917
2920 page_cache_release(page); 2918 put_page(page);
2921 return ret; 2919 return ret;
2922 } 2920 }
2923 2921
@@ -2965,7 +2963,7 @@ static int ext4_da_write_end(struct file *file,
2965 len, copied, page, fsdata); 2963 len, copied, page, fsdata);
2966 2964
2967 trace_ext4_da_write_end(inode, pos, len, copied); 2965 trace_ext4_da_write_end(inode, pos, len, copied);
2968 start = pos & (PAGE_CACHE_SIZE - 1); 2966 start = pos & (PAGE_SIZE - 1);
2969 end = start + copied - 1; 2967 end = start + copied - 1;
2970 2968
2971 /* 2969 /*
@@ -3187,7 +3185,7 @@ static int __ext4_journalled_invalidatepage(struct page *page,
3187 /* 3185 /*
3188 * If it's a full truncate we just forget about the pending dirtying 3186 * If it's a full truncate we just forget about the pending dirtying
3189 */ 3187 */
3190 if (offset == 0 && length == PAGE_CACHE_SIZE) 3188 if (offset == 0 && length == PAGE_SIZE)
3191 ClearPageChecked(page); 3189 ClearPageChecked(page);
3192 3190
3193 return jbd2_journal_invalidatepage(journal, page, offset, length); 3191 return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3556,8 +3554,8 @@ void ext4_set_aops(struct inode *inode)
3556static int __ext4_block_zero_page_range(handle_t *handle, 3554static int __ext4_block_zero_page_range(handle_t *handle,
3557 struct address_space *mapping, loff_t from, loff_t length) 3555 struct address_space *mapping, loff_t from, loff_t length)
3558{ 3556{
3559 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3557 ext4_fsblk_t index = from >> PAGE_SHIFT;
3560 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3558 unsigned offset = from & (PAGE_SIZE-1);
3561 unsigned blocksize, pos; 3559 unsigned blocksize, pos;
3562 ext4_lblk_t iblock; 3560 ext4_lblk_t iblock;
3563 struct inode *inode = mapping->host; 3561 struct inode *inode = mapping->host;
@@ -3565,14 +3563,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3565 struct page *page; 3563 struct page *page;
3566 int err = 0; 3564 int err = 0;
3567 3565
3568 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3566 page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3569 mapping_gfp_constraint(mapping, ~__GFP_FS)); 3567 mapping_gfp_constraint(mapping, ~__GFP_FS));
3570 if (!page) 3568 if (!page)
3571 return -ENOMEM; 3569 return -ENOMEM;
3572 3570
3573 blocksize = inode->i_sb->s_blocksize; 3571 blocksize = inode->i_sb->s_blocksize;
3574 3572
3575 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3573 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3576 3574
3577 if (!page_has_buffers(page)) 3575 if (!page_has_buffers(page))
3578 create_empty_buffers(page, blocksize, 0); 3576 create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3612,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3614 ext4_encrypted_inode(inode)) { 3612 ext4_encrypted_inode(inode)) {
3615 /* We expect the key to be set. */ 3613 /* We expect the key to be set. */
3616 BUG_ON(!ext4_has_encryption_key(inode)); 3614 BUG_ON(!ext4_has_encryption_key(inode));
3617 BUG_ON(blocksize != PAGE_CACHE_SIZE); 3615 BUG_ON(blocksize != PAGE_SIZE);
3618 WARN_ON_ONCE(ext4_decrypt(page)); 3616 WARN_ON_ONCE(ext4_decrypt(page));
3619 } 3617 }
3620 } 3618 }
@@ -3638,7 +3636,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3638 3636
3639unlock: 3637unlock:
3640 unlock_page(page); 3638 unlock_page(page);
3641 page_cache_release(page); 3639 put_page(page);
3642 return err; 3640 return err;
3643} 3641}
3644 3642
@@ -3653,7 +3651,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
3653 struct address_space *mapping, loff_t from, loff_t length) 3651 struct address_space *mapping, loff_t from, loff_t length)
3654{ 3652{
3655 struct inode *inode = mapping->host; 3653 struct inode *inode = mapping->host;
3656 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3654 unsigned offset = from & (PAGE_SIZE-1);
3657 unsigned blocksize = inode->i_sb->s_blocksize; 3655 unsigned blocksize = inode->i_sb->s_blocksize;
3658 unsigned max = blocksize - (offset & (blocksize - 1)); 3656 unsigned max = blocksize - (offset & (blocksize - 1));
3659 3657
@@ -3678,7 +3676,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
3678static int ext4_block_truncate_page(handle_t *handle, 3676static int ext4_block_truncate_page(handle_t *handle,
3679 struct address_space *mapping, loff_t from) 3677 struct address_space *mapping, loff_t from)
3680{ 3678{
3681 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3679 unsigned offset = from & (PAGE_SIZE-1);
3682 unsigned length; 3680 unsigned length;
3683 unsigned blocksize; 3681 unsigned blocksize;
3684 struct inode *inode = mapping->host; 3682 struct inode *inode = mapping->host;
@@ -3816,7 +3814,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3816 */ 3814 */
3817 if (offset + length > inode->i_size) { 3815 if (offset + length > inode->i_size) {
3818 length = inode->i_size + 3816 length = inode->i_size +
3819 PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - 3817 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3820 offset; 3818 offset;
3821 } 3819 }
3822 3820
@@ -4891,23 +4889,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
4891 tid_t commit_tid = 0; 4889 tid_t commit_tid = 0;
4892 int ret; 4890 int ret;
4893 4891
4894 offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 4892 offset = inode->i_size & (PAGE_SIZE - 1);
4895 /* 4893 /*
4896 * All buffers in the last page remain valid? Then there's nothing to 4894 * All buffers in the last page remain valid? Then there's nothing to
4897 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 4895 * do. We do the check mainly to optimize the common PAGE_SIZE ==
4898 * blocksize case 4896 * blocksize case
4899 */ 4897 */
4900 if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) 4898 if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
4901 return; 4899 return;
4902 while (1) { 4900 while (1) {
4903 page = find_lock_page(inode->i_mapping, 4901 page = find_lock_page(inode->i_mapping,
4904 inode->i_size >> PAGE_CACHE_SHIFT); 4902 inode->i_size >> PAGE_SHIFT);
4905 if (!page) 4903 if (!page)
4906 return; 4904 return;
4907 ret = __ext4_journalled_invalidatepage(page, offset, 4905 ret = __ext4_journalled_invalidatepage(page, offset,
4908 PAGE_CACHE_SIZE - offset); 4906 PAGE_SIZE - offset);
4909 unlock_page(page); 4907 unlock_page(page);
4910 page_cache_release(page); 4908 put_page(page);
4911 if (ret != -EBUSY) 4909 if (ret != -EBUSY)
4912 return; 4910 return;
4913 commit_tid = 0; 4911 commit_tid = 0;
@@ -5546,10 +5544,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5546 goto out; 5544 goto out;
5547 } 5545 }
5548 5546
5549 if (page->index == size >> PAGE_CACHE_SHIFT) 5547 if (page->index == size >> PAGE_SHIFT)
5550 len = size & ~PAGE_CACHE_MASK; 5548 len = size & ~PAGE_MASK;
5551 else 5549 else
5552 len = PAGE_CACHE_SIZE; 5550 len = PAGE_SIZE;
5553 /* 5551 /*
5554 * Return if we have all the buffers mapped. This avoids the need to do 5552 * Return if we have all the buffers mapped. This avoids the need to do
5555 * journal_start/journal_stop which can block and take a long time 5553 * journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5578,7 @@ retry_alloc:
5580 ret = block_page_mkwrite(vma, vmf, get_block); 5578 ret = block_page_mkwrite(vma, vmf, get_block);
5581 if (!ret && ext4_should_journal_data(inode)) { 5579 if (!ret && ext4_should_journal_data(inode)) {
5582 if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 5580 if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
5583 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 5581 PAGE_SIZE, NULL, do_journal_get_write_access)) {
5584 unlock_page(page); 5582 unlock_page(page);
5585 ret = VM_FAULT_SIGBUS; 5583 ret = VM_FAULT_SIGBUS;
5586 ext4_journal_stop(handle); 5584 ext4_journal_stop(handle);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 50e05df28f66..eeeade76012e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
119 * 119 *
120 * 120 *
121 * one block each for bitmap and buddy information. So for each group we 121 * one block each for bitmap and buddy information. So for each group we
122 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / 122 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
123 * blocksize) blocks. So it can have information regarding groups_per_page 123 * blocksize) blocks. So it can have information regarding groups_per_page
124 * which is blocks_per_page/2 124 * which is blocks_per_page/2
125 * 125 *
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
807 * 807 *
808 * one block each for bitmap and buddy information. 808 * one block each for bitmap and buddy information.
809 * So for each group we take up 2 blocks. A page can 809 * So for each group we take up 2 blocks. A page can
810 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 810 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
811 * So it can have information regarding groups_per_page which 811 * So it can have information regarding groups_per_page which
812 * is blocks_per_page/2 812 * is blocks_per_page/2
813 * 813 *
@@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
839 sb = inode->i_sb; 839 sb = inode->i_sb;
840 ngroups = ext4_get_groups_count(sb); 840 ngroups = ext4_get_groups_count(sb);
841 blocksize = 1 << inode->i_blkbits; 841 blocksize = 1 << inode->i_blkbits;
842 blocks_per_page = PAGE_CACHE_SIZE / blocksize; 842 blocks_per_page = PAGE_SIZE / blocksize;
843 843
844 groups_per_page = blocks_per_page >> 1; 844 groups_per_page = blocks_per_page >> 1;
845 if (groups_per_page == 0) 845 if (groups_per_page == 0)
@@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
993 e4b->bd_buddy_page = NULL; 993 e4b->bd_buddy_page = NULL;
994 e4b->bd_bitmap_page = NULL; 994 e4b->bd_bitmap_page = NULL;
995 995
996 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 996 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
997 /* 997 /*
998 * the buddy cache inode stores the block bitmap 998 * the buddy cache inode stores the block bitmap
999 * and buddy information in consecutive blocks. 999 * and buddy information in consecutive blocks.
@@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1028{ 1028{
1029 if (e4b->bd_bitmap_page) { 1029 if (e4b->bd_bitmap_page) {
1030 unlock_page(e4b->bd_bitmap_page); 1030 unlock_page(e4b->bd_bitmap_page);
1031 page_cache_release(e4b->bd_bitmap_page); 1031 put_page(e4b->bd_bitmap_page);
1032 } 1032 }
1033 if (e4b->bd_buddy_page) { 1033 if (e4b->bd_buddy_page) {
1034 unlock_page(e4b->bd_buddy_page); 1034 unlock_page(e4b->bd_buddy_page);
1035 page_cache_release(e4b->bd_buddy_page); 1035 put_page(e4b->bd_buddy_page);
1036 } 1036 }
1037} 1037}
1038 1038
@@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1125 might_sleep(); 1125 might_sleep();
1126 mb_debug(1, "load group %u\n", group); 1126 mb_debug(1, "load group %u\n", group);
1127 1127
1128 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 1128 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1129 grp = ext4_get_group_info(sb, group); 1129 grp = ext4_get_group_info(sb, group);
1130 1130
1131 e4b->bd_blkbits = sb->s_blocksize_bits; 1131 e4b->bd_blkbits = sb->s_blocksize_bits;
@@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1167 * is yet to initialize the same. So 1167 * is yet to initialize the same. So
1168 * wait for it to initialize. 1168 * wait for it to initialize.
1169 */ 1169 */
1170 page_cache_release(page); 1170 put_page(page);
1171 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1171 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1172 if (page) { 1172 if (page) {
1173 BUG_ON(page->mapping != inode->i_mapping); 1173 BUG_ON(page->mapping != inode->i_mapping);
@@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1204 if (page == NULL || !PageUptodate(page)) { 1204 if (page == NULL || !PageUptodate(page)) {
1205 if (page) 1205 if (page)
1206 page_cache_release(page); 1206 put_page(page);
1207 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1207 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1208 if (page) { 1208 if (page) {
1209 BUG_ON(page->mapping != inode->i_mapping); 1209 BUG_ON(page->mapping != inode->i_mapping);
@@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1238 1238
1239err: 1239err:
1240 if (page) 1240 if (page)
1241 page_cache_release(page); 1241 put_page(page);
1242 if (e4b->bd_bitmap_page) 1242 if (e4b->bd_bitmap_page)
1243 page_cache_release(e4b->bd_bitmap_page); 1243 put_page(e4b->bd_bitmap_page);
1244 if (e4b->bd_buddy_page) 1244 if (e4b->bd_buddy_page)
1245 page_cache_release(e4b->bd_buddy_page); 1245 put_page(e4b->bd_buddy_page);
1246 e4b->bd_buddy = NULL; 1246 e4b->bd_buddy = NULL;
1247 e4b->bd_bitmap = NULL; 1247 e4b->bd_bitmap = NULL;
1248 return ret; 1248 return ret;
@@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1257static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1257static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1258{ 1258{
1259 if (e4b->bd_bitmap_page) 1259 if (e4b->bd_bitmap_page)
1260 page_cache_release(e4b->bd_bitmap_page); 1260 put_page(e4b->bd_bitmap_page);
1261 if (e4b->bd_buddy_page) 1261 if (e4b->bd_buddy_page)
1262 page_cache_release(e4b->bd_buddy_page); 1262 put_page(e4b->bd_buddy_page);
1263} 1263}
1264 1264
1265 1265
@@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb,
2833 /* No more items in the per group rb tree 2833 /* No more items in the per group rb tree
2834 * balance refcounts from ext4_mb_free_metadata() 2834 * balance refcounts from ext4_mb_free_metadata()
2835 */ 2835 */
2836 page_cache_release(e4b.bd_buddy_page); 2836 put_page(e4b.bd_buddy_page);
2837 page_cache_release(e4b.bd_bitmap_page); 2837 put_page(e4b.bd_bitmap_page);
2838 } 2838 }
2839 ext4_unlock_group(sb, entry->efd_group); 2839 ext4_unlock_group(sb, entry->efd_group);
2840 kmem_cache_free(ext4_free_data_cachep, entry); 2840 kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4385 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4385 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4386 } 4386 }
4387 if (ac->ac_bitmap_page) 4387 if (ac->ac_bitmap_page)
4388 page_cache_release(ac->ac_bitmap_page); 4388 put_page(ac->ac_bitmap_page);
4389 if (ac->ac_buddy_page) 4389 if (ac->ac_buddy_page)
4390 page_cache_release(ac->ac_buddy_page); 4390 put_page(ac->ac_buddy_page);
4391 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4391 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4392 mutex_unlock(&ac->ac_lg->lg_mutex); 4392 mutex_unlock(&ac->ac_lg->lg_mutex);
4393 ext4_mb_collect_stats(ac); 4393 ext4_mb_collect_stats(ac);
@@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4599 * otherwise we'll refresh it from 4599 * otherwise we'll refresh it from
4600 * on-disk bitmap and lose not-yet-available 4600 * on-disk bitmap and lose not-yet-available
4601 * blocks */ 4601 * blocks */
4602 page_cache_get(e4b->bd_buddy_page); 4602 get_page(e4b->bd_buddy_page);
4603 page_cache_get(e4b->bd_bitmap_page); 4603 get_page(e4b->bd_bitmap_page);
4604 } 4604 }
4605 while (*n) { 4605 while (*n) {
4606 parent = *n; 4606 parent = *n;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4098acc701c3..325cef48b39a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
60{ 60{
61 if (first < second) { 61 if (first < second) {
62 down_write(&EXT4_I(first)->i_data_sem); 62 down_write(&EXT4_I(first)->i_data_sem);
63 down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); 63 down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
64 } else { 64 } else {
65 down_write(&EXT4_I(second)->i_data_sem); 65 down_write(&EXT4_I(second)->i_data_sem);
66 down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING); 66 down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
67 67
68 } 68 }
69} 69}
@@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); 156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
157 if (!page[1]) { 157 if (!page[1]) {
158 unlock_page(page[0]); 158 unlock_page(page[0]);
159 page_cache_release(page[0]); 159 put_page(page[0]);
160 return -ENOMEM; 160 return -ENOMEM;
161 } 161 }
162 /* 162 /*
@@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
192 create_empty_buffers(page, blocksize, 0); 192 create_empty_buffers(page, blocksize, 0);
193 193
194 head = page_buffers(page); 194 head = page_buffers(page);
195 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 195 block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
196 for (bh = head, block_start = 0; bh != head || !block_start; 196 for (bh = head, block_start = 0; bh != head || !block_start;
197 block++, block_start = block_end, bh = bh->b_this_page) { 197 block++, block_start = block_end, bh = bh->b_this_page) {
198 block_end = block_start + blocksize; 198 block_end = block_start + blocksize;
@@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
268 int i, err2, jblocks, retries = 0; 268 int i, err2, jblocks, retries = 0;
269 int replaced_count = 0; 269 int replaced_count = 0;
270 int from = data_offset_in_page << orig_inode->i_blkbits; 270 int from = data_offset_in_page << orig_inode->i_blkbits;
271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 271 int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
272 struct super_block *sb = orig_inode->i_sb; 272 struct super_block *sb = orig_inode->i_sb;
273 struct buffer_head *bh = NULL; 273 struct buffer_head *bh = NULL;
274 274
@@ -404,9 +404,9 @@ data_copy:
404 404
405unlock_pages: 405unlock_pages:
406 unlock_page(pagep[0]); 406 unlock_page(pagep[0]);
407 page_cache_release(pagep[0]); 407 put_page(pagep[0]);
408 unlock_page(pagep[1]); 408 unlock_page(pagep[1]);
409 page_cache_release(pagep[1]); 409 put_page(pagep[1]);
410stop_journal: 410stop_journal:
411 ext4_journal_stop(handle); 411 ext4_journal_stop(handle);
412 if (*err == -ENOSPC && 412 if (*err == -ENOSPC &&
@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
484 return -EBUSY; 484 return -EBUSY;
485 } 485 }
486 486
487 if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
488 ext4_debug("ext4 move extent: The argument files should "
489 "not be quota files [ino:orig %lu, donor %lu]\n",
490 orig_inode->i_ino, donor_inode->i_ino);
491 return -EBUSY;
492 }
493
487 /* Ext4 move extent supports only extent based file */ 494 /* Ext4 move extent supports only extent based file */
488 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) { 495 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
489 ext4_debug("ext4 move extent: orig file is not extents " 496 ext4_debug("ext4 move extent: orig file is not extents "
@@ -554,7 +561,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
554 struct inode *orig_inode = file_inode(o_filp); 561 struct inode *orig_inode = file_inode(o_filp);
555 struct inode *donor_inode = file_inode(d_filp); 562 struct inode *donor_inode = file_inode(d_filp);
556 struct ext4_ext_path *path = NULL; 563 struct ext4_ext_path *path = NULL;
557 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 564 int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
558 ext4_lblk_t o_end, o_start = orig_blk; 565 ext4_lblk_t o_end, o_start = orig_blk;
559 ext4_lblk_t d_start = donor_blk; 566 ext4_lblk_t d_start = donor_blk;
560 int ret; 567 int ret;
@@ -648,9 +655,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
648 if (o_end - o_start < cur_len) 655 if (o_end - o_start < cur_len)
649 cur_len = o_end - o_start; 656 cur_len = o_end - o_start;
650 657
651 orig_page_index = o_start >> (PAGE_CACHE_SHIFT - 658 orig_page_index = o_start >> (PAGE_SHIFT -
652 orig_inode->i_blkbits); 659 orig_inode->i_blkbits);
653 donor_page_index = d_start >> (PAGE_CACHE_SHIFT - 660 donor_page_index = d_start >> (PAGE_SHIFT -
654 donor_inode->i_blkbits); 661 donor_inode->i_blkbits);
655 offset_in_page = o_start % blocks_per_page; 662 offset_in_page = o_start % blocks_per_page;
656 if (cur_len > blocks_per_page- offset_in_page) 663 if (cur_len > blocks_per_page- offset_in_page)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d77d15f4b674..e4fc8ea45d78 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/backing-dev.h>
26 27
27#include "ext4_jbd2.h" 28#include "ext4_jbd2.h"
28#include "xattr.h" 29#include "xattr.h"
@@ -432,8 +433,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
432 * the page size, the remaining memory is zeroed when mapped, and 433 * the page size, the remaining memory is zeroed when mapped, and
433 * writes to that region are not written out to the file." 434 * writes to that region are not written out to the file."
434 */ 435 */
435 if (len < PAGE_CACHE_SIZE) 436 if (len < PAGE_SIZE)
436 zero_user_segment(page, len, PAGE_CACHE_SIZE); 437 zero_user_segment(page, len, PAGE_SIZE);
437 /* 438 /*
438 * In the first loop we prepare and mark buffers to submit. We have to 439 * In the first loop we prepare and mark buffers to submit. We have to
439 * mark all buffers in the page before submitting so that 440 * mark all buffers in the page before submitting so that
@@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
470 471
471 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) && 472 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
472 nr_to_submit) { 473 nr_to_submit) {
473 data_page = ext4_encrypt(inode, page); 474 gfp_t gfp_flags = GFP_NOFS;
475
476 retry_encrypt:
477 data_page = ext4_encrypt(inode, page, gfp_flags);
474 if (IS_ERR(data_page)) { 478 if (IS_ERR(data_page)) {
475 ret = PTR_ERR(data_page); 479 ret = PTR_ERR(data_page);
480 if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
481 if (io->io_bio) {
482 ext4_io_submit(io);
483 congestion_wait(BLK_RW_ASYNC, HZ/50);
484 }
485 gfp_flags |= __GFP_NOFAIL;
486 goto retry_encrypt;
487 }
476 data_page = NULL; 488 data_page = NULL;
477 goto out; 489 goto out;
478 } 490 }
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 5dc5e95063de..dc54a4b60eba 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
23 * 23 *
24 * then this code just gives up and calls the buffer_head-based read function. 24 * then this code just gives up and calls the buffer_head-based read function.
25 * It does handle a page which has holes at the end - that is a common case: 25 * It does handle a page which has holes at the end - that is a common case:
26 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. 26 * the end-of-file on blocksize < PAGE_SIZE setups.
27 * 27 *
28 */ 28 */
29 29
@@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
140 140
141 struct inode *inode = mapping->host; 141 struct inode *inode = mapping->host;
142 const unsigned blkbits = inode->i_blkbits; 142 const unsigned blkbits = inode->i_blkbits;
143 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; 143 const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
144 const unsigned blocksize = 1 << blkbits; 144 const unsigned blocksize = 1 << blkbits;
145 sector_t block_in_file; 145 sector_t block_in_file;
146 sector_t last_block; 146 sector_t last_block;
@@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
173 if (page_has_buffers(page)) 173 if (page_has_buffers(page))
174 goto confused; 174 goto confused;
175 175
176 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 176 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
177 last_block = block_in_file + nr_pages * blocks_per_page; 177 last_block = block_in_file + nr_pages * blocks_per_page;
178 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; 178 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
179 if (last_block > last_block_in_file) 179 if (last_block > last_block_in_file)
@@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
217 set_error_page: 217 set_error_page:
218 SetPageError(page); 218 SetPageError(page);
219 zero_user_segment(page, 0, 219 zero_user_segment(page, 0,
220 PAGE_CACHE_SIZE); 220 PAGE_SIZE);
221 unlock_page(page); 221 unlock_page(page);
222 goto next_page; 222 goto next_page;
223 } 223 }
@@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
250 } 250 }
251 if (first_hole != blocks_per_page) { 251 if (first_hole != blocks_per_page) {
252 zero_user_segment(page, first_hole << blkbits, 252 zero_user_segment(page, first_hole << blkbits,
253 PAGE_CACHE_SIZE); 253 PAGE_SIZE);
254 if (first_hole == 0) { 254 if (first_hole == 0) {
255 SetPageUptodate(page); 255 SetPageUptodate(page);
256 unlock_page(page); 256 unlock_page(page);
@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
279 279
280 if (ext4_encrypted_inode(inode) && 280 if (ext4_encrypted_inode(inode) &&
281 S_ISREG(inode->i_mode)) { 281 S_ISREG(inode->i_mode)) {
282 ctx = ext4_get_crypto_ctx(inode); 282 ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
283 if (IS_ERR(ctx)) 283 if (IS_ERR(ctx))
284 goto set_error_page; 284 goto set_error_page;
285 } 285 }
@@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
319 unlock_page(page); 319 unlock_page(page);
320 next_page: 320 next_page:
321 if (pages) 321 if (pages)
322 page_cache_release(page); 322 put_page(page);
323 } 323 }
324 BUG_ON(pages && !list_empty(pages)); 324 BUG_ON(pages && !list_empty(pages));
325 if (bio) 325 if (bio)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 539297515896..304c712dbe12 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1113,6 +1113,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
1113static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 1113static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
1114 unsigned int flags); 1114 unsigned int flags);
1115static int ext4_enable_quotas(struct super_block *sb); 1115static int ext4_enable_quotas(struct super_block *sb);
1116static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
1116 1117
1117static struct dquot **ext4_get_dquots(struct inode *inode) 1118static struct dquot **ext4_get_dquots(struct inode *inode)
1118{ 1119{
@@ -1129,7 +1130,7 @@ static const struct dquot_operations ext4_quota_operations = {
1129 .alloc_dquot = dquot_alloc, 1130 .alloc_dquot = dquot_alloc,
1130 .destroy_dquot = dquot_destroy, 1131 .destroy_dquot = dquot_destroy,
1131 .get_projid = ext4_get_projid, 1132 .get_projid = ext4_get_projid,
1132 .get_next_id = dquot_get_next_id, 1133 .get_next_id = ext4_get_next_id,
1133}; 1134};
1134 1135
1135static const struct quotactl_ops ext4_qctl_operations = { 1136static const struct quotactl_ops ext4_qctl_operations = {
@@ -1323,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
1323 return -1; 1324 return -1;
1324 } 1325 }
1325 if (ext4_has_feature_quota(sb)) { 1326 if (ext4_has_feature_quota(sb)) {
1326 ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " 1327 ext4_msg(sb, KERN_INFO, "Journaled quota options "
1327 "when QUOTA feature is enabled"); 1328 "ignored when QUOTA feature is enabled");
1328 return -1; 1329 return 1;
1329 } 1330 }
1330 qname = match_strdup(args); 1331 qname = match_strdup(args);
1331 if (!qname) { 1332 if (!qname) {
@@ -1688,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1688 return -1; 1689 return -1;
1689 } 1690 }
1690 if (ext4_has_feature_quota(sb)) { 1691 if (ext4_has_feature_quota(sb)) {
1691 ext4_msg(sb, KERN_ERR, 1692 ext4_msg(sb, KERN_INFO,
1692 "Cannot set journaled quota options " 1693 "Quota format mount options ignored "
1693 "when QUOTA feature is enabled"); 1694 "when QUOTA feature is enabled");
1694 return -1; 1695 return 1;
1695 } 1696 }
1696 sbi->s_jquota_fmt = m->mount_opt; 1697 sbi->s_jquota_fmt = m->mount_opt;
1697#endif 1698#endif
@@ -1756,11 +1757,11 @@ static int parse_options(char *options, struct super_block *sb,
1756#ifdef CONFIG_QUOTA 1757#ifdef CONFIG_QUOTA
1757 if (ext4_has_feature_quota(sb) && 1758 if (ext4_has_feature_quota(sb) &&
1758 (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) { 1759 (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
1759 ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA " 1760 ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
1760 "feature is enabled"); 1761 "mount options ignored.");
1761 return 0; 1762 clear_opt(sb, USRQUOTA);
1762 } 1763 clear_opt(sb, GRPQUOTA);
1763 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { 1764 } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1764 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) 1765 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1765 clear_opt(sb, USRQUOTA); 1766 clear_opt(sb, USRQUOTA);
1766 1767
@@ -1784,7 +1785,7 @@ static int parse_options(char *options, struct super_block *sb,
1784 int blocksize = 1785 int blocksize =
1785 BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 1786 BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
1786 1787
1787 if (blocksize < PAGE_CACHE_SIZE) { 1788 if (blocksize < PAGE_SIZE) {
1788 ext4_msg(sb, KERN_ERR, "can't mount with " 1789 ext4_msg(sb, KERN_ERR, "can't mount with "
1789 "dioread_nolock if block size != PAGE_SIZE"); 1790 "dioread_nolock if block size != PAGE_SIZE");
1790 return 0; 1791 return 0;
@@ -3808,7 +3809,7 @@ no_journal:
3808 } 3809 }
3809 3810
3810 if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) && 3811 if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
3811 (blocksize != PAGE_CACHE_SIZE)) { 3812 (blocksize != PAGE_SIZE)) {
3812 ext4_msg(sb, KERN_ERR, 3813 ext4_msg(sb, KERN_ERR,
3813 "Unsupported blocksize for fs encryption"); 3814 "Unsupported blocksize for fs encryption");
3814 goto failed_mount_wq; 3815 goto failed_mount_wq;
@@ -5028,6 +5029,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
5028 EXT4_SB(sb)->s_jquota_fmt, type); 5029 EXT4_SB(sb)->s_jquota_fmt, type);
5029} 5030}
5030 5031
5032static void lockdep_set_quota_inode(struct inode *inode, int subclass)
5033{
5034 struct ext4_inode_info *ei = EXT4_I(inode);
5035
5036 /* The first argument of lockdep_set_subclass has to be
5037 * *exactly* the same as the argument to init_rwsem() --- in
5038 * this case, in init_once() --- or lockdep gets unhappy
5039 * because the name of the lock is set using the
5040 * stringification of the argument to init_rwsem().
5041 */
5042 (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
5043 lockdep_set_subclass(&ei->i_data_sem, subclass);
5044}
5045
5031/* 5046/*
5032 * Standard function to be called on quota_on 5047 * Standard function to be called on quota_on
5033 */ 5048 */
@@ -5067,8 +5082,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
5067 if (err) 5082 if (err)
5068 return err; 5083 return err;
5069 } 5084 }
5070 5085 lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
5071 return dquot_quota_on(sb, type, format_id, path); 5086 err = dquot_quota_on(sb, type, format_id, path);
5087 if (err)
5088 lockdep_set_quota_inode(path->dentry->d_inode,
5089 I_DATA_SEM_NORMAL);
5090 return err;
5072} 5091}
5073 5092
5074static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 5093static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -5095,8 +5114,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
5095 5114
5096 /* Don't account quota for quota files to avoid recursion */ 5115 /* Don't account quota for quota files to avoid recursion */
5097 qf_inode->i_flags |= S_NOQUOTA; 5116 qf_inode->i_flags |= S_NOQUOTA;
5117 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
5098 err = dquot_enable(qf_inode, type, format_id, flags); 5118 err = dquot_enable(qf_inode, type, format_id, flags);
5099 iput(qf_inode); 5119 iput(qf_inode);
5120 if (err)
5121 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
5100 5122
5101 return err; 5123 return err;
5102} 5124}
@@ -5253,6 +5275,17 @@ out:
5253 return len; 5275 return len;
5254} 5276}
5255 5277
5278static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
5279{
5280 const struct quota_format_ops *ops;
5281
5282 if (!sb_has_quota_loaded(sb, qid->type))
5283 return -ESRCH;
5284 ops = sb_dqopt(sb)->ops[qid->type];
5285 if (!ops || !ops->get_next_id)
5286 return -ENOSYS;
5287 return dquot_get_next_id(sb, qid);
5288}
5256#endif 5289#endif
5257 5290
5258static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, 5291static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 6f7ee30a89ce..75ed5c2f0c16 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
80 if (res <= plen) 80 if (res <= plen)
81 paddr[res] = '\0'; 81 paddr[res] = '\0';
82 if (cpage) 82 if (cpage)
83 page_cache_release(cpage); 83 put_page(cpage);
84 set_delayed_call(done, kfree_link, paddr); 84 set_delayed_call(done, kfree_link, paddr);
85 return paddr; 85 return paddr;
86errout: 86errout:
87 if (cpage) 87 if (cpage)
88 page_cache_release(cpage); 88 put_page(cpage);
89 kfree(paddr); 89 kfree(paddr);
90 return ERR_PTR(res); 90 return ERR_PTR(res);
91} 91}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 0441e055c8e8..e79bd32b9b79 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -230,6 +230,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
230 return error; 230 return error;
231} 231}
232 232
233static int
234__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
235 void *end, const char *function, unsigned int line)
236{
237 struct ext4_xattr_entry *entry = IFIRST(header);
238 int error = -EFSCORRUPTED;
239
240 if (((void *) header >= end) ||
241 (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
242 goto errout;
243 error = ext4_xattr_check_names(entry, end, entry);
244errout:
245 if (error)
246 __ext4_error_inode(inode, function, line, 0,
247 "corrupted in-inode xattr");
248 return error;
249}
250
251#define xattr_check_inode(inode, header, end) \
252 __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
253
233static inline int 254static inline int
234ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size) 255ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
235{ 256{
@@ -341,7 +362,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
341 header = IHDR(inode, raw_inode); 362 header = IHDR(inode, raw_inode);
342 entry = IFIRST(header); 363 entry = IFIRST(header);
343 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 364 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
344 error = ext4_xattr_check_names(entry, end, entry); 365 error = xattr_check_inode(inode, header, end);
345 if (error) 366 if (error)
346 goto cleanup; 367 goto cleanup;
347 error = ext4_xattr_find_entry(&entry, name_index, name, 368 error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -477,7 +498,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
477 raw_inode = ext4_raw_inode(&iloc); 498 raw_inode = ext4_raw_inode(&iloc);
478 header = IHDR(inode, raw_inode); 499 header = IHDR(inode, raw_inode);
479 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 500 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
480 error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header)); 501 error = xattr_check_inode(inode, header, end);
481 if (error) 502 if (error)
482 goto cleanup; 503 goto cleanup;
483 error = ext4_xattr_list_entries(dentry, IFIRST(header), 504 error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -1040,8 +1061,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
1040 is->s.here = is->s.first; 1061 is->s.here = is->s.first;
1041 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 1062 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
1042 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 1063 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
1043 error = ext4_xattr_check_names(IFIRST(header), is->s.end, 1064 error = xattr_check_inode(inode, header, is->s.end);
1044 IFIRST(header));
1045 if (error) 1065 if (error)
1046 return error; 1066 return error;
1047 /* Find the named attribute. */ 1067 /* Find the named attribute. */
@@ -1356,6 +1376,10 @@ retry:
1356 last = entry; 1376 last = entry;
1357 total_ino = sizeof(struct ext4_xattr_ibody_header); 1377 total_ino = sizeof(struct ext4_xattr_ibody_header);
1358 1378
1379 error = xattr_check_inode(inode, header, end);
1380 if (error)
1381 goto cleanup;
1382
1359 free = ext4_xattr_free_space(last, &min_offs, base, &total_ino); 1383 free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
1360 if (free >= new_extra_isize) { 1384 if (free >= new_extra_isize) {
1361 entry = IFIRST(header); 1385 entry = IFIRST(header);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e5c762b37239..53fec0872e60 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
223 /* Allocate a new bio */ 223 /* Allocate a new bio */
224 bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw)); 224 bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
225 225
226 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { 226 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
227 bio_put(bio); 227 bio_put(bio);
228 return -EFAULT; 228 return -EFAULT;
229 } 229 }
@@ -265,8 +265,8 @@ alloc_new:
265 265
266 bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; 266 bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
267 267
268 if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) < 268 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
269 PAGE_CACHE_SIZE) { 269 PAGE_SIZE) {
270 __submit_merged_bio(io); 270 __submit_merged_bio(io);
271 goto alloc_new; 271 goto alloc_new;
272 } 272 }
@@ -406,7 +406,7 @@ got_it:
406 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata. 406 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
407 */ 407 */
408 if (dn.data_blkaddr == NEW_ADDR) { 408 if (dn.data_blkaddr == NEW_ADDR) {
409 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 409 zero_user_segment(page, 0, PAGE_SIZE);
410 SetPageUptodate(page); 410 SetPageUptodate(page);
411 unlock_page(page); 411 unlock_page(page);
412 return page; 412 return page;
@@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode,
517 goto got_it; 517 goto got_it;
518 518
519 if (dn.data_blkaddr == NEW_ADDR) { 519 if (dn.data_blkaddr == NEW_ADDR) {
520 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 520 zero_user_segment(page, 0, PAGE_SIZE);
521 SetPageUptodate(page); 521 SetPageUptodate(page);
522 } else { 522 } else {
523 f2fs_put_page(page, 1); 523 f2fs_put_page(page, 1);
@@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode,
530 } 530 }
531got_it: 531got_it:
532 if (new_i_size && i_size_read(inode) < 532 if (new_i_size && i_size_read(inode) <
533 ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) { 533 ((loff_t)(index + 1) << PAGE_SHIFT)) {
534 i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)); 534 i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
535 /* Only the directory inode sets new_i_size */ 535 /* Only the directory inode sets new_i_size */
536 set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); 536 set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
537 } 537 }
@@ -570,9 +570,9 @@ alloc:
570 /* update i_size */ 570 /* update i_size */
571 fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + 571 fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
572 dn->ofs_in_node; 572 dn->ofs_in_node;
573 if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)) 573 if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
574 i_size_write(dn->inode, 574 i_size_write(dn->inode,
575 ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)); 575 ((loff_t)(fofs + 1) << PAGE_SHIFT));
576 return 0; 576 return 0;
577} 577}
578 578
@@ -971,7 +971,7 @@ got_it:
971 goto confused; 971 goto confused;
972 } 972 }
973 } else { 973 } else {
974 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 974 zero_user_segment(page, 0, PAGE_SIZE);
975 SetPageUptodate(page); 975 SetPageUptodate(page);
976 unlock_page(page); 976 unlock_page(page);
977 goto next_page; 977 goto next_page;
@@ -1021,7 +1021,7 @@ submit_and_realloc:
1021 goto next_page; 1021 goto next_page;
1022set_error_page: 1022set_error_page:
1023 SetPageError(page); 1023 SetPageError(page);
1024 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 1024 zero_user_segment(page, 0, PAGE_SIZE);
1025 unlock_page(page); 1025 unlock_page(page);
1026 goto next_page; 1026 goto next_page;
1027confused: 1027confused:
@@ -1032,7 +1032,7 @@ confused:
1032 unlock_page(page); 1032 unlock_page(page);
1033next_page: 1033next_page:
1034 if (pages) 1034 if (pages)
1035 page_cache_release(page); 1035 put_page(page);
1036 } 1036 }
1037 BUG_ON(pages && !list_empty(pages)); 1037 BUG_ON(pages && !list_empty(pages));
1038 if (bio) 1038 if (bio)
@@ -1136,7 +1136,7 @@ static int f2fs_write_data_page(struct page *page,
1136 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1136 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1137 loff_t i_size = i_size_read(inode); 1137 loff_t i_size = i_size_read(inode);
1138 const pgoff_t end_index = ((unsigned long long) i_size) 1138 const pgoff_t end_index = ((unsigned long long) i_size)
1139 >> PAGE_CACHE_SHIFT; 1139 >> PAGE_SHIFT;
1140 unsigned offset = 0; 1140 unsigned offset = 0;
1141 bool need_balance_fs = false; 1141 bool need_balance_fs = false;
1142 int err = 0; 1142 int err = 0;
@@ -1157,11 +1157,11 @@ static int f2fs_write_data_page(struct page *page,
1157 * If the offset is out-of-range of file size, 1157 * If the offset is out-of-range of file size,
1158 * this page does not have to be written to disk. 1158 * this page does not have to be written to disk.
1159 */ 1159 */
1160 offset = i_size & (PAGE_CACHE_SIZE - 1); 1160 offset = i_size & (PAGE_SIZE - 1);
1161 if ((page->index >= end_index + 1) || !offset) 1161 if ((page->index >= end_index + 1) || !offset)
1162 goto out; 1162 goto out;
1163 1163
1164 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 1164 zero_user_segment(page, offset, PAGE_SIZE);
1165write: 1165write:
1166 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 1166 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1167 goto redirty_out; 1167 goto redirty_out;
@@ -1267,8 +1267,8 @@ next:
1267 cycled = 0; 1267 cycled = 0;
1268 end = -1; 1268 end = -1;
1269 } else { 1269 } else {
1270 index = wbc->range_start >> PAGE_CACHE_SHIFT; 1270 index = wbc->range_start >> PAGE_SHIFT;
1271 end = wbc->range_end >> PAGE_CACHE_SHIFT; 1271 end = wbc->range_end >> PAGE_SHIFT;
1272 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 1272 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1273 range_whole = 1; 1273 range_whole = 1;
1274 cycled = 1; /* ignore range_cyclic tests */ 1274 cycled = 1; /* ignore range_cyclic tests */
@@ -1448,11 +1448,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
1448 * the block addresses when there is no need to fill the page. 1448 * the block addresses when there is no need to fill the page.
1449 */ 1449 */
1450 if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) && 1450 if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
1451 len == PAGE_CACHE_SIZE) 1451 len == PAGE_SIZE)
1452 return 0; 1452 return 0;
1453 1453
1454 if (f2fs_has_inline_data(inode) || 1454 if (f2fs_has_inline_data(inode) ||
1455 (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { 1455 (pos & PAGE_MASK) >= i_size_read(inode)) {
1456 f2fs_lock_op(sbi); 1456 f2fs_lock_op(sbi);
1457 locked = true; 1457 locked = true;
1458 } 1458 }
@@ -1513,7 +1513,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
1513 struct inode *inode = mapping->host; 1513 struct inode *inode = mapping->host;
1514 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1514 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1515 struct page *page = NULL; 1515 struct page *page = NULL;
1516 pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT; 1516 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
1517 bool need_balance = false; 1517 bool need_balance = false;
1518 block_t blkaddr = NULL_ADDR; 1518 block_t blkaddr = NULL_ADDR;
1519 int err = 0; 1519 int err = 0;
@@ -1561,22 +1561,22 @@ repeat:
1561 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 1561 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
1562 f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); 1562 f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
1563 1563
1564 if (len == PAGE_CACHE_SIZE) 1564 if (len == PAGE_SIZE)
1565 goto out_update; 1565 goto out_update;
1566 if (PageUptodate(page)) 1566 if (PageUptodate(page))
1567 goto out_clear; 1567 goto out_clear;
1568 1568
1569 if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { 1569 if ((pos & PAGE_MASK) >= i_size_read(inode)) {
1570 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 1570 unsigned start = pos & (PAGE_SIZE - 1);
1571 unsigned end = start + len; 1571 unsigned end = start + len;
1572 1572
1573 /* Reading beyond i_size is simple: memset to zero */ 1573 /* Reading beyond i_size is simple: memset to zero */
1574 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); 1574 zero_user_segments(page, 0, start, end, PAGE_SIZE);
1575 goto out_update; 1575 goto out_update;
1576 } 1576 }
1577 1577
1578 if (blkaddr == NEW_ADDR) { 1578 if (blkaddr == NEW_ADDR) {
1579 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 1579 zero_user_segment(page, 0, PAGE_SIZE);
1580 } else { 1580 } else {
1581 struct f2fs_io_info fio = { 1581 struct f2fs_io_info fio = {
1582 .sbi = sbi, 1582 .sbi = sbi,
@@ -1688,7 +1688,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
1688 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1688 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1689 1689
1690 if (inode->i_ino >= F2FS_ROOT_INO(sbi) && 1690 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
1691 (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) 1691 (offset % PAGE_SIZE || length != PAGE_SIZE))
1692 return; 1692 return;
1693 1693
1694 if (PageDirty(page)) { 1694 if (PageDirty(page)) {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 4fb6ef88a34f..f4a61a5ff79f 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
164 164
165 /* build curseg */ 165 /* build curseg */
166 si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE; 166 si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
167 si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE; 167 si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
168 168
169 /* build dirty segmap */ 169 /* build dirty segmap */
170 si->base_mem += sizeof(struct dirty_seglist_info); 170 si->base_mem += sizeof(struct dirty_seglist_info);
@@ -201,9 +201,9 @@ get_cache:
201 201
202 si->page_mem = 0; 202 si->page_mem = 0;
203 npages = NODE_MAPPING(sbi)->nrpages; 203 npages = NODE_MAPPING(sbi)->nrpages;
204 si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; 204 si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
205 npages = META_MAPPING(sbi)->nrpages; 205 npages = META_MAPPING(sbi)->nrpages;
206 si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; 206 si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
207} 207}
208 208
209static int stat_show(struct seq_file *s, void *v) 209static int stat_show(struct seq_file *s, void *v)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 80641ad82745..af819571bce7 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
17 17
18static unsigned long dir_blocks(struct inode *inode) 18static unsigned long dir_blocks(struct inode *inode)
19{ 19{
20 return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1)) 20 return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
21 >> PAGE_CACHE_SHIFT; 21 >> PAGE_SHIFT;
22} 22}
23 23
24static unsigned int dir_buckets(unsigned int level, int dir_level) 24static unsigned int dir_buckets(unsigned int level, int dir_level)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bbe2cd1265d0..7a4558d17f36 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
1294 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 1294 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
1295 unlock_page(page); 1295 unlock_page(page);
1296 } 1296 }
1297 page_cache_release(page); 1297 put_page(page);
1298} 1298}
1299 1299
1300static inline void f2fs_put_dnode(struct dnode_of_data *dn) 1300static inline void f2fs_put_dnode(struct dnode_of_data *dn)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b41c3579ea9e..443e07705c2a 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
74 goto mapped; 74 goto mapped;
75 75
76 /* page is wholly or partially inside EOF */ 76 /* page is wholly or partially inside EOF */
77 if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > 77 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
78 i_size_read(inode)) { 78 i_size_read(inode)) {
79 unsigned offset; 79 unsigned offset;
80 offset = i_size_read(inode) & ~PAGE_CACHE_MASK; 80 offset = i_size_read(inode) & ~PAGE_MASK;
81 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 81 zero_user_segment(page, offset, PAGE_SIZE);
82 } 82 }
83 set_page_dirty(page); 83 set_page_dirty(page);
84 SetPageUptodate(page); 84 SetPageUptodate(page);
@@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
346 goto found; 346 goto found;
347 } 347 }
348 348
349 pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT); 349 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
350 350
351 dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence); 351 dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
352 352
353 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { 353 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
354 set_new_dnode(&dn, inode, NULL, NULL, 0); 354 set_new_dnode(&dn, inode, NULL, NULL, 0);
355 err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA); 355 err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
356 if (err && err != -ENOENT) { 356 if (err && err != -ENOENT) {
@@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
370 /* find data/hole in dnode block */ 370 /* find data/hole in dnode block */
371 for (; dn.ofs_in_node < end_offset; 371 for (; dn.ofs_in_node < end_offset;
372 dn.ofs_in_node++, pgofs++, 372 dn.ofs_in_node++, pgofs++,
373 data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { 373 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
374 block_t blkaddr; 374 block_t blkaddr;
375 blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); 375 blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
376 376
@@ -508,8 +508,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
508static int truncate_partial_data_page(struct inode *inode, u64 from, 508static int truncate_partial_data_page(struct inode *inode, u64 from,
509 bool cache_only) 509 bool cache_only)
510{ 510{
511 unsigned offset = from & (PAGE_CACHE_SIZE - 1); 511 unsigned offset = from & (PAGE_SIZE - 1);
512 pgoff_t index = from >> PAGE_CACHE_SHIFT; 512 pgoff_t index = from >> PAGE_SHIFT;
513 struct address_space *mapping = inode->i_mapping; 513 struct address_space *mapping = inode->i_mapping;
514 struct page *page; 514 struct page *page;
515 515
@@ -529,7 +529,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
529 return 0; 529 return 0;
530truncate_out: 530truncate_out:
531 f2fs_wait_on_page_writeback(page, DATA, true); 531 f2fs_wait_on_page_writeback(page, DATA, true);
532 zero_user(page, offset, PAGE_CACHE_SIZE - offset); 532 zero_user(page, offset, PAGE_SIZE - offset);
533 if (!cache_only || !f2fs_encrypted_inode(inode) || 533 if (!cache_only || !f2fs_encrypted_inode(inode) ||
534 !S_ISREG(inode->i_mode)) 534 !S_ISREG(inode->i_mode))
535 set_page_dirty(page); 535 set_page_dirty(page);
@@ -799,11 +799,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
799 if (ret) 799 if (ret)
800 return ret; 800 return ret;
801 801
802 pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; 802 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
803 pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; 803 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
804 804
805 off_start = offset & (PAGE_CACHE_SIZE - 1); 805 off_start = offset & (PAGE_SIZE - 1);
806 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); 806 off_end = (offset + len) & (PAGE_SIZE - 1);
807 807
808 if (pg_start == pg_end) { 808 if (pg_start == pg_end) {
809 ret = fill_zero(inode, pg_start, off_start, 809 ret = fill_zero(inode, pg_start, off_start,
@@ -813,7 +813,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
813 } else { 813 } else {
814 if (off_start) { 814 if (off_start) {
815 ret = fill_zero(inode, pg_start++, off_start, 815 ret = fill_zero(inode, pg_start++, off_start,
816 PAGE_CACHE_SIZE - off_start); 816 PAGE_SIZE - off_start);
817 if (ret) 817 if (ret)
818 return ret; 818 return ret;
819 } 819 }
@@ -830,8 +830,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
830 830
831 f2fs_balance_fs(sbi, true); 831 f2fs_balance_fs(sbi, true);
832 832
833 blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT; 833 blk_start = (loff_t)pg_start << PAGE_SHIFT;
834 blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT; 834 blk_end = (loff_t)pg_end << PAGE_SHIFT;
835 truncate_inode_pages_range(mapping, blk_start, 835 truncate_inode_pages_range(mapping, blk_start,
836 blk_end - 1); 836 blk_end - 1);
837 837
@@ -954,8 +954,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
954 if (ret) 954 if (ret)
955 return ret; 955 return ret;
956 956
957 pg_start = offset >> PAGE_CACHE_SHIFT; 957 pg_start = offset >> PAGE_SHIFT;
958 pg_end = (offset + len) >> PAGE_CACHE_SHIFT; 958 pg_end = (offset + len) >> PAGE_SHIFT;
959 959
960 /* write out all dirty pages from offset */ 960 /* write out all dirty pages from offset */
961 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); 961 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1006,11 +1006,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1006 1006
1007 truncate_pagecache_range(inode, offset, offset + len - 1); 1007 truncate_pagecache_range(inode, offset, offset + len - 1);
1008 1008
1009 pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; 1009 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1010 pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; 1010 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1011 1011
1012 off_start = offset & (PAGE_CACHE_SIZE - 1); 1012 off_start = offset & (PAGE_SIZE - 1);
1013 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); 1013 off_end = (offset + len) & (PAGE_SIZE - 1);
1014 1014
1015 if (pg_start == pg_end) { 1015 if (pg_start == pg_end) {
1016 ret = fill_zero(inode, pg_start, off_start, 1016 ret = fill_zero(inode, pg_start, off_start,
@@ -1024,12 +1024,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1024 } else { 1024 } else {
1025 if (off_start) { 1025 if (off_start) {
1026 ret = fill_zero(inode, pg_start++, off_start, 1026 ret = fill_zero(inode, pg_start++, off_start,
1027 PAGE_CACHE_SIZE - off_start); 1027 PAGE_SIZE - off_start);
1028 if (ret) 1028 if (ret)
1029 return ret; 1029 return ret;
1030 1030
1031 new_size = max_t(loff_t, new_size, 1031 new_size = max_t(loff_t, new_size,
1032 (loff_t)pg_start << PAGE_CACHE_SHIFT); 1032 (loff_t)pg_start << PAGE_SHIFT);
1033 } 1033 }
1034 1034
1035 for (index = pg_start; index < pg_end; index++) { 1035 for (index = pg_start; index < pg_end; index++) {
@@ -1060,7 +1060,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1060 f2fs_unlock_op(sbi); 1060 f2fs_unlock_op(sbi);
1061 1061
1062 new_size = max_t(loff_t, new_size, 1062 new_size = max_t(loff_t, new_size,
1063 (loff_t)(index + 1) << PAGE_CACHE_SHIFT); 1063 (loff_t)(index + 1) << PAGE_SHIFT);
1064 } 1064 }
1065 1065
1066 if (off_end) { 1066 if (off_end) {
@@ -1117,8 +1117,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1117 1117
1118 truncate_pagecache(inode, offset); 1118 truncate_pagecache(inode, offset);
1119 1119
1120 pg_start = offset >> PAGE_CACHE_SHIFT; 1120 pg_start = offset >> PAGE_SHIFT;
1121 pg_end = (offset + len) >> PAGE_CACHE_SHIFT; 1121 pg_end = (offset + len) >> PAGE_SHIFT;
1122 delta = pg_end - pg_start; 1122 delta = pg_end - pg_start;
1123 nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; 1123 nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
1124 1124
@@ -1158,11 +1158,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
1158 1158
1159 f2fs_balance_fs(sbi, true); 1159 f2fs_balance_fs(sbi, true);
1160 1160
1161 pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; 1161 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1162 pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; 1162 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1163 1163
1164 off_start = offset & (PAGE_CACHE_SIZE - 1); 1164 off_start = offset & (PAGE_SIZE - 1);
1165 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); 1165 off_end = (offset + len) & (PAGE_SIZE - 1);
1166 1166
1167 f2fs_lock_op(sbi); 1167 f2fs_lock_op(sbi);
1168 1168
@@ -1180,12 +1180,12 @@ noalloc:
1180 if (pg_start == pg_end) 1180 if (pg_start == pg_end)
1181 new_size = offset + len; 1181 new_size = offset + len;
1182 else if (index == pg_start && off_start) 1182 else if (index == pg_start && off_start)
1183 new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT; 1183 new_size = (loff_t)(index + 1) << PAGE_SHIFT;
1184 else if (index == pg_end) 1184 else if (index == pg_end)
1185 new_size = ((loff_t)index << PAGE_CACHE_SHIFT) + 1185 new_size = ((loff_t)index << PAGE_SHIFT) +
1186 off_end; 1186 off_end;
1187 else 1187 else
1188 new_size += PAGE_CACHE_SIZE; 1188 new_size += PAGE_SIZE;
1189 } 1189 }
1190 1190
1191 if (!(mode & FALLOC_FL_KEEP_SIZE) && 1191 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -1652,8 +1652,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
1652 if (need_inplace_update(inode)) 1652 if (need_inplace_update(inode))
1653 return -EINVAL; 1653 return -EINVAL;
1654 1654
1655 pg_start = range->start >> PAGE_CACHE_SHIFT; 1655 pg_start = range->start >> PAGE_SHIFT;
1656 pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT; 1656 pg_end = (range->start + range->len) >> PAGE_SHIFT;
1657 1657
1658 f2fs_balance_fs(sbi, true); 1658 f2fs_balance_fs(sbi, true);
1659 1659
@@ -1770,7 +1770,7 @@ clear_out:
1770out: 1770out:
1771 inode_unlock(inode); 1771 inode_unlock(inode);
1772 if (!err) 1772 if (!err)
1773 range->len = (u64)total << PAGE_CACHE_SHIFT; 1773 range->len = (u64)total << PAGE_SHIFT;
1774 return err; 1774 return err;
1775} 1775}
1776 1776
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 358214e9f707..a2fbe6f427d3 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage)
51 51
52 f2fs_bug_on(F2FS_P_SB(page), page->index); 52 f2fs_bug_on(F2FS_P_SB(page), page->index);
53 53
54 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); 54 zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
55 55
56 /* Copy the whole inline data block */ 56 /* Copy the whole inline data block */
57 src_addr = inline_data_addr(ipage); 57 src_addr = inline_data_addr(ipage);
@@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
93 } 93 }
94 94
95 if (page->index) 95 if (page->index)
96 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 96 zero_user_segment(page, 0, PAGE_SIZE);
97 else 97 else
98 read_inline_data(page, ipage); 98 read_inline_data(page, ipage);
99 99
@@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
375 goto out; 375 goto out;
376 376
377 f2fs_wait_on_page_writeback(page, DATA, true); 377 f2fs_wait_on_page_writeback(page, DATA, true);
378 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); 378 zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
379 379
380 dentry_blk = kmap_atomic(page); 380 dentry_blk = kmap_atomic(page);
381 381
@@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
405 stat_dec_inline_dir(dir); 405 stat_dec_inline_dir(dir);
406 clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); 406 clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
407 407
408 if (i_size_read(dir) < PAGE_CACHE_SIZE) { 408 if (i_size_read(dir) < PAGE_SIZE) {
409 i_size_write(dir, PAGE_CACHE_SIZE); 409 i_size_write(dir, PAGE_SIZE);
410 set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); 410 set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
411 } 411 }
412 412
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7876f1052101..013e57932d61 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
1027 goto errout; 1027 goto errout;
1028 } 1028 }
1029 1029
1030 /* this is broken symlink case */
1031 if (unlikely(cstr.name[0] == 0)) {
1032 res = -ENOENT;
1033 goto errout;
1034 }
1035
1036 if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) { 1030 if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
1037 /* Symlink data on the disk is corrupted */ 1031 /* Symlink data on the disk is corrupted */
1038 res = -EIO; 1032 res = -EIO;
@@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
1046 if (res < 0) 1040 if (res < 0)
1047 goto errout; 1041 goto errout;
1048 1042
1043 /* this is broken symlink case */
1044 if (unlikely(pstr.name[0] == 0)) {
1045 res = -ENOENT;
1046 goto errout;
1047 }
1048
1049 paddr = pstr.name; 1049 paddr = pstr.name;
1050 1050
1051 /* Null-terminate the name */ 1051 /* Null-terminate the name */
1052 paddr[res] = '\0'; 1052 paddr[res] = '\0';
1053 1053
1054 page_cache_release(cpage); 1054 put_page(cpage);
1055 set_delayed_call(done, kfree_link, paddr); 1055 set_delayed_call(done, kfree_link, paddr);
1056 return paddr; 1056 return paddr;
1057errout: 1057errout:
1058 fscrypt_fname_free_buffer(&pstr); 1058 fscrypt_fname_free_buffer(&pstr);
1059 page_cache_release(cpage); 1059 put_page(cpage);
1060 return ERR_PTR(res); 1060 return ERR_PTR(res);
1061} 1061}
1062 1062
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 118321bd1a7f..1a33de9d84b1 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
46 */ 46 */
47 if (type == FREE_NIDS) { 47 if (type == FREE_NIDS) {
48 mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 48 mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
49 PAGE_CACHE_SHIFT; 49 PAGE_SHIFT;
50 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); 50 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
51 } else if (type == NAT_ENTRIES) { 51 } else if (type == NAT_ENTRIES) {
52 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 52 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
53 PAGE_CACHE_SHIFT; 53 PAGE_SHIFT;
54 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); 54 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
55 } else if (type == DIRTY_DENTS) { 55 } else if (type == DIRTY_DENTS) {
56 if (sbi->sb->s_bdi->wb.dirty_exceeded) 56 if (sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
62 62
63 for (i = 0; i <= UPDATE_INO; i++) 63 for (i = 0; i <= UPDATE_INO; i++)
64 mem_size += (sbi->im[i].ino_num * 64 mem_size += (sbi->im[i].ino_num *
65 sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT; 65 sizeof(struct ino_entry)) >> PAGE_SHIFT;
66 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 66 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
67 } else if (type == EXTENT_CACHE) { 67 } else if (type == EXTENT_CACHE) {
68 mem_size = (atomic_read(&sbi->total_ext_tree) * 68 mem_size = (atomic_read(&sbi->total_ext_tree) *
69 sizeof(struct extent_tree) + 69 sizeof(struct extent_tree) +
70 atomic_read(&sbi->total_ext_node) * 70 atomic_read(&sbi->total_ext_node) *
71 sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT; 71 sizeof(struct extent_node)) >> PAGE_SHIFT;
72 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 72 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
73 } else { 73 } else {
74 if (!sbi->sb->s_bdi->wb.dirty_exceeded) 74 if (!sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
121 121
122 src_addr = page_address(src_page); 122 src_addr = page_address(src_page);
123 dst_addr = page_address(dst_page); 123 dst_addr = page_address(dst_page);
124 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 124 memcpy(dst_addr, src_addr, PAGE_SIZE);
125 set_page_dirty(dst_page); 125 set_page_dirty(dst_page);
126 f2fs_put_page(src_page, 1); 126 f2fs_put_page(src_page, 1);
127 127
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 0b30cd2aeebd..011942f94d64 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -591,7 +591,7 @@ out:
591 591
592 /* truncate meta pages to be used by the recovery */ 592 /* truncate meta pages to be used by the recovery */
593 truncate_inode_pages_range(META_MAPPING(sbi), 593 truncate_inode_pages_range(META_MAPPING(sbi),
594 (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1); 594 (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
595 595
596 if (err) { 596 if (err) {
597 truncate_inode_pages_final(NODE_MAPPING(sbi)); 597 truncate_inode_pages_final(NODE_MAPPING(sbi));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6f16b39f0b52..540669d6978e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
885 } 885 }
886 } 886 }
887 887
888 sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - 888 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
889 SUM_FOOTER_SIZE) / SUMMARY_SIZE; 889 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
890 if (valid_sum_count <= sum_in_page) 890 if (valid_sum_count <= sum_in_page)
891 return 1; 891 return 1;
892 else if ((valid_sum_count - sum_in_page) <= 892 else if ((valid_sum_count - sum_in_page) <=
893 (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 893 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
894 return 2; 894 return 2;
895 return 3; 895 return 3;
896} 896}
@@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
909 void *dst = page_address(page); 909 void *dst = page_address(page);
910 910
911 if (src) 911 if (src)
912 memcpy(dst, src, PAGE_CACHE_SIZE); 912 memcpy(dst, src, PAGE_SIZE);
913 else 913 else
914 memset(dst, 0, PAGE_CACHE_SIZE); 914 memset(dst, 0, PAGE_SIZE);
915 set_page_dirty(page); 915 set_page_dirty(page);
916 f2fs_put_page(page, 1); 916 f2fs_put_page(page, 1);
917} 917}
@@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1596 s = (struct f2fs_summary *)(kaddr + offset); 1596 s = (struct f2fs_summary *)(kaddr + offset);
1597 seg_i->sum_blk->entries[j] = *s; 1597 seg_i->sum_blk->entries[j] = *s;
1598 offset += SUMMARY_SIZE; 1598 offset += SUMMARY_SIZE;
1599 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - 1599 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
1600 SUM_FOOTER_SIZE) 1600 SUM_FOOTER_SIZE)
1601 continue; 1601 continue;
1602 1602
@@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1757 *summary = seg_i->sum_blk->entries[j]; 1757 *summary = seg_i->sum_blk->entries[j];
1758 written_size += SUMMARY_SIZE; 1758 written_size += SUMMARY_SIZE;
1759 1759
1760 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - 1760 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
1761 SUM_FOOTER_SIZE) 1761 SUM_FOOTER_SIZE)
1762 continue; 1762 continue;
1763 1763
@@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1844 1844
1845 src_addr = page_address(src_page); 1845 src_addr = page_address(src_page);
1846 dst_addr = page_address(dst_page); 1846 dst_addr = page_address(dst_page);
1847 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 1847 memcpy(dst_addr, src_addr, PAGE_SIZE);
1848 1848
1849 set_page_dirty(dst_page); 1849 set_page_dirty(dst_page);
1850 f2fs_put_page(src_page, 1); 1850 f2fs_put_page(src_page, 1);
@@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
2171 2171
2172 for (i = 0; i < NR_CURSEG_TYPE; i++) { 2172 for (i = 0; i < NR_CURSEG_TYPE; i++) {
2173 mutex_init(&array[i].curseg_mutex); 2173 mutex_init(&array[i].curseg_mutex);
2174 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 2174 array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
2175 if (!array[i].sum_blk) 2175 if (!array[i].sum_blk)
2176 return -ENOMEM; 2176 return -ENOMEM;
2177 init_rwsem(&array[i].journal_rwsem); 2177 init_rwsem(&array[i].journal_rwsem);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 15bb81f8dac2..006f87d69921 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -984,9 +984,25 @@ static loff_t max_file_blocks(void)
984 return result; 984 return result;
985} 985}
986 986
987static int __f2fs_commit_super(struct buffer_head *bh,
988 struct f2fs_super_block *super)
989{
990 lock_buffer(bh);
991 if (super)
992 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
993 set_buffer_uptodate(bh);
994 set_buffer_dirty(bh);
995 unlock_buffer(bh);
996
997 /* it's rare case, we can do fua all the time */
998 return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
999}
1000
987static inline bool sanity_check_area_boundary(struct super_block *sb, 1001static inline bool sanity_check_area_boundary(struct super_block *sb,
988 struct f2fs_super_block *raw_super) 1002 struct buffer_head *bh)
989{ 1003{
1004 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1005 (bh->b_data + F2FS_SUPER_OFFSET);
990 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 1006 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
991 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); 1007 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
992 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); 1008 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
1000 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main); 1016 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
1001 u32 segment_count = le32_to_cpu(raw_super->segment_count); 1017 u32 segment_count = le32_to_cpu(raw_super->segment_count);
1002 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 1018 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
1019 u64 main_end_blkaddr = main_blkaddr +
1020 (segment_count_main << log_blocks_per_seg);
1021 u64 seg_end_blkaddr = segment0_blkaddr +
1022 (segment_count << log_blocks_per_seg);
1003 1023
1004 if (segment0_blkaddr != cp_blkaddr) { 1024 if (segment0_blkaddr != cp_blkaddr) {
1005 f2fs_msg(sb, KERN_INFO, 1025 f2fs_msg(sb, KERN_INFO,
@@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
1044 return true; 1064 return true;
1045 } 1065 }
1046 1066
1047 if (main_blkaddr + (segment_count_main << log_blocks_per_seg) != 1067 if (main_end_blkaddr > seg_end_blkaddr) {
1048 segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
1049 f2fs_msg(sb, KERN_INFO, 1068 f2fs_msg(sb, KERN_INFO,
1050 "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)", 1069 "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
1051 main_blkaddr, 1070 main_blkaddr,
1052 segment0_blkaddr + (segment_count << log_blocks_per_seg), 1071 segment0_blkaddr +
1072 (segment_count << log_blocks_per_seg),
1053 segment_count_main << log_blocks_per_seg); 1073 segment_count_main << log_blocks_per_seg);
1054 return true; 1074 return true;
1075 } else if (main_end_blkaddr < seg_end_blkaddr) {
1076 int err = 0;
1077 char *res;
1078
1079 /* fix in-memory information all the time */
1080 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
1081 segment0_blkaddr) >> log_blocks_per_seg);
1082
1083 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
1084 res = "internally";
1085 } else {
1086 err = __f2fs_commit_super(bh, NULL);
1087 res = err ? "failed" : "done";
1088 }
1089 f2fs_msg(sb, KERN_INFO,
1090 "Fix alignment : %s, start(%u) end(%u) block(%u)",
1091 res, main_blkaddr,
1092 segment0_blkaddr +
1093 (segment_count << log_blocks_per_seg),
1094 segment_count_main << log_blocks_per_seg);
1095 if (err)
1096 return true;
1055 } 1097 }
1056
1057 return false; 1098 return false;
1058} 1099}
1059 1100
1060static int sanity_check_raw_super(struct super_block *sb, 1101static int sanity_check_raw_super(struct super_block *sb,
1061 struct f2fs_super_block *raw_super) 1102 struct buffer_head *bh)
1062{ 1103{
1104 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1105 (bh->b_data + F2FS_SUPER_OFFSET);
1063 unsigned int blocksize; 1106 unsigned int blocksize;
1064 1107
1065 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 1108 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb,
1070 } 1113 }
1071 1114
1072 /* Currently, support only 4KB page cache size */ 1115 /* Currently, support only 4KB page cache size */
1073 if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { 1116 if (F2FS_BLKSIZE != PAGE_SIZE) {
1074 f2fs_msg(sb, KERN_INFO, 1117 f2fs_msg(sb, KERN_INFO,
1075 "Invalid page_cache_size (%lu), supports only 4KB\n", 1118 "Invalid page_cache_size (%lu), supports only 4KB\n",
1076 PAGE_CACHE_SIZE); 1119 PAGE_SIZE);
1077 return 1; 1120 return 1;
1078 } 1121 }
1079 1122
@@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb,
1126 } 1169 }
1127 1170
1128 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ 1171 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
1129 if (sanity_check_area_boundary(sb, raw_super)) 1172 if (sanity_check_area_boundary(sb, bh))
1130 return 1; 1173 return 1;
1131 1174
1132 return 0; 1175 return 0;
@@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb,
1202{ 1245{
1203 int block; 1246 int block;
1204 struct buffer_head *bh; 1247 struct buffer_head *bh;
1205 struct f2fs_super_block *super, *buf; 1248 struct f2fs_super_block *super;
1206 int err = 0; 1249 int err = 0;
1207 1250
1208 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL); 1251 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
@@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb,
1218 continue; 1261 continue;
1219 } 1262 }
1220 1263
1221 buf = (struct f2fs_super_block *)
1222 (bh->b_data + F2FS_SUPER_OFFSET);
1223
1224 /* sanity checking of raw super */ 1264 /* sanity checking of raw super */
1225 if (sanity_check_raw_super(sb, buf)) { 1265 if (sanity_check_raw_super(sb, bh)) {
1226 f2fs_msg(sb, KERN_ERR, 1266 f2fs_msg(sb, KERN_ERR,
1227 "Can't find valid F2FS filesystem in %dth superblock", 1267 "Can't find valid F2FS filesystem in %dth superblock",
1228 block + 1); 1268 block + 1);
@@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb,
1232 } 1272 }
1233 1273
1234 if (!*raw_super) { 1274 if (!*raw_super) {
1235 memcpy(super, buf, sizeof(*super)); 1275 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
1276 sizeof(*super));
1236 *valid_super_block = block; 1277 *valid_super_block = block;
1237 *raw_super = super; 1278 *raw_super = super;
1238 } 1279 }
@@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb,
1252 return err; 1293 return err;
1253} 1294}
1254 1295
1255static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block) 1296int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
1256{ 1297{
1257 struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
1258 struct buffer_head *bh; 1298 struct buffer_head *bh;
1259 int err; 1299 int err;
1260 1300
1261 bh = sb_getblk(sbi->sb, block); 1301 /* write back-up superblock first */
1302 bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
1262 if (!bh) 1303 if (!bh)
1263 return -EIO; 1304 return -EIO;
1264 1305 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
1265 lock_buffer(bh);
1266 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
1267 set_buffer_uptodate(bh);
1268 set_buffer_dirty(bh);
1269 unlock_buffer(bh);
1270
1271 /* it's rare case, we can do fua all the time */
1272 err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
1273 brelse(bh); 1306 brelse(bh);
1274 1307
1275 return err;
1276}
1277
1278int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
1279{
1280 int err;
1281
1282 /* write back-up superblock first */
1283 err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
1284
1285 /* if we are in recovery path, skip writing valid superblock */ 1308 /* if we are in recovery path, skip writing valid superblock */
1286 if (recover || err) 1309 if (recover || err)
1287 return err; 1310 return err;
1288 1311
1289 /* write current valid superblock */ 1312 /* write current valid superblock */
1290 return __f2fs_commit_super(sbi, sbi->valid_super_block); 1313 bh = sb_getblk(sbi->sb, sbi->valid_super_block);
1314 if (!bh)
1315 return -EIO;
1316 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
1317 brelse(bh);
1318 return err;
1291} 1319}
1292 1320
1293static int f2fs_fill_super(struct super_block *sb, void *data, int silent) 1321static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1442,7 +1470,7 @@ try_onemore:
1442 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); 1470 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
1443 if (__exist_node_summaries(sbi)) 1471 if (__exist_node_summaries(sbi))
1444 sbi->kbytes_written = 1472 sbi->kbytes_written =
1445 le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written); 1473 le64_to_cpu(seg_i->journal->info.kbytes_written);
1446 1474
1447 build_gc_manager(sbi); 1475 build_gc_manager(sbi);
1448 1476
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index cb84f0fcc72a..bfc780c682fb 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -66,11 +66,11 @@ static int
66vxfs_immed_readpage(struct file *fp, struct page *pp) 66vxfs_immed_readpage(struct file *fp, struct page *pp)
67{ 67{
68 struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); 68 struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
69 u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT; 69 u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT;
70 caddr_t kaddr; 70 caddr_t kaddr;
71 71
72 kaddr = kmap(pp); 72 kaddr = kmap(pp);
73 memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE); 73 memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
74 kunmap(pp); 74 kunmap(pp);
75 75
76 flush_dcache_page(pp); 76 flush_dcache_page(pp);
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 1cff72df0389..a49e0cfbb686 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -45,7 +45,7 @@
45/* 45/*
46 * Number of VxFS blocks per page. 46 * Number of VxFS blocks per page.
47 */ 47 */
48#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize)) 48#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize))
49 49
50 50
51static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); 51static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
@@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
175 if (de) { 175 if (de) {
176 ino = de->d_ino; 176 ino = de->d_ino;
177 kunmap(pp); 177 kunmap(pp);
178 page_cache_release(pp); 178 put_page(pp);
179 } 179 }
180 180
181 return (ino); 181 return (ino);
@@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
255 nblocks = dir_blocks(ip); 255 nblocks = dir_blocks(ip);
256 pblocks = VXFS_BLOCK_PER_PAGE(sbp); 256 pblocks = VXFS_BLOCK_PER_PAGE(sbp);
257 257
258 page = pos >> PAGE_CACHE_SHIFT; 258 page = pos >> PAGE_SHIFT;
259 offset = pos & ~PAGE_CACHE_MASK; 259 offset = pos & ~PAGE_MASK;
260 block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks; 260 block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
261 261
262 for (; page < npages; page++, block = 0) { 262 for (; page < npages; page++, block = 0) {
@@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
289 continue; 289 continue;
290 290
291 offset = (char *)de - kaddr; 291 offset = (char *)de - kaddr;
292 ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; 292 ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
293 if (!dir_emit(ctx, de->d_name, de->d_namelen, 293 if (!dir_emit(ctx, de->d_name, de->d_namelen,
294 de->d_ino, DT_UNKNOWN)) { 294 de->d_ino, DT_UNKNOWN)) {
295 vxfs_put_page(pp); 295 vxfs_put_page(pp);
@@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
301 vxfs_put_page(pp); 301 vxfs_put_page(pp);
302 offset = 0; 302 offset = 0;
303 } 303 }
304 ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; 304 ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
305 return 0; 305 return 0;
306} 306}
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 5d318c44f855..e806694d4145 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -50,7 +50,7 @@ inline void
50vxfs_put_page(struct page *pp) 50vxfs_put_page(struct page *pp)
51{ 51{
52 kunmap(pp); 52 kunmap(pp);
53 page_cache_release(pp); 53 put_page(pp);
54} 54}
55 55
56/** 56/**
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fee81e8768c9..592cea54cea0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -33,7 +33,7 @@
33/* 33/*
34 * 4MB minimal write chunk size 34 * 4MB minimal write chunk size
35 */ 35 */
36#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10)) 36#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
37 37
38struct wb_completion { 38struct wb_completion {
39 atomic_t cnt; 39 atomic_t cnt;
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4860a0..3078b679fcd1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -113,7 +113,7 @@ try_again:
113 113
114 wake_up_bit(&cookie->flags, 0); 114 wake_up_bit(&cookie->flags, 0);
115 if (xpage) 115 if (xpage)
116 page_cache_release(xpage); 116 put_page(xpage);
117 __fscache_uncache_page(cookie, page); 117 __fscache_uncache_page(cookie, page);
118 return true; 118 return true;
119 119
@@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object,
164 } 164 }
165 spin_unlock(&object->lock); 165 spin_unlock(&object->lock);
166 if (xpage) 166 if (xpage)
167 page_cache_release(xpage); 167 put_page(xpage);
168} 168}
169 169
170/* 170/*
@@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
884 spin_unlock(&cookie->stores_lock); 884 spin_unlock(&cookie->stores_lock);
885 885
886 for (i = n - 1; i >= 0; i--) 886 for (i = n - 1; i >= 0; i--)
887 page_cache_release(results[i]); 887 put_page(results[i]);
888 } 888 }
889 889
890 _leave(""); 890 _leave("");
@@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
982 982
983 radix_tree_tag_set(&cookie->stores, page->index, 983 radix_tree_tag_set(&cookie->stores, page->index,
984 FSCACHE_COOKIE_PENDING_TAG); 984 FSCACHE_COOKIE_PENDING_TAG);
985 page_cache_get(page); 985 get_page(page);
986 986
987 /* we only want one writer at a time, but we do need to queue new 987 /* we only want one writer at a time, but we do need to queue new
988 * writers after exclusive ops */ 988 * writers after exclusive ops */
@@ -1026,7 +1026,7 @@ submit_failed:
1026 radix_tree_delete(&cookie->stores, page->index); 1026 radix_tree_delete(&cookie->stores, page->index);
1027 spin_unlock(&cookie->stores_lock); 1027 spin_unlock(&cookie->stores_lock);
1028 wake_cookie = __fscache_unuse_cookie(cookie); 1028 wake_cookie = __fscache_unuse_cookie(cookie);
1029 page_cache_release(page); 1029 put_page(page);
1030 ret = -ENOBUFS; 1030 ret = -ENOBUFS;
1031 goto nobufs; 1031 goto nobufs;
1032 1032
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ebb5e37455a0..cbece1221417 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
897 return err; 897 return err;
898 } 898 }
899 899
900 page_cache_get(newpage); 900 get_page(newpage);
901 901
902 if (!(buf->flags & PIPE_BUF_FLAG_LRU)) 902 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
903 lru_cache_add_file(newpage); 903 lru_cache_add_file(newpage);
@@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
912 912
913 if (err) { 913 if (err) {
914 unlock_page(newpage); 914 unlock_page(newpage);
915 page_cache_release(newpage); 915 put_page(newpage);
916 return err; 916 return err;
917 } 917 }
918 918
919 unlock_page(oldpage); 919 unlock_page(oldpage);
920 page_cache_release(oldpage); 920 put_page(oldpage);
921 cs->len = 0; 921 cs->len = 0;
922 922
923 return 0; 923 return 0;
@@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
951 fuse_copy_finish(cs); 951 fuse_copy_finish(cs);
952 952
953 buf = cs->pipebufs; 953 buf = cs->pipebufs;
954 page_cache_get(page); 954 get_page(page);
955 buf->page = page; 955 buf->page = page;
956 buf->offset = offset; 956 buf->offset = offset;
957 buf->len = count; 957 buf->len = count;
@@ -1435,7 +1435,7 @@ out_unlock:
1435 1435
1436out: 1436out:
1437 for (; page_nr < cs.nr_segs; page_nr++) 1437 for (; page_nr < cs.nr_segs; page_nr++)
1438 page_cache_release(bufs[page_nr].page); 1438 put_page(bufs[page_nr].page);
1439 1439
1440 kfree(bufs); 1440 kfree(bufs);
1441 return ret; 1441 return ret;
@@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1632 goto out_up_killsb; 1632 goto out_up_killsb;
1633 1633
1634 mapping = inode->i_mapping; 1634 mapping = inode->i_mapping;
1635 index = outarg.offset >> PAGE_CACHE_SHIFT; 1635 index = outarg.offset >> PAGE_SHIFT;
1636 offset = outarg.offset & ~PAGE_CACHE_MASK; 1636 offset = outarg.offset & ~PAGE_MASK;
1637 file_size = i_size_read(inode); 1637 file_size = i_size_read(inode);
1638 end = outarg.offset + outarg.size; 1638 end = outarg.offset + outarg.size;
1639 if (end > file_size) { 1639 if (end > file_size) {
@@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1652 if (!page) 1652 if (!page)
1653 goto out_iput; 1653 goto out_iput;
1654 1654
1655 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); 1655 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1656 err = fuse_copy_page(cs, &page, offset, this_num, 0); 1656 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1657 if (!err && offset == 0 && 1657 if (!err && offset == 0 &&
1658 (this_num == PAGE_CACHE_SIZE || file_size == end)) 1658 (this_num == PAGE_SIZE || file_size == end))
1659 SetPageUptodate(page); 1659 SetPageUptodate(page);
1660 unlock_page(page); 1660 unlock_page(page);
1661 page_cache_release(page); 1661 put_page(page);
1662 1662
1663 if (err) 1663 if (err)
1664 goto out_iput; 1664 goto out_iput;
@@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1697 size_t total_len = 0; 1697 size_t total_len = 0;
1698 int num_pages; 1698 int num_pages;
1699 1699
1700 offset = outarg->offset & ~PAGE_CACHE_MASK; 1700 offset = outarg->offset & ~PAGE_MASK;
1701 file_size = i_size_read(inode); 1701 file_size = i_size_read(inode);
1702 1702
1703 num = outarg->size; 1703 num = outarg->size;
@@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1720 req->page_descs[0].offset = offset; 1720 req->page_descs[0].offset = offset;
1721 req->end = fuse_retrieve_end; 1721 req->end = fuse_retrieve_end;
1722 1722
1723 index = outarg->offset >> PAGE_CACHE_SHIFT; 1723 index = outarg->offset >> PAGE_SHIFT;
1724 1724
1725 while (num && req->num_pages < num_pages) { 1725 while (num && req->num_pages < num_pages) {
1726 struct page *page; 1726 struct page *page;
@@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1730 if (!page) 1730 if (!page)
1731 break; 1731 break;
1732 1732
1733 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); 1733 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1734 req->pages[req->num_pages] = page; 1734 req->pages[req->num_pages] = page;
1735 req->page_descs[req->num_pages].length = this_num; 1735 req->page_descs[req->num_pages].length = this_num;
1736 req->num_pages++; 1736 req->num_pages++;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9dde38f12c07..719924d6c706 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
348 pgoff_t curr_index; 348 pgoff_t curr_index;
349 349
350 BUG_ON(req->inode != inode); 350 BUG_ON(req->inode != inode);
351 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 351 curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
352 if (idx_from < curr_index + req->num_pages && 352 if (idx_from < curr_index + req->num_pages &&
353 curr_index <= idx_to) { 353 curr_index <= idx_to) {
354 found = true; 354 found = true;
@@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
683 * present there. 683 * present there.
684 */ 684 */
685 int i; 685 int i;
686 int start_idx = num_read >> PAGE_CACHE_SHIFT; 686 int start_idx = num_read >> PAGE_SHIFT;
687 size_t off = num_read & (PAGE_CACHE_SIZE - 1); 687 size_t off = num_read & (PAGE_SIZE - 1);
688 688
689 for (i = start_idx; i < req->num_pages; i++) { 689 for (i = start_idx; i < req->num_pages; i++) {
690 zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); 690 zero_user_segment(req->pages[i], off, PAGE_SIZE);
691 off = 0; 691 off = 0;
692 } 692 }
693 } else { 693 } else {
@@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page)
704 struct fuse_req *req; 704 struct fuse_req *req;
705 size_t num_read; 705 size_t num_read;
706 loff_t pos = page_offset(page); 706 loff_t pos = page_offset(page);
707 size_t count = PAGE_CACHE_SIZE; 707 size_t count = PAGE_SIZE;
708 u64 attr_ver; 708 u64 attr_ver;
709 int err; 709 int err;
710 710
@@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
789 else 789 else
790 SetPageError(page); 790 SetPageError(page);
791 unlock_page(page); 791 unlock_page(page);
792 page_cache_release(page); 792 put_page(page);
793 } 793 }
794 if (req->ff) 794 if (req->ff)
795 fuse_file_put(req->ff, false); 795 fuse_file_put(req->ff, false);
@@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
800 struct fuse_file *ff = file->private_data; 800 struct fuse_file *ff = file->private_data;
801 struct fuse_conn *fc = ff->fc; 801 struct fuse_conn *fc = ff->fc;
802 loff_t pos = page_offset(req->pages[0]); 802 loff_t pos = page_offset(req->pages[0]);
803 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 803 size_t count = req->num_pages << PAGE_SHIFT;
804 804
805 req->out.argpages = 1; 805 req->out.argpages = 1;
806 req->out.page_zeroing = 1; 806 req->out.page_zeroing = 1;
@@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
836 836
837 if (req->num_pages && 837 if (req->num_pages &&
838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
839 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 839 (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
840 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 840 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
841 int nr_alloc = min_t(unsigned, data->nr_pages, 841 int nr_alloc = min_t(unsigned, data->nr_pages,
842 FUSE_MAX_PAGES_PER_REQ); 842 FUSE_MAX_PAGES_PER_REQ);
@@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
858 return -EIO; 858 return -EIO;
859 } 859 }
860 860
861 page_cache_get(page); 861 get_page(page);
862 req->pages[req->num_pages] = page; 862 req->pages[req->num_pages] = page;
863 req->page_descs[req->num_pages].length = PAGE_SIZE; 863 req->page_descs[req->num_pages].length = PAGE_SIZE;
864 req->num_pages++; 864 req->num_pages++;
@@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
1003 for (i = 0; i < req->num_pages; i++) { 1003 for (i = 0; i < req->num_pages; i++) {
1004 struct page *page = req->pages[i]; 1004 struct page *page = req->pages[i];
1005 1005
1006 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) 1006 if (!req->out.h.error && !offset && count >= PAGE_SIZE)
1007 SetPageUptodate(page); 1007 SetPageUptodate(page);
1008 1008
1009 if (count > PAGE_CACHE_SIZE - offset) 1009 if (count > PAGE_SIZE - offset)
1010 count -= PAGE_CACHE_SIZE - offset; 1010 count -= PAGE_SIZE - offset;
1011 else 1011 else
1012 count = 0; 1012 count = 0;
1013 offset = 0; 1013 offset = 0;
1014 1014
1015 unlock_page(page); 1015 unlock_page(page);
1016 page_cache_release(page); 1016 put_page(page);
1017 } 1017 }
1018 1018
1019 return res; 1019 return res;
@@ -1024,7 +1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1024 struct iov_iter *ii, loff_t pos) 1024 struct iov_iter *ii, loff_t pos)
1025{ 1025{
1026 struct fuse_conn *fc = get_fuse_conn(mapping->host); 1026 struct fuse_conn *fc = get_fuse_conn(mapping->host);
1027 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 1027 unsigned offset = pos & (PAGE_SIZE - 1);
1028 size_t count = 0; 1028 size_t count = 0;
1029 int err; 1029 int err;
1030 1030
@@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1034 do { 1034 do {
1035 size_t tmp; 1035 size_t tmp;
1036 struct page *page; 1036 struct page *page;
1037 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1037 pgoff_t index = pos >> PAGE_SHIFT;
1038 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, 1038 size_t bytes = min_t(size_t, PAGE_SIZE - offset,
1039 iov_iter_count(ii)); 1039 iov_iter_count(ii));
1040 1040
1041 bytes = min_t(size_t, bytes, fc->max_write - count); 1041 bytes = min_t(size_t, bytes, fc->max_write - count);
@@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1059 iov_iter_advance(ii, tmp); 1059 iov_iter_advance(ii, tmp);
1060 if (!tmp) { 1060 if (!tmp) {
1061 unlock_page(page); 1061 unlock_page(page);
1062 page_cache_release(page); 1062 put_page(page);
1063 bytes = min(bytes, iov_iter_single_seg_count(ii)); 1063 bytes = min(bytes, iov_iter_single_seg_count(ii));
1064 goto again; 1064 goto again;
1065 } 1065 }
@@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1072 count += tmp; 1072 count += tmp;
1073 pos += tmp; 1073 pos += tmp;
1074 offset += tmp; 1074 offset += tmp;
1075 if (offset == PAGE_CACHE_SIZE) 1075 if (offset == PAGE_SIZE)
1076 offset = 0; 1076 offset = 0;
1077 1077
1078 if (!fc->big_writes) 1078 if (!fc->big_writes)
@@ -1086,8 +1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1086static inline unsigned fuse_wr_pages(loff_t pos, size_t len) 1086static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
1087{ 1087{
1088 return min_t(unsigned, 1088 return min_t(unsigned,
1089 ((pos + len - 1) >> PAGE_CACHE_SHIFT) - 1089 ((pos + len - 1) >> PAGE_SHIFT) -
1090 (pos >> PAGE_CACHE_SHIFT) + 1, 1090 (pos >> PAGE_SHIFT) + 1,
1091 FUSE_MAX_PAGES_PER_REQ); 1091 FUSE_MAX_PAGES_PER_REQ);
1092} 1092}
1093 1093
@@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1205 goto out; 1205 goto out;
1206 1206
1207 invalidate_mapping_pages(file->f_mapping, 1207 invalidate_mapping_pages(file->f_mapping,
1208 pos >> PAGE_CACHE_SHIFT, 1208 pos >> PAGE_SHIFT,
1209 endbyte >> PAGE_CACHE_SHIFT); 1209 endbyte >> PAGE_SHIFT);
1210 1210
1211 written += written_buffered; 1211 written += written_buffered;
1212 iocb->ki_pos = pos + written_buffered; 1212 iocb->ki_pos = pos + written_buffered;
@@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1315 size_t nmax = write ? fc->max_write : fc->max_read; 1315 size_t nmax = write ? fc->max_write : fc->max_read;
1316 loff_t pos = *ppos; 1316 loff_t pos = *ppos;
1317 size_t count = iov_iter_count(iter); 1317 size_t count = iov_iter_count(iter);
1318 pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT; 1318 pgoff_t idx_from = pos >> PAGE_SHIFT;
1319 pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT; 1319 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
1320 ssize_t res = 0; 1320 ssize_t res = 0;
1321 struct fuse_req *req; 1321 struct fuse_req *req;
1322 int err = 0; 1322 int err = 0;
@@ -1466,7 +1466,7 @@ __acquires(fc->lock)
1466{ 1466{
1467 struct fuse_inode *fi = get_fuse_inode(req->inode); 1467 struct fuse_inode *fi = get_fuse_inode(req->inode);
1468 struct fuse_write_in *inarg = &req->misc.write.in; 1468 struct fuse_write_in *inarg = &req->misc.write.in;
1469 __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; 1469 __u64 data_size = req->num_pages * PAGE_SIZE;
1470 1470
1471 if (!fc->connected) 1471 if (!fc->connected)
1472 goto out_free; 1472 goto out_free;
@@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1727 list_del(&new_req->writepages_entry); 1727 list_del(&new_req->writepages_entry);
1728 list_for_each_entry(old_req, &fi->writepages, writepages_entry) { 1728 list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
1729 BUG_ON(old_req->inode != new_req->inode); 1729 BUG_ON(old_req->inode != new_req->inode);
1730 curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 1730 curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
1731 if (curr_index <= page->index && 1731 if (curr_index <= page->index &&
1732 page->index < curr_index + old_req->num_pages) { 1732 page->index < curr_index + old_req->num_pages) {
1733 found = true; 1733 found = true;
@@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1742 new_req->num_pages = 1; 1742 new_req->num_pages = 1;
1743 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { 1743 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
1744 BUG_ON(tmp->inode != new_req->inode); 1744 BUG_ON(tmp->inode != new_req->inode);
1745 curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; 1745 curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
1746 if (tmp->num_pages == 1 && 1746 if (tmp->num_pages == 1 &&
1747 curr_index == page->index) { 1747 curr_index == page->index) {
1748 old_req = tmp; 1748 old_req = tmp;
@@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page,
1799 1799
1800 if (req && req->num_pages && 1800 if (req && req->num_pages &&
1801 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || 1801 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
1802 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || 1802 (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
1803 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { 1803 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
1804 fuse_writepages_send(data); 1804 fuse_writepages_send(data);
1805 data->req = NULL; 1805 data->req = NULL;
@@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
1924 loff_t pos, unsigned len, unsigned flags, 1924 loff_t pos, unsigned len, unsigned flags,
1925 struct page **pagep, void **fsdata) 1925 struct page **pagep, void **fsdata)
1926{ 1926{
1927 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1927 pgoff_t index = pos >> PAGE_SHIFT;
1928 struct fuse_conn *fc = get_fuse_conn(file_inode(file)); 1928 struct fuse_conn *fc = get_fuse_conn(file_inode(file));
1929 struct page *page; 1929 struct page *page;
1930 loff_t fsize; 1930 loff_t fsize;
@@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
1938 1938
1939 fuse_wait_on_page_writeback(mapping->host, page->index); 1939 fuse_wait_on_page_writeback(mapping->host, page->index);
1940 1940
1941 if (PageUptodate(page) || len == PAGE_CACHE_SIZE) 1941 if (PageUptodate(page) || len == PAGE_SIZE)
1942 goto success; 1942 goto success;
1943 /* 1943 /*
1944 * Check if the start this page comes after the end of file, in which 1944 * Check if the start this page comes after the end of file, in which
1945 * case the readpage can be optimized away. 1945 * case the readpage can be optimized away.
1946 */ 1946 */
1947 fsize = i_size_read(mapping->host); 1947 fsize = i_size_read(mapping->host);
1948 if (fsize <= (pos & PAGE_CACHE_MASK)) { 1948 if (fsize <= (pos & PAGE_MASK)) {
1949 size_t off = pos & ~PAGE_CACHE_MASK; 1949 size_t off = pos & ~PAGE_MASK;
1950 if (off) 1950 if (off)
1951 zero_user_segment(page, 0, off); 1951 zero_user_segment(page, 0, off);
1952 goto success; 1952 goto success;
@@ -1960,7 +1960,7 @@ success:
1960 1960
1961cleanup: 1961cleanup:
1962 unlock_page(page); 1962 unlock_page(page);
1963 page_cache_release(page); 1963 put_page(page);
1964error: 1964error:
1965 return err; 1965 return err;
1966} 1966}
@@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1973 1973
1974 if (!PageUptodate(page)) { 1974 if (!PageUptodate(page)) {
1975 /* Zero any unwritten bytes at the end of the page */ 1975 /* Zero any unwritten bytes at the end of the page */
1976 size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; 1976 size_t endoff = (pos + copied) & ~PAGE_MASK;
1977 if (endoff) 1977 if (endoff)
1978 zero_user_segment(page, endoff, PAGE_CACHE_SIZE); 1978 zero_user_segment(page, endoff, PAGE_SIZE);
1979 SetPageUptodate(page); 1979 SetPageUptodate(page);
1980 } 1980 }
1981 1981
1982 fuse_write_update_size(inode, pos + copied); 1982 fuse_write_update_size(inode, pos + copied);
1983 set_page_dirty(page); 1983 set_page_dirty(page);
1984 unlock_page(page); 1984 unlock_page(page);
1985 page_cache_release(page); 1985 put_page(page);
1986 1986
1987 return copied; 1987 return copied;
1988} 1988}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4d69d5c0bedc..1ce67668a8e1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
339 339
340 fuse_invalidate_attr(inode); 340 fuse_invalidate_attr(inode);
341 if (offset >= 0) { 341 if (offset >= 0) {
342 pg_start = offset >> PAGE_CACHE_SHIFT; 342 pg_start = offset >> PAGE_SHIFT;
343 if (len <= 0) 343 if (len <= 0)
344 pg_end = -1; 344 pg_end = -1;
345 else 345 else
346 pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; 346 pg_end = (offset + len - 1) >> PAGE_SHIFT;
347 invalidate_inode_pages2_range(inode->i_mapping, 347 invalidate_inode_pages2_range(inode->i_mapping,
348 pg_start, pg_end); 348 pg_start, pg_end);
349 } 349 }
@@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
864 process_init_limits(fc, arg); 864 process_init_limits(fc, arg);
865 865
866 if (arg->minor >= 6) { 866 if (arg->minor >= 6) {
867 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; 867 ra_pages = arg->max_readahead / PAGE_SIZE;
868 if (arg->flags & FUSE_ASYNC_READ) 868 if (arg->flags & FUSE_ASYNC_READ)
869 fc->async_read = 1; 869 fc->async_read = 1;
870 if (!(arg->flags & FUSE_POSIX_LOCKS)) 870 if (!(arg->flags & FUSE_POSIX_LOCKS))
@@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
901 if (arg->time_gran && arg->time_gran <= 1000000000) 901 if (arg->time_gran && arg->time_gran <= 1000000000)
902 fc->sb->s_time_gran = arg->time_gran; 902 fc->sb->s_time_gran = arg->time_gran;
903 } else { 903 } else {
904 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 904 ra_pages = fc->max_read / PAGE_SIZE;
905 fc->no_lock = 1; 905 fc->no_lock = 1;
906 fc->no_flock = 1; 906 fc->no_flock = 1;
907 } 907 }
@@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
922 922
923 arg->major = FUSE_KERNEL_VERSION; 923 arg->major = FUSE_KERNEL_VERSION;
924 arg->minor = FUSE_KERNEL_MINOR_VERSION; 924 arg->minor = FUSE_KERNEL_MINOR_VERSION;
925 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 925 arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
926 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | 926 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
927 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | 927 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
955 int err; 955 int err;
956 956
957 fc->bdi.name = "fuse"; 957 fc->bdi.name = "fuse";
958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
959 /* fuse does it's own writeback accounting */ 959 /* fuse does it's own writeback accounting */
960 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT; 960 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
961 961
@@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1053 goto err; 1053 goto err;
1054#endif 1054#endif
1055 } else { 1055 } else {
1056 sb->s_blocksize = PAGE_CACHE_SIZE; 1056 sb->s_blocksize = PAGE_SIZE;
1057 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1057 sb->s_blocksize_bits = PAGE_SHIFT;
1058 } 1058 }
1059 sb->s_magic = FUSE_SUPER_MAGIC; 1059 sb->s_magic = FUSE_SUPER_MAGIC;
1060 sb->s_op = &fuse_super_operations; 1060 sb->s_op = &fuse_super_operations;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aa016e4b8bec..1bbbee945f46 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page,
101 struct gfs2_inode *ip = GFS2_I(inode); 101 struct gfs2_inode *ip = GFS2_I(inode);
102 struct gfs2_sbd *sdp = GFS2_SB(inode); 102 struct gfs2_sbd *sdp = GFS2_SB(inode);
103 loff_t i_size = i_size_read(inode); 103 loff_t i_size = i_size_read(inode);
104 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 104 pgoff_t end_index = i_size >> PAGE_SHIFT;
105 unsigned offset; 105 unsigned offset;
106 106
107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) 107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
@@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page,
109 if (current->journal_info) 109 if (current->journal_info)
110 goto redirty; 110 goto redirty;
111 /* Is the page fully outside i_size? (truncate in progress) */ 111 /* Is the page fully outside i_size? (truncate in progress) */
112 offset = i_size & (PAGE_CACHE_SIZE-1); 112 offset = i_size & (PAGE_SIZE-1);
113 if (page->index > end_index || (page->index == end_index && !offset)) { 113 if (page->index > end_index || (page->index == end_index && !offset)) {
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
115 goto out; 115 goto out;
116 } 116 }
117 return 1; 117 return 1;
@@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
238{ 238{
239 struct inode *inode = mapping->host; 239 struct inode *inode = mapping->host;
240 struct gfs2_sbd *sdp = GFS2_SB(inode); 240 struct gfs2_sbd *sdp = GFS2_SB(inode);
241 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 241 unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
242 int i; 242 int i;
243 int ret; 243 int ret;
244 244
@@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
366 cycled = 0; 366 cycled = 0;
367 end = -1; 367 end = -1;
368 } else { 368 } else {
369 index = wbc->range_start >> PAGE_CACHE_SHIFT; 369 index = wbc->range_start >> PAGE_SHIFT;
370 end = wbc->range_end >> PAGE_CACHE_SHIFT; 370 end = wbc->range_end >> PAGE_SHIFT;
371 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 371 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
372 range_whole = 1; 372 range_whole = 1;
373 cycled = 1; /* ignore range_cyclic tests */ 373 cycled = 1; /* ignore range_cyclic tests */
@@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
458 * so we need to supply one here. It doesn't happen often. 458 * so we need to supply one here. It doesn't happen often.
459 */ 459 */
460 if (unlikely(page->index)) { 460 if (unlikely(page->index)) {
461 zero_user(page, 0, PAGE_CACHE_SIZE); 461 zero_user(page, 0, PAGE_SIZE);
462 SetPageUptodate(page); 462 SetPageUptodate(page);
463 return 0; 463 return 0;
464 } 464 }
@@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
471 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) 471 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
472 dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); 472 dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
473 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 473 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
474 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); 474 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
475 kunmap_atomic(kaddr); 475 kunmap_atomic(kaddr);
476 flush_dcache_page(page); 476 flush_dcache_page(page);
477 brelse(dibh); 477 brelse(dibh);
@@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
560 unsigned size) 560 unsigned size)
561{ 561{
562 struct address_space *mapping = ip->i_inode.i_mapping; 562 struct address_space *mapping = ip->i_inode.i_mapping;
563 unsigned long index = *pos / PAGE_CACHE_SIZE; 563 unsigned long index = *pos / PAGE_SIZE;
564 unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); 564 unsigned offset = *pos & (PAGE_SIZE - 1);
565 unsigned copied = 0; 565 unsigned copied = 0;
566 unsigned amt; 566 unsigned amt;
567 struct page *page; 567 struct page *page;
@@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
569 569
570 do { 570 do {
571 amt = size - copied; 571 amt = size - copied;
572 if (offset + size > PAGE_CACHE_SIZE) 572 if (offset + size > PAGE_SIZE)
573 amt = PAGE_CACHE_SIZE - offset; 573 amt = PAGE_SIZE - offset;
574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL); 574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
575 if (IS_ERR(page)) 575 if (IS_ERR(page))
576 return PTR_ERR(page); 576 return PTR_ERR(page);
577 p = kmap_atomic(page); 577 p = kmap_atomic(page);
578 memcpy(buf + copied, p + offset, amt); 578 memcpy(buf + copied, p + offset, amt);
579 kunmap_atomic(p); 579 kunmap_atomic(p);
580 page_cache_release(page); 580 put_page(page);
581 copied += amt; 581 copied += amt;
582 index++; 582 index++;
583 offset = 0; 583 offset = 0;
@@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
651 unsigned requested = 0; 651 unsigned requested = 0;
652 int alloc_required; 652 int alloc_required;
653 int error = 0; 653 int error = 0;
654 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 654 pgoff_t index = pos >> PAGE_SHIFT;
655 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 655 unsigned from = pos & (PAGE_SIZE - 1);
656 struct page *page; 656 struct page *page;
657 657
658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); 658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
697 rblocks += gfs2_rg_blocks(ip, requested); 697 rblocks += gfs2_rg_blocks(ip, requested);
698 698
699 error = gfs2_trans_begin(sdp, rblocks, 699 error = gfs2_trans_begin(sdp, rblocks,
700 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 700 PAGE_SIZE/sdp->sd_sb.sb_bsize);
701 if (error) 701 if (error)
702 goto out_trans_fail; 702 goto out_trans_fail;
703 703
@@ -727,7 +727,7 @@ out:
727 return 0; 727 return 0;
728 728
729 unlock_page(page); 729 unlock_page(page);
730 page_cache_release(page); 730 put_page(page);
731 731
732 gfs2_trans_end(sdp); 732 gfs2_trans_end(sdp);
733 if (pos + len > ip->i_inode.i_size) 733 if (pos + len > ip->i_inode.i_size)
@@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
827 if (!PageUptodate(page)) 827 if (!PageUptodate(page))
828 SetPageUptodate(page); 828 SetPageUptodate(page);
829 unlock_page(page); 829 unlock_page(page);
830 page_cache_release(page); 830 put_page(page);
831 831
832 if (copied) { 832 if (copied) {
833 if (inode->i_size < to) 833 if (inode->i_size < to)
@@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
877 struct gfs2_sbd *sdp = GFS2_SB(inode); 877 struct gfs2_sbd *sdp = GFS2_SB(inode);
878 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 878 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
879 struct buffer_head *dibh; 879 struct buffer_head *dibh;
880 unsigned int from = pos & (PAGE_CACHE_SIZE - 1); 880 unsigned int from = pos & (PAGE_SIZE - 1);
881 unsigned int to = from + len; 881 unsigned int to = from + len;
882 int ret; 882 int ret;
883 struct gfs2_trans *tr = current->journal_info; 883 struct gfs2_trans *tr = current->journal_info;
@@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
888 ret = gfs2_meta_inode_buffer(ip, &dibh); 888 ret = gfs2_meta_inode_buffer(ip, &dibh);
889 if (unlikely(ret)) { 889 if (unlikely(ret)) {
890 unlock_page(page); 890 unlock_page(page);
891 page_cache_release(page); 891 put_page(page);
892 goto failed; 892 goto failed;
893 } 893 }
894 894
@@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
992{ 992{
993 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); 993 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
994 unsigned int stop = offset + length; 994 unsigned int stop = offset + length;
995 int partial_page = (offset || length < PAGE_CACHE_SIZE); 995 int partial_page = (offset || length < PAGE_SIZE);
996 struct buffer_head *bh, *head; 996 struct buffer_head *bh, *head;
997 unsigned long pos = 0; 997 unsigned long pos = 0;
998 998
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
1082 * the first place, mapping->nr_pages will always be zero. 1082 * the first place, mapping->nr_pages will always be zero.
1083 */ 1083 */
1084 if (mapping->nrpages) { 1084 if (mapping->nrpages) {
1085 loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1); 1085 loff_t lstart = offset & ~(PAGE_SIZE - 1);
1086 loff_t len = iov_iter_count(iter); 1086 loff_t len = iov_iter_count(iter);
1087 loff_t end = PAGE_ALIGN(offset + len) - 1; 1087 loff_t end = PAGE_ALIGN(offset + len) - 1;
1088 1088
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 0860f0b5b3f1..24ce1cdd434a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
75 dsize = dibh->b_size - sizeof(struct gfs2_dinode); 75 dsize = dibh->b_size - sizeof(struct gfs2_dinode);
76 76
77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
78 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); 78 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
79 kunmap(page); 79 kunmap(page);
80 80
81 SetPageUptodate(page); 81 SetPageUptodate(page);
@@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
98 98
99 if (release) { 99 if (release) {
100 unlock_page(page); 100 unlock_page(page);
101 page_cache_release(page); 101 put_page(page);
102 } 102 }
103 103
104 return 0; 104 return 0;
@@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
932{ 932{
933 struct inode *inode = mapping->host; 933 struct inode *inode = mapping->host;
934 struct gfs2_inode *ip = GFS2_I(inode); 934 struct gfs2_inode *ip = GFS2_I(inode);
935 unsigned long index = from >> PAGE_CACHE_SHIFT; 935 unsigned long index = from >> PAGE_SHIFT;
936 unsigned offset = from & (PAGE_CACHE_SIZE-1); 936 unsigned offset = from & (PAGE_SIZE-1);
937 unsigned blocksize, iblock, length, pos; 937 unsigned blocksize, iblock, length, pos;
938 struct buffer_head *bh; 938 struct buffer_head *bh;
939 struct page *page; 939 struct page *page;
@@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
945 945
946 blocksize = inode->i_sb->s_blocksize; 946 blocksize = inode->i_sb->s_blocksize;
947 length = blocksize - (offset & (blocksize - 1)); 947 length = blocksize - (offset & (blocksize - 1));
948 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 948 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
949 949
950 if (!page_has_buffers(page)) 950 if (!page_has_buffers(page))
951 create_empty_buffers(page, blocksize, 0); 951 create_empty_buffers(page, blocksize, 0);
@@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
989 mark_buffer_dirty(bh); 989 mark_buffer_dirty(bh);
990unlock: 990unlock:
991 unlock_page(page); 991 unlock_page(page);
992 page_cache_release(page); 992 put_page(page);
993 return err; 993 return err;
994} 994}
995 995
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index c9384f932975..208efc70ad49 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page)
354{ 354{
355 struct inode *inode = page->mapping->host; 355 struct inode *inode = page->mapping->host;
356 struct buffer_head bh; 356 struct buffer_head bh;
357 unsigned long size = PAGE_CACHE_SIZE; 357 unsigned long size = PAGE_SIZE;
358 u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 358 u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
359 359
360 do { 360 do {
361 bh.b_state = 0; 361 bh.b_state = 0;
@@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
386 struct gfs2_sbd *sdp = GFS2_SB(inode); 386 struct gfs2_sbd *sdp = GFS2_SB(inode);
387 struct gfs2_alloc_parms ap = { .aflags = 0, }; 387 struct gfs2_alloc_parms ap = { .aflags = 0, };
388 unsigned long last_index; 388 unsigned long last_index;
389 u64 pos = page->index << PAGE_CACHE_SHIFT; 389 u64 pos = page->index << PAGE_SHIFT;
390 unsigned int data_blocks, ind_blocks, rblocks; 390 unsigned int data_blocks, ind_blocks, rblocks;
391 struct gfs2_holder gh; 391 struct gfs2_holder gh;
392 loff_t size; 392 loff_t size;
@@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
401 if (ret) 401 if (ret)
402 goto out; 402 goto out;
403 403
404 gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); 404 gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
405 405
406 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 406 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
407 ret = gfs2_glock_nq(&gh); 407 ret = gfs2_glock_nq(&gh);
@@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
411 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 411 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
412 set_bit(GIF_SW_PAGED, &ip->i_flags); 412 set_bit(GIF_SW_PAGED, &ip->i_flags);
413 413
414 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { 414 if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
415 lock_page(page); 415 lock_page(page);
416 if (!PageUptodate(page) || page->mapping != inode->i_mapping) { 416 if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
417 ret = -EAGAIN; 417 ret = -EAGAIN;
@@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
424 if (ret) 424 if (ret)
425 goto out_unlock; 425 goto out_unlock;
426 426
427 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); 427 gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
428 ap.target = data_blocks + ind_blocks; 428 ap.target = data_blocks + ind_blocks;
429 ret = gfs2_quota_lock_check(ip, &ap); 429 ret = gfs2_quota_lock_check(ip, &ap);
430 if (ret) 430 if (ret)
@@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
447 lock_page(page); 447 lock_page(page);
448 ret = -EINVAL; 448 ret = -EINVAL;
449 size = i_size_read(inode); 449 size = i_size_read(inode);
450 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 450 last_index = (size - 1) >> PAGE_SHIFT;
451 /* Check page index against inode size */ 451 /* Check page index against inode size */
452 if (size == 0 || (page->index > last_index)) 452 if (size == 0 || (page->index > last_index))
453 goto out_trans_end; 453 goto out_trans_end;
@@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
873 rblocks += data_blocks ? data_blocks : 1; 873 rblocks += data_blocks ? data_blocks : 1;
874 874
875 error = gfs2_trans_begin(sdp, rblocks, 875 error = gfs2_trans_begin(sdp, rblocks,
876 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 876 PAGE_SIZE/sdp->sd_sb.sb_bsize);
877 if (error) 877 if (error)
878 goto out_trans_fail; 878 goto out_trans_fail;
879 879
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index e137d96f1b17..0448524c11bc 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
124 if (mapping == NULL) 124 if (mapping == NULL)
125 mapping = &sdp->sd_aspace; 125 mapping = &sdp->sd_aspace;
126 126
127 shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift; 127 shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
128 index = blkno >> shift; /* convert block to page */ 128 index = blkno >> shift; /* convert block to page */
129 bufnum = blkno - (index << shift); /* block buf index within page */ 129 bufnum = blkno - (index << shift); /* block buf index within page */
130 130
@@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
154 map_bh(bh, sdp->sd_vfs, blkno); 154 map_bh(bh, sdp->sd_vfs, blkno);
155 155
156 unlock_page(page); 156 unlock_page(page);
157 page_cache_release(page); 157 put_page(page);
158 158
159 return bh; 159 return bh;
160} 160}
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a39891344259..ce7d69a2fdc0 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
701 unsigned to_write = bytes, pg_off = off; 701 unsigned to_write = bytes, pg_off = off;
702 int done = 0; 702 int done = 0;
703 703
704 blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift); 704 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
705 boff = off % bsize; 705 boff = off % bsize;
706 706
707 page = find_or_create_page(mapping, index, GFP_NOFS); 707 page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
753 flush_dcache_page(page); 753 flush_dcache_page(page);
754 kunmap_atomic(kaddr); 754 kunmap_atomic(kaddr);
755 unlock_page(page); 755 unlock_page(page);
756 page_cache_release(page); 756 put_page(page);
757 757
758 return 0; 758 return 0;
759 759
760unlock_out: 760unlock_out:
761 unlock_page(page); 761 unlock_page(page);
762 page_cache_release(page); 762 put_page(page);
763 return -EIO; 763 return -EIO;
764} 764}
765 765
@@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
773 773
774 nbytes = sizeof(struct gfs2_quota); 774 nbytes = sizeof(struct gfs2_quota);
775 775
776 pg_beg = loc >> PAGE_CACHE_SHIFT; 776 pg_beg = loc >> PAGE_SHIFT;
777 pg_off = loc % PAGE_CACHE_SIZE; 777 pg_off = loc % PAGE_SIZE;
778 778
779 /* If the quota straddles a page boundary, split the write in two */ 779 /* If the quota straddles a page boundary, split the write in two */
780 if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { 780 if ((pg_off + nbytes) > PAGE_SIZE) {
781 pg_oflow = 1; 781 pg_oflow = 1;
782 overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; 782 overflow = (pg_off + nbytes) - PAGE_SIZE;
783 } 783 }
784 784
785 ptr = qp; 785 ptr = qp;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 07c0265aa195..99a0bdac8796 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip)
918 goto fail; 918 goto fail;
919 919
920 rgd->rd_gl->gl_object = rgd; 920 rgd->rd_gl->gl_object = rgd;
921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK; 921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
922 rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr + 922 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
923 rgd->rd_length) * bsize) - 1;
924 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; 923 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
925 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED); 924 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
926 if (rgd->rd_data > sdp->sd_max_rg_data) 925 if (rgd->rd_data > sdp->sd_max_rg_data)
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719eac5de..d77d844b668b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
278 278
279 mapping = tree->inode->i_mapping; 279 mapping = tree->inode->i_mapping;
280 off = (loff_t)cnid * tree->node_size; 280 off = (loff_t)cnid * tree->node_size;
281 block = off >> PAGE_CACHE_SHIFT; 281 block = off >> PAGE_SHIFT;
282 node->page_offset = off & ~PAGE_CACHE_MASK; 282 node->page_offset = off & ~PAGE_MASK;
283 for (i = 0; i < tree->pages_per_bnode; i++) { 283 for (i = 0; i < tree->pages_per_bnode; i++) {
284 page = read_mapping_page(mapping, block++, NULL); 284 page = read_mapping_page(mapping, block++, NULL);
285 if (IS_ERR(page)) 285 if (IS_ERR(page))
286 goto fail; 286 goto fail;
287 if (PageError(page)) { 287 if (PageError(page)) {
288 page_cache_release(page); 288 put_page(page);
289 goto fail; 289 goto fail;
290 } 290 }
291 node->page[i] = page; 291 node->page[i] = page;
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
401 401
402 for (i = 0; i < node->tree->pages_per_bnode; i++) 402 for (i = 0; i < node->tree->pages_per_bnode; i++)
403 if (node->page[i]) 403 if (node->page[i])
404 page_cache_release(node->page[i]); 404 put_page(node->page[i]);
405 kfree(node); 405 kfree(node);
406} 406}
407 407
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
429 429
430 pagep = node->page; 430 pagep = node->page;
431 memset(kmap(*pagep) + node->page_offset, 0, 431 memset(kmap(*pagep) + node->page_offset, 0,
432 min((int)PAGE_CACHE_SIZE, (int)tree->node_size)); 432 min((int)PAGE_SIZE, (int)tree->node_size));
433 set_page_dirty(*pagep); 433 set_page_dirty(*pagep);
434 kunmap(*pagep); 434 kunmap(*pagep);
435 for (i = 1; i < tree->pages_per_bnode; i++) { 435 for (i = 1; i < tree->pages_per_bnode; i++) {
436 memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); 436 memset(kmap(*++pagep), 0, PAGE_SIZE);
437 set_page_dirty(*pagep); 437 set_page_dirty(*pagep);
438 kunmap(*pagep); 438 kunmap(*pagep);
439 } 439 }
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e69..37cdd955eceb 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
116 } 116 }
117 117
118 tree->node_size_shift = ffs(size) - 1; 118 tree->node_size_shift = ffs(size) - 1;
119 tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 119 tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
120 120
121 kunmap(page); 121 kunmap(page);
122 page_cache_release(page); 122 put_page(page);
123 return tree; 123 return tree;
124 124
125fail_page: 125fail_page:
126 page_cache_release(page); 126 put_page(page);
127free_inode: 127free_inode:
128 tree->inode->i_mapping->a_ops = &hfs_aops; 128 tree->inode->i_mapping->a_ops = &hfs_aops;
129 iput(tree->inode); 129 iput(tree->inode);
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
257 off = off16; 257 off = off16;
258 258
259 off += node->page_offset; 259 off += node->page_offset;
260 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 260 pagep = node->page + (off >> PAGE_SHIFT);
261 data = kmap(*pagep); 261 data = kmap(*pagep);
262 off &= ~PAGE_CACHE_MASK; 262 off &= ~PAGE_MASK;
263 idx = 0; 263 idx = 0;
264 264
265 for (;;) { 265 for (;;) {
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
279 } 279 }
280 } 280 }
281 } 281 }
282 if (++off >= PAGE_CACHE_SIZE) { 282 if (++off >= PAGE_SIZE) {
283 kunmap(*pagep); 283 kunmap(*pagep);
284 data = kmap(*++pagep); 284 data = kmap(*++pagep);
285 off = 0; 285 off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
302 len = hfs_brec_lenoff(node, 0, &off16); 302 len = hfs_brec_lenoff(node, 0, &off16);
303 off = off16; 303 off = off16;
304 off += node->page_offset; 304 off += node->page_offset;
305 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 305 pagep = node->page + (off >> PAGE_SHIFT);
306 data = kmap(*pagep); 306 data = kmap(*pagep);
307 off &= ~PAGE_CACHE_MASK; 307 off &= ~PAGE_MASK;
308 } 308 }
309} 309}
310 310
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
348 len = hfs_brec_lenoff(node, 0, &off); 348 len = hfs_brec_lenoff(node, 0, &off);
349 } 349 }
350 off += node->page_offset + nidx / 8; 350 off += node->page_offset + nidx / 8;
351 page = node->page[off >> PAGE_CACHE_SHIFT]; 351 page = node->page[off >> PAGE_SHIFT];
352 data = kmap(page); 352 data = kmap(page);
353 off &= ~PAGE_CACHE_MASK; 353 off &= ~PAGE_MASK;
354 m = 1 << (~nidx & 7); 354 m = 1 << (~nidx & 7);
355 byte = data[off]; 355 byte = data[off];
356 if (!(byte & m)) { 356 if (!(byte & m)) {
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6686bf39a5b5..cb1e5faa2fb7 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
91 if (!tree) 91 if (!tree)
92 return 0; 92 return 0;
93 93
94 if (tree->node_size >= PAGE_CACHE_SIZE) { 94 if (tree->node_size >= PAGE_SIZE) {
95 nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT); 95 nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
96 spin_lock(&tree->hash_lock); 96 spin_lock(&tree->hash_lock);
97 node = hfs_bnode_findhash(tree, nidx); 97 node = hfs_bnode_findhash(tree, nidx);
98 if (!node) 98 if (!node)
@@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
105 } 105 }
106 spin_unlock(&tree->hash_lock); 106 spin_unlock(&tree->hash_lock);
107 } else { 107 } else {
108 nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift); 108 nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
109 i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); 109 i = 1 << (PAGE_SHIFT - tree->node_size_shift);
110 spin_lock(&tree->hash_lock); 110 spin_lock(&tree->hash_lock);
111 do { 111 do {
112 node = hfs_bnode_findhash(tree, nidx++); 112 node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index d2954451519e..c0ae274c0a22 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -13,7 +13,7 @@
13#include "hfsplus_fs.h" 13#include "hfsplus_fs.h"
14#include "hfsplus_raw.h" 14#include "hfsplus_raw.h"
15 15
16#define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8) 16#define PAGE_CACHE_BITS (PAGE_SIZE * 8)
17 17
18int hfsplus_block_allocate(struct super_block *sb, u32 size, 18int hfsplus_block_allocate(struct super_block *sb, u32 size,
19 u32 offset, u32 *max) 19 u32 offset, u32 *max)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 63924662aaf3..ce014ceb89ef 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
24 int l; 24 int l;
25 25
26 off += node->page_offset; 26 off += node->page_offset;
27 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 27 pagep = node->page + (off >> PAGE_SHIFT);
28 off &= ~PAGE_CACHE_MASK; 28 off &= ~PAGE_MASK;
29 29
30 l = min_t(int, len, PAGE_CACHE_SIZE - off); 30 l = min_t(int, len, PAGE_SIZE - off);
31 memcpy(buf, kmap(*pagep) + off, l); 31 memcpy(buf, kmap(*pagep) + off, l);
32 kunmap(*pagep); 32 kunmap(*pagep);
33 33
34 while ((len -= l) != 0) { 34 while ((len -= l) != 0) {
35 buf += l; 35 buf += l;
36 l = min_t(int, len, PAGE_CACHE_SIZE); 36 l = min_t(int, len, PAGE_SIZE);
37 memcpy(buf, kmap(*++pagep), l); 37 memcpy(buf, kmap(*++pagep), l);
38 kunmap(*pagep); 38 kunmap(*pagep);
39 } 39 }
@@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
77 int l; 77 int l;
78 78
79 off += node->page_offset; 79 off += node->page_offset;
80 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 80 pagep = node->page + (off >> PAGE_SHIFT);
81 off &= ~PAGE_CACHE_MASK; 81 off &= ~PAGE_MASK;
82 82
83 l = min_t(int, len, PAGE_CACHE_SIZE - off); 83 l = min_t(int, len, PAGE_SIZE - off);
84 memcpy(kmap(*pagep) + off, buf, l); 84 memcpy(kmap(*pagep) + off, buf, l);
85 set_page_dirty(*pagep); 85 set_page_dirty(*pagep);
86 kunmap(*pagep); 86 kunmap(*pagep);
87 87
88 while ((len -= l) != 0) { 88 while ((len -= l) != 0) {
89 buf += l; 89 buf += l;
90 l = min_t(int, len, PAGE_CACHE_SIZE); 90 l = min_t(int, len, PAGE_SIZE);
91 memcpy(kmap(*++pagep), buf, l); 91 memcpy(kmap(*++pagep), buf, l);
92 set_page_dirty(*pagep); 92 set_page_dirty(*pagep);
93 kunmap(*pagep); 93 kunmap(*pagep);
@@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
107 int l; 107 int l;
108 108
109 off += node->page_offset; 109 off += node->page_offset;
110 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 110 pagep = node->page + (off >> PAGE_SHIFT);
111 off &= ~PAGE_CACHE_MASK; 111 off &= ~PAGE_MASK;
112 112
113 l = min_t(int, len, PAGE_CACHE_SIZE - off); 113 l = min_t(int, len, PAGE_SIZE - off);
114 memset(kmap(*pagep) + off, 0, l); 114 memset(kmap(*pagep) + off, 0, l);
115 set_page_dirty(*pagep); 115 set_page_dirty(*pagep);
116 kunmap(*pagep); 116 kunmap(*pagep);
117 117
118 while ((len -= l) != 0) { 118 while ((len -= l) != 0) {
119 l = min_t(int, len, PAGE_CACHE_SIZE); 119 l = min_t(int, len, PAGE_SIZE);
120 memset(kmap(*++pagep), 0, l); 120 memset(kmap(*++pagep), 0, l);
121 set_page_dirty(*pagep); 121 set_page_dirty(*pagep);
122 kunmap(*pagep); 122 kunmap(*pagep);
@@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
136 tree = src_node->tree; 136 tree = src_node->tree;
137 src += src_node->page_offset; 137 src += src_node->page_offset;
138 dst += dst_node->page_offset; 138 dst += dst_node->page_offset;
139 src_page = src_node->page + (src >> PAGE_CACHE_SHIFT); 139 src_page = src_node->page + (src >> PAGE_SHIFT);
140 src &= ~PAGE_CACHE_MASK; 140 src &= ~PAGE_MASK;
141 dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT); 141 dst_page = dst_node->page + (dst >> PAGE_SHIFT);
142 dst &= ~PAGE_CACHE_MASK; 142 dst &= ~PAGE_MASK;
143 143
144 if (src == dst) { 144 if (src == dst) {
145 l = min_t(int, len, PAGE_CACHE_SIZE - src); 145 l = min_t(int, len, PAGE_SIZE - src);
146 memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); 146 memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
147 kunmap(*src_page); 147 kunmap(*src_page);
148 set_page_dirty(*dst_page); 148 set_page_dirty(*dst_page);
149 kunmap(*dst_page); 149 kunmap(*dst_page);
150 150
151 while ((len -= l) != 0) { 151 while ((len -= l) != 0) {
152 l = min_t(int, len, PAGE_CACHE_SIZE); 152 l = min_t(int, len, PAGE_SIZE);
153 memcpy(kmap(*++dst_page), kmap(*++src_page), l); 153 memcpy(kmap(*++dst_page), kmap(*++src_page), l);
154 kunmap(*src_page); 154 kunmap(*src_page);
155 set_page_dirty(*dst_page); 155 set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
161 do { 161 do {
162 src_ptr = kmap(*src_page) + src; 162 src_ptr = kmap(*src_page) + src;
163 dst_ptr = kmap(*dst_page) + dst; 163 dst_ptr = kmap(*dst_page) + dst;
164 if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) { 164 if (PAGE_SIZE - src < PAGE_SIZE - dst) {
165 l = PAGE_CACHE_SIZE - src; 165 l = PAGE_SIZE - src;
166 src = 0; 166 src = 0;
167 dst += l; 167 dst += l;
168 } else { 168 } else {
169 l = PAGE_CACHE_SIZE - dst; 169 l = PAGE_SIZE - dst;
170 src += l; 170 src += l;
171 dst = 0; 171 dst = 0;
172 } 172 }
@@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
195 dst += node->page_offset; 195 dst += node->page_offset;
196 if (dst > src) { 196 if (dst > src) {
197 src += len - 1; 197 src += len - 1;
198 src_page = node->page + (src >> PAGE_CACHE_SHIFT); 198 src_page = node->page + (src >> PAGE_SHIFT);
199 src = (src & ~PAGE_CACHE_MASK) + 1; 199 src = (src & ~PAGE_MASK) + 1;
200 dst += len - 1; 200 dst += len - 1;
201 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); 201 dst_page = node->page + (dst >> PAGE_SHIFT);
202 dst = (dst & ~PAGE_CACHE_MASK) + 1; 202 dst = (dst & ~PAGE_MASK) + 1;
203 203
204 if (src == dst) { 204 if (src == dst) {
205 while (src < len) { 205 while (src < len) {
@@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
208 set_page_dirty(*dst_page); 208 set_page_dirty(*dst_page);
209 kunmap(*dst_page); 209 kunmap(*dst_page);
210 len -= src; 210 len -= src;
211 src = PAGE_CACHE_SIZE; 211 src = PAGE_SIZE;
212 src_page--; 212 src_page--;
213 dst_page--; 213 dst_page--;
214 } 214 }
@@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
226 dst_ptr = kmap(*dst_page) + dst; 226 dst_ptr = kmap(*dst_page) + dst;
227 if (src < dst) { 227 if (src < dst) {
228 l = src; 228 l = src;
229 src = PAGE_CACHE_SIZE; 229 src = PAGE_SIZE;
230 dst -= l; 230 dst -= l;
231 } else { 231 } else {
232 l = dst; 232 l = dst;
233 src -= l; 233 src -= l;
234 dst = PAGE_CACHE_SIZE; 234 dst = PAGE_SIZE;
235 } 235 }
236 l = min(len, l); 236 l = min(len, l);
237 memmove(dst_ptr - l, src_ptr - l, l); 237 memmove(dst_ptr - l, src_ptr - l, l);
238 kunmap(*src_page); 238 kunmap(*src_page);
239 set_page_dirty(*dst_page); 239 set_page_dirty(*dst_page);
240 kunmap(*dst_page); 240 kunmap(*dst_page);
241 if (dst == PAGE_CACHE_SIZE) 241 if (dst == PAGE_SIZE)
242 dst_page--; 242 dst_page--;
243 else 243 else
244 src_page--; 244 src_page--;
245 } while ((len -= l)); 245 } while ((len -= l));
246 } 246 }
247 } else { 247 } else {
248 src_page = node->page + (src >> PAGE_CACHE_SHIFT); 248 src_page = node->page + (src >> PAGE_SHIFT);
249 src &= ~PAGE_CACHE_MASK; 249 src &= ~PAGE_MASK;
250 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); 250 dst_page = node->page + (dst >> PAGE_SHIFT);
251 dst &= ~PAGE_CACHE_MASK; 251 dst &= ~PAGE_MASK;
252 252
253 if (src == dst) { 253 if (src == dst) {
254 l = min_t(int, len, PAGE_CACHE_SIZE - src); 254 l = min_t(int, len, PAGE_SIZE - src);
255 memmove(kmap(*dst_page) + src, 255 memmove(kmap(*dst_page) + src,
256 kmap(*src_page) + src, l); 256 kmap(*src_page) + src, l);
257 kunmap(*src_page); 257 kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
259 kunmap(*dst_page); 259 kunmap(*dst_page);
260 260
261 while ((len -= l) != 0) { 261 while ((len -= l) != 0) {
262 l = min_t(int, len, PAGE_CACHE_SIZE); 262 l = min_t(int, len, PAGE_SIZE);
263 memmove(kmap(*++dst_page), 263 memmove(kmap(*++dst_page),
264 kmap(*++src_page), l); 264 kmap(*++src_page), l);
265 kunmap(*src_page); 265 kunmap(*src_page);
@@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
272 do { 272 do {
273 src_ptr = kmap(*src_page) + src; 273 src_ptr = kmap(*src_page) + src;
274 dst_ptr = kmap(*dst_page) + dst; 274 dst_ptr = kmap(*dst_page) + dst;
275 if (PAGE_CACHE_SIZE - src < 275 if (PAGE_SIZE - src <
276 PAGE_CACHE_SIZE - dst) { 276 PAGE_SIZE - dst) {
277 l = PAGE_CACHE_SIZE - src; 277 l = PAGE_SIZE - src;
278 src = 0; 278 src = 0;
279 dst += l; 279 dst += l;
280 } else { 280 } else {
281 l = PAGE_CACHE_SIZE - dst; 281 l = PAGE_SIZE - dst;
282 src += l; 282 src += l;
283 dst = 0; 283 dst = 0;
284 } 284 }
@@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
444 444
445 mapping = tree->inode->i_mapping; 445 mapping = tree->inode->i_mapping;
446 off = (loff_t)cnid << tree->node_size_shift; 446 off = (loff_t)cnid << tree->node_size_shift;
447 block = off >> PAGE_CACHE_SHIFT; 447 block = off >> PAGE_SHIFT;
448 node->page_offset = off & ~PAGE_CACHE_MASK; 448 node->page_offset = off & ~PAGE_MASK;
449 for (i = 0; i < tree->pages_per_bnode; block++, i++) { 449 for (i = 0; i < tree->pages_per_bnode; block++, i++) {
450 page = read_mapping_page(mapping, block, NULL); 450 page = read_mapping_page(mapping, block, NULL);
451 if (IS_ERR(page)) 451 if (IS_ERR(page))
452 goto fail; 452 goto fail;
453 if (PageError(page)) { 453 if (PageError(page)) {
454 page_cache_release(page); 454 put_page(page);
455 goto fail; 455 goto fail;
456 } 456 }
457 node->page[i] = page; 457 node->page[i] = page;
@@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
569 569
570 for (i = 0; i < node->tree->pages_per_bnode; i++) 570 for (i = 0; i < node->tree->pages_per_bnode; i++)
571 if (node->page[i]) 571 if (node->page[i])
572 page_cache_release(node->page[i]); 572 put_page(node->page[i]);
573 kfree(node); 573 kfree(node);
574} 574}
575 575
@@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
597 597
598 pagep = node->page; 598 pagep = node->page;
599 memset(kmap(*pagep) + node->page_offset, 0, 599 memset(kmap(*pagep) + node->page_offset, 0,
600 min_t(int, PAGE_CACHE_SIZE, tree->node_size)); 600 min_t(int, PAGE_SIZE, tree->node_size));
601 set_page_dirty(*pagep); 601 set_page_dirty(*pagep);
602 kunmap(*pagep); 602 kunmap(*pagep);
603 for (i = 1; i < tree->pages_per_bnode; i++) { 603 for (i = 1; i < tree->pages_per_bnode; i++) {
604 memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); 604 memset(kmap(*++pagep), 0, PAGE_SIZE);
605 set_page_dirty(*pagep); 605 set_page_dirty(*pagep);
606 kunmap(*pagep); 606 kunmap(*pagep);
607 } 607 }
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 3345c7553edc..d9d1a36ba826 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
236 tree->node_size_shift = ffs(size) - 1; 236 tree->node_size_shift = ffs(size) - 1;
237 237
238 tree->pages_per_bnode = 238 tree->pages_per_bnode =
239 (tree->node_size + PAGE_CACHE_SIZE - 1) >> 239 (tree->node_size + PAGE_SIZE - 1) >>
240 PAGE_CACHE_SHIFT; 240 PAGE_SHIFT;
241 241
242 kunmap(page); 242 kunmap(page);
243 page_cache_release(page); 243 put_page(page);
244 return tree; 244 return tree;
245 245
246 fail_page: 246 fail_page:
247 page_cache_release(page); 247 put_page(page);
248 free_inode: 248 free_inode:
249 tree->inode->i_mapping->a_ops = &hfsplus_aops; 249 tree->inode->i_mapping->a_ops = &hfsplus_aops;
250 iput(tree->inode); 250 iput(tree->inode);
@@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
380 off = off16; 380 off = off16;
381 381
382 off += node->page_offset; 382 off += node->page_offset;
383 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 383 pagep = node->page + (off >> PAGE_SHIFT);
384 data = kmap(*pagep); 384 data = kmap(*pagep);
385 off &= ~PAGE_CACHE_MASK; 385 off &= ~PAGE_MASK;
386 idx = 0; 386 idx = 0;
387 387
388 for (;;) { 388 for (;;) {
@@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
403 } 403 }
404 } 404 }
405 } 405 }
406 if (++off >= PAGE_CACHE_SIZE) { 406 if (++off >= PAGE_SIZE) {
407 kunmap(*pagep); 407 kunmap(*pagep);
408 data = kmap(*++pagep); 408 data = kmap(*++pagep);
409 off = 0; 409 off = 0;
@@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
426 len = hfs_brec_lenoff(node, 0, &off16); 426 len = hfs_brec_lenoff(node, 0, &off16);
427 off = off16; 427 off = off16;
428 off += node->page_offset; 428 off += node->page_offset;
429 pagep = node->page + (off >> PAGE_CACHE_SHIFT); 429 pagep = node->page + (off >> PAGE_SHIFT);
430 data = kmap(*pagep); 430 data = kmap(*pagep);
431 off &= ~PAGE_CACHE_MASK; 431 off &= ~PAGE_MASK;
432 } 432 }
433} 433}
434 434
@@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
475 len = hfs_brec_lenoff(node, 0, &off); 475 len = hfs_brec_lenoff(node, 0, &off);
476 } 476 }
477 off += node->page_offset + nidx / 8; 477 off += node->page_offset + nidx / 8;
478 page = node->page[off >> PAGE_CACHE_SHIFT]; 478 page = node->page[off >> PAGE_SHIFT];
479 data = kmap(page); 479 data = kmap(page);
480 off &= ~PAGE_CACHE_MASK; 480 off &= ~PAGE_MASK;
481 m = 1 << (~nidx & 7); 481 m = 1 << (~nidx & 7);
482 byte = data[off]; 482 byte = data[off];
483 if (!(byte & m)) { 483 if (!(byte & m)) {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1a6394cdb54e..b28f39865c3a 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
87 } 87 }
88 if (!tree) 88 if (!tree)
89 return 0; 89 return 0;
90 if (tree->node_size >= PAGE_CACHE_SIZE) { 90 if (tree->node_size >= PAGE_SIZE) {
91 nidx = page->index >> 91 nidx = page->index >>
92 (tree->node_size_shift - PAGE_CACHE_SHIFT); 92 (tree->node_size_shift - PAGE_SHIFT);
93 spin_lock(&tree->hash_lock); 93 spin_lock(&tree->hash_lock);
94 node = hfs_bnode_findhash(tree, nidx); 94 node = hfs_bnode_findhash(tree, nidx);
95 if (!node) 95 if (!node)
@@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
103 spin_unlock(&tree->hash_lock); 103 spin_unlock(&tree->hash_lock);
104 } else { 104 } else {
105 nidx = page->index << 105 nidx = page->index <<
106 (PAGE_CACHE_SHIFT - tree->node_size_shift); 106 (PAGE_SHIFT - tree->node_size_shift);
107 i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); 107 i = 1 << (PAGE_SHIFT - tree->node_size_shift);
108 spin_lock(&tree->hash_lock); 108 spin_lock(&tree->hash_lock);
109 do { 109 do {
110 node = hfs_bnode_findhash(tree, nidx++); 110 node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 5d54490a136d..c35911362ff9 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
438 err = -EFBIG; 438 err = -EFBIG;
439 last_fs_block = sbi->total_blocks - 1; 439 last_fs_block = sbi->total_blocks - 1;
440 last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> 440 last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
441 PAGE_CACHE_SHIFT; 441 PAGE_SHIFT;
442 442
443 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || 443 if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
444 (last_fs_page > (pgoff_t)(~0ULL))) { 444 (last_fs_page > (pgoff_t)(~0ULL))) {
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index ab01530b4930..70e445ff0cff 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -220,7 +220,7 @@ check_attr_tree_state_again:
220 220
221 index = 0; 221 index = 0;
222 written = 0; 222 written = 0;
223 for (; written < node_size; index++, written += PAGE_CACHE_SIZE) { 223 for (; written < node_size; index++, written += PAGE_SIZE) {
224 void *kaddr; 224 void *kaddr;
225 225
226 page = read_mapping_page(mapping, index, NULL); 226 page = read_mapping_page(mapping, index, NULL);
@@ -231,11 +231,11 @@ check_attr_tree_state_again:
231 231
232 kaddr = kmap_atomic(page); 232 kaddr = kmap_atomic(page);
233 memcpy(kaddr, buf + written, 233 memcpy(kaddr, buf + written,
234 min_t(size_t, PAGE_CACHE_SIZE, node_size - written)); 234 min_t(size_t, PAGE_SIZE, node_size - written));
235 kunmap_atomic(kaddr); 235 kunmap_atomic(kaddr);
236 236
237 set_page_dirty(page); 237 set_page_dirty(page);
238 page_cache_release(page); 238 put_page(page);
239 } 239 }
240 240
241 hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY); 241 hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d1abbee281d1..7016653f3e41 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
410 struct inode *inode = mapping->host; 410 struct inode *inode = mapping->host;
411 char *buffer; 411 char *buffer;
412 loff_t base = page_offset(page); 412 loff_t base = page_offset(page);
413 int count = PAGE_CACHE_SIZE; 413 int count = PAGE_SIZE;
414 int end_index = inode->i_size >> PAGE_CACHE_SHIFT; 414 int end_index = inode->i_size >> PAGE_SHIFT;
415 int err; 415 int err;
416 416
417 if (page->index >= end_index) 417 if (page->index >= end_index)
418 count = inode->i_size & (PAGE_CACHE_SIZE-1); 418 count = inode->i_size & (PAGE_SIZE-1);
419 419
420 buffer = kmap(page); 420 buffer = kmap(page);
421 421
@@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
447 447
448 buffer = kmap(page); 448 buffer = kmap(page);
449 bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, 449 bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
450 PAGE_CACHE_SIZE); 450 PAGE_SIZE);
451 if (bytes_read < 0) { 451 if (bytes_read < 0) {
452 ClearPageUptodate(page); 452 ClearPageUptodate(page);
453 SetPageError(page); 453 SetPageError(page);
@@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
455 goto out; 455 goto out;
456 } 456 }
457 457
458 memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); 458 memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
459 459
460 ClearPageError(page); 460 ClearPageError(page);
461 SetPageUptodate(page); 461 SetPageUptodate(page);
@@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
471 loff_t pos, unsigned len, unsigned flags, 471 loff_t pos, unsigned len, unsigned flags,
472 struct page **pagep, void **fsdata) 472 struct page **pagep, void **fsdata)
473{ 473{
474 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 474 pgoff_t index = pos >> PAGE_SHIFT;
475 475
476 *pagep = grab_cache_page_write_begin(mapping, index, flags); 476 *pagep = grab_cache_page_write_begin(mapping, index, flags);
477 if (!*pagep) 477 if (!*pagep)
@@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
485{ 485{
486 struct inode *inode = mapping->host; 486 struct inode *inode = mapping->host;
487 void *buffer; 487 void *buffer;
488 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 488 unsigned from = pos & (PAGE_SIZE - 1);
489 int err; 489 int err;
490 490
491 buffer = kmap(page); 491 buffer = kmap(page);
492 err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); 492 err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
493 kunmap(page); 493 kunmap(page);
494 494
495 if (!PageUptodate(page) && err == PAGE_CACHE_SIZE) 495 if (!PageUptodate(page) && err == PAGE_SIZE)
496 SetPageUptodate(page); 496 SetPageUptodate(page);
497 497
498 /* 498 /*
@@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
502 if (err > 0 && (pos > inode->i_size)) 502 if (err > 0 && (pos > inode->i_size))
503 inode->i_size = pos; 503 inode->i_size = pos;
504 unlock_page(page); 504 unlock_page(page);
505 page_cache_release(page); 505 put_page(page);
506 506
507 return err; 507 return err;
508} 508}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e1f465a389d5..4ea71eba40a5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
213 int i, chunksize; 213 int i, chunksize;
214 214
215 /* Find which 4k chunk and offset with in that chunk */ 215 /* Find which 4k chunk and offset with in that chunk */
216 i = offset >> PAGE_CACHE_SHIFT; 216 i = offset >> PAGE_SHIFT;
217 offset = offset & ~PAGE_CACHE_MASK; 217 offset = offset & ~PAGE_MASK;
218 218
219 while (size) { 219 while (size) {
220 size_t n; 220 size_t n;
221 chunksize = PAGE_CACHE_SIZE; 221 chunksize = PAGE_SIZE;
222 if (offset) 222 if (offset)
223 chunksize -= offset; 223 chunksize -= offset;
224 if (chunksize > size) 224 if (chunksize > size)
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
237/* 237/*
238 * Support for read() - Find the page attached to f_mapping and copy out the 238 * Support for read() - Find the page attached to f_mapping and copy out the
239 * data. Its *very* similar to do_generic_mapping_read(), we can't use that 239 * data. Its *very* similar to do_generic_mapping_read(), we can't use that
240 * since it has PAGE_CACHE_SIZE assumptions. 240 * since it has PAGE_SIZE assumptions.
241 */ 241 */
242static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) 242static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
243{ 243{
@@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
285 * We have the page, copy it to user space buffer. 285 * We have the page, copy it to user space buffer.
286 */ 286 */
287 copied = hugetlbfs_read_actor(page, offset, to, nr); 287 copied = hugetlbfs_read_actor(page, offset, to, nr);
288 page_cache_release(page); 288 put_page(page);
289 } 289 }
290 offset += copied; 290 offset += copied;
291 retval += copied; 291 retval += copied;
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index f311bf084015..2e4e834d1a98 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -26,7 +26,7 @@
26#include "zisofs.h" 26#include "zisofs.h"
27 27
28/* This should probably be global. */ 28/* This should probably be global. */
29static char zisofs_sink_page[PAGE_CACHE_SIZE]; 29static char zisofs_sink_page[PAGE_SIZE];
30 30
31/* 31/*
32 * This contains the zlib memory allocation and the mutex for the 32 * This contains the zlib memory allocation and the mutex for the
@@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
70 for ( i = 0 ; i < pcount ; i++ ) { 70 for ( i = 0 ; i < pcount ; i++ ) {
71 if (!pages[i]) 71 if (!pages[i])
72 continue; 72 continue;
73 memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); 73 memset(page_address(pages[i]), 0, PAGE_SIZE);
74 flush_dcache_page(pages[i]); 74 flush_dcache_page(pages[i]);
75 SetPageUptodate(pages[i]); 75 SetPageUptodate(pages[i]);
76 } 76 }
77 return ((loff_t)pcount) << PAGE_CACHE_SHIFT; 77 return ((loff_t)pcount) << PAGE_SHIFT;
78 } 78 }
79 79
80 /* Because zlib is not thread-safe, do all the I/O at the top. */ 80 /* Because zlib is not thread-safe, do all the I/O at the top. */
@@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
121 if (pages[curpage]) { 121 if (pages[curpage]) {
122 stream.next_out = page_address(pages[curpage]) 122 stream.next_out = page_address(pages[curpage])
123 + poffset; 123 + poffset;
124 stream.avail_out = PAGE_CACHE_SIZE - poffset; 124 stream.avail_out = PAGE_SIZE - poffset;
125 poffset = 0; 125 poffset = 0;
126 } else { 126 } else {
127 stream.next_out = (void *)&zisofs_sink_page; 127 stream.next_out = (void *)&zisofs_sink_page;
128 stream.avail_out = PAGE_CACHE_SIZE; 128 stream.avail_out = PAGE_SIZE;
129 } 129 }
130 } 130 }
131 if (!stream.avail_in) { 131 if (!stream.avail_in) {
@@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
220 * pages with the data we have anyway... 220 * pages with the data we have anyway...
221 */ 221 */
222 start_off = page_offset(pages[full_page]); 222 start_off = page_offset(pages[full_page]);
223 end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size); 223 end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
224 224
225 cstart_block = start_off >> zisofs_block_shift; 225 cstart_block = start_off >> zisofs_block_shift;
226 cend_block = (end_off + (1 << zisofs_block_shift) - 1) 226 cend_block = (end_off + (1 << zisofs_block_shift) - 1)
227 >> zisofs_block_shift; 227 >> zisofs_block_shift;
228 228
229 WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) != 229 WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
230 ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK)); 230 ((cstart_block << zisofs_block_shift) & PAGE_MASK));
231 231
232 /* Find the pointer to this specific chunk */ 232 /* Find the pointer to this specific chunk */
233 /* Note: we're not using isonum_731() here because the data is known aligned */ 233 /* Note: we're not using isonum_731() here because the data is known aligned */
@@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
260 ret = zisofs_uncompress_block(inode, block_start, block_end, 260 ret = zisofs_uncompress_block(inode, block_start, block_end,
261 pcount, pages, poffset, &err); 261 pcount, pages, poffset, &err);
262 poffset += ret; 262 poffset += ret;
263 pages += poffset >> PAGE_CACHE_SHIFT; 263 pages += poffset >> PAGE_SHIFT;
264 pcount -= poffset >> PAGE_CACHE_SHIFT; 264 pcount -= poffset >> PAGE_SHIFT;
265 full_page -= poffset >> PAGE_CACHE_SHIFT; 265 full_page -= poffset >> PAGE_SHIFT;
266 poffset &= ~PAGE_CACHE_MASK; 266 poffset &= ~PAGE_MASK;
267 267
268 if (err) { 268 if (err) {
269 brelse(bh); 269 brelse(bh);
@@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
282 282
283 if (poffset && *pages) { 283 if (poffset && *pages) {
284 memset(page_address(*pages) + poffset, 0, 284 memset(page_address(*pages) + poffset, 0,
285 PAGE_CACHE_SIZE - poffset); 285 PAGE_SIZE - poffset);
286 flush_dcache_page(*pages); 286 flush_dcache_page(*pages);
287 SetPageUptodate(*pages); 287 SetPageUptodate(*pages);
288 } 288 }
@@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
302 int i, pcount, full_page; 302 int i, pcount, full_page;
303 unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1]; 303 unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
304 unsigned int zisofs_pages_per_cblock = 304 unsigned int zisofs_pages_per_cblock =
305 PAGE_CACHE_SHIFT <= zisofs_block_shift ? 305 PAGE_SHIFT <= zisofs_block_shift ?
306 (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0; 306 (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
307 struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; 307 struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
308 pgoff_t index = page->index, end_index; 308 pgoff_t index = page->index, end_index;
309 309
310 end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 310 end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
311 /* 311 /*
312 * If this page is wholly outside i_size we just return zero; 312 * If this page is wholly outside i_size we just return zero;
313 * do_generic_file_read() will handle this for us 313 * do_generic_file_read() will handle this for us
@@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
318 return 0; 318 return 0;
319 } 319 }
320 320
321 if (PAGE_CACHE_SHIFT <= zisofs_block_shift) { 321 if (PAGE_SHIFT <= zisofs_block_shift) {
322 /* We have already been given one page, this is the one 322 /* We have already been given one page, this is the one
323 we must do. */ 323 we must do. */
324 full_page = index & (zisofs_pages_per_cblock - 1); 324 full_page = index & (zisofs_pages_per_cblock - 1);
@@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
351 kunmap(pages[i]); 351 kunmap(pages[i]);
352 unlock_page(pages[i]); 352 unlock_page(pages[i]);
353 if (i != full_page) 353 if (i != full_page)
354 page_cache_release(pages[i]); 354 put_page(pages[i]);
355 } 355 }
356 } 356 }
357 357
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bcd2d41b318a..131dedc920d8 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
1021 * the page with useless information without generating any 1021 * the page with useless information without generating any
1022 * I/O errors. 1022 * I/O errors.
1023 */ 1023 */
1024 if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { 1024 if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
1025 printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n", 1025 printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
1026 __func__, b_off, 1026 __func__, b_off,
1027 (unsigned long long)inode->i_size); 1027 (unsigned long long)inode->i_size);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 517f2de784cf..2ad98d6e19f4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh)
81 if (!trylock_page(page)) 81 if (!trylock_page(page))
82 goto nope; 82 goto nope;
83 83
84 page_cache_get(page); 84 get_page(page);
85 __brelse(bh); 85 __brelse(bh);
86 try_to_free_buffers(page); 86 try_to_free_buffers(page);
87 unlock_page(page); 87 unlock_page(page);
88 page_cache_release(page); 88 put_page(page);
89 return; 89 return;
90 90
91nope: 91nope:
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index de73a9516a54..435f0b26ac20 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal)
2221 2221
2222int jbd2_journal_blocks_per_page(struct inode *inode) 2222int jbd2_journal_blocks_per_page(struct inode *inode)
2223{ 2223{
2224 return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 2224 return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
2225} 2225}
2226 2226
2227/* 2227/*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 01e4652d88f6..67c103867bf8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
2263 struct buffer_head *head, *bh, *next; 2263 struct buffer_head *head, *bh, *next;
2264 unsigned int stop = offset + length; 2264 unsigned int stop = offset + length;
2265 unsigned int curr_off = 0; 2265 unsigned int curr_off = 0;
2266 int partial_page = (offset || length < PAGE_CACHE_SIZE); 2266 int partial_page = (offset || length < PAGE_SIZE);
2267 int may_free = 1; 2267 int may_free = 1;
2268 int ret = 0; 2268 int ret = 0;
2269 2269
@@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
2272 if (!page_has_buffers(page)) 2272 if (!page_has_buffers(page))
2273 return 0; 2273 return 0;
2274 2274
2275 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 2275 BUG_ON(stop > PAGE_SIZE || stop < length);
2276 2276
2277 /* We will potentially be playing with lists other than just the 2277 /* We will potentially be playing with lists other than just the
2278 * data lists (especially for journaled data mode), so be 2278 * data lists (especially for journaled data mode), so be
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1090eb64b90d..9d26b1b9fc01 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
95 rather than mucking around with actually reading the node 95 rather than mucking around with actually reading the node
96 and checking the compression type, which is the real way 96 and checking the compression type, which is the real way
97 to tell a hole node. */ 97 to tell a hole node. */
98 if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) 98 if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
99 && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { 99 && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
100 JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", 100 JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
101 ref_offset(fn->raw)); 101 ref_offset(fn->raw));
102 bitched = 1; 102 bitched = 1;
103 } 103 }
104 104
105 if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) 105 if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
106 && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { 106 && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
107 JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", 107 JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
108 ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); 108 ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
109 bitched = 1; 109 bitched = 1;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index cad86bac3453..0e62dec3effc 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
87 int ret; 87 int ret;
88 88
89 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", 89 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
90 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); 90 __func__, inode->i_ino, pg->index << PAGE_SHIFT);
91 91
92 BUG_ON(!PageLocked(pg)); 92 BUG_ON(!PageLocked(pg));
93 93
94 pg_buf = kmap(pg); 94 pg_buf = kmap(pg);
95 /* FIXME: Can kmap fail? */ 95 /* FIXME: Can kmap fail? */
96 96
97 ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); 97 ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
98 PAGE_SIZE);
98 99
99 if (ret) { 100 if (ret) {
100 ClearPageUptodate(pg); 101 ClearPageUptodate(pg);
@@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
137 struct page *pg; 138 struct page *pg;
138 struct inode *inode = mapping->host; 139 struct inode *inode = mapping->host;
139 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 140 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
140 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 141 pgoff_t index = pos >> PAGE_SHIFT;
141 uint32_t pageofs = index << PAGE_CACHE_SHIFT; 142 uint32_t pageofs = index << PAGE_SHIFT;
142 int ret = 0; 143 int ret = 0;
143 144
144 pg = grab_cache_page_write_begin(mapping, index, flags); 145 pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
230 231
231out_page: 232out_page:
232 unlock_page(pg); 233 unlock_page(pg);
233 page_cache_release(pg); 234 put_page(pg);
234 return ret; 235 return ret;
235} 236}
236 237
@@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
245 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 246 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
246 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 247 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
247 struct jffs2_raw_inode *ri; 248 struct jffs2_raw_inode *ri;
248 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 249 unsigned start = pos & (PAGE_SIZE - 1);
249 unsigned end = start + copied; 250 unsigned end = start + copied;
250 unsigned aligned_start = start & ~3; 251 unsigned aligned_start = start & ~3;
251 int ret = 0; 252 int ret = 0;
252 uint32_t writtenlen = 0; 253 uint32_t writtenlen = 0;
253 254
254 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", 255 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
255 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, 256 __func__, inode->i_ino, pg->index << PAGE_SHIFT,
256 start, end, pg->flags); 257 start, end, pg->flags);
257 258
258 /* We need to avoid deadlock with page_cache_read() in 259 /* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
261 to re-lock it. */ 262 to re-lock it. */
262 BUG_ON(!PageUptodate(pg)); 263 BUG_ON(!PageUptodate(pg));
263 264
264 if (end == PAGE_CACHE_SIZE) { 265 if (end == PAGE_SIZE) {
265 /* When writing out the end of a page, write out the 266 /* When writing out the end of a page, write out the
266 _whole_ page. This helps to reduce the number of 267 _whole_ page. This helps to reduce the number of
267 nodes in files which have many short writes, like 268 nodes in files which have many short writes, like
@@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
275 jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", 276 jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
276 __func__); 277 __func__);
277 unlock_page(pg); 278 unlock_page(pg);
278 page_cache_release(pg); 279 put_page(pg);
279 return -ENOMEM; 280 return -ENOMEM;
280 } 281 }
281 282
@@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
292 kmap(pg); 293 kmap(pg);
293 294
294 ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, 295 ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
295 (pg->index << PAGE_CACHE_SHIFT) + aligned_start, 296 (pg->index << PAGE_SHIFT) + aligned_start,
296 end - aligned_start, &writtenlen); 297 end - aligned_start, &writtenlen);
297 298
298 kunmap(pg); 299 kunmap(pg);
@@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
329 jffs2_dbg(1, "%s() returning %d\n", 330 jffs2_dbg(1, "%s() returning %d\n",
330 __func__, writtenlen > 0 ? writtenlen : ret); 331 __func__, writtenlen > 0 ? writtenlen : ret);
331 unlock_page(pg); 332 unlock_page(pg);
332 page_cache_release(pg); 333 put_page(pg);
333 return writtenlen > 0 ? writtenlen : ret; 334 return writtenlen > 0 ? writtenlen : ret;
334} 335}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bead25ae8fe4..ae2ebb26b446 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
586 goto out_root; 586 goto out_root;
587 587
588 sb->s_maxbytes = 0xFFFFFFFF; 588 sb->s_maxbytes = 0xFFFFFFFF;
589 sb->s_blocksize = PAGE_CACHE_SIZE; 589 sb->s_blocksize = PAGE_SIZE;
590 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 590 sb->s_blocksize_bits = PAGE_SHIFT;
591 sb->s_magic = JFFS2_SUPER_MAGIC; 591 sb->s_magic = JFFS2_SUPER_MAGIC;
592 if (!(sb->s_flags & MS_RDONLY)) 592 if (!(sb->s_flags & MS_RDONLY))
593 jffs2_start_garbage_collect_thread(c); 593 jffs2_start_garbage_collect_thread(c);
@@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
685 struct inode *inode = OFNI_EDONI_2SFFJ(f); 685 struct inode *inode = OFNI_EDONI_2SFFJ(f);
686 struct page *pg; 686 struct page *pg;
687 687
688 pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, 688 pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
689 (void *)jffs2_do_readpage_unlock, inode); 689 (void *)jffs2_do_readpage_unlock, inode);
690 if (IS_ERR(pg)) 690 if (IS_ERR(pg))
691 return (void *)pg; 691 return (void *)pg;
@@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
701 struct page *pg = (void *)*priv; 701 struct page *pg = (void *)*priv;
702 702
703 kunmap(pg); 703 kunmap(pg);
704 page_cache_release(pg); 704 put_page(pg);
705} 705}
706 706
707static int jffs2_flash_setup(struct jffs2_sb_info *c) { 707static int jffs2_flash_setup(struct jffs2_sb_info *c) {
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 7e553f286775..9ed0f26cf023 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -552,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
552 goto upnout; 552 goto upnout;
553 } 553 }
554 /* We found a datanode. Do the GC */ 554 /* We found a datanode. Do the GC */
555 if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) { 555 if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
556 /* It crosses a page boundary. Therefore, it must be a hole. */ 556 /* It crosses a page boundary. Therefore, it must be a hole. */
557 ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); 557 ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
558 } else { 558 } else {
@@ -1192,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1192 struct jffs2_node_frag *frag; 1192 struct jffs2_node_frag *frag;
1193 uint32_t min, max; 1193 uint32_t min, max;
1194 1194
1195 min = start & ~(PAGE_CACHE_SIZE-1); 1195 min = start & ~(PAGE_SIZE-1);
1196 max = min + PAGE_CACHE_SIZE; 1196 max = min + PAGE_SIZE;
1197 1197
1198 frag = jffs2_lookup_node_frag(&f->fragtree, start); 1198 frag = jffs2_lookup_node_frag(&f->fragtree, start);
1199 1199
@@ -1351,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1351 cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); 1351 cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
1352 datalen = end - offset; 1352 datalen = end - offset;
1353 1353
1354 writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1)); 1354 writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
1355 1355
1356 comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen); 1356 comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
1357 1357
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9a5449bc3afb..b86c78d178c6 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list,
90 90
91 /* If the last fragment starts at the RAM page boundary, it is 91 /* If the last fragment starts at the RAM page boundary, it is
92 * REF_PRISTINE irrespective of its size. */ 92 * REF_PRISTINE irrespective of its size. */
93 if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { 93 if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
94 dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", 94 dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
95 frag->ofs, frag->ofs + frag->size); 95 frag->ofs, frag->ofs + frag->size);
96 frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; 96 frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
@@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
237 If so, both 'this' and the new node get marked REF_NORMAL so 237 If so, both 'this' and the new node get marked REF_NORMAL so
238 the GC can take a look. 238 the GC can take a look.
239 */ 239 */
240 if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { 240 if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
241 if (this->node) 241 if (this->node)
242 mark_ref_normal(this->node->raw); 242 mark_ref_normal(this->node->raw);
243 mark_ref_normal(newfrag->node->raw); 243 mark_ref_normal(newfrag->node->raw);
@@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
382 382
383 /* If we now share a page with other nodes, mark either previous 383 /* If we now share a page with other nodes, mark either previous
384 or next node REF_NORMAL, as appropriate. */ 384 or next node REF_NORMAL, as appropriate. */
385 if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { 385 if (newfrag->ofs & (PAGE_SIZE-1)) {
386 struct jffs2_node_frag *prev = frag_prev(newfrag); 386 struct jffs2_node_frag *prev = frag_prev(newfrag);
387 387
388 mark_ref_normal(fn->raw); 388 mark_ref_normal(fn->raw);
@@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
391 mark_ref_normal(prev->node->raw); 391 mark_ref_normal(prev->node->raw);
392 } 392 }
393 393
394 if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { 394 if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
395 struct jffs2_node_frag *next = frag_next(newfrag); 395 struct jffs2_node_frag *next = frag_next(newfrag);
396 396
397 if (next) { 397 if (next) {
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index b634de4c8101..7fb187ab2682 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
172 beginning of a page and runs to the end of the file, or if 172 beginning of a page and runs to the end of the file, or if
173 it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 173 it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
174 */ 174 */
175 if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || 175 if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
176 ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && 176 ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
177 (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { 177 (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
178 flash_ofs |= REF_PRISTINE; 178 flash_ofs |= REF_PRISTINE;
179 } else { 179 } else {
@@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
366 break; 366 break;
367 } 367 }
368 mutex_lock(&f->sem); 368 mutex_lock(&f->sem);
369 datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1))); 369 datalen = min_t(uint32_t, writelen,
370 PAGE_SIZE - (offset & (PAGE_SIZE-1)));
370 cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen); 371 cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
371 372
372 comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen); 373 comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index a3eb316b1ac3..b60e015cc757 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp)
80static struct kmem_cache *metapage_cache; 80static struct kmem_cache *metapage_cache;
81static mempool_t *metapage_mempool; 81static mempool_t *metapage_mempool;
82 82
83#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) 83#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
84 84
85#if MPS_PER_PAGE > 1 85#if MPS_PER_PAGE > 1
86 86
@@ -316,7 +316,7 @@ static void last_write_complete(struct page *page)
316 struct metapage *mp; 316 struct metapage *mp;
317 unsigned int offset; 317 unsigned int offset;
318 318
319 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 319 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
320 mp = page_to_mp(page, offset); 320 mp = page_to_mp(page, offset);
321 if (mp && test_bit(META_io, &mp->flag)) { 321 if (mp && test_bit(META_io, &mp->flag)) {
322 if (mp->lsn) 322 if (mp->lsn)
@@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
366 int bad_blocks = 0; 366 int bad_blocks = 0;
367 367
368 page_start = (sector_t)page->index << 368 page_start = (sector_t)page->index <<
369 (PAGE_CACHE_SHIFT - inode->i_blkbits); 369 (PAGE_SHIFT - inode->i_blkbits);
370 BUG_ON(!PageLocked(page)); 370 BUG_ON(!PageLocked(page));
371 BUG_ON(PageWriteback(page)); 371 BUG_ON(PageWriteback(page));
372 set_page_writeback(page); 372 set_page_writeback(page);
373 373
374 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 374 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
375 mp = page_to_mp(page, offset); 375 mp = page_to_mp(page, offset);
376 376
377 if (!mp || !test_bit(META_dirty, &mp->flag)) 377 if (!mp || !test_bit(META_dirty, &mp->flag))
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
416 bio = NULL; 416 bio = NULL;
417 } else 417 } else
418 inc_io(page); 418 inc_io(page);
419 xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits; 419 xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
420 pblock = metapage_get_blocks(inode, lblock, &xlen); 420 pblock = metapage_get_blocks(inode, lblock, &xlen);
421 if (!pblock) { 421 if (!pblock) {
422 printk(KERN_ERR "JFS: metapage_get_blocks failed\n"); 422 printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
485 struct inode *inode = page->mapping->host; 485 struct inode *inode = page->mapping->host;
486 struct bio *bio = NULL; 486 struct bio *bio = NULL;
487 int block_offset; 487 int block_offset;
488 int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; 488 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
489 sector_t page_start; /* address of page in fs blocks */ 489 sector_t page_start; /* address of page in fs blocks */
490 sector_t pblock; 490 sector_t pblock;
491 int xlen; 491 int xlen;
@@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
494 494
495 BUG_ON(!PageLocked(page)); 495 BUG_ON(!PageLocked(page));
496 page_start = (sector_t)page->index << 496 page_start = (sector_t)page->index <<
497 (PAGE_CACHE_SHIFT - inode->i_blkbits); 497 (PAGE_SHIFT - inode->i_blkbits);
498 498
499 block_offset = 0; 499 block_offset = 0;
500 while (block_offset < blocks_per_page) { 500 while (block_offset < blocks_per_page) {
@@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
542 int ret = 1; 542 int ret = 1;
543 int offset; 543 int offset;
544 544
545 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 545 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
546 mp = page_to_mp(page, offset); 546 mp = page_to_mp(page, offset);
547 547
548 if (!mp) 548 if (!mp)
@@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
568static void metapage_invalidatepage(struct page *page, unsigned int offset, 568static void metapage_invalidatepage(struct page *page, unsigned int offset,
569 unsigned int length) 569 unsigned int length)
570{ 570{
571 BUG_ON(offset || length < PAGE_CACHE_SIZE); 571 BUG_ON(offset || length < PAGE_SIZE);
572 572
573 BUG_ON(PageWriteback(page)); 573 BUG_ON(PageWriteback(page));
574 574
@@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
599 inode->i_ino, lblock, absolute); 599 inode->i_ino, lblock, absolute);
600 600
601 l2bsize = inode->i_blkbits; 601 l2bsize = inode->i_blkbits;
602 l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize; 602 l2BlocksPerPage = PAGE_SHIFT - l2bsize;
603 page_index = lblock >> l2BlocksPerPage; 603 page_index = lblock >> l2BlocksPerPage;
604 page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; 604 page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
605 if ((page_offset + size) > PAGE_CACHE_SIZE) { 605 if ((page_offset + size) > PAGE_SIZE) {
606 jfs_err("MetaData crosses page boundary!!"); 606 jfs_err("MetaData crosses page boundary!!");
607 jfs_err("lblock = %lx, size = %d", lblock, size); 607 jfs_err("lblock = %lx, size = %d", lblock, size);
608 dump_stack(); 608 dump_stack();
@@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
621 mapping = inode->i_mapping; 621 mapping = inode->i_mapping;
622 } 622 }
623 623
624 if (new && (PSIZE == PAGE_CACHE_SIZE)) { 624 if (new && (PSIZE == PAGE_SIZE)) {
625 page = grab_cache_page(mapping, page_index); 625 page = grab_cache_page(mapping, page_index);
626 if (!page) { 626 if (!page) {
627 jfs_err("grab_cache_page failed!"); 627 jfs_err("grab_cache_page failed!");
@@ -693,7 +693,7 @@ unlock:
693void grab_metapage(struct metapage * mp) 693void grab_metapage(struct metapage * mp)
694{ 694{
695 jfs_info("grab_metapage: mp = 0x%p", mp); 695 jfs_info("grab_metapage: mp = 0x%p", mp);
696 page_cache_get(mp->page); 696 get_page(mp->page);
697 lock_page(mp->page); 697 lock_page(mp->page);
698 mp->count++; 698 mp->count++;
699 lock_metapage(mp); 699 lock_metapage(mp);
@@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp)
706 jfs_info("force_metapage: mp = 0x%p", mp); 706 jfs_info("force_metapage: mp = 0x%p", mp);
707 set_bit(META_forcewrite, &mp->flag); 707 set_bit(META_forcewrite, &mp->flag);
708 clear_bit(META_sync, &mp->flag); 708 clear_bit(META_sync, &mp->flag);
709 page_cache_get(page); 709 get_page(page);
710 lock_page(page); 710 lock_page(page);
711 set_page_dirty(page); 711 set_page_dirty(page);
712 write_one_page(page, 1); 712 write_one_page(page, 1);
713 clear_bit(META_forcewrite, &mp->flag); 713 clear_bit(META_forcewrite, &mp->flag);
714 page_cache_release(page); 714 put_page(page);
715} 715}
716 716
717void hold_metapage(struct metapage *mp) 717void hold_metapage(struct metapage *mp)
@@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp)
726 unlock_page(mp->page); 726 unlock_page(mp->page);
727 return; 727 return;
728 } 728 }
729 page_cache_get(mp->page); 729 get_page(mp->page);
730 mp->count++; 730 mp->count++;
731 lock_metapage(mp); 731 lock_metapage(mp);
732 unlock_page(mp->page); 732 unlock_page(mp->page);
@@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp)
746 assert(mp->count); 746 assert(mp->count);
747 if (--mp->count || mp->nohomeok) { 747 if (--mp->count || mp->nohomeok) {
748 unlock_page(page); 748 unlock_page(page);
749 page_cache_release(page); 749 put_page(page);
750 return; 750 return;
751 } 751 }
752 752
@@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp)
764 drop_metapage(page, mp); 764 drop_metapage(page, mp);
765 765
766 unlock_page(page); 766 unlock_page(page);
767 page_cache_release(page); 767 put_page(page);
768} 768}
769 769
770void __invalidate_metapages(struct inode *ip, s64 addr, int len) 770void __invalidate_metapages(struct inode *ip, s64 addr, int len)
771{ 771{
772 sector_t lblock; 772 sector_t lblock;
773 int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits; 773 int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
774 int BlocksPerPage = 1 << l2BlocksPerPage; 774 int BlocksPerPage = 1 << l2BlocksPerPage;
775 /* All callers are interested in block device's mapping */ 775 /* All callers are interested in block device's mapping */
776 struct address_space *mapping = 776 struct address_space *mapping =
@@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
788 page = find_lock_page(mapping, lblock >> l2BlocksPerPage); 788 page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
789 if (!page) 789 if (!page)
790 continue; 790 continue;
791 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 791 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
792 mp = page_to_mp(page, offset); 792 mp = page_to_mp(page, offset);
793 if (!mp) 793 if (!mp)
794 continue; 794 continue;
@@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
803 remove_from_logsync(mp); 803 remove_from_logsync(mp);
804 } 804 }
805 unlock_page(page); 805 unlock_page(page);
806 page_cache_release(page); 806 put_page(page);
807 } 807 }
808} 808}
809 809
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 337e9e51ac06..a869fb4a20d6 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
106 lock_page(page); 106 lock_page(page);
107 if (!mp->nohomeok++) { 107 if (!mp->nohomeok++) {
108 mark_metapage_dirty(mp); 108 mark_metapage_dirty(mp);
109 page_cache_get(page); 109 get_page(page);
110 wait_on_page_writeback(page); 110 wait_on_page_writeback(page);
111 } 111 }
112 unlock_page(page); 112 unlock_page(page);
@@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
128static inline void _metapage_homeok(struct metapage *mp) 128static inline void _metapage_homeok(struct metapage *mp)
129{ 129{
130 if (!--mp->nohomeok) 130 if (!--mp->nohomeok)
131 page_cache_release(mp->page); 131 put_page(mp->page);
132} 132}
133 133
134static inline void metapage_homeok(struct metapage *mp) 134static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f5d85ba8e23..78d599198bf5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
596 * Page cache is indexed by long. 596 * Page cache is indexed by long.
597 * I would use MAX_LFS_FILESIZE, but it's only half as big 597 * I would use MAX_LFS_FILESIZE, but it's only half as big
598 */ 598 */
599 sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, 599 sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
600 (u64)sb->s_maxbytes); 600 (u64)sb->s_maxbytes);
601#endif 601#endif
602 sb->s_time_gran = 1; 602 sb->s_time_gran = 1;
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index b67dbccdaf88..f73541fbe7af 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
138 struct dentry *root; 138 struct dentry *root;
139 139
140 info->sb = sb; 140 info->sb = sb;
141 sb->s_blocksize = PAGE_CACHE_SIZE; 141 sb->s_blocksize = PAGE_SIZE;
142 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 142 sb->s_blocksize_bits = PAGE_SHIFT;
143 sb->s_magic = magic; 143 sb->s_magic = magic;
144 sb->s_op = &kernfs_sops; 144 sb->s_op = &kernfs_sops;
145 sb->s_time_gran = 1; 145 sb->s_time_gran = 1;
diff --git a/fs/libfs.c b/fs/libfs.c
index 0ca80b2af420..f3fa82ce9b70 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
25{ 25{
26 struct inode *inode = d_inode(dentry); 26 struct inode *inode = d_inode(dentry);
27 generic_fillattr(inode, stat); 27 generic_fillattr(inode, stat);
28 stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); 28 stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
29 return 0; 29 return 0;
30} 30}
31EXPORT_SYMBOL(simple_getattr); 31EXPORT_SYMBOL(simple_getattr);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr);
33int simple_statfs(struct dentry *dentry, struct kstatfs *buf) 33int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
34{ 34{
35 buf->f_type = dentry->d_sb->s_magic; 35 buf->f_type = dentry->d_sb->s_magic;
36 buf->f_bsize = PAGE_CACHE_SIZE; 36 buf->f_bsize = PAGE_SIZE;
37 buf->f_namelen = NAME_MAX; 37 buf->f_namelen = NAME_MAX;
38 return 0; 38 return 0;
39} 39}
@@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
395 struct page *page; 395 struct page *page;
396 pgoff_t index; 396 pgoff_t index;
397 397
398 index = pos >> PAGE_CACHE_SHIFT; 398 index = pos >> PAGE_SHIFT;
399 399
400 page = grab_cache_page_write_begin(mapping, index, flags); 400 page = grab_cache_page_write_begin(mapping, index, flags);
401 if (!page) 401 if (!page)
@@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
403 403
404 *pagep = page; 404 *pagep = page;
405 405
406 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { 406 if (!PageUptodate(page) && (len != PAGE_SIZE)) {
407 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 407 unsigned from = pos & (PAGE_SIZE - 1);
408 408
409 zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE); 409 zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
410 } 410 }
411 return 0; 411 return 0;
412} 412}
@@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
442 442
443 /* zero the stale part of the page if we did a short copy */ 443 /* zero the stale part of the page if we did a short copy */
444 if (copied < len) { 444 if (copied < len) {
445 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 445 unsigned from = pos & (PAGE_SIZE - 1);
446 446
447 zero_user(page, from + copied, len - copied); 447 zero_user(page, from + copied, len - copied);
448 } 448 }
@@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
458 458
459 set_page_dirty(page); 459 set_page_dirty(page);
460 unlock_page(page); 460 unlock_page(page);
461 page_cache_release(page); 461 put_page(page);
462 462
463 return copied; 463 return copied;
464} 464}
@@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
477 struct dentry *dentry; 477 struct dentry *dentry;
478 int i; 478 int i;
479 479
480 s->s_blocksize = PAGE_CACHE_SIZE; 480 s->s_blocksize = PAGE_SIZE;
481 s->s_blocksize_bits = PAGE_CACHE_SHIFT; 481 s->s_blocksize_bits = PAGE_SHIFT;
482 s->s_magic = magic; 482 s->s_magic = magic;
483 s->s_op = &simple_super_operations; 483 s->s_op = &simple_super_operations;
484 s->s_time_gran = 1; 484 s->s_time_gran = 1;
@@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
994{ 994{
995 u64 last_fs_block = num_blocks - 1; 995 u64 last_fs_block = num_blocks - 1;
996 u64 last_fs_page = 996 u64 last_fs_page =
997 last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits); 997 last_fs_block >> (PAGE_SHIFT - blocksize_bits);
998 998
999 if (unlikely(num_blocks == 0)) 999 if (unlikely(num_blocks == 0))
1000 return 0; 1000 return 0;
1001 1001
1002 if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT)) 1002 if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
1003 return -EINVAL; 1003 return -EINVAL;
1004 1004
1005 if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) || 1005 if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index a709d80c8ebc..cc26f8f215f5 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio)
64 64
65 bio_for_each_segment_all(bvec, bio, i) { 65 bio_for_each_segment_all(bvec, bio, i) {
66 end_page_writeback(bvec->bv_page); 66 end_page_writeback(bvec->bv_page);
67 page_cache_release(bvec->bv_page); 67 put_page(bvec->bv_page);
68 } 68 }
69 bio_put(bio); 69 bio_put(bio);
70 if (atomic_dec_and_test(&super->s_pending_writes)) 70 if (atomic_dec_and_test(&super->s_pending_writes))
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 9c501449450d..b76a62b1978f 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -46,9 +46,9 @@ static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
46 46
47 BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs)); 47 BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
48 BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift); 48 BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
49 BUG_ON(len > PAGE_CACHE_SIZE); 49 BUG_ON(len > PAGE_SIZE);
50 page_start = ofs & PAGE_CACHE_MASK; 50 page_start = ofs & PAGE_MASK;
51 page_end = PAGE_CACHE_ALIGN(ofs + len) - 1; 51 page_end = PAGE_ALIGN(ofs + len) - 1;
52 ret = mtd_write(mtd, ofs, len, &retlen, buf); 52 ret = mtd_write(mtd, ofs, len, &retlen, buf);
53 if (ret || (retlen != len)) 53 if (ret || (retlen != len))
54 return -EIO; 54 return -EIO;
@@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
82 if (!page) 82 if (!page)
83 continue; 83 continue;
84 memset(page_address(page), 0xFF, PAGE_SIZE); 84 memset(page_address(page), 0xFF, PAGE_SIZE);
85 page_cache_release(page); 85 put_page(page);
86 } 86 }
87 return 0; 87 return 0;
88} 88}
@@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
195 err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, 195 err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
196 page_address(page)); 196 page_address(page));
197 unlock_page(page); 197 unlock_page(page);
198 page_cache_release(page); 198 put_page(page);
199 if (err) 199 if (err)
200 return err; 200 return err;
201 } 201 }
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 542468e9bfb4..ddbed2be5366 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
183 if (name->len != be16_to_cpu(dd->namelen) || 183 if (name->len != be16_to_cpu(dd->namelen) ||
184 memcmp(name->name, dd->name, name->len)) { 184 memcmp(name->name, dd->name, name->len)) {
185 kunmap_atomic(dd); 185 kunmap_atomic(dd);
186 page_cache_release(page); 186 put_page(page);
187 continue; 187 continue;
188 } 188 }
189 189
@@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry)
238 return PTR_ERR(page); 238 return PTR_ERR(page);
239 } 239 }
240 index = page->index; 240 index = page->index;
241 page_cache_release(page); 241 put_page(page);
242 242
243 mutex_lock(&super->s_dirop_mutex); 243 mutex_lock(&super->s_dirop_mutex);
244 logfs_add_transaction(dir, ta); 244 logfs_add_transaction(dir, ta);
@@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
316 be16_to_cpu(dd->namelen), 316 be16_to_cpu(dd->namelen),
317 be64_to_cpu(dd->ino), dd->type); 317 be64_to_cpu(dd->ino), dd->type);
318 kunmap(page); 318 kunmap(page);
319 page_cache_release(page); 319 put_page(page);
320 if (full) 320 if (full)
321 break; 321 break;
322 } 322 }
@@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
349 dd = kmap_atomic(page); 349 dd = kmap_atomic(page);
350 ino = be64_to_cpu(dd->ino); 350 ino = be64_to_cpu(dd->ino);
351 kunmap_atomic(dd); 351 kunmap_atomic(dd);
352 page_cache_release(page); 352 put_page(page);
353 353
354 inode = logfs_iget(dir->i_sb, ino); 354 inode = logfs_iget(dir->i_sb, ino);
355 if (IS_ERR(inode)) 355 if (IS_ERR(inode))
@@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
392 392
393 err = logfs_write_buf(dir, page, WF_LOCK); 393 err = logfs_write_buf(dir, page, WF_LOCK);
394 unlock_page(page); 394 unlock_page(page);
395 page_cache_release(page); 395 put_page(page);
396 if (!err) 396 if (!err)
397 grow_dir(dir, index); 397 grow_dir(dir, index);
398 return err; 398 return err;
@@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
561 map = kmap_atomic(page); 561 map = kmap_atomic(page);
562 memcpy(dd, map, sizeof(*dd)); 562 memcpy(dd, map, sizeof(*dd));
563 kunmap_atomic(map); 563 kunmap_atomic(map);
564 page_cache_release(page); 564 put_page(page);
565 return 0; 565 return 0;
566} 566}
567 567
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 61eaeb1b6cac..f01ddfb1a03b 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -15,21 +15,21 @@ static int logfs_write_begin(struct file *file, struct address_space *mapping,
15{ 15{
16 struct inode *inode = mapping->host; 16 struct inode *inode = mapping->host;
17 struct page *page; 17 struct page *page;
18 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 18 pgoff_t index = pos >> PAGE_SHIFT;
19 19
20 page = grab_cache_page_write_begin(mapping, index, flags); 20 page = grab_cache_page_write_begin(mapping, index, flags);
21 if (!page) 21 if (!page)
22 return -ENOMEM; 22 return -ENOMEM;
23 *pagep = page; 23 *pagep = page;
24 24
25 if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) 25 if ((len == PAGE_SIZE) || PageUptodate(page))
26 return 0; 26 return 0;
27 if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { 27 if ((pos & PAGE_MASK) >= i_size_read(inode)) {
28 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 28 unsigned start = pos & (PAGE_SIZE - 1);
29 unsigned end = start + len; 29 unsigned end = start + len;
30 30
31 /* Reading beyond i_size is simple: memset to zero */ 31 /* Reading beyond i_size is simple: memset to zero */
32 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); 32 zero_user_segments(page, 0, start, end, PAGE_SIZE);
33 return 0; 33 return 0;
34 } 34 }
35 return logfs_readpage_nolock(page); 35 return logfs_readpage_nolock(page);
@@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
41{ 41{
42 struct inode *inode = mapping->host; 42 struct inode *inode = mapping->host;
43 pgoff_t index = page->index; 43 pgoff_t index = page->index;
44 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 44 unsigned start = pos & (PAGE_SIZE - 1);
45 unsigned end = start + copied; 45 unsigned end = start + copied;
46 int ret = 0; 46 int ret = 0;
47 47
48 BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize); 48 BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
49 BUG_ON(page->index > I3_BLOCKS); 49 BUG_ON(page->index > I3_BLOCKS);
50 50
51 if (copied < len) { 51 if (copied < len) {
@@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
61 if (copied == 0) 61 if (copied == 0)
62 goto out; /* FIXME: do we need to update inode? */ 62 goto out; /* FIXME: do we need to update inode? */
63 63
64 if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) { 64 if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
65 i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end); 65 i_size_write(inode, (index << PAGE_SHIFT) + end);
66 mark_inode_dirty_sync(inode); 66 mark_inode_dirty_sync(inode);
67 } 67 }
68 68
@@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
75 } 75 }
76out: 76out:
77 unlock_page(page); 77 unlock_page(page);
78 page_cache_release(page); 78 put_page(page);
79 return ret ? ret : copied; 79 return ret ? ret : copied;
80} 80}
81 81
@@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
118{ 118{
119 struct inode *inode = page->mapping->host; 119 struct inode *inode = page->mapping->host;
120 loff_t i_size = i_size_read(inode); 120 loff_t i_size = i_size_read(inode);
121 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 121 pgoff_t end_index = i_size >> PAGE_SHIFT;
122 unsigned offset; 122 unsigned offset;
123 u64 bix; 123 u64 bix;
124 level_t level; 124 level_t level;
@@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
142 return __logfs_writepage(page); 142 return __logfs_writepage(page);
143 143
144 /* Is the page fully outside i_size? (truncate in progress) */ 144 /* Is the page fully outside i_size? (truncate in progress) */
145 offset = i_size & (PAGE_CACHE_SIZE-1); 145 offset = i_size & (PAGE_SIZE-1);
146 if (bix > end_index || offset == 0) { 146 if (bix > end_index || offset == 0) {
147 unlock_page(page); 147 unlock_page(page);
148 return 0; /* don't care */ 148 return 0; /* don't care */
@@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
155 * the page size, the remaining memory is zeroed when mapped, and 155 * the page size, the remaining memory is zeroed when mapped, and
156 * writes to that region are not written out to the file." 156 * writes to that region are not written out to the file."
157 */ 157 */
158 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 158 zero_user_segment(page, offset, PAGE_SIZE);
159 return __logfs_writepage(page); 159 return __logfs_writepage(page);
160} 160}
161 161
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 20973c9e52f8..3fb8c6d67303 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
281static void logfs_put_read_page(struct page *page) 281static void logfs_put_read_page(struct page *page)
282{ 282{
283 unlock_page(page); 283 unlock_page(page);
284 page_cache_release(page); 284 put_page(page);
285} 285}
286 286
287static void logfs_lock_write_page(struct page *page) 287static void logfs_lock_write_page(struct page *page)
@@ -323,7 +323,7 @@ repeat:
323 return NULL; 323 return NULL;
324 err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS); 324 err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
325 if (unlikely(err)) { 325 if (unlikely(err)) {
326 page_cache_release(page); 326 put_page(page);
327 if (err == -EEXIST) 327 if (err == -EEXIST)
328 goto repeat; 328 goto repeat;
329 return NULL; 329 return NULL;
@@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page)
342static void logfs_put_write_page(struct page *page) 342static void logfs_put_write_page(struct page *page)
343{ 343{
344 logfs_unlock_write_page(page); 344 logfs_unlock_write_page(page);
345 page_cache_release(page); 345 put_page(page);
346} 346}
347 347
348static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level, 348static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
@@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb,
562 562
563 if (PagePrivate(page)) { 563 if (PagePrivate(page)) {
564 ClearPagePrivate(page); 564 ClearPagePrivate(page);
565 page_cache_release(page); 565 put_page(page);
566 set_page_private(page, 0); 566 set_page_private(page, 0);
567 } 567 }
568 __free_block(sb, block); 568 __free_block(sb, block);
@@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page)
655 block->page = page; 655 block->page = page;
656 656
657 SetPagePrivate(page); 657 SetPagePrivate(page);
658 page_cache_get(page); 658 get_page(page);
659 set_page_private(page, (unsigned long) block); 659 set_page_private(page, (unsigned long) block);
660 660
661 block->ops = &indirect_block_ops; 661 block->ops = &indirect_block_ops;
@@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index)
709 709
710static int logfs_read_empty(struct page *page) 710static int logfs_read_empty(struct page *page)
711{ 711{
712 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 712 zero_user_segment(page, 0, PAGE_SIZE);
713 return 0; 713 return 0;
714} 714}
715 715
@@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page,
1660 if (err) 1660 if (err)
1661 return err; 1661 return err;
1662 1662
1663 zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE); 1663 zero_user_segment(page, size - pageofs, PAGE_SIZE);
1664 return logfs_segment_write(inode, page, shadow); 1664 return logfs_segment_write(inode, page, shadow);
1665} 1665}
1666 1666
@@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page)
1919 block->page = NULL; 1919 block->page = NULL;
1920 if (PagePrivate(page)) { 1920 if (PagePrivate(page)) {
1921 ClearPagePrivate(page); 1921 ClearPagePrivate(page);
1922 page_cache_release(page); 1922 put_page(page);
1923 set_page_private(page, 0); 1923 set_page_private(page, 0);
1924 } 1924 }
1925} 1925}
@@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode)
1940 1940
1941 if (!PagePrivate(page)) { 1941 if (!PagePrivate(page)) {
1942 SetPagePrivate(page); 1942 SetPagePrivate(page);
1943 page_cache_get(page); 1943 get_page(page);
1944 set_page_private(page, (unsigned long) block); 1944 set_page_private(page, (unsigned long) block);
1945 } 1945 }
1946 1946
@@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode)
1971 logfs_disk_to_inode(di, inode); 1971 logfs_disk_to_inode(di, inode);
1972 kunmap_atomic(di); 1972 kunmap_atomic(di);
1973 move_page_to_inode(inode, page); 1973 move_page_to_inode(inode, page);
1974 page_cache_release(page); 1974 put_page(page);
1975 return 0; 1975 return 0;
1976} 1976}
1977 1977
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index d270e4b2ab6b..1efd6055f4b0 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
90 90
91 if (!PagePrivate(page)) { 91 if (!PagePrivate(page)) {
92 SetPagePrivate(page); 92 SetPagePrivate(page);
93 page_cache_get(page); 93 get_page(page);
94 } 94 }
95 page_cache_release(page); 95 put_page(page);
96 96
97 buf += copylen; 97 buf += copylen;
98 len -= copylen; 98 len -= copylen;
@@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area)
117 memset(page_address(page) + offset, 0xff, len); 117 memset(page_address(page) + offset, 0xff, len);
118 if (!PagePrivate(page)) { 118 if (!PagePrivate(page)) {
119 SetPagePrivate(page); 119 SetPagePrivate(page);
120 page_cache_get(page); 120 get_page(page);
121 } 121 }
122 page_cache_release(page); 122 put_page(page);
123 } 123 }
124} 124}
125 125
@@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area)
129 struct logfs_super *super = logfs_super(sb); 129 struct logfs_super *super = logfs_super(sb);
130 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); 130 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
131 u32 len = super->s_segsize - area->a_used_bytes; 131 u32 len = super->s_segsize - area->a_used_bytes;
132 pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT; 132 pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
133 pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT; 133 pgoff_t no_indizes = len >> PAGE_SHIFT;
134 struct page *page; 134 struct page *page;
135 135
136 while (no_indizes) { 136 while (no_indizes) {
137 page = get_mapping_page(sb, index, 0); 137 page = get_mapping_page(sb, index, 0);
138 BUG_ON(!page); /* FIXME: reserve a pool */ 138 BUG_ON(!page); /* FIXME: reserve a pool */
139 SetPageUptodate(page); 139 SetPageUptodate(page);
140 memset(page_address(page), 0xff, PAGE_CACHE_SIZE); 140 memset(page_address(page), 0xff, PAGE_SIZE);
141 if (!PagePrivate(page)) { 141 if (!PagePrivate(page)) {
142 SetPagePrivate(page); 142 SetPagePrivate(page);
143 page_cache_get(page); 143 get_page(page);
144 } 144 }
145 page_cache_release(page); 145 put_page(page);
146 index++; 146 index++;
147 no_indizes--; 147 no_indizes--;
148 } 148 }
@@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
411 if (IS_ERR(page)) 411 if (IS_ERR(page))
412 return PTR_ERR(page); 412 return PTR_ERR(page);
413 memcpy(buf, page_address(page) + offset, copylen); 413 memcpy(buf, page_address(page) + offset, copylen);
414 page_cache_release(page); 414 put_page(page);
415 415
416 buf += copylen; 416 buf += copylen;
417 len -= copylen; 417 len -= copylen;
@@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page,
499 499
500 if (!PagePrivate(page)) { 500 if (!PagePrivate(page)) {
501 SetPagePrivate(page); 501 SetPagePrivate(page);
502 page_cache_get(page); 502 get_page(page);
503 set_page_private(page, (unsigned long) block); 503 set_page_private(page, (unsigned long) block);
504 } 504 }
505 block->ops = &indirect_block_ops; 505 block->ops = &indirect_block_ops;
@@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page)
554 554
555 if (PagePrivate(page)) { 555 if (PagePrivate(page)) {
556 ClearPagePrivate(page); 556 ClearPagePrivate(page);
557 page_cache_release(page); 557 put_page(page);
558 set_page_private(page, 0); 558 set_page_private(page, 0);
559 } 559 }
560 block->ops = &btree_block_ops; 560 block->ops = &btree_block_ops;
@@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno)
723 continue; 723 continue;
724 if (PagePrivate(page)) { 724 if (PagePrivate(page)) {
725 ClearPagePrivate(page); 725 ClearPagePrivate(page);
726 page_cache_release(page); 726 put_page(page);
727 } 727 }
728 page_cache_release(page); 728 put_page(page);
729 } 729 }
730} 730}
731 731
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 54360293bcb5..5751082dba52 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -48,7 +48,7 @@ void emergency_read_end(struct page *page)
48 if (page == emergency_page) 48 if (page == emergency_page)
49 mutex_unlock(&emergency_mutex); 49 mutex_unlock(&emergency_mutex);
50 else 50 else
51 page_cache_release(page); 51 put_page(page);
52} 52}
53 53
54static void dump_segfile(struct super_block *sb) 54static void dump_segfile(struct super_block *sb)
@@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb,
206 logfs_set_segment_erased(sb, segno, ec, 0); 206 logfs_set_segment_erased(sb, segno, ec, 0);
207 logfs_write_ds(sb, ds, segno, ec); 207 logfs_write_ds(sb, ds, segno, ec);
208 err = super->s_devops->write_sb(sb, page); 208 err = super->s_devops->write_sb(sb, page);
209 page_cache_release(page); 209 put_page(page);
210 return err; 210 return err;
211} 211}
212 212
@@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb)
366 return NULL; 366 return NULL;
367 last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]); 367 last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
368 if (!last || IS_ERR(last)) { 368 if (!last || IS_ERR(last)) {
369 page_cache_release(first); 369 put_page(first);
370 return NULL; 370 return NULL;
371 } 371 }
372 372
373 if (!logfs_check_ds(page_address(first))) { 373 if (!logfs_check_ds(page_address(first))) {
374 page_cache_release(last); 374 put_page(last);
375 return first; 375 return first;
376 } 376 }
377 377
378 /* First one didn't work, try the second superblock */ 378 /* First one didn't work, try the second superblock */
379 if (!logfs_check_ds(page_address(last))) { 379 if (!logfs_check_ds(page_address(last))) {
380 page_cache_release(first); 380 put_page(first);
381 return last; 381 return last;
382 } 382 }
383 383
384 /* Neither worked, sorry folks */ 384 /* Neither worked, sorry folks */
385 page_cache_release(first); 385 put_page(first);
386 page_cache_release(last); 386 put_page(last);
387 return NULL; 387 return NULL;
388} 388}
389 389
@@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb)
425 super->s_data_levels = ds->ds_data_levels; 425 super->s_data_levels = ds->ds_data_levels;
426 super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels 426 super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
427 + super->s_data_levels; 427 + super->s_data_levels;
428 page_cache_release(page); 428 put_page(page);
429 return 0; 429 return 0;
430} 430}
431 431
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index d19ac258105a..33957c07cd11 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = {
28static inline void dir_put_page(struct page *page) 28static inline void dir_put_page(struct page *page)
29{ 29{
30 kunmap(page); 30 kunmap(page);
31 page_cache_release(page); 31 put_page(page);
32} 32}
33 33
34/* 34/*
@@ -38,10 +38,10 @@ static inline void dir_put_page(struct page *page)
38static unsigned 38static unsigned
39minix_last_byte(struct inode *inode, unsigned long page_nr) 39minix_last_byte(struct inode *inode, unsigned long page_nr)
40{ 40{
41 unsigned last_byte = PAGE_CACHE_SIZE; 41 unsigned last_byte = PAGE_SIZE;
42 42
43 if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT)) 43 if (page_nr == (inode->i_size >> PAGE_SHIFT))
44 last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1); 44 last_byte = inode->i_size & (PAGE_SIZE - 1);
45 return last_byte; 45 return last_byte;
46} 46}
47 47
@@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
92 if (pos >= inode->i_size) 92 if (pos >= inode->i_size)
93 return 0; 93 return 0;
94 94
95 offset = pos & ~PAGE_CACHE_MASK; 95 offset = pos & ~PAGE_MASK;
96 n = pos >> PAGE_CACHE_SHIFT; 96 n = pos >> PAGE_SHIFT;
97 97
98 for ( ; n < npages; n++, offset = 0) { 98 for ( ; n < npages; n++, offset = 0) {
99 char *p, *kaddr, *limit; 99 char *p, *kaddr, *limit;
@@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
229 lock_page(page); 229 lock_page(page);
230 kaddr = (char*)page_address(page); 230 kaddr = (char*)page_address(page);
231 dir_end = kaddr + minix_last_byte(dir, n); 231 dir_end = kaddr + minix_last_byte(dir, n);
232 limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize; 232 limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
233 for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { 233 for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
234 de = (minix_dirent *)p; 234 de = (minix_dirent *)p;
235 de3 = (minix3_dirent *)p; 235 de3 = (minix3_dirent *)p;
@@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
327 } 327 }
328 328
329 kaddr = kmap_atomic(page); 329 kaddr = kmap_atomic(page);
330 memset(kaddr, 0, PAGE_CACHE_SIZE); 330 memset(kaddr, 0, PAGE_SIZE);
331 331
332 if (sbi->s_version == MINIX_V3) { 332 if (sbi->s_version == MINIX_V3) {
333 minix3_dirent *de3 = (minix3_dirent *)kaddr; 333 minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
350 350
351 err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); 351 err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
352fail: 352fail:
353 page_cache_release(page); 353 put_page(page);
354 return err; 354 return err;
355} 355}
356 356
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index a795a11e50c7..2887d1d95ce2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
243out_dir: 243out_dir:
244 if (dir_de) { 244 if (dir_de) {
245 kunmap(dir_page); 245 kunmap(dir_page);
246 page_cache_release(dir_page); 246 put_page(dir_page);
247 } 247 }
248out_old: 248out_old:
249 kunmap(old_page); 249 kunmap(old_page);
250 page_cache_release(old_page); 250 put_page(old_page);
251out: 251out:
252 return err; 252 return err;
253} 253}
diff --git a/fs/mpage.c b/fs/mpage.c
index 6bd9fd90964e..eedc644b78d7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
107 * don't make any buffers if there is only one buffer on 107 * don't make any buffers if there is only one buffer on
108 * the page and the page just needs to be set up to date 108 * the page and the page just needs to be set up to date
109 */ 109 */
110 if (inode->i_blkbits == PAGE_CACHE_SHIFT && 110 if (inode->i_blkbits == PAGE_SHIFT &&
111 buffer_uptodate(bh)) { 111 buffer_uptodate(bh)) {
112 SetPageUptodate(page); 112 SetPageUptodate(page);
113 return; 113 return;
@@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
145{ 145{
146 struct inode *inode = page->mapping->host; 146 struct inode *inode = page->mapping->host;
147 const unsigned blkbits = inode->i_blkbits; 147 const unsigned blkbits = inode->i_blkbits;
148 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; 148 const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
149 const unsigned blocksize = 1 << blkbits; 149 const unsigned blocksize = 1 << blkbits;
150 sector_t block_in_file; 150 sector_t block_in_file;
151 sector_t last_block; 151 sector_t last_block;
@@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
162 if (page_has_buffers(page)) 162 if (page_has_buffers(page))
163 goto confused; 163 goto confused;
164 164
165 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 165 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
166 last_block = block_in_file + nr_pages * blocks_per_page; 166 last_block = block_in_file + nr_pages * blocks_per_page;
167 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; 167 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
168 if (last_block > last_block_in_file) 168 if (last_block > last_block_in_file)
@@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
249 } 249 }
250 250
251 if (first_hole != blocks_per_page) { 251 if (first_hole != blocks_per_page) {
252 zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); 252 zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
253 if (first_hole == 0) { 253 if (first_hole == 0) {
254 SetPageUptodate(page); 254 SetPageUptodate(page);
255 unlock_page(page); 255 unlock_page(page);
@@ -331,7 +331,7 @@ confused:
331 * 331 *
332 * then this code just gives up and calls the buffer_head-based read function. 332 * then this code just gives up and calls the buffer_head-based read function.
333 * It does handle a page which has holes at the end - that is a common case: 333 * It does handle a page which has holes at the end - that is a common case:
334 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. 334 * the end-of-file on blocksize < PAGE_SIZE setups.
335 * 335 *
336 * BH_Boundary explanation: 336 * BH_Boundary explanation:
337 * 337 *
@@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
380 &first_logical_block, 380 &first_logical_block,
381 get_block, gfp); 381 get_block, gfp);
382 } 382 }
383 page_cache_release(page); 383 put_page(page);
384 } 384 }
385 BUG_ON(!list_empty(pages)); 385 BUG_ON(!list_empty(pages));
386 if (bio) 386 if (bio)
@@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
472 struct inode *inode = page->mapping->host; 472 struct inode *inode = page->mapping->host;
473 const unsigned blkbits = inode->i_blkbits; 473 const unsigned blkbits = inode->i_blkbits;
474 unsigned long end_index; 474 unsigned long end_index;
475 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; 475 const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
476 sector_t last_block; 476 sector_t last_block;
477 sector_t block_in_file; 477 sector_t block_in_file;
478 sector_t blocks[MAX_BUF_PER_PAGE]; 478 sector_t blocks[MAX_BUF_PER_PAGE];
@@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
542 * The page has no buffers: map it to disk 542 * The page has no buffers: map it to disk
543 */ 543 */
544 BUG_ON(!PageUptodate(page)); 544 BUG_ON(!PageUptodate(page));
545 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 545 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
546 last_block = (i_size - 1) >> blkbits; 546 last_block = (i_size - 1) >> blkbits;
547 map_bh.b_page = page; 547 map_bh.b_page = page;
548 for (page_block = 0; page_block < blocks_per_page; ) { 548 for (page_block = 0; page_block < blocks_per_page; ) {
@@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
574 first_unmapped = page_block; 574 first_unmapped = page_block;
575 575
576page_is_mapped: 576page_is_mapped:
577 end_index = i_size >> PAGE_CACHE_SHIFT; 577 end_index = i_size >> PAGE_SHIFT;
578 if (page->index >= end_index) { 578 if (page->index >= end_index) {
579 /* 579 /*
580 * The page straddles i_size. It must be zeroed out on each 580 * The page straddles i_size. It must be zeroed out on each
@@ -584,11 +584,11 @@ page_is_mapped:
584 * is zeroed when mapped, and writes to that region are not 584 * is zeroed when mapped, and writes to that region are not
585 * written out to the file." 585 * written out to the file."
586 */ 586 */
587 unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); 587 unsigned offset = i_size & (PAGE_SIZE - 1);
588 588
589 if (page->index > end_index || !offset) 589 if (page->index > end_index || !offset)
590 goto confused; 590 goto confused;
591 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 591 zero_user_segment(page, offset, PAGE_SIZE);
592 } 592 }
593 593
594 /* 594 /*
diff --git a/fs/namei.c b/fs/namei.c
index 794f81dce766..1d9ca2d5dff6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1740,15 +1740,17 @@ static int walk_component(struct nameidata *nd, int flags)
1740 nd->flags); 1740 nd->flags);
1741 if (IS_ERR(path.dentry)) 1741 if (IS_ERR(path.dentry))
1742 return PTR_ERR(path.dentry); 1742 return PTR_ERR(path.dentry);
1743 if (unlikely(d_is_negative(path.dentry))) { 1743
1744 dput(path.dentry);
1745 return -ENOENT;
1746 }
1747 path.mnt = nd->path.mnt; 1744 path.mnt = nd->path.mnt;
1748 err = follow_managed(&path, nd); 1745 err = follow_managed(&path, nd);
1749 if (unlikely(err < 0)) 1746 if (unlikely(err < 0))
1750 return err; 1747 return err;
1751 1748
1749 if (unlikely(d_is_negative(path.dentry))) {
1750 path_to_nameidata(&path, nd);
1751 return -ENOENT;
1752 }
1753
1752 seq = 0; /* we are already out of RCU mode */ 1754 seq = 0; /* we are already out of RCU mode */
1753 inode = d_backing_inode(path.dentry); 1755 inode = d_backing_inode(path.dentry);
1754 } 1756 }
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b7f8eaeea5d8..bfdad003ee56 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
510 kunmap(ctl.page); 510 kunmap(ctl.page);
511 SetPageUptodate(ctl.page); 511 SetPageUptodate(ctl.page);
512 unlock_page(ctl.page); 512 unlock_page(ctl.page);
513 page_cache_release(ctl.page); 513 put_page(ctl.page);
514 ctl.page = NULL; 514 ctl.page = NULL;
515 } 515 }
516 ctl.idx = 0; 516 ctl.idx = 0;
@@ -520,7 +520,7 @@ invalid_cache:
520 if (ctl.page) { 520 if (ctl.page) {
521 kunmap(ctl.page); 521 kunmap(ctl.page);
522 unlock_page(ctl.page); 522 unlock_page(ctl.page);
523 page_cache_release(ctl.page); 523 put_page(ctl.page);
524 ctl.page = NULL; 524 ctl.page = NULL;
525 } 525 }
526 ctl.cache = cache; 526 ctl.cache = cache;
@@ -554,14 +554,14 @@ finished:
554 kunmap(ctl.page); 554 kunmap(ctl.page);
555 SetPageUptodate(ctl.page); 555 SetPageUptodate(ctl.page);
556 unlock_page(ctl.page); 556 unlock_page(ctl.page);
557 page_cache_release(ctl.page); 557 put_page(ctl.page);
558 } 558 }
559 if (page) { 559 if (page) {
560 cache->head = ctl.head; 560 cache->head = ctl.head;
561 kunmap(page); 561 kunmap(page);
562 SetPageUptodate(page); 562 SetPageUptodate(page);
563 unlock_page(page); 563 unlock_page(page);
564 page_cache_release(page); 564 put_page(page);
565 } 565 }
566out: 566out:
567 return result; 567 return result;
@@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
649 kunmap(ctl.page); 649 kunmap(ctl.page);
650 SetPageUptodate(ctl.page); 650 SetPageUptodate(ctl.page);
651 unlock_page(ctl.page); 651 unlock_page(ctl.page);
652 page_cache_release(ctl.page); 652 put_page(ctl.page);
653 } 653 }
654 ctl.cache = NULL; 654 ctl.cache = NULL;
655 ctl.idx -= NCP_DIRCACHE_SIZE; 655 ctl.idx -= NCP_DIRCACHE_SIZE;
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 5233fbc1747a..17cfb743b5bf 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -191,7 +191,7 @@ struct ncp_cache_head {
191 int eof; 191 int eof;
192}; 192};
193 193
194#define NCP_DIRCACHE_SIZE ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *))) 194#define NCP_DIRCACHE_SIZE ((int)(PAGE_SIZE/sizeof(struct dentry *)))
195union ncp_dir_cache { 195union ncp_dir_cache {
196 struct ncp_cache_head head; 196 struct ncp_cache_head head;
197 struct dentry *dentry[NCP_DIRCACHE_SIZE]; 197 struct dentry *dentry[NCP_DIRCACHE_SIZE];
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 02e4d87d2ed3..17a42e4eb872 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
231 size_t bytes_left = header->args.count; 231 size_t bytes_left = header->args.count;
232 unsigned int pg_offset = header->args.pgbase, pg_len; 232 unsigned int pg_offset = header->args.pgbase, pg_len;
233 struct page **pages = header->args.pages; 233 struct page **pages = header->args.pages;
234 int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; 234 int pg_index = header->args.pgbase >> PAGE_SHIFT;
235 const bool is_dio = (header->dreq != NULL); 235 const bool is_dio = (header->dreq != NULL);
236 struct blk_plug plug; 236 struct blk_plug plug;
237 int i; 237 int i;
@@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header)
263 } 263 }
264 264
265 if (is_dio) { 265 if (is_dio) {
266 if (pg_offset + bytes_left > PAGE_CACHE_SIZE) 266 if (pg_offset + bytes_left > PAGE_SIZE)
267 pg_len = PAGE_CACHE_SIZE - pg_offset; 267 pg_len = PAGE_SIZE - pg_offset;
268 else 268 else
269 pg_len = bytes_left; 269 pg_len = bytes_left;
270 } else { 270 } else {
271 BUG_ON(pg_offset != 0); 271 BUG_ON(pg_offset != 0);
272 pg_len = PAGE_CACHE_SIZE; 272 pg_len = PAGE_SIZE;
273 } 273 }
274 274
275 if (is_hole(&be)) { 275 if (is_hole(&be)) {
@@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work)
339 339
340 if (likely(!hdr->pnfs_error)) { 340 if (likely(!hdr->pnfs_error)) {
341 struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); 341 struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
342 u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK; 342 u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
343 u64 end = (hdr->args.offset + hdr->args.count + 343 u64 end = (hdr->args.offset + hdr->args.count +
344 PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK; 344 PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
345 345
346 ext_tree_mark_written(bl, start >> SECTOR_SHIFT, 346 ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
347 (end - start) >> SECTOR_SHIFT); 347 (end - start) >> SECTOR_SHIFT);
@@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
373 loff_t offset = header->args.offset; 373 loff_t offset = header->args.offset;
374 size_t count = header->args.count; 374 size_t count = header->args.count;
375 struct page **pages = header->args.pages; 375 struct page **pages = header->args.pages;
376 int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; 376 int pg_index = header->args.pgbase >> PAGE_SHIFT;
377 unsigned int pg_len; 377 unsigned int pg_len;
378 struct blk_plug plug; 378 struct blk_plug plug;
379 int i; 379 int i;
@@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
392 blk_start_plug(&plug); 392 blk_start_plug(&plug);
393 393
394 /* we always write out the whole page */ 394 /* we always write out the whole page */
395 offset = offset & (loff_t)PAGE_CACHE_MASK; 395 offset = offset & (loff_t)PAGE_MASK;
396 isect = offset >> SECTOR_SHIFT; 396 isect = offset >> SECTOR_SHIFT;
397 397
398 for (i = pg_index; i < header->page_array.npages; i++) { 398 for (i = pg_index; i < header->page_array.npages; i++) {
@@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
408 extent_length = be.be_length - (isect - be.be_f_offset); 408 extent_length = be.be_length - (isect - be.be_f_offset);
409 } 409 }
410 410
411 pg_len = PAGE_CACHE_SIZE; 411 pg_len = PAGE_SIZE;
412 bio = do_add_page_to_bio(bio, header->page_array.npages - i, 412 bio = do_add_page_to_bio(bio, header->page_array.npages - i,
413 WRITE, isect, pages[i], &map, &be, 413 WRITE, isect, pages[i], &map, &be,
414 bl_end_io_write, par, 414 bl_end_io_write, par,
@@ -820,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
820 pgoff_t end; 820 pgoff_t end;
821 821
822 /* Optimize common case that writes from 0 to end of file */ 822 /* Optimize common case that writes from 0 to end of file */
823 end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); 823 end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
824 if (end != inode->i_mapping->nrpages) { 824 if (end != inode->i_mapping->nrpages) {
825 rcu_read_lock(); 825 rcu_read_lock();
826 end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX); 826 end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
@@ -828,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
828 } 828 }
829 829
830 if (!end) 830 if (!end)
831 return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT); 831 return i_size_read(inode) - (idx << PAGE_SHIFT);
832 else 832 else
833 return (end - idx) << PAGE_CACHE_SHIFT; 833 return (end - idx) << PAGE_SHIFT;
834} 834}
835 835
836static void 836static void
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index bc21205309e0..18e6fd0b9506 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -40,8 +40,8 @@
40#include "../pnfs.h" 40#include "../pnfs.h"
41#include "../netns.h" 41#include "../netns.h"
42 42
43#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) 43#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
44#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) 44#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
45#define SECTOR_SIZE (1 << SECTOR_SHIFT) 45#define SECTOR_SIZE (1 << SECTOR_SHIFT)
46 46
47struct pnfs_block_dev; 47struct pnfs_block_dev;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d6d5d2a48e83..0c96528db94a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
736 server->rsize = max_rpc_payload; 736 server->rsize = max_rpc_payload;
737 if (server->rsize > NFS_MAX_FILE_IO_SIZE) 737 if (server->rsize > NFS_MAX_FILE_IO_SIZE)
738 server->rsize = NFS_MAX_FILE_IO_SIZE; 738 server->rsize = NFS_MAX_FILE_IO_SIZE;
739 server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 739 server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
740 740
741 server->backing_dev_info.name = "nfs"; 741 server->backing_dev_info.name = "nfs";
742 server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD; 742 server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
@@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
745 server->wsize = max_rpc_payload; 745 server->wsize = max_rpc_payload;
746 if (server->wsize > NFS_MAX_FILE_IO_SIZE) 746 if (server->wsize > NFS_MAX_FILE_IO_SIZE)
747 server->wsize = NFS_MAX_FILE_IO_SIZE; 747 server->wsize = NFS_MAX_FILE_IO_SIZE;
748 server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 748 server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
749 749
750 server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL); 750 server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
751 751
752 server->dtsize = nfs_block_size(fsinfo->dtpref, NULL); 752 server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
753 if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES) 753 if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES)
754 server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES; 754 server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES;
755 if (server->dtsize > server->rsize) 755 if (server->dtsize > server->rsize)
756 server->dtsize = server->rsize; 756 server->dtsize = server->rsize;
757 757
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4bfa7d8bcade..33eb81738d03 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
377 again: 377 again:
378 timestamp = jiffies; 378 timestamp = jiffies;
379 gencount = nfs_inc_attr_generation_counter(); 379 gencount = nfs_inc_attr_generation_counter();
380 error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages, 380 error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
381 NFS_SERVER(inode)->dtsize, desc->plus); 381 NFS_SERVER(inode)->dtsize, desc->plus);
382 if (error < 0) { 382 if (error < 0) {
383 /* We requested READDIRPLUS, but the server doesn't grok it */ 383 /* We requested READDIRPLUS, but the server doesn't grok it */
@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
560 count++; 560 count++;
561 561
562 if (desc->plus != 0) 562 if (desc->plus != 0)
563 nfs_prime_dcache(desc->file->f_path.dentry, entry); 563 nfs_prime_dcache(file_dentry(desc->file), entry);
564 564
565 status = nfs_readdir_add_to_array(entry, page); 565 status = nfs_readdir_add_to_array(entry, page);
566 if (status != 0) 566 if (status != 0)
@@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
707{ 707{
708 if (!desc->page->mapping) 708 if (!desc->page->mapping)
709 nfs_readdir_clear_array(desc->page); 709 nfs_readdir_clear_array(desc->page);
710 page_cache_release(desc->page); 710 put_page(desc->page);
711 desc->page = NULL; 711 desc->page = NULL;
712} 712}
713 713
@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
864 */ 864 */
865static int nfs_readdir(struct file *file, struct dir_context *ctx) 865static int nfs_readdir(struct file *file, struct dir_context *ctx)
866{ 866{
867 struct dentry *dentry = file->f_path.dentry; 867 struct dentry *dentry = file_dentry(file);
868 struct inode *inode = d_inode(dentry); 868 struct inode *inode = d_inode(dentry);
869 nfs_readdir_descriptor_t my_desc, 869 nfs_readdir_descriptor_t my_desc,
870 *desc = &my_desc; 870 *desc = &my_desc;
@@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1923 * add_to_page_cache_lru() grabs an extra page refcount. 1923 * add_to_page_cache_lru() grabs an extra page refcount.
1924 * Drop it here to avoid leaking this page later. 1924 * Drop it here to avoid leaking this page later.
1925 */ 1925 */
1926 page_cache_release(page); 1926 put_page(page);
1927 } else 1927 } else
1928 __free_page(page); 1928 __free_page(page);
1929 1929
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 7a0cfd3266e5..c93826e4a8c6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
269{ 269{
270 unsigned int i; 270 unsigned int i;
271 for (i = 0; i < npages; i++) 271 for (i = 0; i < npages; i++)
272 page_cache_release(pages[i]); 272 put_page(pages[i]);
273} 273}
274 274
275void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, 275void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
@@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1003 iov_iter_count(iter)); 1003 iov_iter_count(iter));
1004 1004
1005 pos = iocb->ki_pos; 1005 pos = iocb->ki_pos;
1006 end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT; 1006 end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
1007 1007
1008 inode_lock(inode); 1008 inode_lock(inode);
1009 1009
@@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1013 1013
1014 if (mapping->nrpages) { 1014 if (mapping->nrpages) {
1015 result = invalidate_inode_pages2_range(mapping, 1015 result = invalidate_inode_pages2_range(mapping,
1016 pos >> PAGE_CACHE_SHIFT, end); 1016 pos >> PAGE_SHIFT, end);
1017 if (result) 1017 if (result)
1018 goto out_unlock; 1018 goto out_unlock;
1019 } 1019 }
@@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1042 1042
1043 if (mapping->nrpages) { 1043 if (mapping->nrpages) {
1044 invalidate_inode_pages2_range(mapping, 1044 invalidate_inode_pages2_range(mapping,
1045 pos >> PAGE_CACHE_SHIFT, end); 1045 pos >> PAGE_SHIFT, end);
1046 } 1046 }
1047 1047
1048 inode_unlock(inode); 1048 inode_unlock(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 89bf093d342a..be01095b97ae 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -320,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page,
320 loff_t pos, unsigned len) 320 loff_t pos, unsigned len)
321{ 321{
322 unsigned int pglen = nfs_page_length(page); 322 unsigned int pglen = nfs_page_length(page);
323 unsigned int offset = pos & (PAGE_CACHE_SIZE - 1); 323 unsigned int offset = pos & (PAGE_SIZE - 1);
324 unsigned int end = offset + len; 324 unsigned int end = offset + len;
325 325
326 if (pnfs_ld_read_whole_page(file->f_mapping->host)) { 326 if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
351 struct page **pagep, void **fsdata) 351 struct page **pagep, void **fsdata)
352{ 352{
353 int ret; 353 int ret;
354 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 354 pgoff_t index = pos >> PAGE_SHIFT;
355 struct page *page; 355 struct page *page;
356 int once_thru = 0; 356 int once_thru = 0;
357 357
@@ -380,12 +380,12 @@ start:
380 ret = nfs_flush_incompatible(file, page); 380 ret = nfs_flush_incompatible(file, page);
381 if (ret) { 381 if (ret) {
382 unlock_page(page); 382 unlock_page(page);
383 page_cache_release(page); 383 put_page(page);
384 } else if (!once_thru && 384 } else if (!once_thru &&
385 nfs_want_read_modify_write(file, page, pos, len)) { 385 nfs_want_read_modify_write(file, page, pos, len)) {
386 once_thru = 1; 386 once_thru = 1;
387 ret = nfs_readpage(file, page); 387 ret = nfs_readpage(file, page);
388 page_cache_release(page); 388 put_page(page);
389 if (!ret) 389 if (!ret)
390 goto start; 390 goto start;
391 } 391 }
@@ -396,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
396 loff_t pos, unsigned len, unsigned copied, 396 loff_t pos, unsigned len, unsigned copied,
397 struct page *page, void *fsdata) 397 struct page *page, void *fsdata)
398{ 398{
399 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 399 unsigned offset = pos & (PAGE_SIZE - 1);
400 struct nfs_open_context *ctx = nfs_file_open_context(file); 400 struct nfs_open_context *ctx = nfs_file_open_context(file);
401 int status; 401 int status;
402 402
@@ -413,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
413 413
414 if (pglen == 0) { 414 if (pglen == 0) {
415 zero_user_segments(page, 0, offset, 415 zero_user_segments(page, 0, offset,
416 end, PAGE_CACHE_SIZE); 416 end, PAGE_SIZE);
417 SetPageUptodate(page); 417 SetPageUptodate(page);
418 } else if (end >= pglen) { 418 } else if (end >= pglen) {
419 zero_user_segment(page, end, PAGE_CACHE_SIZE); 419 zero_user_segment(page, end, PAGE_SIZE);
420 if (offset == 0) 420 if (offset == 0)
421 SetPageUptodate(page); 421 SetPageUptodate(page);
422 } else 422 } else
423 zero_user_segment(page, pglen, PAGE_CACHE_SIZE); 423 zero_user_segment(page, pglen, PAGE_SIZE);
424 } 424 }
425 425
426 status = nfs_updatepage(file, page, offset, copied); 426 status = nfs_updatepage(file, page, offset, copied);
427 427
428 unlock_page(page); 428 unlock_page(page);
429 page_cache_release(page); 429 put_page(page);
430 430
431 if (status < 0) 431 if (status < 0)
432 return status; 432 return status;
@@ -454,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
454 dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", 454 dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
455 page, offset, length); 455 page, offset, length);
456 456
457 if (offset != 0 || length < PAGE_CACHE_SIZE) 457 if (offset != 0 || length < PAGE_SIZE)
458 return; 458 return;
459 /* Cancel any unstarted writes on this page */ 459 /* Cancel any unstarted writes on this page */
460 nfs_wb_page_cancel(page_file_mapping(page)->host, page); 460 nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 33d18c411905..738c84a42eb0 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
940{ 940{
941 struct nfs_open_context *ctx; 941 struct nfs_open_context *ctx;
942 942
943 ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); 943 ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
944 if (IS_ERR(ctx)) 944 if (IS_ERR(ctx))
945 return PTR_ERR(ctx); 945 return PTR_ERR(ctx);
946 nfs_file_set_open_context(filp, ctx); 946 nfs_file_set_open_context(filp, ctx);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 565f8135ae1f..f1d1d2c472e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -638,11 +638,11 @@ unsigned int nfs_page_length(struct page *page)
638 638
639 if (i_size > 0) { 639 if (i_size > 0) {
640 pgoff_t page_index = page_file_index(page); 640 pgoff_t page_index = page_file_index(page);
641 pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; 641 pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
642 if (page_index < end_index) 642 if (page_index < end_index)
643 return PAGE_CACHE_SIZE; 643 return PAGE_SIZE;
644 if (page_index == end_index) 644 if (page_index == end_index)
645 return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; 645 return ((i_size - 1) & ~PAGE_MASK) + 1;
646 } 646 }
647 return 0; 647 return 0;
648} 648}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 22c35abbee9d..d0390516467c 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -26,7 +26,7 @@ static int
26nfs4_file_open(struct inode *inode, struct file *filp) 26nfs4_file_open(struct inode *inode, struct file *filp)
27{ 27{
28 struct nfs_open_context *ctx; 28 struct nfs_open_context *ctx;
29 struct dentry *dentry = filp->f_path.dentry; 29 struct dentry *dentry = file_dentry(filp);
30 struct dentry *parent = NULL; 30 struct dentry *parent = NULL;
31 struct inode *dir; 31 struct inode *dir;
32 unsigned openflags = filp->f_flags; 32 unsigned openflags = filp->f_flags;
@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
57 parent = dget_parent(dentry); 57 parent = dget_parent(dentry);
58 dir = d_inode(parent); 58 dir = d_inode(parent);
59 59
60 ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); 60 ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
61 err = PTR_ERR(ctx); 61 err = PTR_ERR(ctx);
62 if (IS_ERR(ctx)) 62 if (IS_ERR(ctx))
63 goto out; 63 goto out;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e4441216804..88474a4fc669 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr,
5001 blocksize = be32_to_cpup(p); 5001 blocksize = be32_to_cpup(p);
5002 maxsize = (uint64_t)nblocks * (uint64_t)blocksize; 5002 maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
5003 } 5003 }
5004 maxsize >>= PAGE_CACHE_SHIFT; 5004 maxsize >>= PAGE_SHIFT;
5005 *pagemod_limit = min_t(u64, maxsize, ULONG_MAX); 5005 *pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
5006 return 0; 5006 return 0;
5007out_overflow: 5007out_overflow:
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9aebffb40505..049c1b1f2932 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page)
486 dprintk("%s: index=0x%lx\n", __func__, 486 dprintk("%s: index=0x%lx\n", __func__,
487 (page == ZERO_PAGE(0)) ? -1UL : page->index); 487 (page == ZERO_PAGE(0)) ? -1UL : page->index);
488 if (ZERO_PAGE(0) != page) 488 if (ZERO_PAGE(0) != page)
489 page_cache_release(page); 489 put_page(page);
490 return; 490 return;
491} 491}
492 492
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8ce4f61cbaa5..1f6db4231057 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
342 * update_nfs_request below if the region is not locked. */ 342 * update_nfs_request below if the region is not locked. */
343 req->wb_page = page; 343 req->wb_page = page;
344 req->wb_index = page_file_index(page); 344 req->wb_index = page_file_index(page);
345 page_cache_get(page); 345 get_page(page);
346 req->wb_offset = offset; 346 req->wb_offset = offset;
347 req->wb_pgbase = offset; 347 req->wb_pgbase = offset;
348 req->wb_bytes = count; 348 req->wb_bytes = count;
@@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req)
392 struct nfs_lock_context *l_ctx = req->wb_lock_context; 392 struct nfs_lock_context *l_ctx = req->wb_lock_context;
393 393
394 if (page != NULL) { 394 if (page != NULL) {
395 page_cache_release(page); 395 put_page(page);
396 req->wb_page = NULL; 396 req->wb_page = NULL;
397 } 397 }
398 if (l_ctx != NULL) { 398 if (l_ctx != NULL) {
@@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
904 return false; 904 return false;
905 } else { 905 } else {
906 if (req->wb_pgbase != 0 || 906 if (req->wb_pgbase != 0 ||
907 prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) 907 prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
908 return false; 908 return false;
909 } 909 }
910 } 910 }
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2fa483e6dbe2..89a5ef4df08a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
841 841
842 i_size = i_size_read(ino); 842 i_size = i_size_read(ino);
843 843
844 lgp->args.minlength = PAGE_CACHE_SIZE; 844 lgp->args.minlength = PAGE_SIZE;
845 if (lgp->args.minlength > range->length) 845 if (lgp->args.minlength > range->length)
846 lgp->args.minlength = range->length; 846 lgp->args.minlength = range->length;
847 if (range->iomode == IOMODE_READ) { 847 if (range->iomode == IOMODE_READ) {
@@ -1618,13 +1618,13 @@ lookup_again:
1618 spin_unlock(&clp->cl_lock); 1618 spin_unlock(&clp->cl_lock);
1619 } 1619 }
1620 1620
1621 pg_offset = arg.offset & ~PAGE_CACHE_MASK; 1621 pg_offset = arg.offset & ~PAGE_MASK;
1622 if (pg_offset) { 1622 if (pg_offset) {
1623 arg.offset -= pg_offset; 1623 arg.offset -= pg_offset;
1624 arg.length += pg_offset; 1624 arg.length += pg_offset;
1625 } 1625 }
1626 if (arg.length != NFS4_MAX_UINT64) 1626 if (arg.length != NFS4_MAX_UINT64)
1627 arg.length = PAGE_CACHE_ALIGN(arg.length); 1627 arg.length = PAGE_ALIGN(arg.length);
1628 1628
1629 lseg = send_layoutget(lo, ctx, &arg, gfp_flags); 1629 lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1630 atomic_dec(&lo->plh_outstanding); 1630 atomic_dec(&lo->plh_outstanding);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb31e23e7def..6776d7a7839e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
46static 46static
47int nfs_return_empty_page(struct page *page) 47int nfs_return_empty_page(struct page *page)
48{ 48{
49 zero_user(page, 0, PAGE_CACHE_SIZE); 49 zero_user(page, 0, PAGE_SIZE);
50 SetPageUptodate(page); 50 SetPageUptodate(page);
51 unlock_page(page); 51 unlock_page(page);
52 return 0; 52 return 0;
@@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
118 unlock_page(page); 118 unlock_page(page);
119 return PTR_ERR(new); 119 return PTR_ERR(new);
120 } 120 }
121 if (len < PAGE_CACHE_SIZE) 121 if (len < PAGE_SIZE)
122 zero_user_segment(page, len, PAGE_CACHE_SIZE); 122 zero_user_segment(page, len, PAGE_SIZE);
123 123
124 nfs_pageio_init_read(&pgio, inode, false, 124 nfs_pageio_init_read(&pgio, inode, false,
125 &nfs_async_read_completion_ops); 125 &nfs_async_read_completion_ops);
@@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
295 int error; 295 int error;
296 296
297 dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", 297 dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
298 page, PAGE_CACHE_SIZE, page_file_index(page)); 298 page, PAGE_SIZE, page_file_index(page));
299 nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); 299 nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
300 nfs_add_stats(inode, NFSIOS_READPAGES, 1); 300 nfs_add_stats(inode, NFSIOS_READPAGES, 1);
301 301
@@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page)
361 if (IS_ERR(new)) 361 if (IS_ERR(new))
362 goto out_error; 362 goto out_error;
363 363
364 if (len < PAGE_CACHE_SIZE) 364 if (len < PAGE_SIZE)
365 zero_user_segment(page, len, PAGE_CACHE_SIZE); 365 zero_user_segment(page, len, PAGE_SIZE);
366 if (!nfs_pageio_add_request(desc->pgio, new)) { 366 if (!nfs_pageio_add_request(desc->pgio, new)) {
367 nfs_list_remove_request(new); 367 nfs_list_remove_request(new);
368 nfs_readpage_release(new); 368 nfs_readpage_release(new);
@@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
424 424
425 pgm = &pgio.pg_mirrors[0]; 425 pgm = &pgio.pg_mirrors[0];
426 NFS_I(inode)->read_io += pgm->pg_bytes_written; 426 NFS_I(inode)->read_io += pgm->pg_bytes_written;
427 npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> 427 npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
428 PAGE_CACHE_SHIFT; 428 PAGE_SHIFT;
429 nfs_add_stats(inode, NFSIOS_READPAGES, npages); 429 nfs_add_stats(inode, NFSIOS_READPAGES, npages);
430read_complete: 430read_complete:
431 put_nfs_open_context(desc.ctx); 431 put_nfs_open_context(desc.ctx);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5754835a2886..5f4fd53e5764 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
150 150
151 spin_lock(&inode->i_lock); 151 spin_lock(&inode->i_lock);
152 i_size = i_size_read(inode); 152 i_size = i_size_read(inode);
153 end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; 153 end_index = (i_size - 1) >> PAGE_SHIFT;
154 if (i_size > 0 && page_file_index(page) < end_index) 154 if (i_size > 0 && page_file_index(page) < end_index)
155 goto out; 155 goto out;
156 end = page_file_offset(page) + ((loff_t)offset+count); 156 end = page_file_offset(page) + ((loff_t)offset+count);
@@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1942int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder) 1942int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
1943{ 1943{
1944 loff_t range_start = page_file_offset(page); 1944 loff_t range_start = page_file_offset(page);
1945 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); 1945 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
1946 struct writeback_control wbc = { 1946 struct writeback_control wbc = {
1947 .sync_mode = WB_SYNC_ALL, 1947 .sync_mode = WB_SYNC_ALL,
1948 .nr_to_write = 0, 1948 .nr_to_write = 0,
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 27f75bcbeb30..a9fb3636c142 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
458 struct buffer_head *pbh; 458 struct buffer_head *pbh;
459 __u64 key; 459 __u64 key;
460 460
461 key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT - 461 key = page_index(bh->b_page) << (PAGE_SHIFT -
462 bmap->b_inode->i_blkbits); 462 bmap->b_inode->i_blkbits);
463 for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page) 463 for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
464 key++; 464 key++;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35e6932..e0c9daf9aa22 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
62 set_buffer_uptodate(bh); 62 set_buffer_uptodate(bh);
63 63
64 unlock_page(bh->b_page); 64 unlock_page(bh->b_page);
65 page_cache_release(bh->b_page); 65 put_page(bh->b_page);
66 return bh; 66 return bh;
67} 67}
68 68
@@ -128,7 +128,7 @@ found:
128 128
129out_locked: 129out_locked:
130 unlock_page(page); 130 unlock_page(page);
131 page_cache_release(page); 131 put_page(page);
132 return err; 132 return err;
133} 133}
134 134
@@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
146 pgoff_t index = page_index(page); 146 pgoff_t index = page_index(page);
147 int still_dirty; 147 int still_dirty;
148 148
149 page_cache_get(page); 149 get_page(page);
150 lock_page(page); 150 lock_page(page);
151 wait_on_page_writeback(page); 151 wait_on_page_writeback(page);
152 152
@@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
154 still_dirty = PageDirty(page); 154 still_dirty = PageDirty(page);
155 mapping = page->mapping; 155 mapping = page->mapping;
156 unlock_page(page); 156 unlock_page(page);
157 page_cache_release(page); 157 put_page(page);
158 158
159 if (!still_dirty && mapping) 159 if (!still_dirty && mapping)
160 invalidate_inode_pages2_range(mapping, index, index); 160 invalidate_inode_pages2_range(mapping, index, index);
@@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
181 obh = ctxt->bh; 181 obh = ctxt->bh;
182 ctxt->newbh = NULL; 182 ctxt->newbh = NULL;
183 183
184 if (inode->i_blkbits == PAGE_CACHE_SHIFT) { 184 if (inode->i_blkbits == PAGE_SHIFT) {
185 lock_page(obh->b_page); 185 lock_page(obh->b_page);
186 /* 186 /*
187 * We cannot call radix_tree_preload for the kernels older 187 * We cannot call radix_tree_preload for the kernels older
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 6b8b92b19cec..e08f064e4bd7 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode)
58static inline void nilfs_put_page(struct page *page) 58static inline void nilfs_put_page(struct page *page)
59{ 59{
60 kunmap(page); 60 kunmap(page);
61 page_cache_release(page); 61 put_page(page);
62} 62}
63 63
64/* 64/*
@@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
69{ 69{
70 unsigned last_byte = inode->i_size; 70 unsigned last_byte = inode->i_size;
71 71
72 last_byte -= page_nr << PAGE_CACHE_SHIFT; 72 last_byte -= page_nr << PAGE_SHIFT;
73 if (last_byte > PAGE_CACHE_SIZE) 73 if (last_byte > PAGE_SIZE)
74 last_byte = PAGE_CACHE_SIZE; 74 last_byte = PAGE_SIZE;
75 return last_byte; 75 return last_byte;
76} 76}
77 77
@@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page)
109 unsigned chunk_size = nilfs_chunk_size(dir); 109 unsigned chunk_size = nilfs_chunk_size(dir);
110 char *kaddr = page_address(page); 110 char *kaddr = page_address(page);
111 unsigned offs, rec_len; 111 unsigned offs, rec_len;
112 unsigned limit = PAGE_CACHE_SIZE; 112 unsigned limit = PAGE_SIZE;
113 struct nilfs_dir_entry *p; 113 struct nilfs_dir_entry *p;
114 char *error; 114 char *error;
115 115
116 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { 116 if ((dir->i_size >> PAGE_SHIFT) == page->index) {
117 limit = dir->i_size & ~PAGE_CACHE_MASK; 117 limit = dir->i_size & ~PAGE_MASK;
118 if (limit & (chunk_size - 1)) 118 if (limit & (chunk_size - 1))
119 goto Ebadsize; 119 goto Ebadsize;
120 if (!limit) 120 if (!limit)
@@ -161,7 +161,7 @@ Espan:
161bad_entry: 161bad_entry:
162 nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - " 162 nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
163 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", 163 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
164 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, 164 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
165 (unsigned long) le64_to_cpu(p->inode), 165 (unsigned long) le64_to_cpu(p->inode),
166 rec_len, p->name_len); 166 rec_len, p->name_len);
167 goto fail; 167 goto fail;
@@ -170,7 +170,7 @@ Eend:
170 nilfs_error(sb, "nilfs_check_page", 170 nilfs_error(sb, "nilfs_check_page",
171 "entry in directory #%lu spans the page boundary" 171 "entry in directory #%lu spans the page boundary"
172 "offset=%lu, inode=%lu", 172 "offset=%lu, inode=%lu",
173 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, 173 dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
174 (unsigned long) le64_to_cpu(p->inode)); 174 (unsigned long) le64_to_cpu(p->inode));
175fail: 175fail:
176 SetPageChecked(page); 176 SetPageChecked(page);
@@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
256 loff_t pos = ctx->pos; 256 loff_t pos = ctx->pos;
257 struct inode *inode = file_inode(file); 257 struct inode *inode = file_inode(file);
258 struct super_block *sb = inode->i_sb; 258 struct super_block *sb = inode->i_sb;
259 unsigned int offset = pos & ~PAGE_CACHE_MASK; 259 unsigned int offset = pos & ~PAGE_MASK;
260 unsigned long n = pos >> PAGE_CACHE_SHIFT; 260 unsigned long n = pos >> PAGE_SHIFT;
261 unsigned long npages = dir_pages(inode); 261 unsigned long npages = dir_pages(inode);
262/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */ 262/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
263 263
@@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
272 if (IS_ERR(page)) { 272 if (IS_ERR(page)) {
273 nilfs_error(sb, __func__, "bad page in #%lu", 273 nilfs_error(sb, __func__, "bad page in #%lu",
274 inode->i_ino); 274 inode->i_ino);
275 ctx->pos += PAGE_CACHE_SIZE - offset; 275 ctx->pos += PAGE_SIZE - offset;
276 return -EIO; 276 return -EIO;
277 } 277 }
278 kaddr = page_address(page); 278 kaddr = page_address(page);
@@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
361 if (++n >= npages) 361 if (++n >= npages)
362 n = 0; 362 n = 0;
363 /* next page is past the blocks we've got */ 363 /* next page is past the blocks we've got */
364 if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { 364 if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
365 nilfs_error(dir->i_sb, __func__, 365 nilfs_error(dir->i_sb, __func__,
366 "dir %lu size %lld exceeds block count %llu", 366 "dir %lu size %lld exceeds block count %llu",
367 dir->i_ino, dir->i_size, 367 dir->i_ino, dir->i_size,
@@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
401 if (de) { 401 if (de) {
402 res = le64_to_cpu(de->inode); 402 res = le64_to_cpu(de->inode);
403 kunmap(page); 403 kunmap(page);
404 page_cache_release(page); 404 put_page(page);
405 } 405 }
406 return res; 406 return res;
407} 407}
@@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
460 kaddr = page_address(page); 460 kaddr = page_address(page);
461 dir_end = kaddr + nilfs_last_byte(dir, n); 461 dir_end = kaddr + nilfs_last_byte(dir, n);
462 de = (struct nilfs_dir_entry *)kaddr; 462 de = (struct nilfs_dir_entry *)kaddr;
463 kaddr += PAGE_CACHE_SIZE - reclen; 463 kaddr += PAGE_SIZE - reclen;
464 while ((char *)de <= kaddr) { 464 while ((char *)de <= kaddr) {
465 if ((char *)de == dir_end) { 465 if ((char *)de == dir_end) {
466 /* We hit i_size */ 466 /* We hit i_size */
@@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
603 kunmap_atomic(kaddr); 603 kunmap_atomic(kaddr);
604 nilfs_commit_chunk(page, mapping, 0, chunk_size); 604 nilfs_commit_chunk(page, mapping, 0, chunk_size);
605fail: 605fail:
606 page_cache_release(page); 606 put_page(page);
607 return err; 607 return err;
608} 608}
609 609
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 748ca238915a..0224b7826ace 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
115 115
116 failed: 116 failed:
117 unlock_page(bh->b_page); 117 unlock_page(bh->b_page);
118 page_cache_release(bh->b_page); 118 put_page(bh->b_page);
119 return err; 119 return err;
120} 120}
121 121
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 21a1e2e0d92f..534631358b13 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page)
249 if (nr_dirty) 249 if (nr_dirty)
250 nilfs_set_file_dirty(inode, nr_dirty); 250 nilfs_set_file_dirty(inode, nr_dirty);
251 } else if (ret) { 251 } else if (ret) {
252 unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); 252 unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
253 253
254 nilfs_set_file_dirty(inode, nr_dirty); 254 nilfs_set_file_dirty(inode, nr_dirty);
255 } 255 }
@@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
291 struct page *page, void *fsdata) 291 struct page *page, void *fsdata)
292{ 292{
293 struct inode *inode = mapping->host; 293 struct inode *inode = mapping->host;
294 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 294 unsigned start = pos & (PAGE_SIZE - 1);
295 unsigned nr_dirty; 295 unsigned nr_dirty;
296 int err; 296 int err;
297 297
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40233ff..f6982b9153d5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
110 110
111 failed_bh: 111 failed_bh:
112 unlock_page(bh->b_page); 112 unlock_page(bh->b_page);
113 page_cache_release(bh->b_page); 113 put_page(bh->b_page);
114 brelse(bh); 114 brelse(bh);
115 115
116 failed_unlock: 116 failed_unlock:
@@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
170 170
171 failed_bh: 171 failed_bh:
172 unlock_page(bh->b_page); 172 unlock_page(bh->b_page);
173 page_cache_release(bh->b_page); 173 put_page(bh->b_page);
174 brelse(bh); 174 brelse(bh);
175 failed: 175 failed:
176 return ret; 176 return ret;
@@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
363int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) 363int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
364{ 364{
365 pgoff_t index = (pgoff_t)block >> 365 pgoff_t index = (pgoff_t)block >>
366 (PAGE_CACHE_SHIFT - inode->i_blkbits); 366 (PAGE_SHIFT - inode->i_blkbits);
367 struct page *page; 367 struct page *page;
368 unsigned long first_block; 368 unsigned long first_block;
369 int ret = 0; 369 int ret = 0;
@@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
376 wait_on_page_writeback(page); 376 wait_on_page_writeback(page);
377 377
378 first_block = (unsigned long)index << 378 first_block = (unsigned long)index <<
379 (PAGE_CACHE_SHIFT - inode->i_blkbits); 379 (PAGE_SHIFT - inode->i_blkbits);
380 if (page_has_buffers(page)) { 380 if (page_has_buffers(page)) {
381 struct buffer_head *bh; 381 struct buffer_head *bh;
382 382
@@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
385 } 385 }
386 still_dirty = PageDirty(page); 386 still_dirty = PageDirty(page);
387 unlock_page(page); 387 unlock_page(page);
388 page_cache_release(page); 388 put_page(page);
389 389
390 if (still_dirty || 390 if (still_dirty ||
391 invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0) 391 invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
@@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
578 } 578 }
579 579
580 unlock_page(page); 580 unlock_page(page);
581 page_cache_release(page); 581 put_page(page);
582 return 0; 582 return 0;
583} 583}
584 584
@@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
597 bh_frozen = nilfs_page_get_nth_block(page, n); 597 bh_frozen = nilfs_page_get_nth_block(page, n);
598 } 598 }
599 unlock_page(page); 599 unlock_page(page);
600 page_cache_release(page); 600 put_page(page);
601 } 601 }
602 return bh_frozen; 602 return bh_frozen;
603} 603}
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 7ccdb961eea9..151bc19d47c0 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
431out_dir: 431out_dir:
432 if (dir_de) { 432 if (dir_de) {
433 kunmap(dir_page); 433 kunmap(dir_page);
434 page_cache_release(dir_page); 434 put_page(dir_page);
435 } 435 }
436out_old: 436out_old:
437 kunmap(old_page); 437 kunmap(old_page);
438 page_cache_release(old_page); 438 put_page(old_page);
439out: 439out:
440 nilfs_transaction_abort(old_dir->i_sb); 440 nilfs_transaction_abort(old_dir->i_sb);
441 return err; 441 return err;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index c20df77eff99..489391561cda 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
50 if (!page_has_buffers(page)) 50 if (!page_has_buffers(page))
51 create_empty_buffers(page, 1 << blkbits, b_state); 51 create_empty_buffers(page, 1 << blkbits, b_state);
52 52
53 first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits); 53 first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
54 bh = nilfs_page_get_nth_block(page, block - first_block); 54 bh = nilfs_page_get_nth_block(page, block - first_block);
55 55
56 touch_buffer(bh); 56 touch_buffer(bh);
@@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
64 unsigned long b_state) 64 unsigned long b_state)
65{ 65{
66 int blkbits = inode->i_blkbits; 66 int blkbits = inode->i_blkbits;
67 pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits); 67 pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
68 struct page *page; 68 struct page *page;
69 struct buffer_head *bh; 69 struct buffer_head *bh;
70 70
@@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
75 bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); 75 bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
76 if (unlikely(!bh)) { 76 if (unlikely(!bh)) {
77 unlock_page(page); 77 unlock_page(page);
78 page_cache_release(page); 78 put_page(page);
79 return NULL; 79 return NULL;
80 } 80 }
81 return bh; 81 return bh;
@@ -288,7 +288,7 @@ repeat:
288 __set_page_dirty_nobuffers(dpage); 288 __set_page_dirty_nobuffers(dpage);
289 289
290 unlock_page(dpage); 290 unlock_page(dpage);
291 page_cache_release(dpage); 291 put_page(dpage);
292 unlock_page(page); 292 unlock_page(page);
293 } 293 }
294 pagevec_release(&pvec); 294 pagevec_release(&pvec);
@@ -333,7 +333,7 @@ repeat:
333 WARN_ON(PageDirty(dpage)); 333 WARN_ON(PageDirty(dpage));
334 nilfs_copy_page(dpage, page, 0); 334 nilfs_copy_page(dpage, page, 0);
335 unlock_page(dpage); 335 unlock_page(dpage);
336 page_cache_release(dpage); 336 put_page(dpage);
337 } else { 337 } else {
338 struct page *page2; 338 struct page *page2;
339 339
@@ -350,7 +350,7 @@ repeat:
350 if (unlikely(err < 0)) { 350 if (unlikely(err < 0)) {
351 WARN_ON(err == -EEXIST); 351 WARN_ON(err == -EEXIST);
352 page->mapping = NULL; 352 page->mapping = NULL;
353 page_cache_release(page); /* for cache */ 353 put_page(page); /* for cache */
354 } else { 354 } else {
355 page->mapping = dmap; 355 page->mapping = dmap;
356 dmap->nrpages++; 356 dmap->nrpages++;
@@ -523,8 +523,8 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
523 if (inode->i_mapping->nrpages == 0) 523 if (inode->i_mapping->nrpages == 0)
524 return 0; 524 return 0;
525 525
526 index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 526 index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
527 nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits); 527 nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
528 528
529 pagevec_init(&pvec, 0); 529 pagevec_init(&pvec, 0);
530 530
@@ -537,7 +537,7 @@ repeat:
537 if (length > 0 && pvec.pages[0]->index > index) 537 if (length > 0 && pvec.pages[0]->index > index)
538 goto out; 538 goto out;
539 539
540 b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 540 b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
541 i = 0; 541 i = 0;
542 do { 542 do {
543 page = pvec.pages[i]; 543 page = pvec.pages[i];
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 9b4f205d1173..5afa77fadc11 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
544 blocksize, page, NULL); 544 blocksize, page, NULL);
545 545
546 unlock_page(page); 546 unlock_page(page);
547 page_cache_release(page); 547 put_page(page);
548 548
549 (*nr_salvaged_blocks)++; 549 (*nr_salvaged_blocks)++;
550 goto next; 550 goto next;
551 551
552 failed_page: 552 failed_page:
553 unlock_page(page); 553 unlock_page(page);
554 page_cache_release(page); 554 put_page(page);
555 555
556 failed_inode: 556 failed_inode:
557 printk(KERN_WARNING 557 printk(KERN_WARNING
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65adaae7e4..4317f72568e6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2070 goto failed_to_write; 2070 goto failed_to_write;
2071 2071
2072 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE || 2072 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2073 nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) { 2073 nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2074 /* 2074 /*
2075 * At this point, we avoid double buffering 2075 * At this point, we avoid double buffering
2076 * for blocksize < pagesize because page dirty 2076 * for blocksize < pagesize because page dirty
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7521e11db728..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
74 74
75 set_buffer_uptodate(bh); 75 set_buffer_uptodate(bh);
76 76
77 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + 77 file_ofs = ((s64)page->index << PAGE_SHIFT) +
78 bh_offset(bh); 78 bh_offset(bh);
79 read_lock_irqsave(&ni->size_lock, flags); 79 read_lock_irqsave(&ni->size_lock, flags);
80 init_size = ni->initialized_size; 80 init_size = ni->initialized_size;
@@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
142 u32 rec_size; 142 u32 rec_size;
143 143
144 rec_size = ni->itype.index.block_size; 144 rec_size = ni->itype.index.block_size;
145 recs = PAGE_CACHE_SIZE / rec_size; 145 recs = PAGE_SIZE / rec_size;
146 /* Should have been verified before we got here... */ 146 /* Should have been verified before we got here... */
147 BUG_ON(!recs); 147 BUG_ON(!recs);
148 local_irq_save(flags); 148 local_irq_save(flags);
@@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)
229 * fully truncated, truncate will throw it away as soon as we unlock 229 * fully truncated, truncate will throw it away as soon as we unlock
230 * it so no need to worry what we do with it. 230 * it so no need to worry what we do with it.
231 */ 231 */
232 iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); 232 iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
233 read_lock_irqsave(&ni->size_lock, flags); 233 read_lock_irqsave(&ni->size_lock, flags);
234 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; 234 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
235 init_size = ni->initialized_size; 235 init_size = ni->initialized_size;
@@ -412,9 +412,9 @@ retry_readpage:
412 vi = page->mapping->host; 412 vi = page->mapping->host;
413 i_size = i_size_read(vi); 413 i_size = i_size_read(vi);
414 /* Is the page fully outside i_size? (truncate in progress) */ 414 /* Is the page fully outside i_size? (truncate in progress) */
415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 415 if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
416 PAGE_CACHE_SHIFT)) { 416 PAGE_SHIFT)) {
417 zero_user(page, 0, PAGE_CACHE_SIZE); 417 zero_user(page, 0, PAGE_SIZE);
418 ntfs_debug("Read outside i_size - truncated?"); 418 ntfs_debug("Read outside i_size - truncated?");
419 goto done; 419 goto done;
420 } 420 }
@@ -463,7 +463,7 @@ retry_readpage:
463 * ok to ignore the compressed flag here. 463 * ok to ignore the compressed flag here.
464 */ 464 */
465 if (unlikely(page->index > 0)) { 465 if (unlikely(page->index > 0)) {
466 zero_user(page, 0, PAGE_CACHE_SIZE); 466 zero_user(page, 0, PAGE_SIZE);
467 goto done; 467 goto done;
468 } 468 }
469 if (!NInoAttr(ni)) 469 if (!NInoAttr(ni))
@@ -509,7 +509,7 @@ retry_readpage:
509 le16_to_cpu(ctx->attr->data.resident.value_offset), 509 le16_to_cpu(ctx->attr->data.resident.value_offset),
510 attr_len); 510 attr_len);
511 /* Zero the remainder of the page. */ 511 /* Zero the remainder of the page. */
512 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 512 memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
513 flush_dcache_page(page); 513 flush_dcache_page(page);
514 kunmap_atomic(addr); 514 kunmap_atomic(addr);
515put_unm_err_out: 515put_unm_err_out:
@@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
599 /* NOTE: Different naming scheme to ntfs_read_block()! */ 599 /* NOTE: Different naming scheme to ntfs_read_block()! */
600 600
601 /* The first block in the page. */ 601 /* The first block in the page. */
602 block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); 602 block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
603 603
604 read_lock_irqsave(&ni->size_lock, flags); 604 read_lock_irqsave(&ni->size_lock, flags);
605 i_size = i_size_read(vi); 605 i_size = i_size_read(vi);
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
674 // in the inode. 674 // in the inode.
675 // Again, for each page do: 675 // Again, for each page do:
676 // __set_page_dirty_buffers(); 676 // __set_page_dirty_buffers();
677 // page_cache_release() 677 // put_page()
678 // We don't need to wait on the writes. 678 // We don't need to wait on the writes.
679 // Update iblock. 679 // Update iblock.
680 } 680 }
@@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,
925 ntfs_volume *vol = ni->vol; 925 ntfs_volume *vol = ni->vol;
926 u8 *kaddr; 926 u8 *kaddr;
927 unsigned int rec_size = ni->itype.index.block_size; 927 unsigned int rec_size = ni->itype.index.block_size;
928 ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; 928 ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
929 struct buffer_head *bh, *head, *tbh, *rec_start_bh; 929 struct buffer_head *bh, *head, *tbh, *rec_start_bh;
930 struct buffer_head *bhs[MAX_BUF_PER_PAGE]; 930 struct buffer_head *bhs[MAX_BUF_PER_PAGE];
931 runlist_element *rl; 931 runlist_element *rl;
@@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,
949 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); 949 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
950 bh_size = vol->sb->s_blocksize; 950 bh_size = vol->sb->s_blocksize;
951 bh_size_bits = vol->sb->s_blocksize_bits; 951 bh_size_bits = vol->sb->s_blocksize_bits;
952 max_bhs = PAGE_CACHE_SIZE / bh_size; 952 max_bhs = PAGE_SIZE / bh_size;
953 BUG_ON(!max_bhs); 953 BUG_ON(!max_bhs);
954 BUG_ON(max_bhs > MAX_BUF_PER_PAGE); 954 BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
955 955
@@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,
961 BUG_ON(!bh); 961 BUG_ON(!bh);
962 962
963 rec_size_bits = ni->itype.index.block_size_bits; 963 rec_size_bits = ni->itype.index.block_size_bits;
964 BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); 964 BUG_ON(!(PAGE_SIZE >> rec_size_bits));
965 bhs_per_rec = rec_size >> bh_size_bits; 965 bhs_per_rec = rec_size >> bh_size_bits;
966 BUG_ON(!bhs_per_rec); 966 BUG_ON(!bhs_per_rec);
967 967
968 /* The first block in the page. */ 968 /* The first block in the page. */
969 rec_block = block = (sector_t)page->index << 969 rec_block = block = (sector_t)page->index <<
970 (PAGE_CACHE_SHIFT - bh_size_bits); 970 (PAGE_SHIFT - bh_size_bits);
971 971
972 /* The first out of bounds block for the data size. */ 972 /* The first out of bounds block for the data size. */
973 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; 973 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@ lock_retry_remap:
1133 unsigned long mft_no; 1133 unsigned long mft_no;
1134 1134
1135 /* Get the mft record number. */ 1135 /* Get the mft record number. */
1136 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) 1136 mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1137 >> rec_size_bits; 1137 >> rec_size_bits;
1138 /* Check whether to write this mft record. */ 1138 /* Check whether to write this mft record. */
1139 tni = NULL; 1139 tni = NULL;
@@ -1249,7 +1249,7 @@ do_mirror:
1249 continue; 1249 continue;
1250 ofs = bh_offset(tbh); 1250 ofs = bh_offset(tbh);
1251 /* Get the mft record number. */ 1251 /* Get the mft record number. */
1252 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) 1252 mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
1253 >> rec_size_bits; 1253 >> rec_size_bits;
1254 if (mft_no < vol->mftmirr_size) 1254 if (mft_no < vol->mftmirr_size)
1255 ntfs_sync_mft_mirror(vol, mft_no, 1255 ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@ done:
1300 * Set page error if there is only one ntfs record in the page. 1300 * Set page error if there is only one ntfs record in the page.
1301 * Otherwise we would loose per-record granularity. 1301 * Otherwise we would loose per-record granularity.
1302 */ 1302 */
1303 if (ni->itype.index.block_size == PAGE_CACHE_SIZE) 1303 if (ni->itype.index.block_size == PAGE_SIZE)
1304 SetPageError(page); 1304 SetPageError(page);
1305 NVolSetErrors(vol); 1305 NVolSetErrors(vol);
1306 } 1306 }
@@ -1308,7 +1308,7 @@ done:
1308 ntfs_debug("Page still contains one or more dirty ntfs " 1308 ntfs_debug("Page still contains one or more dirty ntfs "
1309 "records. Redirtying the page starting at " 1309 "records. Redirtying the page starting at "
1310 "record 0x%lx.", page->index << 1310 "record 0x%lx.", page->index <<
1311 (PAGE_CACHE_SHIFT - rec_size_bits)); 1311 (PAGE_SHIFT - rec_size_bits));
1312 redirty_page_for_writepage(wbc, page); 1312 redirty_page_for_writepage(wbc, page);
1313 unlock_page(page); 1313 unlock_page(page);
1314 } else { 1314 } else {
@@ -1365,13 +1365,13 @@ retry_writepage:
1365 BUG_ON(!PageLocked(page)); 1365 BUG_ON(!PageLocked(page));
1366 i_size = i_size_read(vi); 1366 i_size = i_size_read(vi);
1367 /* Is the page fully outside i_size? (truncate in progress) */ 1367 /* Is the page fully outside i_size? (truncate in progress) */
1368 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 1368 if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
1369 PAGE_CACHE_SHIFT)) { 1369 PAGE_SHIFT)) {
1370 /* 1370 /*
1371 * The page may have dirty, unmapped buffers. Make them 1371 * The page may have dirty, unmapped buffers. Make them
1372 * freeable here, so the page does not leak. 1372 * freeable here, so the page does not leak.
1373 */ 1373 */
1374 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1374 block_invalidatepage(page, 0, PAGE_SIZE);
1375 unlock_page(page); 1375 unlock_page(page);
1376 ntfs_debug("Write outside i_size - truncated?"); 1376 ntfs_debug("Write outside i_size - truncated?");
1377 return 0; 1377 return 0;
@@ -1414,10 +1414,10 @@ retry_writepage:
1414 /* NInoNonResident() == NInoIndexAllocPresent() */ 1414 /* NInoNonResident() == NInoIndexAllocPresent() */
1415 if (NInoNonResident(ni)) { 1415 if (NInoNonResident(ni)) {
1416 /* We have to zero every time due to mmap-at-end-of-file. */ 1416 /* We have to zero every time due to mmap-at-end-of-file. */
1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { 1417 if (page->index >= (i_size >> PAGE_SHIFT)) {
1418 /* The page straddles i_size. */ 1418 /* The page straddles i_size. */
1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; 1419 unsigned int ofs = i_size & ~PAGE_MASK;
1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE); 1420 zero_user_segment(page, ofs, PAGE_SIZE);
1421 } 1421 }
1422 /* Handle mst protected attributes. */ 1422 /* Handle mst protected attributes. */
1423 if (NInoMstProtected(ni)) 1423 if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@ retry_writepage:
1500 le16_to_cpu(ctx->attr->data.resident.value_offset), 1500 le16_to_cpu(ctx->attr->data.resident.value_offset),
1501 addr, attr_len); 1501 addr, attr_len);
1502 /* Zero out of bounds area in the page cache page. */ 1502 /* Zero out of bounds area in the page cache page. */
1503 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1503 memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
1504 kunmap_atomic(addr); 1504 kunmap_atomic(addr);
1505 flush_dcache_page(page); 1505 flush_dcache_page(page);
1506 flush_dcache_mft_record_page(ctx->ntfs_ino); 1506 flush_dcache_mft_record_page(ctx->ntfs_ino);
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index caecc58f529c..820d6eabf60f 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -40,7 +40,7 @@
40static inline void ntfs_unmap_page(struct page *page) 40static inline void ntfs_unmap_page(struct page *page)
41{ 41{
42 kunmap(page); 42 kunmap(page);
43 page_cache_release(page); 43 put_page(page);
44} 44}
45 45
46/** 46/**
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
49 * @index: index into the page cache for @mapping of the page to map 49 * @index: index into the page cache for @mapping of the page to map
50 * 50 *
51 * Read a page from the page cache of the address space @mapping at position 51 * Read a page from the page cache of the address space @mapping at position
52 * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes. 52 * @index, where @index is in units of PAGE_SIZE, and not in bytes.
53 * 53 *
54 * If the page is not in memory it is loaded from disk first using the readpage 54 * If the page is not in memory it is loaded from disk first using the readpage
55 * method defined in the address space operations of @mapping and the page is 55 * method defined in the address space operations of @mapping and the page is
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 250ed5b20c8f..44a39a099b54 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
152 if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino != 152 if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
153 old_ctx.base_ntfs_ino) { 153 old_ctx.base_ntfs_ino) {
154 put_this_page = old_ctx.ntfs_ino->page; 154 put_this_page = old_ctx.ntfs_ino->page;
155 page_cache_get(put_this_page); 155 get_page(put_this_page);
156 } 156 }
157 /* 157 /*
158 * Reinitialize the search context so we can lookup the 158 * Reinitialize the search context so we can lookup the
@@ -275,7 +275,7 @@ retry_map:
275 * the pieces anyway. 275 * the pieces anyway.
276 */ 276 */
277 if (put_this_page) 277 if (put_this_page)
278 page_cache_release(put_this_page); 278 put_page(put_this_page);
279 } 279 }
280 return err; 280 return err;
281} 281}
@@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1660 memcpy(kaddr, (u8*)a + 1660 memcpy(kaddr, (u8*)a +
1661 le16_to_cpu(a->data.resident.value_offset), 1661 le16_to_cpu(a->data.resident.value_offset),
1662 attr_size); 1662 attr_size);
1663 memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); 1663 memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
1664 kunmap_atomic(kaddr); 1664 kunmap_atomic(kaddr);
1665 flush_dcache_page(page); 1665 flush_dcache_page(page);
1666 SetPageUptodate(page); 1666 SetPageUptodate(page);
@@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1748 if (page) { 1748 if (page) {
1749 set_page_dirty(page); 1749 set_page_dirty(page);
1750 unlock_page(page); 1750 unlock_page(page);
1751 page_cache_release(page); 1751 put_page(page);
1752 } 1752 }
1753 ntfs_debug("Done."); 1753 ntfs_debug("Done.");
1754 return 0; 1754 return 0;
@@ -1835,7 +1835,7 @@ rl_err_out:
1835 ntfs_free(rl); 1835 ntfs_free(rl);
1836page_err_out: 1836page_err_out:
1837 unlock_page(page); 1837 unlock_page(page);
1838 page_cache_release(page); 1838 put_page(page);
1839 } 1839 }
1840 if (err == -EINVAL) 1840 if (err == -EINVAL)
1841 err = -EIO; 1841 err = -EIO;
@@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2513 BUG_ON(NInoEncrypted(ni)); 2513 BUG_ON(NInoEncrypted(ni));
2514 mapping = VFS_I(ni)->i_mapping; 2514 mapping = VFS_I(ni)->i_mapping;
2515 /* Work out the starting index and page offset. */ 2515 /* Work out the starting index and page offset. */
2516 idx = ofs >> PAGE_CACHE_SHIFT; 2516 idx = ofs >> PAGE_SHIFT;
2517 start_ofs = ofs & ~PAGE_CACHE_MASK; 2517 start_ofs = ofs & ~PAGE_MASK;
2518 /* Work out the ending index and page offset. */ 2518 /* Work out the ending index and page offset. */
2519 end = ofs + cnt; 2519 end = ofs + cnt;
2520 end_ofs = end & ~PAGE_CACHE_MASK; 2520 end_ofs = end & ~PAGE_MASK;
2521 /* If the end is outside the inode size return -ESPIPE. */ 2521 /* If the end is outside the inode size return -ESPIPE. */
2522 if (unlikely(end > i_size_read(VFS_I(ni)))) { 2522 if (unlikely(end > i_size_read(VFS_I(ni)))) {
2523 ntfs_error(vol->sb, "Request exceeds end of attribute."); 2523 ntfs_error(vol->sb, "Request exceeds end of attribute.");
2524 return -ESPIPE; 2524 return -ESPIPE;
2525 } 2525 }
2526 end >>= PAGE_CACHE_SHIFT; 2526 end >>= PAGE_SHIFT;
2527 /* If there is a first partial page, need to do it the slow way. */ 2527 /* If there is a first partial page, need to do it the slow way. */
2528 if (start_ofs) { 2528 if (start_ofs) {
2529 page = read_mapping_page(mapping, idx, NULL); 2529 page = read_mapping_page(mapping, idx, NULL);
@@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2536 * If the last page is the same as the first page, need to 2536 * If the last page is the same as the first page, need to
2537 * limit the write to the end offset. 2537 * limit the write to the end offset.
2538 */ 2538 */
2539 size = PAGE_CACHE_SIZE; 2539 size = PAGE_SIZE;
2540 if (idx == end) 2540 if (idx == end)
2541 size = end_ofs; 2541 size = end_ofs;
2542 kaddr = kmap_atomic(page); 2542 kaddr = kmap_atomic(page);
@@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2544 flush_dcache_page(page); 2544 flush_dcache_page(page);
2545 kunmap_atomic(kaddr); 2545 kunmap_atomic(kaddr);
2546 set_page_dirty(page); 2546 set_page_dirty(page);
2547 page_cache_release(page); 2547 put_page(page);
2548 balance_dirty_pages_ratelimited(mapping); 2548 balance_dirty_pages_ratelimited(mapping);
2549 cond_resched(); 2549 cond_resched();
2550 if (idx == end) 2550 if (idx == end)
@@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2561 return -ENOMEM; 2561 return -ENOMEM;
2562 } 2562 }
2563 kaddr = kmap_atomic(page); 2563 kaddr = kmap_atomic(page);
2564 memset(kaddr, val, PAGE_CACHE_SIZE); 2564 memset(kaddr, val, PAGE_SIZE);
2565 flush_dcache_page(page); 2565 flush_dcache_page(page);
2566 kunmap_atomic(kaddr); 2566 kunmap_atomic(kaddr);
2567 /* 2567 /*
@@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2585 set_page_dirty(page); 2585 set_page_dirty(page);
2586 /* Finally unlock and release the page. */ 2586 /* Finally unlock and release the page. */
2587 unlock_page(page); 2587 unlock_page(page);
2588 page_cache_release(page); 2588 put_page(page);
2589 balance_dirty_pages_ratelimited(mapping); 2589 balance_dirty_pages_ratelimited(mapping);
2590 cond_resched(); 2590 cond_resched();
2591 } 2591 }
@@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2602 flush_dcache_page(page); 2602 flush_dcache_page(page);
2603 kunmap_atomic(kaddr); 2603 kunmap_atomic(kaddr);
2604 set_page_dirty(page); 2604 set_page_dirty(page);
2605 page_cache_release(page); 2605 put_page(page);
2606 balance_dirty_pages_ratelimited(mapping); 2606 balance_dirty_pages_ratelimited(mapping);
2607 cond_resched(); 2607 cond_resched();
2608 } 2608 }
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 0809cf876098..ec130c588d2b 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
67 * Calculate the indices for the pages containing the first and last 67 * Calculate the indices for the pages containing the first and last
68 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively. 68 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
69 */ 69 */
70 index = start_bit >> (3 + PAGE_CACHE_SHIFT); 70 index = start_bit >> (3 + PAGE_SHIFT);
71 end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT); 71 end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
72 72
73 /* Get the page containing the first bit (@start_bit). */ 73 /* Get the page containing the first bit (@start_bit). */
74 mapping = vi->i_mapping; 74 mapping = vi->i_mapping;
@@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
82 kaddr = page_address(page); 82 kaddr = page_address(page);
83 83
84 /* Set @pos to the position of the byte containing @start_bit. */ 84 /* Set @pos to the position of the byte containing @start_bit. */
85 pos = (start_bit >> 3) & ~PAGE_CACHE_MASK; 85 pos = (start_bit >> 3) & ~PAGE_MASK;
86 86
87 /* Calculate the position of @start_bit in the first byte. */ 87 /* Calculate the position of @start_bit in the first byte. */
88 bit = start_bit & 7; 88 bit = start_bit & 7;
@@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
108 * Depending on @value, modify all remaining whole bytes in the page up 108 * Depending on @value, modify all remaining whole bytes in the page up
109 * to @cnt. 109 * to @cnt.
110 */ 110 */
111 len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos); 111 len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
112 memset(kaddr + pos, value ? 0xff : 0, len); 112 memset(kaddr + pos, value ? 0xff : 0, len);
113 cnt -= len << 3; 113 cnt -= len << 3;
114 114
@@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
132 * Depending on @value, modify all remaining whole bytes in the 132 * Depending on @value, modify all remaining whole bytes in the
133 * page up to @cnt. 133 * page up to @cnt.
134 */ 134 */
135 len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE); 135 len = min_t(s64, cnt >> 3, PAGE_SIZE);
136 memset(kaddr, value ? 0xff : 0, len); 136 memset(kaddr, value ? 0xff : 0, len);
137 cnt -= len << 3; 137 cnt -= len << 3;
138 } 138 }
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c35e78..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
104 unsigned int kp_ofs; 104 unsigned int kp_ofs;
105 105
106 ntfs_debug("Zeroing page region outside initialized size."); 106 ntfs_debug("Zeroing page region outside initialized size.");
107 if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { 107 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
108 /*
109 * FIXME: Using clear_page() will become wrong when we get
110 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
111 */
112 clear_page(kp); 108 clear_page(kp);
113 return; 109 return;
114 } 110 }
115 kp_ofs = initialized_size & ~PAGE_CACHE_MASK; 111 kp_ofs = initialized_size & ~PAGE_MASK;
116 memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); 112 memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
117 return; 113 return;
118} 114}
119 115
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
123static inline void handle_bounds_compressed_page(struct page *page, 119static inline void handle_bounds_compressed_page(struct page *page,
124 const loff_t i_size, const s64 initialized_size) 120 const loff_t i_size, const s64 initialized_size)
125{ 121{
126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && 122 if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
127 (initialized_size < i_size)) 123 (initialized_size < i_size))
128 zero_partial_compressed_page(page, initialized_size); 124 zero_partial_compressed_page(page, initialized_size);
129 return; 125 return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
160 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was 156 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
161 * completed during the decompression of the compression block (@cb_start). 157 * completed during the decompression of the compression block (@cb_start).
162 * 158 *
163 * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up 159 * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
164 * unpredicatbly! You have been warned! 160 * unpredicatbly! You have been warned!
165 * 161 *
166 * Note to hackers: This function may not sleep until it has finished accessing 162 * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
241 if (di == xpage) 237 if (di == xpage)
242 *xpage_done = 1; 238 *xpage_done = 1;
243 else 239 else
244 page_cache_release(dp); 240 put_page(dp);
245 dest_pages[di] = NULL; 241 dest_pages[di] = NULL;
246 } 242 }
247 } 243 }
@@ -274,7 +270,7 @@ return_error:
274 cb = cb_sb_end; 270 cb = cb_sb_end;
275 271
276 /* Advance destination position to next sub-block. */ 272 /* Advance destination position to next sub-block. */
277 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK; 273 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
278 if (!*dest_ofs && (++*dest_index > dest_max_index)) 274 if (!*dest_ofs && (++*dest_index > dest_max_index))
279 goto return_overflow; 275 goto return_overflow;
280 goto do_next_sb; 276 goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
301 297
302 /* Advance destination position to next sub-block. */ 298 /* Advance destination position to next sub-block. */
303 *dest_ofs += NTFS_SB_SIZE; 299 *dest_ofs += NTFS_SB_SIZE;
304 if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) { 300 if (!(*dest_ofs &= ~PAGE_MASK)) {
305finalize_page: 301finalize_page:
306 /* 302 /*
307 * First stage: add current page index to array of 303 * First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
335 *dest_ofs += nr_bytes; 331 *dest_ofs += nr_bytes;
336 } 332 }
337 /* We have finished the current sub-block. */ 333 /* We have finished the current sub-block. */
338 if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) 334 if (!(*dest_ofs &= ~PAGE_MASK))
339 goto finalize_page; 335 goto finalize_page;
340 goto do_next_sb; 336 goto do_next_sb;
341 } 337 }
@@ -462,7 +458,7 @@ return_overflow:
462 * have been written to so that we would lose data if we were to just overwrite 458 * have been written to so that we would lose data if we were to just overwrite
463 * them with the out-of-date uncompressed data. 459 * them with the out-of-date uncompressed data.
464 * 460 *
465 * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at 461 * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
466 * the end of the file I think. We need to detect this case and zero the out 462 * the end of the file I think. We need to detect this case and zero the out
467 * of bounds remainder of the page in question and mark it as handled. At the 463 * of bounds remainder of the page in question and mark it as handled. At the
468 * moment we would just return -EIO on such a page. This bug will only become 464 * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
470 * clusters so is probably not going to be seen by anyone. Still this should 466 * clusters so is probably not going to be seen by anyone. Still this should
471 * be fixed. (AIA) 467 * be fixed. (AIA)
472 * 468 *
473 * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in 469 * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
474 * handling sparse and compressed cbs. (AIA) 470 * handling sparse and compressed cbs. (AIA)
475 * 471 *
476 * FIXME: At the moment we don't do any zeroing out in the case that 472 * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
497 u64 cb_size_mask = cb_size - 1UL; 493 u64 cb_size_mask = cb_size - 1UL;
498 VCN vcn; 494 VCN vcn;
499 LCN lcn; 495 LCN lcn;
500 /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ 496 /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
501 VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> 497 VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
502 vol->cluster_size_bits; 498 vol->cluster_size_bits;
503 /* 499 /*
504 * The first vcn after the last wanted vcn (minimum alignment is again 500 * The first vcn after the last wanted vcn (minimum alignment is again
505 * PAGE_CACHE_SIZE. 501 * PAGE_SIZE.
506 */ 502 */
507 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) 503 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
508 & ~cb_size_mask) >> vol->cluster_size_bits; 504 & ~cb_size_mask) >> vol->cluster_size_bits;
509 /* Number of compression blocks (cbs) in the wanted vcn range. */ 505 /* Number of compression blocks (cbs) in the wanted vcn range. */
510 unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits 506 unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
515 * guarantees of start_vcn and end_vcn, no need to round up here. 511 * guarantees of start_vcn and end_vcn, no need to round up here.
516 */ 512 */
517 unsigned int nr_pages = (end_vcn - start_vcn) << 513 unsigned int nr_pages = (end_vcn - start_vcn) <<
518 vol->cluster_size_bits >> PAGE_CACHE_SHIFT; 514 vol->cluster_size_bits >> PAGE_SHIFT;
519 unsigned int xpage, max_page, cur_page, cur_ofs, i; 515 unsigned int xpage, max_page, cur_page, cur_ofs, i;
520 unsigned int cb_clusters, cb_max_ofs; 516 unsigned int cb_clusters, cb_max_ofs;
521 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0; 517 int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
549 * We have already been given one page, this is the one we must do. 545 * We have already been given one page, this is the one we must do.
550 * Once again, the alignment guarantees keep it simple. 546 * Once again, the alignment guarantees keep it simple.
551 */ 547 */
552 offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; 548 offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
553 xpage = index - offset; 549 xpage = index - offset;
554 pages[xpage] = page; 550 pages[xpage] = page;
555 /* 551 /*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
560 i_size = i_size_read(VFS_I(ni)); 556 i_size = i_size_read(VFS_I(ni));
561 initialized_size = ni->initialized_size; 557 initialized_size = ni->initialized_size;
562 read_unlock_irqrestore(&ni->size_lock, flags); 558 read_unlock_irqrestore(&ni->size_lock, flags);
563 max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 559 max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
564 offset; 560 offset;
565 /* Is the page fully outside i_size? (truncate in progress) */ 561 /* Is the page fully outside i_size? (truncate in progress) */
566 if (xpage >= max_page) { 562 if (xpage >= max_page) {
567 kfree(bhs); 563 kfree(bhs);
568 kfree(pages); 564 kfree(pages);
569 zero_user(page, 0, PAGE_CACHE_SIZE); 565 zero_user(page, 0, PAGE_SIZE);
570 ntfs_debug("Compressed read outside i_size - truncated?"); 566 ntfs_debug("Compressed read outside i_size - truncated?");
571 SetPageUptodate(page); 567 SetPageUptodate(page);
572 unlock_page(page); 568 unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
591 continue; 587 continue;
592 } 588 }
593 unlock_page(page); 589 unlock_page(page);
594 page_cache_release(page); 590 put_page(page);
595 pages[i] = NULL; 591 pages[i] = NULL;
596 } 592 }
597 } 593 }
@@ -735,9 +731,9 @@ lock_retry_remap:
735 ntfs_debug("Successfully read the compression block."); 731 ntfs_debug("Successfully read the compression block.");
736 732
737 /* The last page and maximum offset within it for the current cb. */ 733 /* The last page and maximum offset within it for the current cb. */
738 cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size; 734 cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
739 cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK; 735 cb_max_ofs = cb_max_page & ~PAGE_MASK;
740 cb_max_page >>= PAGE_CACHE_SHIFT; 736 cb_max_page >>= PAGE_SHIFT;
741 737
742 /* Catch end of file inside a compression block. */ 738 /* Catch end of file inside a compression block. */
743 if (cb_max_page > max_page) 739 if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
753 for (; cur_page < cb_max_page; cur_page++) { 749 for (; cur_page < cb_max_page; cur_page++) {
754 page = pages[cur_page]; 750 page = pages[cur_page];
755 if (page) { 751 if (page) {
756 /*
757 * FIXME: Using clear_page() will become wrong
758 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
759 * for now there is no problem.
760 */
761 if (likely(!cur_ofs)) 752 if (likely(!cur_ofs))
762 clear_page(page_address(page)); 753 clear_page(page_address(page));
763 else 754 else
764 memset(page_address(page) + cur_ofs, 0, 755 memset(page_address(page) + cur_ofs, 0,
765 PAGE_CACHE_SIZE - 756 PAGE_SIZE -
766 cur_ofs); 757 cur_ofs);
767 flush_dcache_page(page); 758 flush_dcache_page(page);
768 kunmap(page); 759 kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
771 if (cur_page == xpage) 762 if (cur_page == xpage)
772 xpage_done = 1; 763 xpage_done = 1;
773 else 764 else
774 page_cache_release(page); 765 put_page(page);
775 pages[cur_page] = NULL; 766 pages[cur_page] = NULL;
776 } 767 }
777 cb_pos += PAGE_CACHE_SIZE - cur_ofs; 768 cb_pos += PAGE_SIZE - cur_ofs;
778 cur_ofs = 0; 769 cur_ofs = 0;
779 if (cb_pos >= cb_end) 770 if (cb_pos >= cb_end)
780 break; 771 break;
@@ -807,7 +798,7 @@ lock_retry_remap:
807 * synchronous io for the majority of pages. 798 * synchronous io for the majority of pages.
808 * Or if we choose not to do the read-ahead/-behind stuff, we 799 * Or if we choose not to do the read-ahead/-behind stuff, we
809 * could just return block_read_full_page(pages[xpage]) as long 800 * could just return block_read_full_page(pages[xpage]) as long
810 * as PAGE_CACHE_SIZE <= cb_size. 801 * as PAGE_SIZE <= cb_size.
811 */ 802 */
812 if (cb_max_ofs) 803 if (cb_max_ofs)
813 cb_max_page--; 804 cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
816 page = pages[cur_page]; 807 page = pages[cur_page];
817 if (page) 808 if (page)
818 memcpy(page_address(page) + cur_ofs, cb_pos, 809 memcpy(page_address(page) + cur_ofs, cb_pos,
819 PAGE_CACHE_SIZE - cur_ofs); 810 PAGE_SIZE - cur_ofs);
820 cb_pos += PAGE_CACHE_SIZE - cur_ofs; 811 cb_pos += PAGE_SIZE - cur_ofs;
821 cur_ofs = 0; 812 cur_ofs = 0;
822 if (cb_pos >= cb_end) 813 if (cb_pos >= cb_end)
823 break; 814 break;
@@ -850,10 +841,10 @@ lock_retry_remap:
850 if (cur2_page == xpage) 841 if (cur2_page == xpage)
851 xpage_done = 1; 842 xpage_done = 1;
852 else 843 else
853 page_cache_release(page); 844 put_page(page);
854 pages[cur2_page] = NULL; 845 pages[cur2_page] = NULL;
855 } 846 }
856 cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; 847 cb_pos2 += PAGE_SIZE - cur_ofs2;
857 cur_ofs2 = 0; 848 cur_ofs2 = 0;
858 if (cb_pos2 >= cb_end) 849 if (cb_pos2 >= cb_end)
859 break; 850 break;
@@ -884,7 +875,7 @@ lock_retry_remap:
884 kunmap(page); 875 kunmap(page);
885 unlock_page(page); 876 unlock_page(page);
886 if (prev_cur_page != xpage) 877 if (prev_cur_page != xpage)
887 page_cache_release(page); 878 put_page(page);
888 pages[prev_cur_page] = NULL; 879 pages[prev_cur_page] = NULL;
889 } 880 }
890 } 881 }
@@ -914,7 +905,7 @@ lock_retry_remap:
914 kunmap(page); 905 kunmap(page);
915 unlock_page(page); 906 unlock_page(page);
916 if (cur_page != xpage) 907 if (cur_page != xpage)
917 page_cache_release(page); 908 put_page(page);
918 pages[cur_page] = NULL; 909 pages[cur_page] = NULL;
919 } 910 }
920 } 911 }
@@ -961,7 +952,7 @@ err_out:
961 kunmap(page); 952 kunmap(page);
962 unlock_page(page); 953 unlock_page(page);
963 if (i != xpage) 954 if (i != xpage)
964 page_cache_release(page); 955 put_page(page);
965 } 956 }
966 } 957 }
967 kfree(pages); 958 kfree(pages);
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index b2eff5816adc..a18613579001 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,11 +315,11 @@ found_it:
315descend_into_child_node: 315descend_into_child_node:
316 /* 316 /*
317 * Convert vcn to index into the index allocation attribute in units 317 * Convert vcn to index into the index allocation attribute in units
318 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 318 * of PAGE_SIZE and map the page cache page, reading it from
319 * disk if necessary. 319 * disk if necessary.
320 */ 320 */
321 page = ntfs_map_page(ia_mapping, vcn << 321 page = ntfs_map_page(ia_mapping, vcn <<
322 dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 322 dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
323 if (IS_ERR(page)) { 323 if (IS_ERR(page)) {
324 ntfs_error(sb, "Failed to map directory index page, error %ld.", 324 ntfs_error(sb, "Failed to map directory index page, error %ld.",
325 -PTR_ERR(page)); 325 -PTR_ERR(page));
@@ -331,9 +331,9 @@ descend_into_child_node:
331fast_descend_into_child_node: 331fast_descend_into_child_node:
332 /* Get to the index allocation block. */ 332 /* Get to the index allocation block. */
333 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 333 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
334 dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 334 dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
335 /* Bounds checks. */ 335 /* Bounds checks. */
336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
337 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 337 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
338 "inode 0x%lx or driver bug.", dir_ni->mft_no); 338 "inode 0x%lx or driver bug.", dir_ni->mft_no);
339 goto unm_err_out; 339 goto unm_err_out;
@@ -366,7 +366,7 @@ fast_descend_into_child_node:
366 goto unm_err_out; 366 goto unm_err_out;
367 } 367 }
368 index_end = (u8*)ia + dir_ni->itype.index.block_size; 368 index_end = (u8*)ia + dir_ni->itype.index.block_size;
369 if (index_end > kaddr + PAGE_CACHE_SIZE) { 369 if (index_end > kaddr + PAGE_SIZE) {
370 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 370 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
371 "0x%lx crosses page boundary. Impossible! " 371 "0x%lx crosses page boundary. Impossible! "
372 "Cannot access! This is probably a bug in the " 372 "Cannot access! This is probably a bug in the "
@@ -559,9 +559,9 @@ found_it2:
559 /* If vcn is in the same page cache page as old_vcn we 559 /* If vcn is in the same page cache page as old_vcn we
560 * recycle the mapped page. */ 560 * recycle the mapped page. */
561 if (old_vcn << vol->cluster_size_bits >> 561 if (old_vcn << vol->cluster_size_bits >>
562 PAGE_CACHE_SHIFT == vcn << 562 PAGE_SHIFT == vcn <<
563 vol->cluster_size_bits >> 563 vol->cluster_size_bits >>
564 PAGE_CACHE_SHIFT) 564 PAGE_SHIFT)
565 goto fast_descend_into_child_node; 565 goto fast_descend_into_child_node;
566 unlock_page(page); 566 unlock_page(page);
567 ntfs_unmap_page(page); 567 ntfs_unmap_page(page);
@@ -793,11 +793,11 @@ found_it:
793descend_into_child_node: 793descend_into_child_node:
794 /* 794 /*
795 * Convert vcn to index into the index allocation attribute in units 795 * Convert vcn to index into the index allocation attribute in units
796 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 796 * of PAGE_SIZE and map the page cache page, reading it from
797 * disk if necessary. 797 * disk if necessary.
798 */ 798 */
799 page = ntfs_map_page(ia_mapping, vcn << 799 page = ntfs_map_page(ia_mapping, vcn <<
800 dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 800 dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
801 if (IS_ERR(page)) { 801 if (IS_ERR(page)) {
802 ntfs_error(sb, "Failed to map directory index page, error %ld.", 802 ntfs_error(sb, "Failed to map directory index page, error %ld.",
803 -PTR_ERR(page)); 803 -PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
809fast_descend_into_child_node: 809fast_descend_into_child_node:
810 /* Get to the index allocation block. */ 810 /* Get to the index allocation block. */
811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
812 dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 812 dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
813 /* Bounds checks. */ 813 /* Bounds checks. */
814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
816 "inode 0x%lx or driver bug.", dir_ni->mft_no); 816 "inode 0x%lx or driver bug.", dir_ni->mft_no);
817 goto unm_err_out; 817 goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
844 goto unm_err_out; 844 goto unm_err_out;
845 } 845 }
846 index_end = (u8*)ia + dir_ni->itype.index.block_size; 846 index_end = (u8*)ia + dir_ni->itype.index.block_size;
847 if (index_end > kaddr + PAGE_CACHE_SIZE) { 847 if (index_end > kaddr + PAGE_SIZE) {
848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
849 "0x%lx crosses page boundary. Impossible! " 849 "0x%lx crosses page boundary. Impossible! "
850 "Cannot access! This is probably a bug in the " 850 "Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
968 /* If vcn is in the same page cache page as old_vcn we 968 /* If vcn is in the same page cache page as old_vcn we
969 * recycle the mapped page. */ 969 * recycle the mapped page. */
970 if (old_vcn << vol->cluster_size_bits >> 970 if (old_vcn << vol->cluster_size_bits >>
971 PAGE_CACHE_SHIFT == vcn << 971 PAGE_SHIFT == vcn <<
972 vol->cluster_size_bits >> 972 vol->cluster_size_bits >>
973 PAGE_CACHE_SHIFT) 973 PAGE_SHIFT)
974 goto fast_descend_into_child_node; 974 goto fast_descend_into_child_node;
975 unlock_page(page); 975 unlock_page(page);
976 ntfs_unmap_page(page); 976 ntfs_unmap_page(page);
@@ -1246,15 +1246,15 @@ skip_index_root:
1246 goto iput_err_out; 1246 goto iput_err_out;
1247 } 1247 }
1248 /* Get the starting bit position in the current bitmap page. */ 1248 /* Get the starting bit position in the current bitmap page. */
1249 cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); 1249 cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
1250 bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); 1250 bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
1251get_next_bmp_page: 1251get_next_bmp_page:
1252 ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx", 1252 ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
1253 (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT), 1253 (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
1254 (unsigned long long)bmp_pos & 1254 (unsigned long long)bmp_pos &
1255 (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); 1255 (unsigned long long)((PAGE_SIZE * 8) - 1));
1256 bmp_page = ntfs_map_page(bmp_mapping, 1256 bmp_page = ntfs_map_page(bmp_mapping,
1257 bmp_pos >> (3 + PAGE_CACHE_SHIFT)); 1257 bmp_pos >> (3 + PAGE_SHIFT));
1258 if (IS_ERR(bmp_page)) { 1258 if (IS_ERR(bmp_page)) {
1259 ntfs_error(sb, "Reading index bitmap failed."); 1259 ntfs_error(sb, "Reading index bitmap failed.");
1260 err = PTR_ERR(bmp_page); 1260 err = PTR_ERR(bmp_page);
@@ -1270,9 +1270,9 @@ find_next_index_buffer:
1270 * If we have reached the end of the bitmap page, get the next 1270 * If we have reached the end of the bitmap page, get the next
1271 * page, and put away the old one. 1271 * page, and put away the old one.
1272 */ 1272 */
1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { 1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
1274 ntfs_unmap_page(bmp_page); 1274 ntfs_unmap_page(bmp_page);
1275 bmp_pos += PAGE_CACHE_SIZE * 8; 1275 bmp_pos += PAGE_SIZE * 8;
1276 cur_bmp_pos = 0; 1276 cur_bmp_pos = 0;
1277 goto get_next_bmp_page; 1277 goto get_next_bmp_page;
1278 } 1278 }
@@ -1285,8 +1285,8 @@ find_next_index_buffer:
1285 ntfs_debug("Handling index buffer 0x%llx.", 1285 ntfs_debug("Handling index buffer 0x%llx.",
1286 (unsigned long long)bmp_pos + cur_bmp_pos); 1286 (unsigned long long)bmp_pos + cur_bmp_pos);
1287 /* If the current index buffer is in the same page we reuse the page. */ 1287 /* If the current index buffer is in the same page we reuse the page. */
1288 if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) != 1288 if ((prev_ia_pos & (s64)PAGE_MASK) !=
1289 (ia_pos & (s64)PAGE_CACHE_MASK)) { 1289 (ia_pos & (s64)PAGE_MASK)) {
1290 prev_ia_pos = ia_pos; 1290 prev_ia_pos = ia_pos;
1291 if (likely(ia_page != NULL)) { 1291 if (likely(ia_page != NULL)) {
1292 unlock_page(ia_page); 1292 unlock_page(ia_page);
@@ -1296,7 +1296,7 @@ find_next_index_buffer:
1296 * Map the page cache page containing the current ia_pos, 1296 * Map the page cache page containing the current ia_pos,
1297 * reading it from disk if necessary. 1297 * reading it from disk if necessary.
1298 */ 1298 */
1299 ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT); 1299 ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
1300 if (IS_ERR(ia_page)) { 1300 if (IS_ERR(ia_page)) {
1301 ntfs_error(sb, "Reading index allocation data failed."); 1301 ntfs_error(sb, "Reading index allocation data failed.");
1302 err = PTR_ERR(ia_page); 1302 err = PTR_ERR(ia_page);
@@ -1307,10 +1307,10 @@ find_next_index_buffer:
1307 kaddr = (u8*)page_address(ia_page); 1307 kaddr = (u8*)page_address(ia_page);
1308 } 1308 }
1309 /* Get the current index buffer. */ 1309 /* Get the current index buffer. */
1310 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & 1310 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
1311 ~(s64)(ndir->itype.index.block_size - 1))); 1311 ~(s64)(ndir->itype.index.block_size - 1)));
1312 /* Bounds checks. */ 1312 /* Bounds checks. */
1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { 1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
1314 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 1314 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
1315 "inode 0x%lx or driver bug.", vdir->i_ino); 1315 "inode 0x%lx or driver bug.", vdir->i_ino);
1316 goto err_out; 1316 goto err_out;
@@ -1348,7 +1348,7 @@ find_next_index_buffer:
1348 goto err_out; 1348 goto err_out;
1349 } 1349 }
1350 index_end = (u8*)ia + ndir->itype.index.block_size; 1350 index_end = (u8*)ia + ndir->itype.index.block_size;
1351 if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) { 1351 if (unlikely(index_end > kaddr + PAGE_SIZE)) {
1352 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 1352 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
1353 "0x%lx crosses page boundary. Impossible! " 1353 "0x%lx crosses page boundary. Impossible! "
1354 "Cannot access! This is probably a bug in the " 1354 "Cannot access! This is probably a bug in the "
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index bed4d427dfae..91117ada8528 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -220,8 +220,8 @@ do_non_resident_extend:
220 m = NULL; 220 m = NULL;
221 } 221 }
222 mapping = vi->i_mapping; 222 mapping = vi->i_mapping;
223 index = old_init_size >> PAGE_CACHE_SHIFT; 223 index = old_init_size >> PAGE_SHIFT;
224 end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 224 end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
225 do { 225 do {
226 /* 226 /*
227 * Read the page. If the page is not present, this will zero 227 * Read the page. If the page is not present, this will zero
@@ -233,7 +233,7 @@ do_non_resident_extend:
233 goto init_err_out; 233 goto init_err_out;
234 } 234 }
235 if (unlikely(PageError(page))) { 235 if (unlikely(PageError(page))) {
236 page_cache_release(page); 236 put_page(page);
237 err = -EIO; 237 err = -EIO;
238 goto init_err_out; 238 goto init_err_out;
239 } 239 }
@@ -242,13 +242,13 @@ do_non_resident_extend:
242 * enough to make ntfs_writepage() work. 242 * enough to make ntfs_writepage() work.
243 */ 243 */
244 write_lock_irqsave(&ni->size_lock, flags); 244 write_lock_irqsave(&ni->size_lock, flags);
245 ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT; 245 ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
246 if (ni->initialized_size > new_init_size) 246 if (ni->initialized_size > new_init_size)
247 ni->initialized_size = new_init_size; 247 ni->initialized_size = new_init_size;
248 write_unlock_irqrestore(&ni->size_lock, flags); 248 write_unlock_irqrestore(&ni->size_lock, flags);
249 /* Set the page dirty so it gets written out. */ 249 /* Set the page dirty so it gets written out. */
250 set_page_dirty(page); 250 set_page_dirty(page);
251 page_cache_release(page); 251 put_page(page);
252 /* 252 /*
253 * Play nice with the vm and the rest of the system. This is 253 * Play nice with the vm and the rest of the system. This is
254 * very much needed as we can potentially be modifying the 254 * very much needed as we can potentially be modifying the
@@ -543,7 +543,7 @@ out:
543err_out: 543err_out:
544 while (nr > 0) { 544 while (nr > 0) {
545 unlock_page(pages[--nr]); 545 unlock_page(pages[--nr]);
546 page_cache_release(pages[nr]); 546 put_page(pages[nr]);
547 } 547 }
548 goto out; 548 goto out;
549} 549}
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
573 * only partially being written to. 573 * only partially being written to.
574 * 574 *
575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is 575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
576 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside 576 * greater than PAGE_SIZE, that all pages in @pages are entirely inside
577 * the same cluster and that they are the entirety of that cluster, and that 577 * the same cluster and that they are the entirety of that cluster, and that
578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
579 * 579 *
@@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
653 u = 0; 653 u = 0;
654do_next_page: 654do_next_page:
655 page = pages[u]; 655 page = pages[u];
656 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 656 bh_pos = (s64)page->index << PAGE_SHIFT;
657 bh = head = page_buffers(page); 657 bh = head = page_buffers(page);
658 do { 658 do {
659 VCN cdelta; 659 VCN cdelta;
@@ -810,11 +810,11 @@ map_buffer_cached:
810 810
811 kaddr = kmap_atomic(page); 811 kaddr = kmap_atomic(page);
812 if (bh_pos < pos) { 812 if (bh_pos < pos) {
813 pofs = bh_pos & ~PAGE_CACHE_MASK; 813 pofs = bh_pos & ~PAGE_MASK;
814 memset(kaddr + pofs, 0, pos - bh_pos); 814 memset(kaddr + pofs, 0, pos - bh_pos);
815 } 815 }
816 if (bh_end > end) { 816 if (bh_end > end) {
817 pofs = end & ~PAGE_CACHE_MASK; 817 pofs = end & ~PAGE_MASK;
818 memset(kaddr + pofs, 0, bh_end - end); 818 memset(kaddr + pofs, 0, bh_end - end);
819 } 819 }
820 kunmap_atomic(kaddr); 820 kunmap_atomic(kaddr);
@@ -942,7 +942,7 @@ rl_not_mapped_enoent:
942 * unmapped. This can only happen when the cluster size is 942 * unmapped. This can only happen when the cluster size is
943 * less than the page cache size. 943 * less than the page cache size.
944 */ 944 */
945 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { 945 if (unlikely(vol->cluster_size < PAGE_SIZE)) {
946 bh_cend = (bh_end + vol->cluster_size - 1) >> 946 bh_cend = (bh_end + vol->cluster_size - 1) >>
947 vol->cluster_size_bits; 947 vol->cluster_size_bits;
948 if ((bh_cend <= cpos || bh_cpos >= cend)) { 948 if ((bh_cend <= cpos || bh_cpos >= cend)) {
@@ -1208,7 +1208,7 @@ rl_not_mapped_enoent:
1208 wait_on_buffer(bh); 1208 wait_on_buffer(bh);
1209 if (likely(buffer_uptodate(bh))) { 1209 if (likely(buffer_uptodate(bh))) {
1210 page = bh->b_page; 1210 page = bh->b_page;
1211 bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + 1211 bh_pos = ((s64)page->index << PAGE_SHIFT) +
1212 bh_offset(bh); 1212 bh_offset(bh);
1213 /* 1213 /*
1214 * If the buffer overflows the initialized size, need 1214 * If the buffer overflows the initialized size, need
@@ -1350,7 +1350,7 @@ rl_not_mapped_enoent:
1350 bh = head = page_buffers(page); 1350 bh = head = page_buffers(page);
1351 do { 1351 do {
1352 if (u == nr_pages && 1352 if (u == nr_pages &&
1353 ((s64)page->index << PAGE_CACHE_SHIFT) + 1353 ((s64)page->index << PAGE_SHIFT) +
1354 bh_offset(bh) >= end) 1354 bh_offset(bh) >= end)
1355 break; 1355 break;
1356 if (!buffer_new(bh)) 1356 if (!buffer_new(bh))
@@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
1422 bool partial; 1422 bool partial;
1423 1423
1424 page = pages[u]; 1424 page = pages[u];
1425 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 1425 bh_pos = (s64)page->index << PAGE_SHIFT;
1426 bh = head = page_buffers(page); 1426 bh = head = page_buffers(page);
1427 partial = false; 1427 partial = false;
1428 do { 1428 do {
@@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
1639 if (end < attr_len) 1639 if (end < attr_len)
1640 memcpy(kaddr + end, kattr + end, attr_len - end); 1640 memcpy(kaddr + end, kattr + end, attr_len - end);
1641 /* Zero the region outside the end of the attribute value. */ 1641 /* Zero the region outside the end of the attribute value. */
1642 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1642 memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
1643 flush_dcache_page(page); 1643 flush_dcache_page(page);
1644 SetPageUptodate(page); 1644 SetPageUptodate(page);
1645 } 1645 }
@@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
1706 unsigned len, copied; 1706 unsigned len, copied;
1707 1707
1708 do { 1708 do {
1709 len = PAGE_CACHE_SIZE - ofs; 1709 len = PAGE_SIZE - ofs;
1710 if (len > bytes) 1710 if (len > bytes)
1711 len = bytes; 1711 len = bytes;
1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, 1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
@@ -1724,14 +1724,14 @@ out:
1724 return total; 1724 return total;
1725err: 1725err:
1726 /* Zero the rest of the target like __copy_from_user(). */ 1726 /* Zero the rest of the target like __copy_from_user(). */
1727 len = PAGE_CACHE_SIZE - copied; 1727 len = PAGE_SIZE - copied;
1728 do { 1728 do {
1729 if (len > bytes) 1729 if (len > bytes)
1730 len = bytes; 1730 len = bytes;
1731 zero_user(*pages, copied, len); 1731 zero_user(*pages, copied, len);
1732 bytes -= len; 1732 bytes -= len;
1733 copied = 0; 1733 copied = 0;
1734 len = PAGE_CACHE_SIZE; 1734 len = PAGE_SIZE;
1735 } while (++pages < last_page); 1735 } while (++pages < last_page);
1736 goto out; 1736 goto out;
1737} 1737}
@@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1787 * attributes. 1787 * attributes.
1788 */ 1788 */
1789 nr_pages = 1; 1789 nr_pages = 1;
1790 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) 1790 if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
1791 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; 1791 nr_pages = vol->cluster_size >> PAGE_SHIFT;
1792 last_vcn = -1; 1792 last_vcn = -1;
1793 do { 1793 do {
1794 VCN vcn; 1794 VCN vcn;
@@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1796 unsigned ofs, do_pages, u; 1796 unsigned ofs, do_pages, u;
1797 size_t copied; 1797 size_t copied;
1798 1798
1799 start_idx = idx = pos >> PAGE_CACHE_SHIFT; 1799 start_idx = idx = pos >> PAGE_SHIFT;
1800 ofs = pos & ~PAGE_CACHE_MASK; 1800 ofs = pos & ~PAGE_MASK;
1801 bytes = PAGE_CACHE_SIZE - ofs; 1801 bytes = PAGE_SIZE - ofs;
1802 do_pages = 1; 1802 do_pages = 1;
1803 if (nr_pages > 1) { 1803 if (nr_pages > 1) {
1804 vcn = pos >> vol->cluster_size_bits; 1804 vcn = pos >> vol->cluster_size_bits;
@@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
1832 if (lcn == LCN_HOLE) { 1832 if (lcn == LCN_HOLE) {
1833 start_idx = (pos & ~(s64) 1833 start_idx = (pos & ~(s64)
1834 vol->cluster_size_mask) 1834 vol->cluster_size_mask)
1835 >> PAGE_CACHE_SHIFT; 1835 >> PAGE_SHIFT;
1836 bytes = vol->cluster_size - (pos & 1836 bytes = vol->cluster_size - (pos &
1837 vol->cluster_size_mask); 1837 vol->cluster_size_mask);
1838 do_pages = nr_pages; 1838 do_pages = nr_pages;
@@ -1871,12 +1871,12 @@ again:
1871 if (unlikely(status)) { 1871 if (unlikely(status)) {
1872 do { 1872 do {
1873 unlock_page(pages[--do_pages]); 1873 unlock_page(pages[--do_pages]);
1874 page_cache_release(pages[do_pages]); 1874 put_page(pages[do_pages]);
1875 } while (do_pages); 1875 } while (do_pages);
1876 break; 1876 break;
1877 } 1877 }
1878 } 1878 }
1879 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; 1879 u = (pos >> PAGE_SHIFT) - pages[0]->index;
1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, 1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
1881 i, bytes); 1881 i, bytes);
1882 ntfs_flush_dcache_pages(pages + u, do_pages - u); 1882 ntfs_flush_dcache_pages(pages + u, do_pages - u);
@@ -1889,7 +1889,7 @@ again:
1889 } 1889 }
1890 do { 1890 do {
1891 unlock_page(pages[--do_pages]); 1891 unlock_page(pages[--do_pages]);
1892 page_cache_release(pages[do_pages]); 1892 put_page(pages[do_pages]);
1893 } while (do_pages); 1893 } while (do_pages);
1894 if (unlikely(status < 0)) 1894 if (unlikely(status < 0))
1895 break; 1895 break;
@@ -1921,7 +1921,7 @@ again:
1921 } 1921 }
1922 } while (iov_iter_count(i)); 1922 } while (iov_iter_count(i));
1923 if (cached_page) 1923 if (cached_page)
1924 page_cache_release(cached_page); 1924 put_page(cached_page);
1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", 1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
1926 written ? "written" : "status", (unsigned long)written, 1926 written ? "written" : "status", (unsigned long)written,
1927 (long)status); 1927 (long)status);
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 096c135691ae..0d645f357930 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,11 +272,11 @@ done:
272descend_into_child_node: 272descend_into_child_node:
273 /* 273 /*
274 * Convert vcn to index into the index allocation attribute in units 274 * Convert vcn to index into the index allocation attribute in units
275 * of PAGE_CACHE_SIZE and map the page cache page, reading it from 275 * of PAGE_SIZE and map the page cache page, reading it from
276 * disk if necessary. 276 * disk if necessary.
277 */ 277 */
278 page = ntfs_map_page(ia_mapping, vcn << 278 page = ntfs_map_page(ia_mapping, vcn <<
279 idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 279 idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
280 if (IS_ERR(page)) { 280 if (IS_ERR(page)) {
281 ntfs_error(sb, "Failed to map index page, error %ld.", 281 ntfs_error(sb, "Failed to map index page, error %ld.",
282 -PTR_ERR(page)); 282 -PTR_ERR(page));
@@ -288,9 +288,9 @@ descend_into_child_node:
288fast_descend_into_child_node: 288fast_descend_into_child_node:
289 /* Get to the index allocation block. */ 289 /* Get to the index allocation block. */
290 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 290 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
291 idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 291 idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
292 /* Bounds checks. */ 292 /* Bounds checks. */
293 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 293 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
294 ntfs_error(sb, "Out of bounds check failed. Corrupt inode " 294 ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
295 "0x%lx or driver bug.", idx_ni->mft_no); 295 "0x%lx or driver bug.", idx_ni->mft_no);
296 goto unm_err_out; 296 goto unm_err_out;
@@ -323,7 +323,7 @@ fast_descend_into_child_node:
323 goto unm_err_out; 323 goto unm_err_out;
324 } 324 }
325 index_end = (u8*)ia + idx_ni->itype.index.block_size; 325 index_end = (u8*)ia + idx_ni->itype.index.block_size;
326 if (index_end > kaddr + PAGE_CACHE_SIZE) { 326 if (index_end > kaddr + PAGE_SIZE) {
327 ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx " 327 ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
328 "crosses page boundary. Impossible! Cannot " 328 "crosses page boundary. Impossible! Cannot "
329 "access! This is probably a bug in the " 329 "access! This is probably a bug in the "
@@ -427,9 +427,9 @@ ia_done:
427 * the mapped page. 427 * the mapped page.
428 */ 428 */
429 if (old_vcn << vol->cluster_size_bits >> 429 if (old_vcn << vol->cluster_size_bits >>
430 PAGE_CACHE_SHIFT == vcn << 430 PAGE_SHIFT == vcn <<
431 vol->cluster_size_bits >> 431 vol->cluster_size_bits >>
432 PAGE_CACHE_SHIFT) 432 PAGE_SHIFT)
433 goto fast_descend_into_child_node; 433 goto fast_descend_into_child_node;
434 unlock_page(page); 434 unlock_page(page);
435 ntfs_unmap_page(page); 435 ntfs_unmap_page(page);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d284f07eda77..f40972d6df90 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -868,12 +868,12 @@ skip_attr_list_load:
868 ni->itype.index.block_size); 868 ni->itype.index.block_size);
869 goto unm_err_out; 869 goto unm_err_out;
870 } 870 }
871 if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { 871 if (ni->itype.index.block_size > PAGE_SIZE) {
872 ntfs_error(vi->i_sb, "Index block size (%u) > " 872 ntfs_error(vi->i_sb, "Index block size (%u) > "
873 "PAGE_CACHE_SIZE (%ld) is not " 873 "PAGE_SIZE (%ld) is not "
874 "supported. Sorry.", 874 "supported. Sorry.",
875 ni->itype.index.block_size, 875 ni->itype.index.block_size,
876 PAGE_CACHE_SIZE); 876 PAGE_SIZE);
877 err = -EOPNOTSUPP; 877 err = -EOPNOTSUPP;
878 goto unm_err_out; 878 goto unm_err_out;
879 } 879 }
@@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
1585 "two.", ni->itype.index.block_size); 1585 "two.", ni->itype.index.block_size);
1586 goto unm_err_out; 1586 goto unm_err_out;
1587 } 1587 }
1588 if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { 1588 if (ni->itype.index.block_size > PAGE_SIZE) {
1589 ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE " 1589 ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
1590 "(%ld) is not supported. Sorry.", 1590 "(%ld) is not supported. Sorry.",
1591 ni->itype.index.block_size, PAGE_CACHE_SIZE); 1591 ni->itype.index.block_size, PAGE_SIZE);
1592 err = -EOPNOTSUPP; 1592 err = -EOPNOTSUPP;
1593 goto unm_err_out; 1593 goto unm_err_out;
1594 } 1594 }
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 1711b710b641..27a24a42f712 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
283 ntfs_unmap_page(page); 283 ntfs_unmap_page(page);
284 } 284 }
285 page = ntfs_map_page(mapping, last_read_pos >> 285 page = ntfs_map_page(mapping, last_read_pos >>
286 PAGE_CACHE_SHIFT); 286 PAGE_SHIFT);
287 if (IS_ERR(page)) { 287 if (IS_ERR(page)) {
288 err = PTR_ERR(page); 288 err = PTR_ERR(page);
289 ntfs_error(vol->sb, "Failed to map page."); 289 ntfs_error(vol->sb, "Failed to map page.");
290 goto out; 290 goto out;
291 } 291 }
292 buf_size = last_read_pos & ~PAGE_CACHE_MASK; 292 buf_size = last_read_pos & ~PAGE_MASK;
293 buf = page_address(page) + buf_size; 293 buf = page_address(page) + buf_size;
294 buf_size = PAGE_CACHE_SIZE - buf_size; 294 buf_size = PAGE_SIZE - buf_size;
295 if (unlikely(last_read_pos + buf_size > i_size)) 295 if (unlikely(last_read_pos + buf_size > i_size))
296 buf_size = i_size - last_read_pos; 296 buf_size = i_size - last_read_pos;
297 buf_size <<= 3; 297 buf_size <<= 3;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index c71de292c5ad..9d71213ca81e 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
381 * completely inside @rp, just copy it from there. Otherwise map all 381 * completely inside @rp, just copy it from there. Otherwise map all
382 * the required pages and copy the data from them. 382 * the required pages and copy the data from them.
383 */ 383 */
384 size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK); 384 size = PAGE_SIZE - (pos & ~PAGE_MASK);
385 if (size >= le32_to_cpu(rp->system_page_size)) { 385 if (size >= le32_to_cpu(rp->system_page_size)) {
386 memcpy(trp, rp, le32_to_cpu(rp->system_page_size)); 386 memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
387 } else { 387 } else {
@@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
394 /* Copy the remaining data one page at a time. */ 394 /* Copy the remaining data one page at a time. */
395 have_read = size; 395 have_read = size;
396 to_read = le32_to_cpu(rp->system_page_size) - size; 396 to_read = le32_to_cpu(rp->system_page_size) - size;
397 idx = (pos + size) >> PAGE_CACHE_SHIFT; 397 idx = (pos + size) >> PAGE_SHIFT;
398 BUG_ON((pos + size) & ~PAGE_CACHE_MASK); 398 BUG_ON((pos + size) & ~PAGE_MASK);
399 do { 399 do {
400 page = ntfs_map_page(vi->i_mapping, idx); 400 page = ntfs_map_page(vi->i_mapping, idx);
401 if (IS_ERR(page)) { 401 if (IS_ERR(page)) {
@@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
406 err = -EIO; 406 err = -EIO;
407 goto err_out; 407 goto err_out;
408 } 408 }
409 size = min_t(int, to_read, PAGE_CACHE_SIZE); 409 size = min_t(int, to_read, PAGE_SIZE);
410 memcpy((u8*)trp + have_read, page_address(page), size); 410 memcpy((u8*)trp + have_read, page_address(page), size);
411 ntfs_unmap_page(page); 411 ntfs_unmap_page(page);
412 have_read += size; 412 have_read += size;
@@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
509 * log page size if the page cache size is between the default log page 509 * log page size if the page cache size is between the default log page
510 * size and twice that. 510 * size and twice that.
511 */ 511 */
512 if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <= 512 if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
513 DefaultLogPageSize * 2) 513 DefaultLogPageSize * 2)
514 log_page_size = DefaultLogPageSize; 514 log_page_size = DefaultLogPageSize;
515 else 515 else
516 log_page_size = PAGE_CACHE_SIZE; 516 log_page_size = PAGE_SIZE;
517 log_page_mask = log_page_size - 1; 517 log_page_mask = log_page_size - 1;
518 /* 518 /*
519 * Use ntfs_ffs() instead of ffs() to enable the compiler to 519 * Use ntfs_ffs() instead of ffs() to enable the compiler to
@@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
539 * to be empty. 539 * to be empty.
540 */ 540 */
541 for (pos = 0; pos < size; pos <<= 1) { 541 for (pos = 0; pos < size; pos <<= 1) {
542 pgoff_t idx = pos >> PAGE_CACHE_SHIFT; 542 pgoff_t idx = pos >> PAGE_SHIFT;
543 if (!page || page->index != idx) { 543 if (!page || page->index != idx) {
544 if (page) 544 if (page)
545 ntfs_unmap_page(page); 545 ntfs_unmap_page(page);
@@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
550 goto err_out; 550 goto err_out;
551 } 551 }
552 } 552 }
553 kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK); 553 kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
554 /* 554 /*
555 * A non-empty block means the logfile is not empty while an 555 * A non-empty block means the logfile is not empty while an
556 * empty block after a non-empty block has been encountered 556 * empty block after a non-empty block has been encountered
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 3014a36a255b..37b2501caaa4 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
61 * here if the volume was that big... 61 * here if the volume was that big...
62 */ 62 */
63 index = (u64)ni->mft_no << vol->mft_record_size_bits >> 63 index = (u64)ni->mft_no << vol->mft_record_size_bits >>
64 PAGE_CACHE_SHIFT; 64 PAGE_SHIFT;
65 ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 65 ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
66 66
67 i_size = i_size_read(mft_vi); 67 i_size = i_size_read(mft_vi);
68 /* The maximum valid index into the page cache for $MFT's data. */ 68 /* The maximum valid index into the page cache for $MFT's data. */
69 end_index = i_size >> PAGE_CACHE_SHIFT; 69 end_index = i_size >> PAGE_SHIFT;
70 70
71 /* If the wanted index is out of bounds the mft record doesn't exist. */ 71 /* If the wanted index is out of bounds the mft record doesn't exist. */
72 if (unlikely(index >= end_index)) { 72 if (unlikely(index >= end_index)) {
73 if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs + 73 if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
74 vol->mft_record_size) { 74 vol->mft_record_size) {
75 page = ERR_PTR(-ENOENT); 75 page = ERR_PTR(-ENOENT);
76 ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, " 76 ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
@@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
487 } 487 }
488 /* Get the page containing the mirror copy of the mft record @m. */ 488 /* Get the page containing the mirror copy of the mft record @m. */
489 page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >> 489 page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
490 (PAGE_CACHE_SHIFT - vol->mft_record_size_bits)); 490 (PAGE_SHIFT - vol->mft_record_size_bits));
491 if (IS_ERR(page)) { 491 if (IS_ERR(page)) {
492 ntfs_error(vol->sb, "Failed to map mft mirror page."); 492 ntfs_error(vol->sb, "Failed to map mft mirror page.");
493 err = PTR_ERR(page); 493 err = PTR_ERR(page);
@@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
497 BUG_ON(!PageUptodate(page)); 497 BUG_ON(!PageUptodate(page));
498 ClearPageUptodate(page); 498 ClearPageUptodate(page);
499 /* Offset of the mft mirror record inside the page. */ 499 /* Offset of the mft mirror record inside the page. */
500 page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 500 page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
501 /* The address in the page of the mirror copy of the mft record @m. */ 501 /* The address in the page of the mirror copy of the mft record @m. */
502 kmirr = page_address(page) + page_ofs; 502 kmirr = page_address(page) + page_ofs;
503 /* Copy the mst protected mft record to the mirror. */ 503 /* Copy the mst protected mft record to the mirror. */
@@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
1178 for (; pass <= 2;) { 1178 for (; pass <= 2;) {
1179 /* Cap size to pass_end. */ 1179 /* Cap size to pass_end. */
1180 ofs = data_pos >> 3; 1180 ofs = data_pos >> 3;
1181 page_ofs = ofs & ~PAGE_CACHE_MASK; 1181 page_ofs = ofs & ~PAGE_MASK;
1182 size = PAGE_CACHE_SIZE - page_ofs; 1182 size = PAGE_SIZE - page_ofs;
1183 ll = ((pass_end + 7) >> 3) - ofs; 1183 ll = ((pass_end + 7) >> 3) - ofs;
1184 if (size > ll) 1184 if (size > ll)
1185 size = ll; 1185 size = ll;
@@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
1190 */ 1190 */
1191 if (size) { 1191 if (size) {
1192 page = ntfs_map_page(mftbmp_mapping, 1192 page = ntfs_map_page(mftbmp_mapping,
1193 ofs >> PAGE_CACHE_SHIFT); 1193 ofs >> PAGE_SHIFT);
1194 if (IS_ERR(page)) { 1194 if (IS_ERR(page)) {
1195 ntfs_error(vol->sb, "Failed to read mft " 1195 ntfs_error(vol->sb, "Failed to read mft "
1196 "bitmap, aborting."); 1196 "bitmap, aborting.");
@@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
1328 */ 1328 */
1329 ll = lcn >> 3; 1329 ll = lcn >> 3;
1330 page = ntfs_map_page(vol->lcnbmp_ino->i_mapping, 1330 page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
1331 ll >> PAGE_CACHE_SHIFT); 1331 ll >> PAGE_SHIFT);
1332 if (IS_ERR(page)) { 1332 if (IS_ERR(page)) {
1333 up_write(&mftbmp_ni->runlist.lock); 1333 up_write(&mftbmp_ni->runlist.lock);
1334 ntfs_error(vol->sb, "Failed to read from lcn bitmap."); 1334 ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
1335 return PTR_ERR(page); 1335 return PTR_ERR(page);
1336 } 1336 }
1337 b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK); 1337 b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
1338 tb = 1 << (lcn & 7ull); 1338 tb = 1 << (lcn & 7ull);
1339 down_write(&vol->lcnbmp_lock); 1339 down_write(&vol->lcnbmp_lock);
1340 if (*b != 0xff && !(*b & tb)) { 1340 if (*b != 0xff && !(*b & tb)) {
@@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
2103 * The index into the page cache and the offset within the page cache 2103 * The index into the page cache and the offset within the page cache
2104 * page of the wanted mft record. 2104 * page of the wanted mft record.
2105 */ 2105 */
2106 index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; 2106 index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
2107 ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 2107 ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
2108 /* The maximum valid index into the page cache for $MFT's data. */ 2108 /* The maximum valid index into the page cache for $MFT's data. */
2109 i_size = i_size_read(mft_vi); 2109 i_size = i_size_read(mft_vi);
2110 end_index = i_size >> PAGE_CACHE_SHIFT; 2110 end_index = i_size >> PAGE_SHIFT;
2111 if (unlikely(index >= end_index)) { 2111 if (unlikely(index >= end_index)) {
2112 if (unlikely(index > end_index || ofs + vol->mft_record_size >= 2112 if (unlikely(index > end_index || ofs + vol->mft_record_size >=
2113 (i_size & ~PAGE_CACHE_MASK))) { 2113 (i_size & ~PAGE_MASK))) {
2114 ntfs_error(vol->sb, "Tried to format non-existing mft " 2114 ntfs_error(vol->sb, "Tried to format non-existing mft "
2115 "record 0x%llx.", (long long)mft_no); 2115 "record 0x%llx.", (long long)mft_no);
2116 return -ENOENT; 2116 return -ENOENT;
@@ -2515,8 +2515,8 @@ mft_rec_already_initialized:
2515 * We now have allocated and initialized the mft record. Calculate the 2515 * We now have allocated and initialized the mft record. Calculate the
2516 * index of and the offset within the page cache page the record is in. 2516 * index of and the offset within the page cache page the record is in.
2517 */ 2517 */
2518 index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; 2518 index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
2519 ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 2519 ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
2520 /* Read, map, and pin the page containing the mft record. */ 2520 /* Read, map, and pin the page containing the mft record. */
2521 page = ntfs_map_page(vol->mft_ino->i_mapping, index); 2521 page = ntfs_map_page(vol->mft_ino->i_mapping, index);
2522 if (IS_ERR(page)) { 2522 if (IS_ERR(page)) {
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index c581e26a350d..12de47b96ca9 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -43,7 +43,7 @@ typedef enum {
43 NTFS_MAX_NAME_LEN = 255, 43 NTFS_MAX_NAME_LEN = 255,
44 NTFS_MAX_ATTR_NAME_LEN = 255, 44 NTFS_MAX_ATTR_NAME_LEN = 255,
45 NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */ 45 NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */
46 NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE, 46 NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE,
47} NTFS_CONSTANTS; 47} NTFS_CONSTANTS;
48 48
49/* Global variables. */ 49/* Global variables. */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1b38abdaa3ed..ecb49870a680 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
823 ntfs_debug("vol->mft_record_size_bits = %i (0x%x)", 823 ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
824 vol->mft_record_size_bits, vol->mft_record_size_bits); 824 vol->mft_record_size_bits, vol->mft_record_size_bits);
825 /* 825 /*
826 * We cannot support mft record sizes above the PAGE_CACHE_SIZE since 826 * We cannot support mft record sizes above the PAGE_SIZE since
827 * we store $MFT/$DATA, the table of mft records in the page cache. 827 * we store $MFT/$DATA, the table of mft records in the page cache.
828 */ 828 */
829 if (vol->mft_record_size > PAGE_CACHE_SIZE) { 829 if (vol->mft_record_size > PAGE_SIZE) {
830 ntfs_error(vol->sb, "Mft record size (%i) exceeds the " 830 ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
831 "PAGE_CACHE_SIZE on your system (%lu). " 831 "PAGE_SIZE on your system (%lu). "
832 "This is not supported. Sorry.", 832 "This is not supported. Sorry.",
833 vol->mft_record_size, PAGE_CACHE_SIZE); 833 vol->mft_record_size, PAGE_SIZE);
834 return false; 834 return false;
835 } 835 }
836 /* We cannot support mft record sizes below the sector size. */ 836 /* We cannot support mft record sizes below the sector size. */
@@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol)
1096 1096
1097 ntfs_debug("Entering."); 1097 ntfs_debug("Entering.");
1098 /* Compare contents of $MFT and $MFTMirr. */ 1098 /* Compare contents of $MFT and $MFTMirr. */
1099 mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size; 1099 mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
1100 BUG_ON(!mrecs_per_page); 1100 BUG_ON(!mrecs_per_page);
1101 BUG_ON(!vol->mftmirr_size); 1101 BUG_ON(!vol->mftmirr_size);
1102 mft_page = mirr_page = NULL; 1102 mft_page = mirr_page = NULL;
@@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol)
1615 if (!vol->attrdef) 1615 if (!vol->attrdef)
1616 goto iput_failed; 1616 goto iput_failed;
1617 index = 0; 1617 index = 0;
1618 max_index = i_size >> PAGE_CACHE_SHIFT; 1618 max_index = i_size >> PAGE_SHIFT;
1619 size = PAGE_CACHE_SIZE; 1619 size = PAGE_SIZE;
1620 while (index < max_index) { 1620 while (index < max_index) {
1621 /* Read the attrdef table and copy it into the linear buffer. */ 1621 /* Read the attrdef table and copy it into the linear buffer. */
1622read_partial_attrdef_page: 1622read_partial_attrdef_page:
1623 page = ntfs_map_page(ino->i_mapping, index); 1623 page = ntfs_map_page(ino->i_mapping, index);
1624 if (IS_ERR(page)) 1624 if (IS_ERR(page))
1625 goto free_iput_failed; 1625 goto free_iput_failed;
1626 memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT), 1626 memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
1627 page_address(page), size); 1627 page_address(page), size);
1628 ntfs_unmap_page(page); 1628 ntfs_unmap_page(page);
1629 }; 1629 };
1630 if (size == PAGE_CACHE_SIZE) { 1630 if (size == PAGE_SIZE) {
1631 size = i_size & ~PAGE_CACHE_MASK; 1631 size = i_size & ~PAGE_MASK;
1632 if (size) 1632 if (size)
1633 goto read_partial_attrdef_page; 1633 goto read_partial_attrdef_page;
1634 } 1634 }
@@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol)
1684 if (!vol->upcase) 1684 if (!vol->upcase)
1685 goto iput_upcase_failed; 1685 goto iput_upcase_failed;
1686 index = 0; 1686 index = 0;
1687 max_index = i_size >> PAGE_CACHE_SHIFT; 1687 max_index = i_size >> PAGE_SHIFT;
1688 size = PAGE_CACHE_SIZE; 1688 size = PAGE_SIZE;
1689 while (index < max_index) { 1689 while (index < max_index) {
1690 /* Read the upcase table and copy it into the linear buffer. */ 1690 /* Read the upcase table and copy it into the linear buffer. */
1691read_partial_upcase_page: 1691read_partial_upcase_page:
1692 page = ntfs_map_page(ino->i_mapping, index); 1692 page = ntfs_map_page(ino->i_mapping, index);
1693 if (IS_ERR(page)) 1693 if (IS_ERR(page))
1694 goto iput_upcase_failed; 1694 goto iput_upcase_failed;
1695 memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT), 1695 memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
1696 page_address(page), size); 1696 page_address(page), size);
1697 ntfs_unmap_page(page); 1697 ntfs_unmap_page(page);
1698 }; 1698 };
1699 if (size == PAGE_CACHE_SIZE) { 1699 if (size == PAGE_SIZE) {
1700 size = i_size & ~PAGE_CACHE_MASK; 1700 size = i_size & ~PAGE_MASK;
1701 if (size) 1701 if (size)
1702 goto read_partial_upcase_page; 1702 goto read_partial_upcase_page;
1703 } 1703 }
@@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2471 down_read(&vol->lcnbmp_lock); 2471 down_read(&vol->lcnbmp_lock);
2472 /* 2472 /*
2473 * Convert the number of bits into bytes rounded up, then convert into 2473 * Convert the number of bits into bytes rounded up, then convert into
2474 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one 2474 * multiples of PAGE_SIZE, rounding up so that if we have one
2475 * full and one partial page max_index = 2. 2475 * full and one partial page max_index = 2.
2476 */ 2476 */
2477 max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> 2477 max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
2478 PAGE_CACHE_SHIFT; 2478 PAGE_SHIFT;
2479 /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ 2479 /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
2480 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", 2480 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
2481 max_index, PAGE_CACHE_SIZE / 4); 2481 max_index, PAGE_SIZE / 4);
2482 for (index = 0; index < max_index; index++) { 2482 for (index = 0; index < max_index; index++) {
2483 unsigned long *kaddr; 2483 unsigned long *kaddr;
2484 2484
@@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2491 if (IS_ERR(page)) { 2491 if (IS_ERR(page)) {
2492 ntfs_debug("read_mapping_page() error. Skipping " 2492 ntfs_debug("read_mapping_page() error. Skipping "
2493 "page (index 0x%lx).", index); 2493 "page (index 0x%lx).", index);
2494 nr_free -= PAGE_CACHE_SIZE * 8; 2494 nr_free -= PAGE_SIZE * 8;
2495 continue; 2495 continue;
2496 } 2496 }
2497 kaddr = kmap_atomic(page); 2497 kaddr = kmap_atomic(page);
@@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2503 * ntfs_readpage(). 2503 * ntfs_readpage().
2504 */ 2504 */
2505 nr_free -= bitmap_weight(kaddr, 2505 nr_free -= bitmap_weight(kaddr,
2506 PAGE_CACHE_SIZE * BITS_PER_BYTE); 2506 PAGE_SIZE * BITS_PER_BYTE);
2507 kunmap_atomic(kaddr); 2507 kunmap_atomic(kaddr);
2508 page_cache_release(page); 2508 put_page(page);
2509 } 2509 }
2510 ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1); 2510 ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
2511 /* 2511 /*
@@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2547 pgoff_t index; 2547 pgoff_t index;
2548 2548
2549 ntfs_debug("Entering."); 2549 ntfs_debug("Entering.");
2550 /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ 2550 /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
2551 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " 2551 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
2552 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); 2552 "0x%lx.", max_index, PAGE_SIZE / 4);
2553 for (index = 0; index < max_index; index++) { 2553 for (index = 0; index < max_index; index++) {
2554 unsigned long *kaddr; 2554 unsigned long *kaddr;
2555 2555
@@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2562 if (IS_ERR(page)) { 2562 if (IS_ERR(page)) {
2563 ntfs_debug("read_mapping_page() error. Skipping " 2563 ntfs_debug("read_mapping_page() error. Skipping "
2564 "page (index 0x%lx).", index); 2564 "page (index 0x%lx).", index);
2565 nr_free -= PAGE_CACHE_SIZE * 8; 2565 nr_free -= PAGE_SIZE * 8;
2566 continue; 2566 continue;
2567 } 2567 }
2568 kaddr = kmap_atomic(page); 2568 kaddr = kmap_atomic(page);
@@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2574 * ntfs_readpage(). 2574 * ntfs_readpage().
2575 */ 2575 */
2576 nr_free -= bitmap_weight(kaddr, 2576 nr_free -= bitmap_weight(kaddr,
2577 PAGE_CACHE_SIZE * BITS_PER_BYTE); 2577 PAGE_SIZE * BITS_PER_BYTE);
2578 kunmap_atomic(kaddr); 2578 kunmap_atomic(kaddr);
2579 page_cache_release(page); 2579 put_page(page);
2580 } 2580 }
2581 ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.", 2581 ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
2582 index - 1); 2582 index - 1);
@@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
2618 /* Type of filesystem. */ 2618 /* Type of filesystem. */
2619 sfs->f_type = NTFS_SB_MAGIC; 2619 sfs->f_type = NTFS_SB_MAGIC;
2620 /* Optimal transfer block size. */ 2620 /* Optimal transfer block size. */
2621 sfs->f_bsize = PAGE_CACHE_SIZE; 2621 sfs->f_bsize = PAGE_SIZE;
2622 /* 2622 /*
2623 * Total data blocks in filesystem in units of f_bsize and since 2623 * Total data blocks in filesystem in units of f_bsize and since
2624 * inodes are also stored in data blocs ($MFT is a file) this is just 2624 * inodes are also stored in data blocs ($MFT is a file) this is just
2625 * the total clusters. 2625 * the total clusters.
2626 */ 2626 */
2627 sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >> 2627 sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
2628 PAGE_CACHE_SHIFT; 2628 PAGE_SHIFT;
2629 /* Free data blocks in filesystem in units of f_bsize. */ 2629 /* Free data blocks in filesystem in units of f_bsize. */
2630 size = get_nr_free_clusters(vol) << vol->cluster_size_bits >> 2630 size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
2631 PAGE_CACHE_SHIFT; 2631 PAGE_SHIFT;
2632 if (size < 0LL) 2632 if (size < 0LL)
2633 size = 0LL; 2633 size = 0LL;
2634 /* Free blocks avail to non-superuser, same as above on NTFS. */ 2634 /* Free blocks avail to non-superuser, same as above on NTFS. */
@@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
2639 size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits; 2639 size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
2640 /* 2640 /*
2641 * Convert the maximum number of set bits into bytes rounded up, then 2641 * Convert the maximum number of set bits into bytes rounded up, then
2642 * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we 2642 * convert into multiples of PAGE_SIZE, rounding up so that if we
2643 * have one full and one partial page max_index = 2. 2643 * have one full and one partial page max_index = 2.
2644 */ 2644 */
2645 max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits) 2645 max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
2646 + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 2646 + 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
2647 read_unlock_irqrestore(&mft_ni->size_lock, flags); 2647 read_unlock_irqrestore(&mft_ni->size_lock, flags);
2648 /* Number of inodes in filesystem (at this point in time). */ 2648 /* Number of inodes in filesystem (at this point in time). */
2649 sfs->f_files = size; 2649 sfs->f_files = size;
@@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2765 if (!parse_options(vol, (char*)opt)) 2765 if (!parse_options(vol, (char*)opt))
2766 goto err_out_now; 2766 goto err_out_now;
2767 2767
2768 /* We support sector sizes up to the PAGE_CACHE_SIZE. */ 2768 /* We support sector sizes up to the PAGE_SIZE. */
2769 if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2769 if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
2770 if (!silent) 2770 if (!silent)
2771 ntfs_error(sb, "Device has unsupported sector size " 2771 ntfs_error(sb, "Device has unsupported sector size "
2772 "(%i). The maximum supported sector " 2772 "(%i). The maximum supported sector "
2773 "size on this architecture is %lu " 2773 "size on this architecture is %lu "
2774 "bytes.", 2774 "bytes.",
2775 bdev_logical_block_size(sb->s_bdev), 2775 bdev_logical_block_size(sb->s_bdev),
2776 PAGE_CACHE_SIZE); 2776 PAGE_SIZE);
2777 goto err_out_now; 2777 goto err_out_now;
2778 } 2778 }
2779 /* 2779 /*
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 70907d638b60..e361d1a0ca09 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6671,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
6671{ 6671{
6672 int i; 6672 int i;
6673 struct page *page; 6673 struct page *page;
6674 unsigned int from, to = PAGE_CACHE_SIZE; 6674 unsigned int from, to = PAGE_SIZE;
6675 struct super_block *sb = inode->i_sb; 6675 struct super_block *sb = inode->i_sb;
6676 6676
6677 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); 6677 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
@@ -6679,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
6679 if (numpages == 0) 6679 if (numpages == 0)
6680 goto out; 6680 goto out;
6681 6681
6682 to = PAGE_CACHE_SIZE; 6682 to = PAGE_SIZE;
6683 for(i = 0; i < numpages; i++) { 6683 for(i = 0; i < numpages; i++) {
6684 page = pages[i]; 6684 page = pages[i];
6685 6685
6686 from = start & (PAGE_CACHE_SIZE - 1); 6686 from = start & (PAGE_SIZE - 1);
6687 if ((end >> PAGE_CACHE_SHIFT) == page->index) 6687 if ((end >> PAGE_SHIFT) == page->index)
6688 to = end & (PAGE_CACHE_SIZE - 1); 6688 to = end & (PAGE_SIZE - 1);
6689 6689
6690 BUG_ON(from > PAGE_CACHE_SIZE); 6690 BUG_ON(from > PAGE_SIZE);
6691 BUG_ON(to > PAGE_CACHE_SIZE); 6691 BUG_ON(to > PAGE_SIZE);
6692 6692
6693 ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, 6693 ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
6694 &phys); 6694 &phys);
6695 6695
6696 start = (page->index + 1) << PAGE_CACHE_SHIFT; 6696 start = (page->index + 1) << PAGE_SHIFT;
6697 } 6697 }
6698out: 6698out:
6699 if (pages) 6699 if (pages)
@@ -6712,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
6712 6712
6713 numpages = 0; 6713 numpages = 0;
6714 last_page_bytes = PAGE_ALIGN(end); 6714 last_page_bytes = PAGE_ALIGN(end);
6715 index = start >> PAGE_CACHE_SHIFT; 6715 index = start >> PAGE_SHIFT;
6716 do { 6716 do {
6717 pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); 6717 pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
6718 if (!pages[numpages]) { 6718 if (!pages[numpages]) {
@@ -6723,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
6723 6723
6724 numpages++; 6724 numpages++;
6725 index++; 6725 index++;
6726 } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); 6726 } while (index < (last_page_bytes >> PAGE_SHIFT));
6727 6727
6728out: 6728out:
6729 if (ret != 0) { 6729 if (ret != 0) {
@@ -6950,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6950 * to do that now. 6950 * to do that now.
6951 */ 6951 */
6952 if (!ocfs2_sparse_alloc(osb) && 6952 if (!ocfs2_sparse_alloc(osb) &&
6953 PAGE_CACHE_SIZE < osb->s_clustersize) 6953 PAGE_SIZE < osb->s_clustersize)
6954 end = PAGE_CACHE_SIZE; 6954 end = PAGE_SIZE;
6955 6955
6956 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); 6956 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
6957 if (ret) { 6957 if (ret) {
@@ -6971,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6971 goto out_unlock; 6971 goto out_unlock;
6972 } 6972 }
6973 6973
6974 page_end = PAGE_CACHE_SIZE; 6974 page_end = PAGE_SIZE;
6975 if (PAGE_CACHE_SIZE > osb->s_clustersize) 6975 if (PAGE_SIZE > osb->s_clustersize)
6976 page_end = osb->s_clustersize; 6976 page_end = osb->s_clustersize;
6977 6977
6978 for (i = 0; i < num_pages; i++) 6978 for (i = 0; i < num_pages; i++)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1581240a7ca0..ad1577348a92 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
234 234
235 size = i_size_read(inode); 235 size = i_size_read(inode);
236 236
237 if (size > PAGE_CACHE_SIZE || 237 if (size > PAGE_SIZE ||
238 size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { 238 size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
239 ocfs2_error(inode->i_sb, 239 ocfs2_error(inode->i_sb,
240 "Inode %llu has with inline data has bad size: %Lu\n", 240 "Inode %llu has with inline data has bad size: %Lu\n",
@@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
247 if (size) 247 if (size)
248 memcpy(kaddr, di->id2.i_data.id_data, size); 248 memcpy(kaddr, di->id2.i_data.id_data, size);
249 /* Clear the remaining part of the page */ 249 /* Clear the remaining part of the page */
250 memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); 250 memset(kaddr + size, 0, PAGE_SIZE - size);
251 flush_dcache_page(page); 251 flush_dcache_page(page);
252 kunmap_atomic(kaddr); 252 kunmap_atomic(kaddr);
253 253
@@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
282{ 282{
283 struct inode *inode = page->mapping->host; 283 struct inode *inode = page->mapping->host;
284 struct ocfs2_inode_info *oi = OCFS2_I(inode); 284 struct ocfs2_inode_info *oi = OCFS2_I(inode);
285 loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; 285 loff_t start = (loff_t)page->index << PAGE_SHIFT;
286 int ret, unlock = 1; 286 int ret, unlock = 1;
287 287
288 trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, 288 trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
@@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
385 * drop out in that case as it's not worth handling here. 385 * drop out in that case as it's not worth handling here.
386 */ 386 */
387 last = list_entry(pages->prev, struct page, lru); 387 last = list_entry(pages->prev, struct page, lru);
388 start = (loff_t)last->index << PAGE_CACHE_SHIFT; 388 start = (loff_t)last->index << PAGE_SHIFT;
389 if (start >= i_size_read(inode)) 389 if (start >= i_size_read(inode))
390 goto out_unlock; 390 goto out_unlock;
391 391
@@ -511,12 +511,12 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
511 unsigned int *start, 511 unsigned int *start,
512 unsigned int *end) 512 unsigned int *end)
513{ 513{
514 unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE; 514 unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
515 515
516 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) { 516 if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
517 unsigned int cpp; 517 unsigned int cpp;
518 518
519 cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits); 519 cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
520 520
521 cluster_start = cpos % cpp; 521 cluster_start = cpos % cpp;
522 cluster_start = cluster_start << osb->s_clustersize_bits; 522 cluster_start = cluster_start << osb->s_clustersize_bits;
@@ -684,13 +684,13 @@ next_bh:
684 return ret; 684 return ret;
685} 685}
686 686
687#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE) 687#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
688#define OCFS2_MAX_CTXT_PAGES 1 688#define OCFS2_MAX_CTXT_PAGES 1
689#else 689#else
690#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE) 690#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
691#endif 691#endif
692 692
693#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE) 693#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
694 694
695struct ocfs2_unwritten_extent { 695struct ocfs2_unwritten_extent {
696 struct list_head ue_node; 696 struct list_head ue_node;
@@ -785,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
785 if (pages[i]) { 785 if (pages[i]) {
786 unlock_page(pages[i]); 786 unlock_page(pages[i]);
787 mark_page_accessed(pages[i]); 787 mark_page_accessed(pages[i]);
788 page_cache_release(pages[i]); 788 put_page(pages[i]);
789 } 789 }
790 } 790 }
791} 791}
@@ -808,7 +808,7 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
808 } 808 }
809 } 809 }
810 mark_page_accessed(wc->w_target_page); 810 mark_page_accessed(wc->w_target_page);
811 page_cache_release(wc->w_target_page); 811 put_page(wc->w_target_page);
812 } 812 }
813 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); 813 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
814} 814}
@@ -857,7 +857,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
857 wc->w_di_bh = di_bh; 857 wc->w_di_bh = di_bh;
858 wc->w_type = type; 858 wc->w_type = type;
859 859
860 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) 860 if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
861 wc->w_large_pages = 1; 861 wc->w_large_pages = 1;
862 else 862 else
863 wc->w_large_pages = 0; 863 wc->w_large_pages = 0;
@@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode,
920 loff_t user_pos, unsigned user_len) 920 loff_t user_pos, unsigned user_len)
921{ 921{
922 int i; 922 int i;
923 unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), 923 unsigned from = user_pos & (PAGE_SIZE - 1),
924 to = user_pos + user_len; 924 to = user_pos + user_len;
925 struct page *tmppage; 925 struct page *tmppage;
926 926
@@ -960,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
960 (page_offset(page) <= user_pos)); 960 (page_offset(page) <= user_pos));
961 961
962 if (page == wc->w_target_page) { 962 if (page == wc->w_target_page) {
963 map_from = user_pos & (PAGE_CACHE_SIZE - 1); 963 map_from = user_pos & (PAGE_SIZE - 1);
964 map_to = map_from + user_len; 964 map_to = map_from + user_len;
965 965
966 if (new) 966 if (new)
@@ -1034,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1034 struct inode *inode = mapping->host; 1034 struct inode *inode = mapping->host;
1035 loff_t last_byte; 1035 loff_t last_byte;
1036 1036
1037 target_index = user_pos >> PAGE_CACHE_SHIFT; 1037 target_index = user_pos >> PAGE_SHIFT;
1038 1038
1039 /* 1039 /*
1040 * Figure out how many pages we'll be manipulating here. For 1040 * Figure out how many pages we'll be manipulating here. For
@@ -1053,14 +1053,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1053 */ 1053 */
1054 last_byte = max(user_pos + user_len, i_size_read(inode)); 1054 last_byte = max(user_pos + user_len, i_size_read(inode));
1055 BUG_ON(last_byte < 1); 1055 BUG_ON(last_byte < 1);
1056 end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1; 1056 end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
1057 if ((start + wc->w_num_pages) > end_index) 1057 if ((start + wc->w_num_pages) > end_index)
1058 wc->w_num_pages = end_index - start; 1058 wc->w_num_pages = end_index - start;
1059 } else { 1059 } else {
1060 wc->w_num_pages = 1; 1060 wc->w_num_pages = 1;
1061 start = target_index; 1061 start = target_index;
1062 } 1062 }
1063 end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT; 1063 end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
1064 1064
1065 for(i = 0; i < wc->w_num_pages; i++) { 1065 for(i = 0; i < wc->w_num_pages; i++) {
1066 index = start + i; 1066 index = start + i;
@@ -1082,7 +1082,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1082 goto out; 1082 goto out;
1083 } 1083 }
1084 1084
1085 page_cache_get(mmap_page); 1085 get_page(mmap_page);
1086 wc->w_pages[i] = mmap_page; 1086 wc->w_pages[i] = mmap_page;
1087 wc->w_target_locked = true; 1087 wc->w_target_locked = true;
1088 } else if (index >= target_index && index <= end_index && 1088 } else if (index >= target_index && index <= end_index &&
@@ -1272,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1272{ 1272{
1273 struct ocfs2_write_cluster_desc *desc; 1273 struct ocfs2_write_cluster_desc *desc;
1274 1274
1275 wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1); 1275 wc->w_target_from = pos & (PAGE_SIZE - 1);
1276 wc->w_target_to = wc->w_target_from + len; 1276 wc->w_target_to = wc->w_target_from + len;
1277 1277
1278 if (alloc == 0) 1278 if (alloc == 0)
@@ -1309,7 +1309,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1309 &wc->w_target_to); 1309 &wc->w_target_to);
1310 } else { 1310 } else {
1311 wc->w_target_from = 0; 1311 wc->w_target_from = 0;
1312 wc->w_target_to = PAGE_CACHE_SIZE; 1312 wc->w_target_to = PAGE_SIZE;
1313 } 1313 }
1314} 1314}
1315 1315
@@ -1981,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
1981 struct page *page, void *fsdata) 1981 struct page *page, void *fsdata)
1982{ 1982{
1983 int i, ret; 1983 int i, ret;
1984 unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1); 1984 unsigned from, to, start = pos & (PAGE_SIZE - 1);
1985 struct inode *inode = mapping->host; 1985 struct inode *inode = mapping->host;
1986 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1986 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1987 struct ocfs2_write_ctxt *wc = fsdata; 1987 struct ocfs2_write_ctxt *wc = fsdata;
@@ -2027,8 +2027,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
2027 from = wc->w_target_from; 2027 from = wc->w_target_from;
2028 to = wc->w_target_to; 2028 to = wc->w_target_to;
2029 2029
2030 BUG_ON(from > PAGE_CACHE_SIZE || 2030 BUG_ON(from > PAGE_SIZE ||
2031 to > PAGE_CACHE_SIZE || 2031 to > PAGE_SIZE ||
2032 to < from); 2032 to < from);
2033 } else { 2033 } else {
2034 /* 2034 /*
@@ -2037,7 +2037,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
2037 * to flush their entire range. 2037 * to flush their entire range.
2038 */ 2038 */
2039 from = 0; 2039 from = 0;
2040 to = PAGE_CACHE_SIZE; 2040 to = PAGE_SIZE;
2041 } 2041 }
2042 2042
2043 if (page_has_buffers(tmppage)) { 2043 if (page_has_buffers(tmppage)) {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index bd15929b5f92..1934abb6b680 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
417 bio->bi_private = wc; 417 bio->bi_private = wc;
418 bio->bi_end_io = o2hb_bio_end_io; 418 bio->bi_end_io = o2hb_bio_end_io;
419 419
420 vec_start = (cs << bits) % PAGE_CACHE_SIZE; 420 vec_start = (cs << bits) % PAGE_SIZE;
421 while(cs < max_slots) { 421 while(cs < max_slots) {
422 current_page = cs / spp; 422 current_page = cs / spp;
423 page = reg->hr_slot_data[current_page]; 423 page = reg->hr_slot_data[current_page];
424 424
425 vec_len = min(PAGE_CACHE_SIZE - vec_start, 425 vec_len = min(PAGE_SIZE - vec_start,
426 (max_slots-cs) * (PAGE_CACHE_SIZE/spp) ); 426 (max_slots-cs) * (PAGE_SIZE/spp) );
427 427
428 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", 428 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
429 current_page, vec_len, vec_start); 429 current_page, vec_len, vec_start);
@@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
431 len = bio_add_page(bio, page, vec_len, vec_start); 431 len = bio_add_page(bio, page, vec_len, vec_start);
432 if (len != vec_len) break; 432 if (len != vec_len) break;
433 433
434 cs += vec_len / (PAGE_CACHE_SIZE/spp); 434 cs += vec_len / (PAGE_SIZE/spp);
435 vec_start = 0; 435 vec_start = 0;
436 } 436 }
437 437
@@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
1576 1576
1577static void o2hb_init_region_params(struct o2hb_region *reg) 1577static void o2hb_init_region_params(struct o2hb_region *reg)
1578{ 1578{
1579 reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits; 1579 reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
1580 reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; 1580 reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
1581 1581
1582 mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n", 1582 mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 03768bb3aab1..47b3b2d4e775 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb,
571 int silent) 571 int silent)
572{ 572{
573 sb->s_maxbytes = MAX_LFS_FILESIZE; 573 sb->s_maxbytes = MAX_LFS_FILESIZE;
574 sb->s_blocksize = PAGE_CACHE_SIZE; 574 sb->s_blocksize = PAGE_SIZE;
575 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 575 sb->s_blocksize_bits = PAGE_SHIFT;
576 sb->s_magic = DLMFS_MAGIC; 576 sb->s_magic = DLMFS_MAGIC;
577 sb->s_op = &dlmfs_ops; 577 sb->s_op = &dlmfs_ops;
578 sb->s_root = d_make_root(dlmfs_get_root_inode(sb)); 578 sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c18ab45f8d21..5308841756be 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
770{ 770{
771 struct address_space *mapping = inode->i_mapping; 771 struct address_space *mapping = inode->i_mapping;
772 struct page *page; 772 struct page *page;
773 unsigned long index = abs_from >> PAGE_CACHE_SHIFT; 773 unsigned long index = abs_from >> PAGE_SHIFT;
774 handle_t *handle; 774 handle_t *handle;
775 int ret = 0; 775 int ret = 0;
776 unsigned zero_from, zero_to, block_start, block_end; 776 unsigned zero_from, zero_to, block_start, block_end;
777 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 777 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
778 778
779 BUG_ON(abs_from >= abs_to); 779 BUG_ON(abs_from >= abs_to);
780 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); 780 BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
781 BUG_ON(abs_from & (inode->i_blkbits - 1)); 781 BUG_ON(abs_from & (inode->i_blkbits - 1));
782 782
783 handle = ocfs2_zero_start_ordered_transaction(inode, di_bh); 783 handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
@@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
794 } 794 }
795 795
796 /* Get the offsets within the page that we want to zero */ 796 /* Get the offsets within the page that we want to zero */
797 zero_from = abs_from & (PAGE_CACHE_SIZE - 1); 797 zero_from = abs_from & (PAGE_SIZE - 1);
798 zero_to = abs_to & (PAGE_CACHE_SIZE - 1); 798 zero_to = abs_to & (PAGE_SIZE - 1);
799 if (!zero_to) 799 if (!zero_to)
800 zero_to = PAGE_CACHE_SIZE; 800 zero_to = PAGE_SIZE;
801 801
802 trace_ocfs2_write_zero_page( 802 trace_ocfs2_write_zero_page(
803 (unsigned long long)OCFS2_I(inode)->ip_blkno, 803 (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
851 851
852out_unlock: 852out_unlock:
853 unlock_page(page); 853 unlock_page(page);
854 page_cache_release(page); 854 put_page(page);
855out_commit_trans: 855out_commit_trans:
856 if (handle) 856 if (handle)
857 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 857 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -959,7 +959,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
959 BUG_ON(range_start >= range_end); 959 BUG_ON(range_start >= range_end);
960 960
961 while (zero_pos < range_end) { 961 while (zero_pos < range_end) {
962 next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; 962 next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
963 if (next_pos > range_end) 963 if (next_pos > range_end)
964 next_pos = range_end; 964 next_pos = range_end;
965 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh); 965 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9ea081f4e6e4..71545ad4628c 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
65 struct inode *inode = file_inode(file); 65 struct inode *inode = file_inode(file);
66 struct address_space *mapping = inode->i_mapping; 66 struct address_space *mapping = inode->i_mapping;
67 loff_t pos = page_offset(page); 67 loff_t pos = page_offset(page);
68 unsigned int len = PAGE_CACHE_SIZE; 68 unsigned int len = PAGE_SIZE;
69 pgoff_t last_index; 69 pgoff_t last_index;
70 struct page *locked_page = NULL; 70 struct page *locked_page = NULL;
71 void *fsdata; 71 void *fsdata;
72 loff_t size = i_size_read(inode); 72 loff_t size = i_size_read(inode);
73 73
74 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 74 last_index = (size - 1) >> PAGE_SHIFT;
75 75
76 /* 76 /*
77 * There are cases that lead to the page no longer bebongs to the 77 * There are cases that lead to the page no longer bebongs to the
@@ -102,7 +102,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
102 * because the "write" would invalidate their data. 102 * because the "write" would invalidate their data.
103 */ 103 */
104 if (page->index == last_index) 104 if (page->index == last_index)
105 len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; 105 len = ((size - 1) & ~PAGE_MASK) + 1;
106 106
107 ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, 107 ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
108 &locked_page, &fsdata, di_bh, page); 108 &locked_page, &fsdata, di_bh, page);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6cf6538a0651..e63af7ddfe68 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -822,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
822 u32 clusters = pg_index; 822 u32 clusters = pg_index;
823 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; 823 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
824 824
825 if (unlikely(PAGE_CACHE_SHIFT > cbits)) 825 if (unlikely(PAGE_SHIFT > cbits))
826 clusters = pg_index << (PAGE_CACHE_SHIFT - cbits); 826 clusters = pg_index << (PAGE_SHIFT - cbits);
827 else if (PAGE_CACHE_SHIFT < cbits) 827 else if (PAGE_SHIFT < cbits)
828 clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT); 828 clusters = pg_index >> (cbits - PAGE_SHIFT);
829 829
830 return clusters; 830 return clusters;
831} 831}
@@ -839,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
839 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; 839 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
840 pgoff_t index = clusters; 840 pgoff_t index = clusters;
841 841
842 if (PAGE_CACHE_SHIFT > cbits) { 842 if (PAGE_SHIFT > cbits) {
843 index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits); 843 index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
844 } else if (PAGE_CACHE_SHIFT < cbits) { 844 } else if (PAGE_SHIFT < cbits) {
845 index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT); 845 index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
846 } 846 }
847 847
848 return index; 848 return index;
@@ -853,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
853 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; 853 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
854 unsigned int pages_per_cluster = 1; 854 unsigned int pages_per_cluster = 1;
855 855
856 if (PAGE_CACHE_SHIFT < cbits) 856 if (PAGE_SHIFT < cbits)
857 pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT); 857 pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
858 858
859 return pages_per_cluster; 859 return pages_per_cluster;
860} 860}
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3892f3c079ca..ab6a6cdcf91c 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
867 int status = 0; 867 int status = 0;
868 868
869 trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type); 869 trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
870 if (!sb_has_quota_loaded(sb, type)) {
871 status = -ESRCH;
872 goto out;
873 }
870 status = ocfs2_lock_global_qf(info, 0); 874 status = ocfs2_lock_global_qf(info, 0);
871 if (status < 0) 875 if (status < 0)
872 goto out; 876 goto out;
@@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
878out_global: 882out_global:
879 ocfs2_unlock_global_qf(info, 0); 883 ocfs2_unlock_global_qf(info, 0);
880out: 884out:
881 /* Avoid logging ENOENT since it just means there isn't next ID */ 885 /*
882 if (status && status != -ENOENT) 886 * Avoid logging ENOENT since it just means there isn't next ID and
887 * ESRCH which means quota isn't enabled for the filesystem.
888 */
889 if (status && status != -ENOENT && status != -ESRCH)
883 mlog_errno(status); 890 mlog_errno(status);
884 return status; 891 return status;
885} 892}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3eff031aaf26..744d5d90c363 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2937 end = i_size_read(inode); 2937 end = i_size_read(inode);
2938 2938
2939 while (offset < end) { 2939 while (offset < end) {
2940 page_index = offset >> PAGE_CACHE_SHIFT; 2940 page_index = offset >> PAGE_SHIFT;
2941 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; 2941 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
2942 if (map_end > end) 2942 if (map_end > end)
2943 map_end = end; 2943 map_end = end;
2944 2944
2945 /* from, to is the offset within the page. */ 2945 /* from, to is the offset within the page. */
2946 from = offset & (PAGE_CACHE_SIZE - 1); 2946 from = offset & (PAGE_SIZE - 1);
2947 to = PAGE_CACHE_SIZE; 2947 to = PAGE_SIZE;
2948 if (map_end & (PAGE_CACHE_SIZE - 1)) 2948 if (map_end & (PAGE_SIZE - 1))
2949 to = map_end & (PAGE_CACHE_SIZE - 1); 2949 to = map_end & (PAGE_SIZE - 1);
2950 2950
2951 page = find_or_create_page(mapping, page_index, GFP_NOFS); 2951 page = find_or_create_page(mapping, page_index, GFP_NOFS);
2952 if (!page) { 2952 if (!page) {
@@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2956 } 2956 }
2957 2957
2958 /* 2958 /*
2959 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page 2959 * In case PAGE_SIZE <= CLUSTER_SIZE, This page
2960 * can't be dirtied before we CoW it out. 2960 * can't be dirtied before we CoW it out.
2961 */ 2961 */
2962 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2962 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2963 BUG_ON(PageDirty(page)); 2963 BUG_ON(PageDirty(page));
2964 2964
2965 if (!PageUptodate(page)) { 2965 if (!PageUptodate(page)) {
@@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2987 mark_page_accessed(page); 2987 mark_page_accessed(page);
2988unlock: 2988unlock:
2989 unlock_page(page); 2989 unlock_page(page);
2990 page_cache_release(page); 2990 put_page(page);
2991 page = NULL; 2991 page = NULL;
2992 offset = map_end; 2992 offset = map_end;
2993 if (ret) 2993 if (ret)
@@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
3165 } 3165 }
3166 3166
3167 while (offset < end) { 3167 while (offset < end) {
3168 page_index = offset >> PAGE_CACHE_SHIFT; 3168 page_index = offset >> PAGE_SHIFT;
3169 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; 3169 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
3170 if (map_end > end) 3170 if (map_end > end)
3171 map_end = end; 3171 map_end = end;
3172 3172
@@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
3182 mark_page_accessed(page); 3182 mark_page_accessed(page);
3183 3183
3184 unlock_page(page); 3184 unlock_page(page);
3185 page_cache_release(page); 3185 put_page(page);
3186 page = NULL; 3186 page = NULL;
3187 offset = map_end; 3187 offset = map_end;
3188 if (ret) 3188 if (ret)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7db631e1c8b0..d7cae3327de5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -605,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
605 /* 605 /*
606 * We might be limited by page cache size. 606 * We might be limited by page cache size.
607 */ 607 */
608 if (bytes > PAGE_CACHE_SIZE) { 608 if (bytes > PAGE_SIZE) {
609 bytes = PAGE_CACHE_SIZE; 609 bytes = PAGE_SIZE;
610 trim = 1; 610 trim = 1;
611 /* 611 /*
612 * Shift by 31 here so that we don't get larger than 612 * Shift by 31 here so that we don't get larger than
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index f30b6ecacdd1..324f0af40d7b 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -153,7 +153,6 @@ static int orangefs_readdir(struct file *file, struct dir_context *ctx)
153 struct dentry *dentry = file->f_path.dentry; 153 struct dentry *dentry = file->f_path.dentry;
154 struct orangefs_kernel_op_s *new_op = NULL; 154 struct orangefs_kernel_op_s *new_op = NULL;
155 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode); 155 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
156 int buffer_full = 0;
157 struct orangefs_readdir_response_s readdir_response; 156 struct orangefs_readdir_response_s readdir_response;
158 void *dents_buf; 157 void *dents_buf;
159 int i = 0; 158 int i = 0;
@@ -235,7 +234,7 @@ get_new_buffer_index:
235 if (ret == -EIO && op_state_purged(new_op)) { 234 if (ret == -EIO && op_state_purged(new_op)) {
236 gossip_err("%s: Client is down. Aborting readdir call.\n", 235 gossip_err("%s: Client is down. Aborting readdir call.\n",
237 __func__); 236 __func__);
238 goto out_slot; 237 goto out_free_op;
239 } 238 }
240 239
241 if (ret < 0 || new_op->downcall.status != 0) { 240 if (ret < 0 || new_op->downcall.status != 0) {
@@ -244,14 +243,14 @@ get_new_buffer_index:
244 new_op->downcall.status); 243 new_op->downcall.status);
245 if (ret >= 0) 244 if (ret >= 0)
246 ret = new_op->downcall.status; 245 ret = new_op->downcall.status;
247 goto out_slot; 246 goto out_free_op;
248 } 247 }
249 248
250 dents_buf = new_op->downcall.trailer_buf; 249 dents_buf = new_op->downcall.trailer_buf;
251 if (dents_buf == NULL) { 250 if (dents_buf == NULL) {
252 gossip_err("Invalid NULL buffer in readdir response\n"); 251 gossip_err("Invalid NULL buffer in readdir response\n");
253 ret = -ENOMEM; 252 ret = -ENOMEM;
254 goto out_slot; 253 goto out_free_op;
255 } 254 }
256 255
257 bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size, 256 bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size,
@@ -350,8 +349,7 @@ get_new_buffer_index:
350 /* 349 /*
351 * Did we hit the end of the directory? 350 * Did we hit the end of the directory?
352 */ 351 */
353 if (readdir_response.token == ORANGEFS_READDIR_END && 352 if (readdir_response.token == ORANGEFS_READDIR_END) {
354 !buffer_full) {
355 gossip_debug(GOSSIP_DIR_DEBUG, 353 gossip_debug(GOSSIP_DIR_DEBUG,
356 "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n"); 354 "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
357 ctx->pos = ORANGEFS_READDIR_END; 355 ctx->pos = ORANGEFS_READDIR_END;
@@ -363,8 +361,6 @@ out_destroy_handle:
363out_vfree: 361out_vfree:
364 gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf); 362 gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf);
365 vfree(dents_buf); 363 vfree(dents_buf);
366out_slot:
367 orangefs_readdir_index_put(buffer_index);
368out_free_op: 364out_free_op:
369 op_release(new_op); 365 op_release(new_op);
370 gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret); 366 gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 2382e267b49e..85640e955cde 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -18,8 +18,8 @@ static int read_one_page(struct page *page)
18 int max_block; 18 int max_block;
19 ssize_t bytes_read = 0; 19 ssize_t bytes_read = 0;
20 struct inode *inode = page->mapping->host; 20 struct inode *inode = page->mapping->host;
21 const __u32 blocksize = PAGE_CACHE_SIZE; /* inode->i_blksize */ 21 const __u32 blocksize = PAGE_SIZE; /* inode->i_blksize */
22 const __u32 blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */ 22 const __u32 blockbits = PAGE_SHIFT; /* inode->i_blkbits */
23 struct iov_iter to; 23 struct iov_iter to;
24 struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE}; 24 struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
25 25
@@ -86,7 +86,7 @@ static int orangefs_readpages(struct file *file,
86 "failure adding page to cache, read_one_page returned: %d\n", 86 "failure adding page to cache, read_one_page returned: %d\n",
87 ret); 87 ret);
88 } else { 88 } else {
89 page_cache_release(page); 89 put_page(page);
90 } 90 }
91 } 91 }
92 BUG_ON(!list_empty(pages)); 92 BUG_ON(!list_empty(pages));
@@ -204,22 +204,8 @@ static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
204 if (ret != 0) 204 if (ret != 0)
205 return ret; 205 return ret;
206 206
207 /* 207 if (orig_size != i_size_read(inode))
208 * Only change the c/mtime if we are changing the size or we are
209 * explicitly asked to change it. This handles the semantic difference
210 * between truncate() and ftruncate() as implemented in the VFS.
211 *
212 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
213 * special case where we need to update the times despite not having
214 * these flags set. For all other operations the VFS set these flags
215 * explicitly if it wants a timestamp update.
216 */
217 if (orig_size != i_size_read(inode) &&
218 !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
219 iattr->ia_ctime = iattr->ia_mtime =
220 current_fs_time(inode->i_sb);
221 iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; 208 iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
222 }
223 209
224 return ret; 210 return ret;
225} 211}
@@ -328,7 +314,7 @@ static int orangefs_init_iops(struct inode *inode)
328 case S_IFREG: 314 case S_IFREG:
329 inode->i_op = &orangefs_file_inode_operations; 315 inode->i_op = &orangefs_file_inode_operations;
330 inode->i_fop = &orangefs_file_operations; 316 inode->i_fop = &orangefs_file_operations;
331 inode->i_blkbits = PAGE_CACHE_SHIFT; 317 inode->i_blkbits = PAGE_SHIFT;
332 break; 318 break;
333 case S_IFLNK: 319 case S_IFLNK:
334 inode->i_op = &orangefs_symlink_inode_operations; 320 inode->i_op = &orangefs_symlink_inode_operations;
@@ -456,7 +442,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
456 inode->i_uid = current_fsuid(); 442 inode->i_uid = current_fsuid();
457 inode->i_gid = current_fsgid(); 443 inode->i_gid = current_fsgid();
458 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 444 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
459 inode->i_size = PAGE_CACHE_SIZE; 445 inode->i_size = PAGE_SIZE;
460 inode->i_rdev = dev; 446 inode->i_rdev = dev;
461 447
462 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); 448 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 1f8acc9f9a88..75375e90a63f 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -170,7 +170,7 @@ orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
170 int i; 170 int i;
171 171
172 for (i = 0; i < bufmap->page_count; i++) 172 for (i = 0; i < bufmap->page_count; i++)
173 page_cache_release(bufmap->page_array[i]); 173 put_page(bufmap->page_array[i]);
174} 174}
175 175
176static void 176static void
@@ -299,7 +299,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
299 299
300 for (i = 0; i < ret; i++) { 300 for (i = 0; i < ret; i++) {
301 SetPageError(bufmap->page_array[i]); 301 SetPageError(bufmap->page_array[i]);
302 page_cache_release(bufmap->page_array[i]); 302 put_page(bufmap->page_array[i]);
303 } 303 }
304 return -ENOMEM; 304 return -ENOMEM;
305 } 305 }
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 19670b8b4053..1714a737d556 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -126,8 +126,7 @@ out:
126 126
127void orangefs_debugfs_cleanup(void) 127void orangefs_debugfs_cleanup(void)
128{ 128{
129 if (debug_dir) 129 debugfs_remove_recursive(debug_dir);
130 debugfs_remove_recursive(debug_dir);
131} 130}
132 131
133/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ 132/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 40f5163b56aa..2d129b5886ee 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -303,7 +303,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
303 } 303 }
304 break; 304 break;
305 case S_IFDIR: 305 case S_IFDIR:
306 inode->i_size = PAGE_CACHE_SIZE; 306 inode->i_size = PAGE_SIZE;
307 orangefs_inode->blksize = (1 << inode->i_blkbits); 307 orangefs_inode->blksize = (1 << inode->i_blkbits);
308 spin_lock(&inode->i_lock); 308 spin_lock(&inode->i_lock);
309 inode_set_bytes(inode, inode->i_size); 309 inode_set_bytes(inode, inode->i_size);
@@ -315,9 +315,13 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
315 inode->i_size = (loff_t)strlen(new_op-> 315 inode->i_size = (loff_t)strlen(new_op->
316 downcall.resp.getattr.link_target); 316 downcall.resp.getattr.link_target);
317 orangefs_inode->blksize = (1 << inode->i_blkbits); 317 orangefs_inode->blksize = (1 << inode->i_blkbits);
318 strlcpy(orangefs_inode->link_target, 318 ret = strscpy(orangefs_inode->link_target,
319 new_op->downcall.resp.getattr.link_target, 319 new_op->downcall.resp.getattr.link_target,
320 ORANGEFS_NAME_MAX); 320 ORANGEFS_NAME_MAX);
321 if (ret == -E2BIG) {
322 ret = -EIO;
323 goto out;
324 }
321 inode->i_link = orangefs_inode->link_target; 325 inode->i_link = orangefs_inode->link_target;
322 } 326 }
323 break; 327 break;
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 45ce4ff4cbc7..1efc6f8a5224 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -1,3 +1,4 @@
1#include <linux/kernel.h>
1#include <linux/types.h> 2#include <linux/types.h>
2#include <linux/spinlock_types.h> 3#include <linux/spinlock_types.h>
3#include <linux/slab.h> 4#include <linux/slab.h>
@@ -74,8 +75,8 @@ static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
74 void *p, int size) 75 void *p, int size)
75{ 76{
76 77
77 memset(p, 0, size);
78 memcpy(p, kh->u, 16); 78 memcpy(p, kh->u, 16);
79 memset(p + 16, 0, size - 16);
79 80
80} 81}
81 82
@@ -407,7 +408,7 @@ enum {
407 * space. Zero signifies the upstream version of the kernel module. 408 * space. Zero signifies the upstream version of the kernel module.
408 */ 409 */
409#define ORANGEFS_KERNEL_PROTO_VERSION 0 410#define ORANGEFS_KERNEL_PROTO_VERSION 0
410#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904 411#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20903
411 412
412/* 413/*
413 * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl. 414 * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl.
@@ -427,26 +428,28 @@ struct ORANGEFS_dev_map_desc {
427/* gossip.h *****************************************************************/ 428/* gossip.h *****************************************************************/
428 429
429#ifdef GOSSIP_DISABLE_DEBUG 430#ifdef GOSSIP_DISABLE_DEBUG
430#define gossip_debug(mask, format, f...) do {} while (0) 431#define gossip_debug(mask, fmt, ...) \
432do { \
433 if (0) \
434 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
435} while (0)
431#else 436#else
432extern __u64 gossip_debug_mask; 437extern __u64 gossip_debug_mask;
433extern struct client_debug_mask client_debug_mask; 438extern struct client_debug_mask client_debug_mask;
434 439
435/* try to avoid function call overhead by checking masks in macro */ 440/* try to avoid function call overhead by checking masks in macro */
436#define gossip_debug(mask, format, f...) \ 441#define gossip_debug(mask, fmt, ...) \
437do { \ 442do { \
438 if (gossip_debug_mask & mask) \ 443 if (gossip_debug_mask & (mask)) \
439 printk(format, ##f); \ 444 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
440} while (0) 445} while (0)
441#endif /* GOSSIP_DISABLE_DEBUG */ 446#endif /* GOSSIP_DISABLE_DEBUG */
442 447
443/* do file and line number printouts w/ the GNU preprocessor */ 448/* do file and line number printouts w/ the GNU preprocessor */
444#define gossip_ldebug(mask, format, f...) \ 449#define gossip_ldebug(mask, fmt, ...) \
445 gossip_debug(mask, "%s: " format, __func__, ##f) 450 gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)
446 451
447#define gossip_err printk 452#define gossip_err pr_err
448#define gossip_lerr(format, f...) \ 453#define gossip_lerr(fmt, ...) \
449 gossip_err("%s line %d: " format, \ 454 gossip_err("%s line %d: " fmt, \
450 __FILE__, \ 455 __FILE__, __LINE__, ##__VA_ARGS__)
451 __LINE__, \
452 ##f)
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index ef5da7538cd5..63a6280d8c3a 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -73,10 +73,6 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
73 "%s: prefix %s name %s, buffer_size %zd\n", 73 "%s: prefix %s name %s, buffer_size %zd\n",
74 __func__, prefix, name, size); 74 __func__, prefix, name, size);
75 75
76 if (name == NULL || (size > 0 && buffer == NULL)) {
77 gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
78 return -EINVAL;
79 }
80 if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) { 76 if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
81 gossip_err("Invalid key length (%d)\n", 77 gossip_err("Invalid key length (%d)\n",
82 (int)(strlen(name) + strlen(prefix))); 78 (int)(strlen(name) + strlen(prefix)));
@@ -146,8 +142,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
146 goto out_release_op; 142 goto out_release_op;
147 } 143 }
148 144
149 memset(buffer, 0, size);
150 memcpy(buffer, new_op->downcall.resp.getxattr.val, length); 145 memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
146 memset(buffer + length, 0, size - length);
151 gossip_debug(GOSSIP_XATTR_DEBUG, 147 gossip_debug(GOSSIP_XATTR_DEBUG,
152 "orangefs_inode_getxattr: inode %pU " 148 "orangefs_inode_getxattr: inode %pU "
153 "key %s key_sz %d, val_len %d\n", 149 "key %s key_sz %d, val_len %d\n",
@@ -239,8 +235,7 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
239 "%s: prefix %s, name %s, buffer_size %zd\n", 235 "%s: prefix %s, name %s, buffer_size %zd\n",
240 __func__, prefix, name, size); 236 __func__, prefix, name, size);
241 237
242 if (size < 0 || 238 if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
243 size >= ORANGEFS_MAX_XATTR_VALUELEN ||
244 flags < 0) { 239 flags < 0) {
245 gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", 240 gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
246 (int)size, 241 (int)size,
@@ -248,12 +243,6 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
248 return -EINVAL; 243 return -EINVAL;
249 } 244 }
250 245
251 if (name == NULL ||
252 (size > 0 && value == NULL)) {
253 gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
254 return -EINVAL;
255 }
256
257 internal_flag = convert_to_internal_xattr_flags(flags); 246 internal_flag = convert_to_internal_xattr_flags(flags);
258 247
259 if (prefix) { 248 if (prefix) {
@@ -353,10 +342,6 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
353 gossip_err("%s: bogus NULL pointers\n", __func__); 342 gossip_err("%s: bogus NULL pointers\n", __func__);
354 return -EINVAL; 343 return -EINVAL;
355 } 344 }
356 if (size < 0) {
357 gossip_err("Invalid size (%d)\n", (int)size);
358 return -EINVAL;
359 }
360 345
361 down_read(&orangefs_inode->xattr_sem); 346 down_read(&orangefs_inode->xattr_sem);
362 new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR); 347 new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ef64984c9bbc..5d972e6cd3fe 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
295 } 295 }
296} 296}
297 297
298static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
299{
300 struct dentry *real;
301
302 if (d_is_dir(dentry)) {
303 if (!inode || inode == d_inode(dentry))
304 return dentry;
305 goto bug;
306 }
307
308 real = ovl_dentry_upper(dentry);
309 if (real && (!inode || inode == d_inode(real)))
310 return real;
311
312 real = ovl_dentry_lower(dentry);
313 if (!real)
314 goto bug;
315
316 if (!inode || inode == d_inode(real))
317 return real;
318
319 /* Handle recursion */
320 if (real->d_flags & DCACHE_OP_REAL)
321 return real->d_op->d_real(real, inode);
322
323bug:
324 WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
325 inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
326 return dentry;
327}
328
298static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags) 329static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
299{ 330{
300 struct ovl_entry *oe = dentry->d_fsdata; 331 struct ovl_entry *oe = dentry->d_fsdata;
@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
339static const struct dentry_operations ovl_dentry_operations = { 370static const struct dentry_operations ovl_dentry_operations = {
340 .d_release = ovl_dentry_release, 371 .d_release = ovl_dentry_release,
341 .d_select_inode = ovl_d_select_inode, 372 .d_select_inode = ovl_d_select_inode,
373 .d_real = ovl_d_real,
342}; 374};
343 375
344static const struct dentry_operations ovl_reval_dentry_operations = { 376static const struct dentry_operations ovl_reval_dentry_operations = {
345 .d_release = ovl_dentry_release, 377 .d_release = ovl_dentry_release,
346 .d_select_inode = ovl_d_select_inode, 378 .d_select_inode = ovl_d_select_inode,
379 .d_real = ovl_d_real,
347 .d_revalidate = ovl_dentry_revalidate, 380 .d_revalidate = ovl_dentry_revalidate,
348 .d_weak_revalidate = ovl_dentry_weak_revalidate, 381 .d_weak_revalidate = ovl_dentry_weak_revalidate,
349}; 382};
diff --git a/fs/pipe.c b/fs/pipe.c
index ab8dad3ccb6a..0d3f5165cb0b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
134 if (page_count(page) == 1 && !pipe->tmp_page) 134 if (page_count(page) == 1 && !pipe->tmp_page)
135 pipe->tmp_page = page; 135 pipe->tmp_page = page;
136 else 136 else
137 page_cache_release(page); 137 put_page(page);
138} 138}
139 139
140/** 140/**
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
180 */ 180 */
181void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) 181void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
182{ 182{
183 page_cache_get(buf->page); 183 get_page(buf->page);
184} 184}
185EXPORT_SYMBOL(generic_pipe_buf_get); 185EXPORT_SYMBOL(generic_pipe_buf_get);
186 186
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm);
211void generic_pipe_buf_release(struct pipe_inode_info *pipe, 211void generic_pipe_buf_release(struct pipe_inode_info *pipe,
212 struct pipe_buffer *buf) 212 struct pipe_buffer *buf)
213{ 213{
214 page_cache_release(buf->page); 214 put_page(buf->page);
215} 215}
216EXPORT_SYMBOL(generic_pipe_buf_release); 216EXPORT_SYMBOL(generic_pipe_buf_release);
217 217
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9df431642042..229cb546bee0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
553 if (radix_tree_exceptional_entry(page)) 553 if (radix_tree_exceptional_entry(page))
554 mss->swap += PAGE_SIZE; 554 mss->swap += PAGE_SIZE;
555 else 555 else
556 page_cache_release(page); 556 put_page(page);
557 557
558 return; 558 return;
559 } 559 }
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 55bb57e6a30d..8afe10cf7df8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
279 if (!page) 279 if (!page)
280 return VM_FAULT_OOM; 280 return VM_FAULT_OOM;
281 if (!PageUptodate(page)) { 281 if (!PageUptodate(page)) {
282 offset = (loff_t) index << PAGE_CACHE_SHIFT; 282 offset = (loff_t) index << PAGE_SHIFT;
283 buf = __va((page_to_pfn(page) << PAGE_SHIFT)); 283 buf = __va((page_to_pfn(page) << PAGE_SHIFT));
284 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); 284 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
285 if (rc < 0) { 285 if (rc < 0) {
286 unlock_page(page); 286 unlock_page(page);
287 page_cache_release(page); 287 put_page(page);
288 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 288 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
289 } 289 }
290 SetPageUptodate(page); 290 SetPageUptodate(page);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index dc645b66cd79..45d6110744cb 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
420 pstore_sb = sb; 420 pstore_sb = sb;
421 421
422 sb->s_maxbytes = MAX_LFS_FILESIZE; 422 sb->s_maxbytes = MAX_LFS_FILESIZE;
423 sb->s_blocksize = PAGE_CACHE_SIZE; 423 sb->s_blocksize = PAGE_SIZE;
424 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 424 sb->s_blocksize_bits = PAGE_SHIFT;
425 sb->s_magic = PSTOREFS_MAGIC; 425 sb->s_magic = PSTOREFS_MAGIC;
426 sb->s_op = &pstore_ops; 426 sb->s_op = &pstore_ops;
427 sb->s_time_gran = 1; 427 sb->s_time_gran = 1;
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index e1f37278cf97..144ceda4948e 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
35static unsigned last_entry(struct inode *inode, unsigned long page_nr) 35static unsigned last_entry(struct inode *inode, unsigned long page_nr)
36{ 36{
37 unsigned long last_byte = inode->i_size; 37 unsigned long last_byte = inode->i_size;
38 last_byte -= page_nr << PAGE_CACHE_SHIFT; 38 last_byte -= page_nr << PAGE_SHIFT;
39 if (last_byte > PAGE_CACHE_SIZE) 39 if (last_byte > PAGE_SIZE)
40 last_byte = PAGE_CACHE_SIZE; 40 last_byte = PAGE_SIZE;
41 return last_byte / QNX6_DIR_ENTRY_SIZE; 41 return last_byte / QNX6_DIR_ENTRY_SIZE;
42} 42}
43 43
@@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
47{ 47{
48 struct qnx6_sb_info *sbi = QNX6_SB(sb); 48 struct qnx6_sb_info *sbi = QNX6_SB(sb);
49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ 49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
50 u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */ 50 u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
51 /* within page */ 51 /* within page */
52 u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK; 52 u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
53 struct address_space *mapping = sbi->longfile->i_mapping; 53 struct address_space *mapping = sbi->longfile->i_mapping;
54 struct page *page = read_mapping_page(mapping, n, NULL); 54 struct page *page = read_mapping_page(mapping, n, NULL);
55 if (IS_ERR(page)) 55 if (IS_ERR(page))
@@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
115 struct qnx6_sb_info *sbi = QNX6_SB(s); 115 struct qnx6_sb_info *sbi = QNX6_SB(s);
116 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); 116 loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
117 unsigned long npages = dir_pages(inode); 117 unsigned long npages = dir_pages(inode);
118 unsigned long n = pos >> PAGE_CACHE_SHIFT; 118 unsigned long n = pos >> PAGE_SHIFT;
119 unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; 119 unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
120 bool done = false; 120 bool done = false;
121 121
122 ctx->pos = pos; 122 ctx->pos = pos;
@@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
131 131
132 if (IS_ERR(page)) { 132 if (IS_ERR(page)) {
133 pr_err("%s(): read failed\n", __func__); 133 pr_err("%s(): read failed\n", __func__);
134 ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; 134 ctx->pos = (n + 1) << PAGE_SHIFT;
135 return PTR_ERR(page); 135 return PTR_ERR(page);
136 } 136 }
137 de = ((struct qnx6_dir_entry *)page_address(page)) + start; 137 de = ((struct qnx6_dir_entry *)page_address(page)) + start;
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 47bb1de07155..1192422a1c56 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
542 iget_failed(inode); 542 iget_failed(inode);
543 return ERR_PTR(-EIO); 543 return ERR_PTR(-EIO);
544 } 544 }
545 n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); 545 n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
546 offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); 546 offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
547 mapping = sbi->inodes->i_mapping; 547 mapping = sbi->inodes->i_mapping;
548 page = read_mapping_page(mapping, n, NULL); 548 page = read_mapping_page(mapping, n, NULL);
549 if (IS_ERR(page)) { 549 if (IS_ERR(page)) {
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index d3fb2b698800..f23b5c4a66ad 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
128static inline void qnx6_put_page(struct page *page) 128static inline void qnx6_put_page(struct page *page)
129{ 129{
130 kunmap(page); 130 kunmap(page);
131 page_cache_release(page); 131 put_page(page);
132} 132}
133 133
134extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, 134extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ba827daea5a0..ff21980d0119 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2047 struct quota_info *dqopt = sb_dqopt(sb); 2047 struct quota_info *dqopt = sb_dqopt(sb);
2048 int err; 2048 int err;
2049 2049
2050 if (!dqopt->ops[qid->type]->get_next_id) 2050 mutex_lock(&dqopt->dqonoff_mutex);
2051 return -ENOSYS; 2051 if (!sb_has_quota_active(sb, qid->type)) {
2052 err = -ESRCH;
2053 goto out;
2054 }
2055 if (!dqopt->ops[qid->type]->get_next_id) {
2056 err = -ENOSYS;
2057 goto out;
2058 }
2052 mutex_lock(&dqopt->dqio_mutex); 2059 mutex_lock(&dqopt->dqio_mutex);
2053 err = dqopt->ops[qid->type]->get_next_id(sb, qid); 2060 err = dqopt->ops[qid->type]->get_next_id(sb, qid);
2054 mutex_unlock(&dqopt->dqio_mutex); 2061 mutex_unlock(&dqopt->dqio_mutex);
2062out:
2063 mutex_unlock(&dqopt->dqonoff_mutex);
2055 2064
2056 return err; 2065 return err;
2057} 2066}
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 38981b037524..1ab6e6c2e60e 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
223 return err; 223 return err;
224 224
225 sb->s_maxbytes = MAX_LFS_FILESIZE; 225 sb->s_maxbytes = MAX_LFS_FILESIZE;
226 sb->s_blocksize = PAGE_CACHE_SIZE; 226 sb->s_blocksize = PAGE_SIZE;
227 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 227 sb->s_blocksize_bits = PAGE_SHIFT;
228 sb->s_magic = RAMFS_MAGIC; 228 sb->s_magic = RAMFS_MAGIC;
229 sb->s_op = &ramfs_ops; 229 sb->s_op = &ramfs_ops;
230 sb->s_time_gran = 1; 230 sb->s_time_gran = 1;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9424a4ba93a9..389773711de4 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
180 int partial = 0; 180 int partial = 0;
181 unsigned blocksize; 181 unsigned blocksize;
182 struct buffer_head *bh, *head; 182 struct buffer_head *bh, *head;
183 unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT; 183 unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
184 int new; 184 int new;
185 int logit = reiserfs_file_data_log(inode); 185 int logit = reiserfs_file_data_log(inode);
186 struct super_block *s = inode->i_sb; 186 struct super_block *s = inode->i_sb;
187 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; 187 int bh_per_page = PAGE_SIZE / s->s_blocksize;
188 struct reiserfs_transaction_handle th; 188 struct reiserfs_transaction_handle th;
189 int ret = 0; 189 int ret = 0;
190 190
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae9e5b308cf9..d5c2e9c865de 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
386 goto finished; 386 goto finished;
387 } 387 }
388 /* read file tail into part of page */ 388 /* read file tail into part of page */
389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); 389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
390 copy_item_head(&tmp_ih, ih); 390 copy_item_head(&tmp_ih, ih);
391 391
392 /* 392 /*
@@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode,
587 return -EIO; 587 return -EIO;
588 588
589 /* always try to read until the end of the block */ 589 /* always try to read until the end of the block */
590 tail_start = tail_offset & (PAGE_CACHE_SIZE - 1); 590 tail_start = tail_offset & (PAGE_SIZE - 1);
591 tail_end = (tail_start | (bh_result->b_size - 1)) + 1; 591 tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
592 592
593 index = tail_offset >> PAGE_CACHE_SHIFT; 593 index = tail_offset >> PAGE_SHIFT;
594 /* 594 /*
595 * hole_page can be zero in case of direct_io, we are sure 595 * hole_page can be zero in case of direct_io, we are sure
596 * that we cannot get here if we write with O_DIRECT into tail page 596 * that we cannot get here if we write with O_DIRECT into tail page
@@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode,
629unlock: 629unlock:
630 if (tail_page != hole_page) { 630 if (tail_page != hole_page) {
631 unlock_page(tail_page); 631 unlock_page(tail_page);
632 page_cache_release(tail_page); 632 put_page(tail_page);
633 } 633 }
634out: 634out:
635 return retval; 635 return retval;
@@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode,
2189 * we want the page with the last byte in the file, 2189 * we want the page with the last byte in the file,
2190 * not the page that will hold the next byte for appending 2190 * not the page that will hold the next byte for appending
2191 */ 2191 */
2192 unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; 2192 unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
2193 unsigned long pos = 0; 2193 unsigned long pos = 0;
2194 unsigned long start = 0; 2194 unsigned long start = 0;
2195 unsigned long blocksize = inode->i_sb->s_blocksize; 2195 unsigned long blocksize = inode->i_sb->s_blocksize;
2196 unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); 2196 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
2197 struct buffer_head *bh; 2197 struct buffer_head *bh;
2198 struct buffer_head *head; 2198 struct buffer_head *head;
2199 struct page *page; 2199 struct page *page;
@@ -2251,7 +2251,7 @@ out:
2251 2251
2252unlock: 2252unlock:
2253 unlock_page(page); 2253 unlock_page(page);
2254 page_cache_release(page); 2254 put_page(page);
2255 return error; 2255 return error;
2256} 2256}
2257 2257
@@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2265{ 2265{
2266 struct reiserfs_transaction_handle th; 2266 struct reiserfs_transaction_handle th;
2267 /* we want the offset for the first byte after the end of the file */ 2267 /* we want the offset for the first byte after the end of the file */
2268 unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2268 unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
2269 unsigned blocksize = inode->i_sb->s_blocksize; 2269 unsigned blocksize = inode->i_sb->s_blocksize;
2270 unsigned length; 2270 unsigned length;
2271 struct page *page = NULL; 2271 struct page *page = NULL;
@@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2345 } 2345 }
2346 } 2346 }
2347 unlock_page(page); 2347 unlock_page(page);
2348 page_cache_release(page); 2348 put_page(page);
2349 } 2349 }
2350 2350
2351 reiserfs_write_unlock(inode->i_sb); 2351 reiserfs_write_unlock(inode->i_sb);
@@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2354out: 2354out:
2355 if (page) { 2355 if (page) {
2356 unlock_page(page); 2356 unlock_page(page);
2357 page_cache_release(page); 2357 put_page(page);
2358 } 2358 }
2359 2359
2360 reiserfs_write_unlock(inode->i_sb); 2360 reiserfs_write_unlock(inode->i_sb);
@@ -2426,7 +2426,7 @@ research:
2426 } else if (is_direct_le_ih(ih)) { 2426 } else if (is_direct_le_ih(ih)) {
2427 char *p; 2427 char *p;
2428 p = page_address(bh_result->b_page); 2428 p = page_address(bh_result->b_page);
2429 p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1); 2429 p += (byte_offset - 1) & (PAGE_SIZE - 1);
2430 copy_size = ih_item_len(ih) - pos_in_item; 2430 copy_size = ih_item_len(ih) - pos_in_item;
2431 2431
2432 fs_gen = get_generation(inode->i_sb); 2432 fs_gen = get_generation(inode->i_sb);
@@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page,
2525 struct writeback_control *wbc) 2525 struct writeback_control *wbc)
2526{ 2526{
2527 struct inode *inode = page->mapping->host; 2527 struct inode *inode = page->mapping->host;
2528 unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; 2528 unsigned long end_index = inode->i_size >> PAGE_SHIFT;
2529 int error = 0; 2529 int error = 0;
2530 unsigned long block; 2530 unsigned long block;
2531 sector_t last_block; 2531 sector_t last_block;
@@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page,
2535 int checked = PageChecked(page); 2535 int checked = PageChecked(page);
2536 struct reiserfs_transaction_handle th; 2536 struct reiserfs_transaction_handle th;
2537 struct super_block *s = inode->i_sb; 2537 struct super_block *s = inode->i_sb;
2538 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; 2538 int bh_per_page = PAGE_SIZE / s->s_blocksize;
2539 th.t_trans_id = 0; 2539 th.t_trans_id = 0;
2540 2540
2541 /* no logging allowed when nonblocking or from PF_MEMALLOC */ 2541 /* no logging allowed when nonblocking or from PF_MEMALLOC */
@@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page,
2564 if (page->index >= end_index) { 2564 if (page->index >= end_index) {
2565 unsigned last_offset; 2565 unsigned last_offset;
2566 2566
2567 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2567 last_offset = inode->i_size & (PAGE_SIZE - 1);
2568 /* no file contents in this page */ 2568 /* no file contents in this page */
2569 if (page->index >= end_index + 1 || !last_offset) { 2569 if (page->index >= end_index + 1 || !last_offset) {
2570 unlock_page(page); 2570 unlock_page(page);
2571 return 0; 2571 return 0;
2572 } 2572 }
2573 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE); 2573 zero_user_segment(page, last_offset, PAGE_SIZE);
2574 } 2574 }
2575 bh = head; 2575 bh = head;
2576 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2576 block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
2577 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 2577 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2578 /* first map all the buffers, logging any direct items we find */ 2578 /* first map all the buffers, logging any direct items we find */
2579 do { 2579 do {
@@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file,
2774 *fsdata = (void *)(unsigned long)flags; 2774 *fsdata = (void *)(unsigned long)flags;
2775 } 2775 }
2776 2776
2777 index = pos >> PAGE_CACHE_SHIFT; 2777 index = pos >> PAGE_SHIFT;
2778 page = grab_cache_page_write_begin(mapping, index, flags); 2778 page = grab_cache_page_write_begin(mapping, index, flags);
2779 if (!page) 2779 if (!page)
2780 return -ENOMEM; 2780 return -ENOMEM;
@@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file,
2822 } 2822 }
2823 if (ret) { 2823 if (ret) {
2824 unlock_page(page); 2824 unlock_page(page);
2825 page_cache_release(page); 2825 put_page(page);
2826 /* Truncate allocated blocks */ 2826 /* Truncate allocated blocks */
2827 reiserfs_truncate_failed_write(inode); 2827 reiserfs_truncate_failed_write(inode);
2828 } 2828 }
@@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2909 else 2909 else
2910 th = NULL; 2910 th = NULL;
2911 2911
2912 start = pos & (PAGE_CACHE_SIZE - 1); 2912 start = pos & (PAGE_SIZE - 1);
2913 if (unlikely(copied < len)) { 2913 if (unlikely(copied < len)) {
2914 if (!PageUptodate(page)) 2914 if (!PageUptodate(page))
2915 copied = 0; 2915 copied = 0;
@@ -2974,7 +2974,7 @@ out:
2974 if (locked) 2974 if (locked)
2975 reiserfs_write_unlock(inode->i_sb); 2975 reiserfs_write_unlock(inode->i_sb);
2976 unlock_page(page); 2976 unlock_page(page);
2977 page_cache_release(page); 2977 put_page(page);
2978 2978
2979 if (pos + len > inode->i_size) 2979 if (pos + len > inode->i_size)
2980 reiserfs_truncate_failed_write(inode); 2980 reiserfs_truncate_failed_write(inode);
@@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2996 unsigned from, unsigned to) 2996 unsigned from, unsigned to)
2997{ 2997{
2998 struct inode *inode = page->mapping->host; 2998 struct inode *inode = page->mapping->host;
2999 loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to; 2999 loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
3000 int ret = 0; 3000 int ret = 0;
3001 int update_sd = 0; 3001 int update_sd = 0;
3002 struct reiserfs_transaction_handle *th = NULL; 3002 struct reiserfs_transaction_handle *th = NULL;
@@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
3181 struct inode *inode = page->mapping->host; 3181 struct inode *inode = page->mapping->host;
3182 unsigned int curr_off = 0; 3182 unsigned int curr_off = 0;
3183 unsigned int stop = offset + length; 3183 unsigned int stop = offset + length;
3184 int partial_page = (offset || length < PAGE_CACHE_SIZE); 3184 int partial_page = (offset || length < PAGE_SIZE);
3185 int ret = 1; 3185 int ret = 1;
3186 3186
3187 BUG_ON(!PageLocked(page)); 3187 BUG_ON(!PageLocked(page));
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 036a1fc0a8c3..57045f423893 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
203 * __reiserfs_write_begin on that page. This will force a 203 * __reiserfs_write_begin on that page. This will force a
204 * reiserfs_get_block to unpack the tail for us. 204 * reiserfs_get_block to unpack the tail for us.
205 */ 205 */
206 index = inode->i_size >> PAGE_CACHE_SHIFT; 206 index = inode->i_size >> PAGE_SHIFT;
207 mapping = inode->i_mapping; 207 mapping = inode->i_mapping;
208 page = grab_cache_page(mapping, index); 208 page = grab_cache_page(mapping, index);
209 retval = -ENOMEM; 209 retval = -ENOMEM;
@@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
221 221
222out_unlock: 222out_unlock:
223 unlock_page(page); 223 unlock_page(page);
224 page_cache_release(page); 224 put_page(page);
225 225
226out: 226out:
227 inode_unlock(inode); 227 inode_unlock(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 44c2bdced1c8..2ace90e981f0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s,
599 * This does a check to see if the buffer belongs to one of these 599 * This does a check to see if the buffer belongs to one of these
600 * lost pages before doing the final put_bh. If page->mapping was 600 * lost pages before doing the final put_bh. If page->mapping was
601 * null, it tries to free buffers on the page, which should make the 601 * null, it tries to free buffers on the page, which should make the
602 * final page_cache_release drop the page from the lru. 602 * final put_page drop the page from the lru.
603 */ 603 */
604static void release_buffer_page(struct buffer_head *bh) 604static void release_buffer_page(struct buffer_head *bh)
605{ 605{
606 struct page *page = bh->b_page; 606 struct page *page = bh->b_page;
607 if (!page->mapping && trylock_page(page)) { 607 if (!page->mapping && trylock_page(page)) {
608 page_cache_get(page); 608 get_page(page);
609 put_bh(bh); 609 put_bh(bh);
610 if (!page->mapping) 610 if (!page->mapping)
611 try_to_free_buffers(page); 611 try_to_free_buffers(page);
612 unlock_page(page); 612 unlock_page(page);
613 page_cache_release(page); 613 put_page(page);
614 } else { 614 } else {
615 put_bh(bh); 615 put_bh(bh);
616 } 616 }
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 24cbe013240f..5feacd689241 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1342 */ 1342 */
1343 1343
1344 data = kmap_atomic(un_bh->b_page); 1344 data = kmap_atomic(un_bh->b_page);
1345 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); 1345 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1));
1346 memcpy(data + off, 1346 memcpy(data + off,
1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih), 1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
1348 ret_value); 1348 ret_value);
@@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos)
1511 1511
1512 if (page) { 1512 if (page) {
1513 if (page_has_buffers(page)) { 1513 if (page_has_buffers(page)) {
1514 tail_index = pos & (PAGE_CACHE_SIZE - 1); 1514 tail_index = pos & (PAGE_SIZE - 1);
1515 cur_index = 0; 1515 cur_index = 0;
1516 head = page_buffers(page); 1516 head = page_buffers(page);
1517 bh = head; 1517 bh = head;
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index f41e19b4bb42..2d5489b0a269 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
151 */ 151 */
152 if (up_to_date_bh) { 152 if (up_to_date_bh) {
153 unsigned pgoff = 153 unsigned pgoff =
154 (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); 154 (tail_offset + total_tail - 1) & (PAGE_SIZE - 1);
155 char *kaddr = kmap_atomic(up_to_date_bh->b_page); 155 char *kaddr = kmap_atomic(up_to_date_bh->b_page);
156 memset(kaddr + pgoff, 0, blk_size - total_tail); 156 memset(kaddr + pgoff, 0, blk_size - total_tail);
157 kunmap_atomic(kaddr); 157 kunmap_atomic(kaddr);
@@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
271 * the page was locked and this part of the page was up to date when 271 * the page was locked and this part of the page was up to date when
272 * indirect2direct was called, so we know the bytes are still valid 272 * indirect2direct was called, so we know the bytes are still valid
273 */ 273 */
274 tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); 274 tail = tail + (pos & (PAGE_SIZE - 1));
275 275
276 PATH_LAST_POSITION(path)++; 276 PATH_LAST_POSITION(path)++;
277 277
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 57e0b2310532..28f5f8b11370 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -415,7 +415,7 @@ out:
415static inline void reiserfs_put_page(struct page *page) 415static inline void reiserfs_put_page(struct page *page)
416{ 416{
417 kunmap(page); 417 kunmap(page);
418 page_cache_release(page); 418 put_page(page);
419} 419}
420 420
421static struct page *reiserfs_get_page(struct inode *dir, size_t n) 421static struct page *reiserfs_get_page(struct inode *dir, size_t n)
@@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
427 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this 427 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
428 */ 428 */
429 mapping_set_gfp_mask(mapping, GFP_NOFS); 429 mapping_set_gfp_mask(mapping, GFP_NOFS);
430 page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL); 430 page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
431 if (!IS_ERR(page)) { 431 if (!IS_ERR(page)) {
432 kmap(page); 432 kmap(page);
433 if (PageError(page)) 433 if (PageError(page))
@@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
526 while (buffer_pos < buffer_size || buffer_pos == 0) { 526 while (buffer_pos < buffer_size || buffer_pos == 0) {
527 size_t chunk; 527 size_t chunk;
528 size_t skip = 0; 528 size_t skip = 0;
529 size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); 529 size_t page_offset = (file_pos & (PAGE_SIZE - 1));
530 530
531 if (buffer_size - buffer_pos > PAGE_CACHE_SIZE) 531 if (buffer_size - buffer_pos > PAGE_SIZE)
532 chunk = PAGE_CACHE_SIZE; 532 chunk = PAGE_SIZE;
533 else 533 else
534 chunk = buffer_size - buffer_pos; 534 chunk = buffer_size - buffer_pos;
535 535
@@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
546 struct reiserfs_xattr_header *rxh; 546 struct reiserfs_xattr_header *rxh;
547 547
548 skip = file_pos = sizeof(struct reiserfs_xattr_header); 548 skip = file_pos = sizeof(struct reiserfs_xattr_header);
549 if (chunk + skip > PAGE_CACHE_SIZE) 549 if (chunk + skip > PAGE_SIZE)
550 chunk = PAGE_CACHE_SIZE - skip; 550 chunk = PAGE_SIZE - skip;
551 rxh = (struct reiserfs_xattr_header *)data; 551 rxh = (struct reiserfs_xattr_header *)data;
552 rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC); 552 rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
553 rxh->h_hash = cpu_to_le32(xahash); 553 rxh->h_hash = cpu_to_le32(xahash);
@@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
675 char *data; 675 char *data;
676 size_t skip = 0; 676 size_t skip = 0;
677 677
678 if (isize - file_pos > PAGE_CACHE_SIZE) 678 if (isize - file_pos > PAGE_SIZE)
679 chunk = PAGE_CACHE_SIZE; 679 chunk = PAGE_SIZE;
680 else 680 else
681 chunk = isize - file_pos; 681 chunk = isize - file_pos;
682 682
diff --git a/fs/splice.c b/fs/splice.c
index 9947b5c69664..b018eb485019 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -88,7 +88,7 @@ out_unlock:
88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, 88static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
89 struct pipe_buffer *buf) 89 struct pipe_buffer *buf)
90{ 90{
91 page_cache_release(buf->page); 91 put_page(buf->page);
92 buf->flags &= ~PIPE_BUF_FLAG_LRU; 92 buf->flags &= ~PIPE_BUF_FLAG_LRU;
93} 93}
94 94
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
268 268
269void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) 269void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
270{ 270{
271 page_cache_release(spd->pages[i]); 271 put_page(spd->pages[i]);
272} 272}
273 273
274/* 274/*
@@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
328 if (splice_grow_spd(pipe, &spd)) 328 if (splice_grow_spd(pipe, &spd))
329 return -ENOMEM; 329 return -ENOMEM;
330 330
331 index = *ppos >> PAGE_CACHE_SHIFT; 331 index = *ppos >> PAGE_SHIFT;
332 loff = *ppos & ~PAGE_CACHE_MASK; 332 loff = *ppos & ~PAGE_MASK;
333 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 333 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
334 nr_pages = min(req_pages, spd.nr_pages_max); 334 nr_pages = min(req_pages, spd.nr_pages_max);
335 335
336 /* 336 /*
@@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
365 error = add_to_page_cache_lru(page, mapping, index, 365 error = add_to_page_cache_lru(page, mapping, index,
366 mapping_gfp_constraint(mapping, GFP_KERNEL)); 366 mapping_gfp_constraint(mapping, GFP_KERNEL));
367 if (unlikely(error)) { 367 if (unlikely(error)) {
368 page_cache_release(page); 368 put_page(page);
369 if (error == -EEXIST) 369 if (error == -EEXIST)
370 continue; 370 continue;
371 break; 371 break;
@@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
385 * Now loop over the map and see if we need to start IO on any 385 * Now loop over the map and see if we need to start IO on any
386 * pages, fill in the partial map, etc. 386 * pages, fill in the partial map, etc.
387 */ 387 */
388 index = *ppos >> PAGE_CACHE_SHIFT; 388 index = *ppos >> PAGE_SHIFT;
389 nr_pages = spd.nr_pages; 389 nr_pages = spd.nr_pages;
390 spd.nr_pages = 0; 390 spd.nr_pages = 0;
391 for (page_nr = 0; page_nr < nr_pages; page_nr++) { 391 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
@@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
397 /* 397 /*
398 * this_len is the max we'll use from this page 398 * this_len is the max we'll use from this page
399 */ 399 */
400 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 400 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
401 page = spd.pages[page_nr]; 401 page = spd.pages[page_nr];
402 402
403 if (PageReadahead(page)) 403 if (PageReadahead(page))
@@ -426,7 +426,7 @@ retry_lookup:
426 error = -ENOMEM; 426 error = -ENOMEM;
427 break; 427 break;
428 } 428 }
429 page_cache_release(spd.pages[page_nr]); 429 put_page(spd.pages[page_nr]);
430 spd.pages[page_nr] = page; 430 spd.pages[page_nr] = page;
431 } 431 }
432 /* 432 /*
@@ -456,7 +456,7 @@ fill_it:
456 * i_size must be checked after PageUptodate. 456 * i_size must be checked after PageUptodate.
457 */ 457 */
458 isize = i_size_read(mapping->host); 458 isize = i_size_read(mapping->host);
459 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 459 end_index = (isize - 1) >> PAGE_SHIFT;
460 if (unlikely(!isize || index > end_index)) 460 if (unlikely(!isize || index > end_index))
461 break; 461 break;
462 462
@@ -470,7 +470,7 @@ fill_it:
470 /* 470 /*
471 * max good bytes in this page 471 * max good bytes in this page
472 */ 472 */
473 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 473 plen = ((isize - 1) & ~PAGE_MASK) + 1;
474 if (plen <= loff) 474 if (plen <= loff)
475 break; 475 break;
476 476
@@ -494,8 +494,8 @@ fill_it:
494 * we got, 'nr_pages' is how many pages are in the map. 494 * we got, 'nr_pages' is how many pages are in the map.
495 */ 495 */
496 while (page_nr < nr_pages) 496 while (page_nr < nr_pages)
497 page_cache_release(spd.pages[page_nr++]); 497 put_page(spd.pages[page_nr++]);
498 in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 498 in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
499 499
500 if (spd.nr_pages) 500 if (spd.nr_pages)
501 error = splice_to_pipe(pipe, &spd); 501 error = splice_to_pipe(pipe, &spd);
@@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
636 goto shrink_ret; 636 goto shrink_ret;
637 } 637 }
638 638
639 offset = *ppos & ~PAGE_CACHE_MASK; 639 offset = *ppos & ~PAGE_MASK;
640 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 640 nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
641 641
642 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { 642 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
643 struct page *page; 643 struct page *page;
@@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
647 if (!page) 647 if (!page)
648 goto err; 648 goto err;
649 649
650 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); 650 this_len = min_t(size_t, len, PAGE_SIZE - offset);
651 vec[i].iov_base = (void __user *) page_address(page); 651 vec[i].iov_base = (void __user *) page_address(page);
652 vec[i].iov_len = this_len; 652 vec[i].iov_len = this_len;
653 spd.pages[i] = page; 653 spd.pages[i] = page;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9236d0..2c2618410d51 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
181 in = min(bytes, msblk->devblksize - offset); 181 in = min(bytes, msblk->devblksize - offset);
182 bytes -= in; 182 bytes -= in;
183 while (in) { 183 while (in) {
184 if (pg_offset == PAGE_CACHE_SIZE) { 184 if (pg_offset == PAGE_SIZE) {
185 data = squashfs_next_page(output); 185 data = squashfs_next_page(output);
186 pg_offset = 0; 186 pg_offset = 0;
187 } 187 }
188 avail = min_t(int, in, PAGE_CACHE_SIZE - 188 avail = min_t(int, in, PAGE_SIZE -
189 pg_offset); 189 pg_offset);
190 memcpy(data + pg_offset, bh[k]->b_data + offset, 190 memcpy(data + pg_offset, bh[k]->b_data + offset,
191 avail); 191 avail);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0b2168..23813c078cc9 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
30 * access the metadata and fragment caches. 30 * access the metadata and fragment caches.
31 * 31 *
32 * To avoid out of memory and fragmentation issues with vmalloc the cache 32 * To avoid out of memory and fragmentation issues with vmalloc the cache
33 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. 33 * uses sequences of kmalloced PAGE_SIZE buffers.
34 * 34 *
35 * It should be noted that the cache is not used for file datablocks, these 35 * It should be noted that the cache is not used for file datablocks, these
36 * are decompressed and cached in the page-cache in the normal way. The 36 * are decompressed and cached in the page-cache in the normal way. The
@@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
231/* 231/*
232 * Initialise cache allocating the specified number of entries, each of 232 * Initialise cache allocating the specified number of entries, each of
233 * size block_size. To avoid vmalloc fragmentation issues each entry 233 * size block_size. To avoid vmalloc fragmentation issues each entry
234 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. 234 * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
235 */ 235 */
236struct squashfs_cache *squashfs_cache_init(char *name, int entries, 236struct squashfs_cache *squashfs_cache_init(char *name, int entries,
237 int block_size) 237 int block_size)
@@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
255 cache->unused = entries; 255 cache->unused = entries;
256 cache->entries = entries; 256 cache->entries = entries;
257 cache->block_size = block_size; 257 cache->block_size = block_size;
258 cache->pages = block_size >> PAGE_CACHE_SHIFT; 258 cache->pages = block_size >> PAGE_SHIFT;
259 cache->pages = cache->pages ? cache->pages : 1; 259 cache->pages = cache->pages ? cache->pages : 1;
260 cache->name = name; 260 cache->name = name;
261 cache->num_waiters = 0; 261 cache->num_waiters = 0;
@@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
275 } 275 }
276 276
277 for (j = 0; j < cache->pages; j++) { 277 for (j = 0; j < cache->pages; j++) {
278 entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 278 entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
279 if (entry->data[j] == NULL) { 279 if (entry->data[j] == NULL) {
280 ERROR("Failed to allocate %s buffer\n", name); 280 ERROR("Failed to allocate %s buffer\n", name);
281 goto cleanup; 281 goto cleanup;
@@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
314 return min(length, entry->length - offset); 314 return min(length, entry->length - offset);
315 315
316 while (offset < entry->length) { 316 while (offset < entry->length) {
317 void *buff = entry->data[offset / PAGE_CACHE_SIZE] 317 void *buff = entry->data[offset / PAGE_SIZE]
318 + (offset % PAGE_CACHE_SIZE); 318 + (offset % PAGE_SIZE);
319 int bytes = min_t(int, entry->length - offset, 319 int bytes = min_t(int, entry->length - offset,
320 PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); 320 PAGE_SIZE - (offset % PAGE_SIZE));
321 321
322 if (bytes >= remaining) { 322 if (bytes >= remaining) {
323 memcpy(buffer, buff, remaining); 323 memcpy(buffer, buff, remaining);
@@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
415 */ 415 */
416void *squashfs_read_table(struct super_block *sb, u64 block, int length) 416void *squashfs_read_table(struct super_block *sb, u64 block, int length)
417{ 417{
418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 418 int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
419 int i, res; 419 int i, res;
420 void *table, *buffer, **data; 420 void *table, *buffer, **data;
421 struct squashfs_page_actor *actor; 421 struct squashfs_page_actor *actor;
@@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
436 goto failed2; 436 goto failed2;
437 } 437 }
438 438
439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) 439 for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
440 data[i] = buffer; 440 data[i] = buffer;
441 441
442 res = squashfs_read_data(sb, block, length | 442 res = squashfs_read_data(sb, block, length |
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e9034bf6e5ae..d2bc13636f79 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags)
102 * Read decompressor specific options from file system if present 102 * Read decompressor specific options from file system if present
103 */ 103 */
104 if (SQUASHFS_COMP_OPTS(flags)) { 104 if (SQUASHFS_COMP_OPTS(flags)) {
105 buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 105 buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
106 if (buffer == NULL) { 106 if (buffer == NULL) {
107 comp_opts = ERR_PTR(-ENOMEM); 107 comp_opts = ERR_PTR(-ENOMEM);
108 goto out; 108 goto out;
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689062ba..13d80947bf9e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n,
175{ 175{
176 int err, i; 176 int err, i;
177 long long block = 0; 177 long long block = 0;
178 __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 178 __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
179 179
180 if (blist == NULL) { 180 if (blist == NULL) {
181 ERROR("read_indexes: Failed to allocate block_list\n"); 181 ERROR("read_indexes: Failed to allocate block_list\n");
@@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n,
183 } 183 }
184 184
185 while (n) { 185 while (n) {
186 int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); 186 int blocks = min_t(int, n, PAGE_SIZE >> 2);
187 187
188 err = squashfs_read_metadata(sb, blist, start_block, 188 err = squashfs_read_metadata(sb, blist, start_block,
189 offset, blocks << 2); 189 offset, blocks << 2);
@@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
377 struct inode *inode = page->mapping->host; 377 struct inode *inode = page->mapping->host;
378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 378 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
379 void *pageaddr; 379 void *pageaddr;
380 int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 380 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
381 int start_index = page->index & ~mask, end_index = start_index | mask; 381 int start_index = page->index & ~mask, end_index = start_index | mask;
382 382
383 /* 383 /*
384 * Loop copying datablock into pages. As the datablock likely covers 384 * Loop copying datablock into pages. As the datablock likely covers
385 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly 385 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
386 * grab the pages from the page cache, except for the page that we've 386 * grab the pages from the page cache, except for the page that we've
387 * been called to fill. 387 * been called to fill.
388 */ 388 */
389 for (i = start_index; i <= end_index && bytes > 0; i++, 389 for (i = start_index; i <= end_index && bytes > 0; i++,
390 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 390 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
391 struct page *push_page; 391 struct page *push_page;
392 int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; 392 int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
393 393
394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); 394 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
395 395
@@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
404 404
405 pageaddr = kmap_atomic(push_page); 405 pageaddr = kmap_atomic(push_page);
406 squashfs_copy_data(pageaddr, buffer, offset, avail); 406 squashfs_copy_data(pageaddr, buffer, offset, avail);
407 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 407 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
408 kunmap_atomic(pageaddr); 408 kunmap_atomic(pageaddr);
409 flush_dcache_page(push_page); 409 flush_dcache_page(push_page);
410 SetPageUptodate(push_page); 410 SetPageUptodate(push_page);
411skip_page: 411skip_page:
412 unlock_page(push_page); 412 unlock_page(push_page);
413 if (i != page->index) 413 if (i != page->index)
414 page_cache_release(push_page); 414 put_page(push_page);
415 } 415 }
416} 416}
417 417
@@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
454{ 454{
455 struct inode *inode = page->mapping->host; 455 struct inode *inode = page->mapping->host;
456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 456 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
457 int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); 457 int index = page->index >> (msblk->block_log - PAGE_SHIFT);
458 int file_end = i_size_read(inode) >> msblk->block_log; 458 int file_end = i_size_read(inode) >> msblk->block_log;
459 int res; 459 int res;
460 void *pageaddr; 460 void *pageaddr;
@@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page)
462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", 462 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
463 page->index, squashfs_i(inode)->start); 463 page->index, squashfs_i(inode)->start);
464 464
465 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 465 if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
466 PAGE_CACHE_SHIFT)) 466 PAGE_SHIFT))
467 goto out; 467 goto out;
468 468
469 if (index < file_end || squashfs_i(inode)->fragment_block == 469 if (index < file_end || squashfs_i(inode)->fragment_block ==
@@ -487,7 +487,7 @@ error_out:
487 SetPageError(page); 487 SetPageError(page);
488out: 488out:
489 pageaddr = kmap_atomic(page); 489 pageaddr = kmap_atomic(page);
490 memset(pageaddr, 0, PAGE_CACHE_SIZE); 490 memset(pageaddr, 0, PAGE_SIZE);
491 kunmap_atomic(pageaddr); 491 kunmap_atomic(pageaddr);
492 flush_dcache_page(page); 492 flush_dcache_page(page);
493 if (!PageError(page)) 493 if (!PageError(page))
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 43e7a7eddac0..cb485d8e0e91 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
30 struct inode *inode = target_page->mapping->host; 30 struct inode *inode = target_page->mapping->host;
31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; 31 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
32 32
33 int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 33 int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
34 int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; 34 int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
35 int start_index = target_page->index & ~mask; 35 int start_index = target_page->index & ~mask;
36 int end_index = start_index | mask; 36 int end_index = start_index | mask;
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM; 37 int i, n, pages, missing_pages, bytes, res = -ENOMEM;
@@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
68 68
69 if (PageUptodate(page[i])) { 69 if (PageUptodate(page[i])) {
70 unlock_page(page[i]); 70 unlock_page(page[i]);
71 page_cache_release(page[i]); 71 put_page(page[i]);
72 page[i] = NULL; 72 page[i] = NULL;
73 missing_pages++; 73 missing_pages++;
74 } 74 }
@@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
96 goto mark_errored; 96 goto mark_errored;
97 97
98 /* Last page may have trailing bytes not filled */ 98 /* Last page may have trailing bytes not filled */
99 bytes = res % PAGE_CACHE_SIZE; 99 bytes = res % PAGE_SIZE;
100 if (bytes) { 100 if (bytes) {
101 pageaddr = kmap_atomic(page[pages - 1]); 101 pageaddr = kmap_atomic(page[pages - 1]);
102 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); 102 memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
103 kunmap_atomic(pageaddr); 103 kunmap_atomic(pageaddr);
104 } 104 }
105 105
@@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
109 SetPageUptodate(page[i]); 109 SetPageUptodate(page[i]);
110 unlock_page(page[i]); 110 unlock_page(page[i]);
111 if (page[i] != target_page) 111 if (page[i] != target_page)
112 page_cache_release(page[i]); 112 put_page(page[i]);
113 } 113 }
114 114
115 kfree(actor); 115 kfree(actor);
@@ -127,7 +127,7 @@ mark_errored:
127 flush_dcache_page(page[i]); 127 flush_dcache_page(page[i]);
128 SetPageError(page[i]); 128 SetPageError(page[i]);
129 unlock_page(page[i]); 129 unlock_page(page[i]);
130 page_cache_release(page[i]); 130 put_page(page[i]);
131 } 131 }
132 132
133out: 133out:
@@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
153 } 153 }
154 154
155 for (n = 0; n < pages && bytes > 0; n++, 155 for (n = 0; n < pages && bytes > 0; n++,
156 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { 156 bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
157 int avail = min_t(int, bytes, PAGE_CACHE_SIZE); 157 int avail = min_t(int, bytes, PAGE_SIZE);
158 158
159 if (page[n] == NULL) 159 if (page[n] == NULL)
160 continue; 160 continue;
161 161
162 pageaddr = kmap_atomic(page[n]); 162 pageaddr = kmap_atomic(page[n]);
163 squashfs_copy_data(pageaddr, buffer, offset, avail); 163 squashfs_copy_data(pageaddr, buffer, offset, avail);
164 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 164 memset(pageaddr + avail, 0, PAGE_SIZE - avail);
165 kunmap_atomic(pageaddr); 165 kunmap_atomic(pageaddr);
166 flush_dcache_page(page[n]); 166 flush_dcache_page(page[n]);
167 SetPageUptodate(page[n]); 167 SetPageUptodate(page[n]);
168 unlock_page(page[n]); 168 unlock_page(page[n]);
169 if (page[n] != target_page) 169 if (page[n] != target_page)
170 page_cache_release(page[n]); 170 put_page(page[n]);
171 } 171 }
172 172
173out: 173out:
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index c31e2bc9c081..ff4468bd18b0 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
117 data = squashfs_first_page(output); 117 data = squashfs_first_page(output);
118 buff = stream->output; 118 buff = stream->output;
119 while (data) { 119 while (data) {
120 if (bytes <= PAGE_CACHE_SIZE) { 120 if (bytes <= PAGE_SIZE) {
121 memcpy(data, buff, bytes); 121 memcpy(data, buff, bytes);
122 break; 122 break;
123 } 123 }
124 memcpy(data, buff, PAGE_CACHE_SIZE); 124 memcpy(data, buff, PAGE_SIZE);
125 buff += PAGE_CACHE_SIZE; 125 buff += PAGE_SIZE;
126 bytes -= PAGE_CACHE_SIZE; 126 bytes -= PAGE_SIZE;
127 data = squashfs_next_page(output); 127 data = squashfs_next_page(output);
128 } 128 }
129 squashfs_finish_page(output); 129 squashfs_finish_page(output);
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 244b9fbfff7b..934c17e96590 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
102 data = squashfs_first_page(output); 102 data = squashfs_first_page(output);
103 buff = stream->output; 103 buff = stream->output;
104 while (data) { 104 while (data) {
105 if (bytes <= PAGE_CACHE_SIZE) { 105 if (bytes <= PAGE_SIZE) {
106 memcpy(data, buff, bytes); 106 memcpy(data, buff, bytes);
107 break; 107 break;
108 } else { 108 } else {
109 memcpy(data, buff, PAGE_CACHE_SIZE); 109 memcpy(data, buff, PAGE_SIZE);
110 buff += PAGE_CACHE_SIZE; 110 buff += PAGE_SIZE;
111 bytes -= PAGE_CACHE_SIZE; 111 bytes -= PAGE_SIZE;
112 data = squashfs_next_page(output); 112 data = squashfs_next_page(output);
113 } 113 }
114 } 114 }
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 5a1c11f56441..9b7b1b6a7892 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
48 if (actor == NULL) 48 if (actor == NULL)
49 return NULL; 49 return NULL;
50 50
51 actor->length = length ? : pages * PAGE_CACHE_SIZE; 51 actor->length = length ? : pages * PAGE_SIZE;
52 actor->buffer = buffer; 52 actor->buffer = buffer;
53 actor->pages = pages; 53 actor->pages = pages;
54 actor->next_page = 0; 54 actor->next_page = 0;
@@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
88 if (actor == NULL) 88 if (actor == NULL)
89 return NULL; 89 return NULL;
90 90
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; 91 actor->length = length ? : pages * PAGE_SIZE;
92 actor->page = page; 92 actor->page = page;
93 actor->pages = pages; 93 actor->pages = pages;
94 actor->next_page = 0; 94 actor->next_page = 0;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 26dd82008b82..98537eab27e2 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
24 if (actor == NULL) 24 if (actor == NULL)
25 return NULL; 25 return NULL;
26 26
27 actor->length = length ? : pages * PAGE_CACHE_SIZE; 27 actor->length = length ? : pages * PAGE_SIZE;
28 actor->page = page; 28 actor->page = page;
29 actor->pages = pages; 29 actor->pages = pages;
30 actor->next_page = 0; 30 actor->next_page = 0;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5e79bfa4f260..cf01e15a7b16 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
152 * Check the system page size is not larger than the filesystem 152 * Check the system page size is not larger than the filesystem
153 * block size (by default 128K). This is currently not supported. 153 * block size (by default 128K). This is currently not supported.
154 */ 154 */
155 if (PAGE_CACHE_SIZE > msblk->block_size) { 155 if (PAGE_SIZE > msblk->block_size) {
156 ERROR("Page size > filesystem block size (%d). This is " 156 ERROR("Page size > filesystem block size (%d). This is "
157 "currently not supported!\n", msblk->block_size); 157 "currently not supported!\n", msblk->block_size);
158 goto failed_mount; 158 goto failed_mount;
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index dbcc2f54bad4..d688ef42a6a1 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
48 struct inode *inode = page->mapping->host; 48 struct inode *inode = page->mapping->host;
49 struct super_block *sb = inode->i_sb; 49 struct super_block *sb = inode->i_sb;
50 struct squashfs_sb_info *msblk = sb->s_fs_info; 50 struct squashfs_sb_info *msblk = sb->s_fs_info;
51 int index = page->index << PAGE_CACHE_SHIFT; 51 int index = page->index << PAGE_SHIFT;
52 u64 block = squashfs_i(inode)->start; 52 u64 block = squashfs_i(inode)->start;
53 int offset = squashfs_i(inode)->offset; 53 int offset = squashfs_i(inode)->offset;
54 int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE); 54 int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
55 int bytes, copied; 55 int bytes, copied;
56 void *pageaddr; 56 void *pageaddr;
57 struct squashfs_cache_entry *entry; 57 struct squashfs_cache_entry *entry;
@@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
94 copied = squashfs_copy_data(pageaddr + bytes, entry, offset, 94 copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
95 length - bytes); 95 length - bytes);
96 if (copied == length - bytes) 96 if (copied == length - bytes)
97 memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); 97 memset(pageaddr + length, 0, PAGE_SIZE - length);
98 else 98 else
99 block = entry->next_index; 99 block = entry->next_index;
100 kunmap_atomic(pageaddr); 100 kunmap_atomic(pageaddr);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c609624e4b8a..6bfaef73d065 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
141 stream->buf.in_pos = 0; 141 stream->buf.in_pos = 0;
142 stream->buf.in_size = 0; 142 stream->buf.in_size = 0;
143 stream->buf.out_pos = 0; 143 stream->buf.out_pos = 0;
144 stream->buf.out_size = PAGE_CACHE_SIZE; 144 stream->buf.out_size = PAGE_SIZE;
145 stream->buf.out = squashfs_first_page(output); 145 stream->buf.out = squashfs_first_page(output);
146 146
147 do { 147 do {
@@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
158 stream->buf.out = squashfs_next_page(output); 158 stream->buf.out = squashfs_next_page(output);
159 if (stream->buf.out != NULL) { 159 if (stream->buf.out != NULL) {
160 stream->buf.out_pos = 0; 160 stream->buf.out_pos = 0;
161 total += PAGE_CACHE_SIZE; 161 total += PAGE_SIZE;
162 } 162 }
163 } 163 }
164 164
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 8727caba6882..2ec24d128bce 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
69 int zlib_err, zlib_init = 0, k = 0; 69 int zlib_err, zlib_init = 0, k = 0;
70 z_stream *stream = strm; 70 z_stream *stream = strm;
71 71
72 stream->avail_out = PAGE_CACHE_SIZE; 72 stream->avail_out = PAGE_SIZE;
73 stream->next_out = squashfs_first_page(output); 73 stream->next_out = squashfs_first_page(output);
74 stream->avail_in = 0; 74 stream->avail_in = 0;
75 75
@@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
85 if (stream->avail_out == 0) { 85 if (stream->avail_out == 0) {
86 stream->next_out = squashfs_next_page(output); 86 stream->next_out = squashfs_next_page(output);
87 if (stream->next_out != NULL) 87 if (stream->next_out != NULL)
88 stream->avail_out = PAGE_CACHE_SIZE; 88 stream->avail_out = PAGE_SIZE;
89 } 89 }
90 90
91 if (!zlib_init) { 91 if (!zlib_init) {
diff --git a/fs/sync.c b/fs/sync.c
index dd5d1711c7ac..2a54c1f22035 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
302 goto out; 302 goto out;
303 303
304 if (sizeof(pgoff_t) == 4) { 304 if (sizeof(pgoff_t) == 4) {
305 if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { 305 if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
306 /* 306 /*
307 * The range starts outside a 32 bit machine's 307 * The range starts outside a 32 bit machine's
308 * pagecache addressing capabilities. Let it "succeed" 308 * pagecache addressing capabilities. Let it "succeed"
@@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
310 ret = 0; 310 ret = 0;
311 goto out; 311 goto out;
312 } 312 }
313 if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { 313 if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
314 /* 314 /*
315 * Out to EOF 315 * Out to EOF
316 */ 316 */
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 63c1bcb224ee..c0f0a3e643eb 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = {
30static inline void dir_put_page(struct page *page) 30static inline void dir_put_page(struct page *page)
31{ 31{
32 kunmap(page); 32 kunmap(page);
33 page_cache_release(page); 33 put_page(page);
34} 34}
35 35
36static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) 36static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
@@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
73 if (pos >= inode->i_size) 73 if (pos >= inode->i_size)
74 return 0; 74 return 0;
75 75
76 offset = pos & ~PAGE_CACHE_MASK; 76 offset = pos & ~PAGE_MASK;
77 n = pos >> PAGE_CACHE_SHIFT; 77 n = pos >> PAGE_SHIFT;
78 78
79 for ( ; n < npages; n++, offset = 0) { 79 for ( ; n < npages; n++, offset = 0) {
80 char *kaddr, *limit; 80 char *kaddr, *limit;
@@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
85 continue; 85 continue;
86 kaddr = (char *)page_address(page); 86 kaddr = (char *)page_address(page);
87 de = (struct sysv_dir_entry *)(kaddr+offset); 87 de = (struct sysv_dir_entry *)(kaddr+offset);
88 limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; 88 limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
89 for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) { 89 for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
90 char *name = de->name; 90 char *name = de->name;
91 91
@@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
146 if (!IS_ERR(page)) { 146 if (!IS_ERR(page)) {
147 kaddr = (char*)page_address(page); 147 kaddr = (char*)page_address(page);
148 de = (struct sysv_dir_entry *) kaddr; 148 de = (struct sysv_dir_entry *) kaddr;
149 kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; 149 kaddr += PAGE_SIZE - SYSV_DIRSIZE;
150 for ( ; (char *) de <= kaddr ; de++) { 150 for ( ; (char *) de <= kaddr ; de++) {
151 if (!de->inode) 151 if (!de->inode)
152 continue; 152 continue;
@@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
190 goto out; 190 goto out;
191 kaddr = (char*)page_address(page); 191 kaddr = (char*)page_address(page);
192 de = (struct sysv_dir_entry *)kaddr; 192 de = (struct sysv_dir_entry *)kaddr;
193 kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; 193 kaddr += PAGE_SIZE - SYSV_DIRSIZE;
194 while ((char *)de <= kaddr) { 194 while ((char *)de <= kaddr) {
195 if (!de->inode) 195 if (!de->inode)
196 goto got_it; 196 goto got_it;
@@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
261 kmap(page); 261 kmap(page);
262 262
263 base = (char*)page_address(page); 263 base = (char*)page_address(page);
264 memset(base, 0, PAGE_CACHE_SIZE); 264 memset(base, 0, PAGE_SIZE);
265 265
266 de = (struct sysv_dir_entry *) base; 266 de = (struct sysv_dir_entry *) base;
267 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 267 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
@@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
273 kunmap(page); 273 kunmap(page);
274 err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); 274 err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
275fail: 275fail:
276 page_cache_release(page); 276 put_page(page);
277 return err; 277 return err;
278} 278}
279 279
@@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode)
296 296
297 kaddr = (char *)page_address(page); 297 kaddr = (char *)page_address(page);
298 de = (struct sysv_dir_entry *)kaddr; 298 de = (struct sysv_dir_entry *)kaddr;
299 kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE; 299 kaddr += PAGE_SIZE-SYSV_DIRSIZE;
300 300
301 for ( ;(char *)de <= kaddr; de++) { 301 for ( ;(char *)de <= kaddr; de++) {
302 if (!de->inode) 302 if (!de->inode)
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e83ed0b4bf..90b60c03b588 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
264out_dir: 264out_dir:
265 if (dir_de) { 265 if (dir_de) {
266 kunmap(dir_page); 266 kunmap(dir_page);
267 page_cache_release(dir_page); 267 put_page(dir_page);
268 } 268 }
269out_old: 269out_old:
270 kunmap(old_page); 270 kunmap(old_page);
271 page_cache_release(old_page); 271 put_page(old_page);
272out: 272out:
273 return err; 273 return err;
274} 274}
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 065c88f8e4b8..446753d8ac34 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -121,7 +121,7 @@ static int do_readpage(struct page *page)
121 if (block >= beyond) { 121 if (block >= beyond) {
122 /* Reading beyond inode */ 122 /* Reading beyond inode */
123 SetPageChecked(page); 123 SetPageChecked(page);
124 memset(addr, 0, PAGE_CACHE_SIZE); 124 memset(addr, 0, PAGE_SIZE);
125 goto out; 125 goto out;
126 } 126 }
127 127
@@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping,
223{ 223{
224 struct inode *inode = mapping->host; 224 struct inode *inode = mapping->host;
225 struct ubifs_info *c = inode->i_sb->s_fs_info; 225 struct ubifs_info *c = inode->i_sb->s_fs_info;
226 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 226 pgoff_t index = pos >> PAGE_SHIFT;
227 struct ubifs_budget_req req = { .new_page = 1 }; 227 struct ubifs_budget_req req = { .new_page = 1 };
228 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); 228 int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
229 struct page *page; 229 struct page *page;
@@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping,
254 } 254 }
255 255
256 if (!PageUptodate(page)) { 256 if (!PageUptodate(page)) {
257 if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) 257 if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
258 SetPageChecked(page); 258 SetPageChecked(page);
259 else { 259 else {
260 err = do_readpage(page); 260 err = do_readpage(page);
261 if (err) { 261 if (err) {
262 unlock_page(page); 262 unlock_page(page);
263 page_cache_release(page); 263 put_page(page);
264 ubifs_release_budget(c, &req); 264 ubifs_release_budget(c, &req);
265 return err; 265 return err;
266 } 266 }
@@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
428 struct inode *inode = mapping->host; 428 struct inode *inode = mapping->host;
429 struct ubifs_info *c = inode->i_sb->s_fs_info; 429 struct ubifs_info *c = inode->i_sb->s_fs_info;
430 struct ubifs_inode *ui = ubifs_inode(inode); 430 struct ubifs_inode *ui = ubifs_inode(inode);
431 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 431 pgoff_t index = pos >> PAGE_SHIFT;
432 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); 432 int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
433 int skipped_read = 0; 433 int skipped_read = 0;
434 struct page *page; 434 struct page *page;
@@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
446 446
447 if (!PageUptodate(page)) { 447 if (!PageUptodate(page)) {
448 /* The page is not loaded from the flash */ 448 /* The page is not loaded from the flash */
449 if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { 449 if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
450 /* 450 /*
451 * We change whole page so no need to load it. But we 451 * We change whole page so no need to load it. But we
452 * do not know whether this page exists on the media or 452 * do not know whether this page exists on the media or
@@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
462 err = do_readpage(page); 462 err = do_readpage(page);
463 if (err) { 463 if (err) {
464 unlock_page(page); 464 unlock_page(page);
465 page_cache_release(page); 465 put_page(page);
466 return err; 466 return err;
467 } 467 }
468 } 468 }
@@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
494 mutex_unlock(&ui->ui_mutex); 494 mutex_unlock(&ui->ui_mutex);
495 } 495 }
496 unlock_page(page); 496 unlock_page(page);
497 page_cache_release(page); 497 put_page(page);
498 498
499 return write_begin_slow(mapping, pos, len, pagep, flags); 499 return write_begin_slow(mapping, pos, len, pagep, flags);
500 } 500 }
@@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
549 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld", 549 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
550 inode->i_ino, pos, page->index, len, copied, inode->i_size); 550 inode->i_ino, pos, page->index, len, copied, inode->i_size);
551 551
552 if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) { 552 if (unlikely(copied < len && len == PAGE_SIZE)) {
553 /* 553 /*
554 * VFS copied less data to the page that it intended and 554 * VFS copied less data to the page that it intended and
555 * declared in its '->write_begin()' call via the @len 555 * declared in its '->write_begin()' call via the @len
556 * argument. If the page was not up-to-date, and @len was 556 * argument. If the page was not up-to-date, and @len was
557 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did 557 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
558 * not load it from the media (for optimization reasons). This 558 * not load it from the media (for optimization reasons). This
559 * means that part of the page contains garbage. So read the 559 * means that part of the page contains garbage. So read the
560 * page now. 560 * page now.
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
593 593
594out: 594out:
595 unlock_page(page); 595 unlock_page(page);
596 page_cache_release(page); 596 put_page(page);
597 return copied; 597 return copied;
598} 598}
599 599
@@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page,
621 621
622 addr = zaddr = kmap(page); 622 addr = zaddr = kmap(page);
623 623
624 end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; 624 end_index = (i_size - 1) >> PAGE_SHIFT;
625 if (!i_size || page->index > end_index) { 625 if (!i_size || page->index > end_index) {
626 hole = 1; 626 hole = 1;
627 memset(addr, 0, PAGE_CACHE_SIZE); 627 memset(addr, 0, PAGE_SIZE);
628 goto out_hole; 628 goto out_hole;
629 } 629 }
630 630
@@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
673 } 673 }
674 674
675 if (end_index == page->index) { 675 if (end_index == page->index) {
676 int len = i_size & (PAGE_CACHE_SIZE - 1); 676 int len = i_size & (PAGE_SIZE - 1);
677 677
678 if (len && len < read) 678 if (len && len < read)
679 memset(zaddr + len, 0, read - len); 679 memset(zaddr + len, 0, read - len);
@@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
773 isize = i_size_read(inode); 773 isize = i_size_read(inode);
774 if (isize == 0) 774 if (isize == 0)
775 goto out_free; 775 goto out_free;
776 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); 776 end_index = ((isize - 1) >> PAGE_SHIFT);
777 777
778 for (page_idx = 1; page_idx < page_cnt; page_idx++) { 778 for (page_idx = 1; page_idx < page_cnt; page_idx++) {
779 pgoff_t page_offset = offset + page_idx; 779 pgoff_t page_offset = offset + page_idx;
@@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
788 if (!PageUptodate(page)) 788 if (!PageUptodate(page))
789 err = populate_page(c, page, bu, &n); 789 err = populate_page(c, page, bu, &n);
790 unlock_page(page); 790 unlock_page(page);
791 page_cache_release(page); 791 put_page(page);
792 if (err) 792 if (err)
793 break; 793 break;
794 } 794 }
@@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len)
905#ifdef UBIFS_DEBUG 905#ifdef UBIFS_DEBUG
906 struct ubifs_inode *ui = ubifs_inode(inode); 906 struct ubifs_inode *ui = ubifs_inode(inode);
907 spin_lock(&ui->ui_lock); 907 spin_lock(&ui->ui_lock);
908 ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT); 908 ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
909 spin_unlock(&ui->ui_lock); 909 spin_unlock(&ui->ui_lock);
910#endif 910#endif
911 911
@@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
1001 struct inode *inode = page->mapping->host; 1001 struct inode *inode = page->mapping->host;
1002 struct ubifs_inode *ui = ubifs_inode(inode); 1002 struct ubifs_inode *ui = ubifs_inode(inode);
1003 loff_t i_size = i_size_read(inode), synced_i_size; 1003 loff_t i_size = i_size_read(inode), synced_i_size;
1004 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 1004 pgoff_t end_index = i_size >> PAGE_SHIFT;
1005 int err, len = i_size & (PAGE_CACHE_SIZE - 1); 1005 int err, len = i_size & (PAGE_SIZE - 1);
1006 void *kaddr; 1006 void *kaddr;
1007 1007
1008 dbg_gen("ino %lu, pg %lu, pg flags %#lx", 1008 dbg_gen("ino %lu, pg %lu, pg flags %#lx",
@@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
1021 1021
1022 /* Is the page fully inside @i_size? */ 1022 /* Is the page fully inside @i_size? */
1023 if (page->index < end_index) { 1023 if (page->index < end_index) {
1024 if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { 1024 if (page->index >= synced_i_size >> PAGE_SHIFT) {
1025 err = inode->i_sb->s_op->write_inode(inode, NULL); 1025 err = inode->i_sb->s_op->write_inode(inode, NULL);
1026 if (err) 1026 if (err)
1027 goto out_unlock; 1027 goto out_unlock;
@@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
1034 * with this. 1034 * with this.
1035 */ 1035 */
1036 } 1036 }
1037 return do_writepage(page, PAGE_CACHE_SIZE); 1037 return do_writepage(page, PAGE_SIZE);
1038 } 1038 }
1039 1039
1040 /* 1040 /*
@@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
1045 * writes to that region are not written out to the file." 1045 * writes to that region are not written out to the file."
1046 */ 1046 */
1047 kaddr = kmap_atomic(page); 1047 kaddr = kmap_atomic(page);
1048 memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); 1048 memset(kaddr + len, 0, PAGE_SIZE - len);
1049 flush_dcache_page(page); 1049 flush_dcache_page(page);
1050 kunmap_atomic(kaddr); 1050 kunmap_atomic(kaddr);
1051 1051
@@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
1138 truncate_setsize(inode, new_size); 1138 truncate_setsize(inode, new_size);
1139 1139
1140 if (offset) { 1140 if (offset) {
1141 pgoff_t index = new_size >> PAGE_CACHE_SHIFT; 1141 pgoff_t index = new_size >> PAGE_SHIFT;
1142 struct page *page; 1142 struct page *page;
1143 1143
1144 page = find_lock_page(inode->i_mapping, index); 1144 page = find_lock_page(inode->i_mapping, index);
@@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
1157 clear_page_dirty_for_io(page); 1157 clear_page_dirty_for_io(page);
1158 if (UBIFS_BLOCKS_PER_PAGE_SHIFT) 1158 if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
1159 offset = new_size & 1159 offset = new_size &
1160 (PAGE_CACHE_SIZE - 1); 1160 (PAGE_SIZE - 1);
1161 err = do_writepage(page, offset); 1161 err = do_writepage(page, offset);
1162 page_cache_release(page); 1162 put_page(page);
1163 if (err) 1163 if (err)
1164 goto out_budg; 1164 goto out_budg;
1165 /* 1165 /*
@@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
1173 * having to read it. 1173 * having to read it.
1174 */ 1174 */
1175 unlock_page(page); 1175 unlock_page(page);
1176 page_cache_release(page); 1176 put_page(page);
1177 } 1177 }
1178 } 1178 }
1179 } 1179 }
@@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
1285 struct ubifs_info *c = inode->i_sb->s_fs_info; 1285 struct ubifs_info *c = inode->i_sb->s_fs_info;
1286 1286
1287 ubifs_assert(PagePrivate(page)); 1287 ubifs_assert(PagePrivate(page));
1288 if (offset || length < PAGE_CACHE_SIZE) 1288 if (offset || length < PAGE_SIZE)
1289 /* Partial page remains dirty */ 1289 /* Partial page remains dirty */
1290 return; 1290 return;
1291 1291
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index a233ba913be4..e98c24ee25a1 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2237,12 +2237,12 @@ static int __init ubifs_init(void)
2237 BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); 2237 BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
2238 2238
2239 /* 2239 /*
2240 * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to 2240 * We require that PAGE_SIZE is greater-than-or-equal-to
2241 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. 2241 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
2242 */ 2242 */
2243 if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) { 2243 if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
2244 pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes", 2244 pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
2245 current->pid, (unsigned int)PAGE_CACHE_SIZE); 2245 current->pid, (unsigned int)PAGE_SIZE);
2246 return -EINVAL; 2246 return -EINVAL;
2247 } 2247 }
2248 2248
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c2a57e193a81..4cd7e569cd00 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -46,8 +46,8 @@
46#define UBIFS_SUPER_MAGIC 0x24051905 46#define UBIFS_SUPER_MAGIC 0x24051905
47 47
48/* Number of UBIFS blocks per VFS page */ 48/* Number of UBIFS blocks per VFS page */
49#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE) 49#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
50#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT) 50#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)
51 51
52/* "File system end of life" sequence number watermark */ 52/* "File system end of life" sequence number watermark */
53#define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL 53#define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1af98963d860..877ba1c9b461 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page)
46 46
47 kaddr = kmap(page); 47 kaddr = kmap(page);
48 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); 48 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
49 memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); 49 memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
50 flush_dcache_page(page); 50 flush_dcache_page(page);
51 SetPageUptodate(page); 51 SetPageUptodate(page);
52 kunmap(page); 52 kunmap(page);
@@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file,
87{ 87{
88 struct page *page; 88 struct page *page;
89 89
90 if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) 90 if (WARN_ON_ONCE(pos >= PAGE_SIZE))
91 return -EIO; 91 return -EIO;
92 page = grab_cache_page_write_begin(mapping, 0, flags); 92 page = grab_cache_page_write_begin(mapping, 0, flags);
93 if (!page) 93 if (!page)
94 return -ENOMEM; 94 return -ENOMEM;
95 *pagep = page; 95 *pagep = page;
96 96
97 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) 97 if (!PageUptodate(page) && len != PAGE_SIZE)
98 __udf_adinicb_readpage(page); 98 __udf_adinicb_readpage(page);
99 return 0; 99 return 0;
100} 100}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 166d3ed32c39..2dc461eeb415 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode)
287 if (!PageUptodate(page)) { 287 if (!PageUptodate(page)) {
288 kaddr = kmap(page); 288 kaddr = kmap(page);
289 memset(kaddr + iinfo->i_lenAlloc, 0x00, 289 memset(kaddr + iinfo->i_lenAlloc, 0x00,
290 PAGE_CACHE_SIZE - iinfo->i_lenAlloc); 290 PAGE_SIZE - iinfo->i_lenAlloc);
291 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, 291 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
292 iinfo->i_lenAlloc); 292 iinfo->i_lenAlloc);
293 flush_dcache_page(page); 293 flush_dcache_page(page);
@@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode)
319 inode->i_data.a_ops = &udf_adinicb_aops; 319 inode->i_data.a_ops = &udf_adinicb_aops;
320 up_write(&iinfo->i_data_sem); 320 up_write(&iinfo->i_data_sem);
321 } 321 }
322 page_cache_release(page); 322 put_page(page);
323 mark_inode_dirty(inode); 323 mark_inode_dirty(inode);
324 324
325 return err; 325 return err;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae601c24..0447b949c7f5 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
237 sector_t newb, struct page *locked_page) 237 sector_t newb, struct page *locked_page)
238{ 238{
239 const unsigned blks_per_page = 239 const unsigned blks_per_page =
240 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); 240 1 << (PAGE_SHIFT - inode->i_blkbits);
241 const unsigned mask = blks_per_page - 1; 241 const unsigned mask = blks_per_page - 1;
242 struct address_space * const mapping = inode->i_mapping; 242 struct address_space * const mapping = inode->i_mapping;
243 pgoff_t index, cur_index, last_index; 243 pgoff_t index, cur_index, last_index;
@@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
255 255
256 cur_index = locked_page->index; 256 cur_index = locked_page->index;
257 end = count + beg; 257 end = count + beg;
258 last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 258 last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
259 for (i = beg; i < end; i = (i | mask) + 1) { 259 for (i = beg; i < end; i = (i | mask) + 1) {
260 index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 260 index = i >> (PAGE_SHIFT - inode->i_blkbits);
261 261
262 if (likely(cur_index != index)) { 262 if (likely(cur_index != index)) {
263 page = ufs_get_locked_page(mapping, index); 263 page = ufs_get_locked_page(mapping, index);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 74f2e80288bf..0b1457292734 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
62static inline void ufs_put_page(struct page *page) 62static inline void ufs_put_page(struct page *page)
63{ 63{
64 kunmap(page); 64 kunmap(page);
65 page_cache_release(page); 65 put_page(page);
66} 66}
67 67
68ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) 68ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page)
111 struct super_block *sb = dir->i_sb; 111 struct super_block *sb = dir->i_sb;
112 char *kaddr = page_address(page); 112 char *kaddr = page_address(page);
113 unsigned offs, rec_len; 113 unsigned offs, rec_len;
114 unsigned limit = PAGE_CACHE_SIZE; 114 unsigned limit = PAGE_SIZE;
115 const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1; 115 const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
116 struct ufs_dir_entry *p; 116 struct ufs_dir_entry *p;
117 char *error; 117 char *error;
118 118
119 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { 119 if ((dir->i_size >> PAGE_SHIFT) == page->index) {
120 limit = dir->i_size & ~PAGE_CACHE_MASK; 120 limit = dir->i_size & ~PAGE_MASK;
121 if (limit & chunk_mask) 121 if (limit & chunk_mask)
122 goto Ebadsize; 122 goto Ebadsize;
123 if (!limit) 123 if (!limit)
@@ -170,7 +170,7 @@ Einumber:
170bad_entry: 170bad_entry:
171 ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - " 171 ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
172 "offset=%lu, rec_len=%d, name_len=%d", 172 "offset=%lu, rec_len=%d, name_len=%d",
173 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, 173 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
174 rec_len, ufs_get_de_namlen(sb, p)); 174 rec_len, ufs_get_de_namlen(sb, p));
175 goto fail; 175 goto fail;
176Eend: 176Eend:
@@ -178,7 +178,7 @@ Eend:
178 ufs_error(sb, __func__, 178 ufs_error(sb, __func__,
179 "entry in directory #%lu spans the page boundary" 179 "entry in directory #%lu spans the page boundary"
180 "offset=%lu", 180 "offset=%lu",
181 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs); 181 dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
182fail: 182fail:
183 SetPageChecked(page); 183 SetPageChecked(page);
184 SetPageError(page); 184 SetPageError(page);
@@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr)
211{ 211{
212 unsigned last_byte = inode->i_size; 212 unsigned last_byte = inode->i_size;
213 213
214 last_byte -= page_nr << PAGE_CACHE_SHIFT; 214 last_byte -= page_nr << PAGE_SHIFT;
215 if (last_byte > PAGE_CACHE_SIZE) 215 if (last_byte > PAGE_SIZE)
216 last_byte = PAGE_CACHE_SIZE; 216 last_byte = PAGE_SIZE;
217 return last_byte; 217 return last_byte;
218} 218}
219 219
@@ -341,7 +341,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
341 kaddr = page_address(page); 341 kaddr = page_address(page);
342 dir_end = kaddr + ufs_last_byte(dir, n); 342 dir_end = kaddr + ufs_last_byte(dir, n);
343 de = (struct ufs_dir_entry *)kaddr; 343 de = (struct ufs_dir_entry *)kaddr;
344 kaddr += PAGE_CACHE_SIZE - reclen; 344 kaddr += PAGE_SIZE - reclen;
345 while ((char *)de <= kaddr) { 345 while ((char *)de <= kaddr) {
346 if ((char *)de == dir_end) { 346 if ((char *)de == dir_end) {
347 /* We hit i_size */ 347 /* We hit i_size */
@@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
432 loff_t pos = ctx->pos; 432 loff_t pos = ctx->pos;
433 struct inode *inode = file_inode(file); 433 struct inode *inode = file_inode(file);
434 struct super_block *sb = inode->i_sb; 434 struct super_block *sb = inode->i_sb;
435 unsigned int offset = pos & ~PAGE_CACHE_MASK; 435 unsigned int offset = pos & ~PAGE_MASK;
436 unsigned long n = pos >> PAGE_CACHE_SHIFT; 436 unsigned long n = pos >> PAGE_SHIFT;
437 unsigned long npages = dir_pages(inode); 437 unsigned long npages = dir_pages(inode);
438 unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); 438 unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
439 int need_revalidate = file->f_version != inode->i_version; 439 int need_revalidate = file->f_version != inode->i_version;
@@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
454 ufs_error(sb, __func__, 454 ufs_error(sb, __func__,
455 "bad page in #%lu", 455 "bad page in #%lu",
456 inode->i_ino); 456 inode->i_ino);
457 ctx->pos += PAGE_CACHE_SIZE - offset; 457 ctx->pos += PAGE_SIZE - offset;
458 return -EIO; 458 return -EIO;
459 } 459 }
460 kaddr = page_address(page); 460 kaddr = page_address(page);
461 if (unlikely(need_revalidate)) { 461 if (unlikely(need_revalidate)) {
462 if (offset) { 462 if (offset) {
463 offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); 463 offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
464 ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; 464 ctx->pos = (n<<PAGE_SHIFT) + offset;
465 } 465 }
466 file->f_version = inode->i_version; 466 file->f_version = inode->i_version;
467 need_revalidate = 0; 467 need_revalidate = 0;
@@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
574 574
575 kmap(page); 575 kmap(page);
576 base = (char*)page_address(page); 576 base = (char*)page_address(page);
577 memset(base, 0, PAGE_CACHE_SIZE); 577 memset(base, 0, PAGE_SIZE);
578 578
579 de = (struct ufs_dir_entry *) base; 579 de = (struct ufs_dir_entry *) base;
580 580
@@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
594 594
595 err = ufs_commit_chunk(page, 0, chunk_size); 595 err = ufs_commit_chunk(page, 0, chunk_size);
596fail: 596fail:
597 page_cache_release(page); 597 put_page(page);
598 return err; 598 return err;
599} 599}
600 600
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index d897e169ab9c..9f49431e798d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1051 lastfrag--; 1051 lastfrag--;
1052 1052
1053 lastpage = ufs_get_locked_page(mapping, lastfrag >> 1053 lastpage = ufs_get_locked_page(mapping, lastfrag >>
1054 (PAGE_CACHE_SHIFT - inode->i_blkbits)); 1054 (PAGE_SHIFT - inode->i_blkbits));
1055 if (IS_ERR(lastpage)) { 1055 if (IS_ERR(lastpage)) {
1056 err = -EIO; 1056 err = -EIO;
1057 goto out; 1057 goto out;
1058 } 1058 }
1059 1059
1060 end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); 1060 end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
1061 bh = page_buffers(lastpage); 1061 bh = page_buffers(lastpage);
1062 for (i = 0; i < end; ++i) 1062 for (i = 0; i < end; ++i)
1063 bh = bh->b_this_page; 1063 bh = bh->b_this_page;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index acf4a3b61b81..a1559f762805 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
305 ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0); 305 ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
306 else { 306 else {
307 kunmap(dir_page); 307 kunmap(dir_page);
308 page_cache_release(dir_page); 308 put_page(dir_page);
309 } 309 }
310 inode_dec_link_count(old_dir); 310 inode_dec_link_count(old_dir);
311 } 311 }
@@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
315out_dir: 315out_dir:
316 if (dir_de) { 316 if (dir_de) {
317 kunmap(dir_page); 317 kunmap(dir_page);
318 page_cache_release(dir_page); 318 put_page(dir_page);
319 } 319 }
320out_old: 320out_old:
321 kunmap(old_page); 321 kunmap(old_page);
322 page_cache_release(old_page); 322 put_page(old_page);
323out: 323out:
324 return err; 324 return err;
325} 325}
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index b6c2f94e041e..a409e3e7827a 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
261 if (unlikely(page->mapping == NULL)) { 261 if (unlikely(page->mapping == NULL)) {
262 /* Truncate got there first */ 262 /* Truncate got there first */
263 unlock_page(page); 263 unlock_page(page);
264 page_cache_release(page); 264 put_page(page);
265 page = NULL; 265 page = NULL;
266 goto out; 266 goto out;
267 } 267 }
268 268
269 if (!PageUptodate(page) || PageError(page)) { 269 if (!PageUptodate(page) || PageError(page)) {
270 unlock_page(page); 270 unlock_page(page);
271 page_cache_release(page); 271 put_page(page);
272 272
273 printk(KERN_ERR "ufs_change_blocknr: " 273 printk(KERN_ERR "ufs_change_blocknr: "
274 "can not read page: ino %lu, index: %lu\n", 274 "can not read page: ino %lu, index: %lu\n",
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 954175928240..b7fbf53dbc81 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping,
283static inline void ufs_put_locked_page(struct page *page) 283static inline void ufs_put_locked_page(struct page *page)
284{ 284{
285 unlock_page(page); 285 unlock_page(page);
286 page_cache_release(page); 286 put_page(page);
287} 287}
288 288
289 289
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 041b6948aecc..ce41d7fe753c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3742,11 +3742,11 @@ xfs_bmap_btalloc(
3742 args.prod = align; 3742 args.prod = align;
3743 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3743 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3744 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3744 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3745 } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { 3745 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3746 args.prod = 1; 3746 args.prod = 1;
3747 args.mod = 0; 3747 args.mod = 0;
3748 } else { 3748 } else {
3749 args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; 3749 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3750 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) 3750 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3751 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3751 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3752 } 3752 }
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d445a64b979e..e49b2406d15d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -704,7 +704,7 @@ next_buffer:
704 704
705 xfs_iunlock(ip, XFS_ILOCK_EXCL); 705 xfs_iunlock(ip, XFS_ILOCK_EXCL);
706out_invalidate: 706out_invalidate:
707 xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); 707 xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
708 return; 708 return;
709} 709}
710 710
@@ -925,9 +925,9 @@ xfs_do_writepage(
925 * ---------------------------------^------------------| 925 * ---------------------------------^------------------|
926 */ 926 */
927 offset = i_size_read(inode); 927 offset = i_size_read(inode);
928 end_index = offset >> PAGE_CACHE_SHIFT; 928 end_index = offset >> PAGE_SHIFT;
929 if (page->index < end_index) 929 if (page->index < end_index)
930 end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT; 930 end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
931 else { 931 else {
932 /* 932 /*
933 * Check whether the page to write out is beyond or straddles 933 * Check whether the page to write out is beyond or straddles
@@ -940,7 +940,7 @@ xfs_do_writepage(
940 * | | Straddles | 940 * | | Straddles |
941 * ---------------------------------^-----------|--------| 941 * ---------------------------------^-----------|--------|
942 */ 942 */
943 unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1); 943 unsigned offset_into_page = offset & (PAGE_SIZE - 1);
944 944
945 /* 945 /*
946 * Skip the page if it is fully outside i_size, e.g. due to a 946 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -971,7 +971,7 @@ xfs_do_writepage(
971 * memory is zeroed when mapped, and writes to that region are 971 * memory is zeroed when mapped, and writes to that region are
972 * not written out to the file." 972 * not written out to the file."
973 */ 973 */
974 zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE); 974 zero_user_segment(page, offset_into_page, PAGE_SIZE);
975 975
976 /* Adjust the end_offset to the end of file */ 976 /* Adjust the end_offset to the end of file */
977 end_offset = offset; 977 end_offset = offset;
@@ -1475,7 +1475,7 @@ xfs_vm_write_failed(
1475 loff_t block_offset; 1475 loff_t block_offset;
1476 loff_t block_start; 1476 loff_t block_start;
1477 loff_t block_end; 1477 loff_t block_end;
1478 loff_t from = pos & (PAGE_CACHE_SIZE - 1); 1478 loff_t from = pos & (PAGE_SIZE - 1);
1479 loff_t to = from + len; 1479 loff_t to = from + len;
1480 struct buffer_head *bh, *head; 1480 struct buffer_head *bh, *head;
1481 struct xfs_mount *mp = XFS_I(inode)->i_mount; 1481 struct xfs_mount *mp = XFS_I(inode)->i_mount;
@@ -1491,7 +1491,7 @@ xfs_vm_write_failed(
1491 * start of the page by using shifts rather than masks the mismatch 1491 * start of the page by using shifts rather than masks the mismatch
1492 * problem. 1492 * problem.
1493 */ 1493 */
1494 block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT; 1494 block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
1495 1495
1496 ASSERT(block_offset + from == pos); 1496 ASSERT(block_offset + from == pos);
1497 1497
@@ -1558,12 +1558,12 @@ xfs_vm_write_begin(
1558 struct page **pagep, 1558 struct page **pagep,
1559 void **fsdata) 1559 void **fsdata)
1560{ 1560{
1561 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1561 pgoff_t index = pos >> PAGE_SHIFT;
1562 struct page *page; 1562 struct page *page;
1563 int status; 1563 int status;
1564 struct xfs_mount *mp = XFS_I(mapping->host)->i_mount; 1564 struct xfs_mount *mp = XFS_I(mapping->host)->i_mount;
1565 1565
1566 ASSERT(len <= PAGE_CACHE_SIZE); 1566 ASSERT(len <= PAGE_SIZE);
1567 1567
1568 page = grab_cache_page_write_begin(mapping, index, flags); 1568 page = grab_cache_page_write_begin(mapping, index, flags);
1569 if (!page) 1569 if (!page)
@@ -1592,7 +1592,7 @@ xfs_vm_write_begin(
1592 truncate_pagecache_range(inode, start, pos + len); 1592 truncate_pagecache_range(inode, start, pos + len);
1593 } 1593 }
1594 1594
1595 page_cache_release(page); 1595 put_page(page);
1596 page = NULL; 1596 page = NULL;
1597 } 1597 }
1598 1598
@@ -1620,7 +1620,7 @@ xfs_vm_write_end(
1620{ 1620{
1621 int ret; 1621 int ret;
1622 1622
1623 ASSERT(len <= PAGE_CACHE_SIZE); 1623 ASSERT(len <= PAGE_SIZE);
1624 1624
1625 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 1625 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1626 if (unlikely(ret < len)) { 1626 if (unlikely(ret < len)) {
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a32c1dcae2ff..3b6309865c65 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1237,7 +1237,7 @@ xfs_free_file_space(
1237 /* wait for the completion of any pending DIOs */ 1237 /* wait for the completion of any pending DIOs */
1238 inode_dio_wait(VFS_I(ip)); 1238 inode_dio_wait(VFS_I(ip));
1239 1239
1240 rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); 1240 rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1241 ioffset = round_down(offset, rounding); 1241 ioffset = round_down(offset, rounding);
1242 iendoffset = round_up(offset + len, rounding) - 1; 1242 iendoffset = round_up(offset + len, rounding) - 1;
1243 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset, 1243 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
@@ -1466,7 +1466,7 @@ xfs_shift_file_space(
1466 if (error) 1466 if (error)
1467 return error; 1467 return error;
1468 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, 1468 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1469 offset >> PAGE_CACHE_SHIFT, -1); 1469 offset >> PAGE_SHIFT, -1);
1470 if (error) 1470 if (error)
1471 return error; 1471 return error;
1472 1472
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ac0fd32de31e..569938a4a357 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -106,8 +106,8 @@ xfs_iozero(
106 unsigned offset, bytes; 106 unsigned offset, bytes;
107 void *fsdata; 107 void *fsdata;
108 108
109 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ 109 offset = (pos & (PAGE_SIZE -1)); /* Within page */
110 bytes = PAGE_CACHE_SIZE - offset; 110 bytes = PAGE_SIZE - offset;
111 if (bytes > count) 111 if (bytes > count)
112 bytes = count; 112 bytes = count;
113 113
@@ -799,8 +799,8 @@ xfs_file_dio_aio_write(
799 /* see generic_file_direct_write() for why this is necessary */ 799 /* see generic_file_direct_write() for why this is necessary */
800 if (mapping->nrpages) { 800 if (mapping->nrpages) {
801 invalidate_inode_pages2_range(mapping, 801 invalidate_inode_pages2_range(mapping,
802 pos >> PAGE_CACHE_SHIFT, 802 pos >> PAGE_SHIFT,
803 end >> PAGE_CACHE_SHIFT); 803 end >> PAGE_SHIFT);
804 } 804 }
805 805
806 if (ret > 0) { 806 if (ret > 0) {
@@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff(
1207 1207
1208 pagevec_init(&pvec, 0); 1208 pagevec_init(&pvec, 0);
1209 1209
1210 index = startoff >> PAGE_CACHE_SHIFT; 1210 index = startoff >> PAGE_SHIFT;
1211 endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); 1211 endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1212 end = endoff >> PAGE_CACHE_SHIFT; 1212 end = endoff >> PAGE_SHIFT;
1213 do { 1213 do {
1214 int want; 1214 int want;
1215 unsigned nr_pages; 1215 unsigned nr_pages;
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239a0fa9..a8192dc797dc 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -135,7 +135,7 @@ typedef __u32 xfs_nlink_t;
135 * Size of block device i/o is parameterized here. 135 * Size of block device i/o is parameterized here.
136 * Currently the system supports page-sized i/o. 136 * Currently the system supports page-sized i/o.
137 */ 137 */
138#define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT 138#define BLKDEV_IOSHIFT PAGE_SHIFT
139#define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT) 139#define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT)
140/* number of BB's per block device block */ 140/* number of BB's per block device block */
141#define BLKDEV_BB BTOBB(BLKDEV_IOSIZE) 141#define BLKDEV_BB BTOBB(BLKDEV_IOSIZE)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536a0ee9cd5a..cfd4210dd015 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count(
171 ASSERT(sbp->sb_blocklog >= BBSHIFT); 171 ASSERT(sbp->sb_blocklog >= BBSHIFT);
172 172
173 /* Limited by ULONG_MAX of page cache index */ 173 /* Limited by ULONG_MAX of page cache index */
174 if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) 174 if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
175 return -EFBIG; 175 return -EFBIG;
176 return 0; 176 return 0;
177} 177}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bac6b3435591..eafe257b357a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -231,12 +231,12 @@ static inline unsigned long
231xfs_preferred_iosize(xfs_mount_t *mp) 231xfs_preferred_iosize(xfs_mount_t *mp)
232{ 232{
233 if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE) 233 if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
234 return PAGE_CACHE_SIZE; 234 return PAGE_SIZE;
235 return (mp->m_swidth ? 235 return (mp->m_swidth ?
236 (mp->m_swidth << mp->m_sb.sb_blocklog) : 236 (mp->m_swidth << mp->m_sb.sb_blocklog) :
237 ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ? 237 ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
238 (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) : 238 (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
239 PAGE_CACHE_SIZE)); 239 PAGE_SIZE));
240} 240}
241 241
242#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \ 242#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index ade236e90bb3..51ddaf2c2b8c 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -293,8 +293,8 @@ xfs_fs_commit_blocks(
293 * Make sure reads through the pagecache see the new data. 293 * Make sure reads through the pagecache see the new data.
294 */ 294 */
295 error = invalidate_inode_pages2_range(inode->i_mapping, 295 error = invalidate_inode_pages2_range(inode->i_mapping,
296 start >> PAGE_CACHE_SHIFT, 296 start >> PAGE_SHIFT,
297 (end - 1) >> PAGE_CACHE_SHIFT); 297 (end - 1) >> PAGE_SHIFT);
298 WARN_ON_ONCE(error); 298 WARN_ON_ONCE(error);
299 299
300 error = xfs_iomap_write_unwritten(ip, start, length); 300 error = xfs_iomap_write_unwritten(ip, start, length);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d760934109b5..187e14b696c2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@ xfs_max_file_offset(
556 /* Figure out maximum filesize, on Linux this can depend on 556 /* Figure out maximum filesize, on Linux this can depend on
557 * the filesystem blocksize (on 32 bit platforms). 557 * the filesystem blocksize (on 32 bit platforms).
558 * __block_write_begin does this in an [unsigned] long... 558 * __block_write_begin does this in an [unsigned] long...
559 * page->index << (PAGE_CACHE_SHIFT - bbits) 559 * page->index << (PAGE_SHIFT - bbits)
560 * So, for page sized blocks (4K on 32 bit platforms), 560 * So, for page sized blocks (4K on 32 bit platforms),
561 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is 561 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
562 * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 562 * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
563 * but for smaller blocksizes it is less (bbits = log2 bsize). 563 * but for smaller blocksizes it is less (bbits = log2 bsize).
564 * Note1: get_block_t takes a long (implicit cast from above) 564 * Note1: get_block_t takes a long (implicit cast from above)
565 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch 565 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
@@ -570,10 +570,10 @@ xfs_max_file_offset(
570#if BITS_PER_LONG == 32 570#if BITS_PER_LONG == 32
571# if defined(CONFIG_LBDAF) 571# if defined(CONFIG_LBDAF)
572 ASSERT(sizeof(sector_t) == 8); 572 ASSERT(sizeof(sector_t) == 8);
573 pagefactor = PAGE_CACHE_SIZE; 573 pagefactor = PAGE_SIZE;
574 bitshift = BITS_PER_LONG; 574 bitshift = BITS_PER_LONG;
575# else 575# else
576 pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); 576 pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
577# endif 577# endif
578#endif 578#endif
579 579
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index afae2316bd43..055a08ddac02 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -92,7 +92,7 @@ struct ttm_placement {
92 */ 92 */
93struct ttm_bus_placement { 93struct ttm_bus_placement {
94 void *addr; 94 void *addr;
95 unsigned long base; 95 phys_addr_t base;
96 unsigned long size; 96 unsigned long size;
97 unsigned long offset; 97 unsigned long offset;
98 bool is_iomem; 98 bool is_iomem;
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index df4f369254c0..506c3531832e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -559,25 +559,25 @@ static inline int atomic_dec_if_positive(atomic_t *v)
559#endif 559#endif
560 560
561/** 561/**
562 * fetch_or - perform *ptr |= mask and return old value of *ptr 562 * atomic_fetch_or - perform *p |= mask and return old value of *p
563 * @ptr: pointer to value 563 * @p: pointer to atomic_t
564 * @mask: mask to OR on the value 564 * @mask: mask to OR on the atomic_t
565 *
566 * cmpxchg based fetch_or, macro so it works for different integer types
567 */ 565 */
568#ifndef fetch_or 566#ifndef atomic_fetch_or
569#define fetch_or(ptr, mask) \ 567static inline int atomic_fetch_or(atomic_t *p, int mask)
570({ typeof(*(ptr)) __old, __val = *(ptr); \ 568{
571 for (;;) { \ 569 int old, val = atomic_read(p);
572 __old = cmpxchg((ptr), __val, __val | (mask)); \ 570
573 if (__old == __val) \ 571 for (;;) {
574 break; \ 572 old = atomic_cmpxchg(p, val, val | mask);
575 __val = __old; \ 573 if (old == val)
576 } \ 574 break;
577 __old; \ 575 val = old;
578}) 576 }
579#endif
580 577
578 return old;
579}
580#endif
581 581
582#ifdef CONFIG_GENERIC_ATOMIC64 582#ifdef CONFIG_GENERIC_ATOMIC64
583#include <asm-generic/atomic64.h> 583#include <asm-generic/atomic64.h>
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
135 135
136struct backing_dev_info { 136struct backing_dev_info {
137 struct list_head bdi_list; 137 struct list_head bdi_list;
138 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 138 unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
139 unsigned int capabilities; /* Device capabilities */ 139 unsigned int capabilities; /* Device capabilities */
140 congested_fn *congested_fn; /* Function pointer if device is md/dm */ 140 congested_fn *congested_fn; /* Function pointer if device is md/dm */
141 void *congested_data; /* Pointer to aux data for congested func */ 141 void *congested_data; /* Pointer to aux data for congested func */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..6b7481f62218 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
41#endif 41#endif
42 42
43#define BIO_MAX_PAGES 256 43#define BIO_MAX_PAGES 256
44#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) 44#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT)
45#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) 45#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
46 46
47/* 47/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e018bea..669e419d6234 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1372 1372
1373static inline void put_dev_sector(Sector p) 1373static inline void put_dev_sector(Sector p)
1374{ 1374{
1375 page_cache_release(p.v); 1375 put_page(p.v);
1376} 1376}
1377 1377
1378static inline bool __bvec_gap_to_prev(struct request_queue *q, 1378static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index f0ba9c2ec639..e3354b74286c 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -24,6 +24,8 @@
24#define PHY_ID_BCM7250 0xae025280 24#define PHY_ID_BCM7250 0xae025280
25#define PHY_ID_BCM7364 0xae025260 25#define PHY_ID_BCM7364 0xae025260
26#define PHY_ID_BCM7366 0x600d8490 26#define PHY_ID_BCM7366 0x600d8490
27#define PHY_ID_BCM7346 0x600d8650
28#define PHY_ID_BCM7362 0x600d84b0
27#define PHY_ID_BCM7425 0x600d86b0 29#define PHY_ID_BCM7425 0x600d86b0
28#define PHY_ID_BCM7429 0x600d8730 30#define PHY_ID_BCM7429 0x600d8730
29#define PHY_ID_BCM7435 0x600d8750 31#define PHY_ID_BCM7435 0x600d8750
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052cc5e5..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
43 */ 43 */
44}; 44};
45 45
46#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512) 46#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
47 47
48struct page; 48struct page;
49struct buffer_head; 49struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
263static inline void attach_page_buffers(struct page *page, 263static inline void attach_page_buffers(struct page *page,
264 struct buffer_head *head) 264 struct buffer_head *head)
265{ 265{
266 page_cache_get(page); 266 get_page(page);
267 SetPagePrivate(page); 267 SetPagePrivate(page);
268 set_page_private(page, (unsigned long)head); 268 set_page_private(page, (unsigned long)head);
269} 269}
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4681e1..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
176 */ 176 */
177static inline int calc_pages_for(u64 off, u64 len) 177static inline int calc_pages_for(u64 off, u64 len)
178{ 178{
179 return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - 179 return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
180 (off >> PAGE_CACHE_SHIFT); 180 (off >> PAGE_SHIFT);
181} 181}
182 182
183extern struct kmem_cache *ceph_inode_cachep; 183extern struct kmem_cache *ceph_inode_cachep;
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..eeae401a2412 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
199#define unreachable() __builtin_unreachable() 199#define unreachable() __builtin_unreachable()
200 200
201/* Mark a function definition as prohibited from being cloned. */ 201/* Mark a function definition as prohibited from being cloned. */
202#define __noclone __attribute__((__noclone__)) 202#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
203 203
204#endif /* GCC_VERSION >= 40500 */ 204#endif /* GCC_VERSION >= 40500 */
205 205
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 485fe5519448..d9d6a9d77489 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -188,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
188} 188}
189 189
190#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ 190#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \
191static struct configfs_attribute _pfx##attr_##_name = { \ 191static struct configfs_bin_attribute _pfx##attr_##_name = { \
192 .cb_attr = { \ 192 .cb_attr = { \
193 .ca_name = __stringify(_name), \ 193 .ca_name = __stringify(_name), \
194 .ca_mode = S_IRUGO, \ 194 .ca_mode = S_IRUGO, \
@@ -200,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = { \
200} 200}
201 201
202#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ 202#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \
203static struct configfs_attribute _pfx##attr_##_name = { \ 203static struct configfs_bin_attribute _pfx##attr_##_name = { \
204 .cb_attr = { \ 204 .cb_attr = { \
205 .ca_name = __stringify(_name), \ 205 .ca_name = __stringify(_name), \
206 .ca_mode = S_IWUSR, \ 206 .ca_mode = S_IWUSR, \
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7cb043d8f4e8..4bb4de8d95ea 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -161,6 +161,7 @@ struct dentry_operations {
161 struct vfsmount *(*d_automount)(struct path *); 161 struct vfsmount *(*d_automount)(struct path *);
162 int (*d_manage)(struct dentry *, bool); 162 int (*d_manage)(struct dentry *, bool);
163 struct inode *(*d_select_inode)(struct dentry *, unsigned); 163 struct inode *(*d_select_inode)(struct dentry *, unsigned);
164 struct dentry *(*d_real)(struct dentry *, struct inode *);
164} ____cacheline_aligned; 165} ____cacheline_aligned;
165 166
166/* 167/*
@@ -229,6 +230,7 @@ struct dentry_operations {
229#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ 230#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
230 231
231#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */ 232#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
233#define DCACHE_OP_REAL 0x08000000
232 234
233extern seqlock_t rename_lock; 235extern seqlock_t rename_lock;
234 236
@@ -555,4 +557,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
555 return upper; 557 return upper;
556} 558}
557 559
560static inline struct dentry *d_real(struct dentry *dentry)
561{
562 if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
563 return dentry->d_op->d_real(dentry, NULL);
564 else
565 return dentry;
566}
567
558#endif /* __LINUX_DCACHE_H */ 568#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a155e0..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@ struct f2fs_node {
262/* 262/*
263 * For NAT entries 263 * For NAT entries
264 */ 264 */
265#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry)) 265#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
266 266
267struct f2fs_nat_entry { 267struct f2fs_nat_entry {
268 __u8 version; /* latest version of cached nat entry */ 268 __u8 version; /* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
282 * Not allow to change this. 282 * Not allow to change this.
283 */ 283 */
284#define SIT_VBLOCK_MAP_SIZE 64 284#define SIT_VBLOCK_MAP_SIZE 64
285#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry)) 285#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
286 286
287/* 287/*
288 * Note that f2fs_sit_entry->vblocks has the following bit-field information. 288 * Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43aa1f8855c7..a51a5361695f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
465void bpf_prog_destroy(struct bpf_prog *fp); 465void bpf_prog_destroy(struct bpf_prog *fp);
466 466
467int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 467int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
468int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
469 bool locked);
468int sk_attach_bpf(u32 ufd, struct sock *sk); 470int sk_attach_bpf(u32 ufd, struct sock *sk);
469int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); 471int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
470int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); 472int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
471int sk_detach_filter(struct sock *sk); 473int sk_detach_filter(struct sock *sk);
474int __sk_detach_filter(struct sock *sk, bool locked);
475
472int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, 476int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
473 unsigned int len); 477 unsigned int len);
474 478
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a97194b34b..70e61b58baaf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f)
929/* Page cache limit. The filesystems should put that into their s_maxbytes 929/* Page cache limit. The filesystems should put that into their s_maxbytes
930 limits, otherwise bad things can happen in VM. */ 930 limits, otherwise bad things can happen in VM. */
931#if BITS_PER_LONG==32 931#if BITS_PER_LONG==32
932#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 932#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
933#elif BITS_PER_LONG==64 933#elif BITS_PER_LONG==64
934#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) 934#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
935#endif 935#endif
@@ -1241,6 +1241,16 @@ static inline struct inode *file_inode(const struct file *f)
1241 return f->f_inode; 1241 return f->f_inode;
1242} 1242}
1243 1243
1244static inline struct dentry *file_dentry(const struct file *file)
1245{
1246 struct dentry *dentry = file->f_path.dentry;
1247
1248 if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
1249 return dentry->d_op->d_real(dentry, file_inode(file));
1250 else
1251 return dentry;
1252}
1253
1244static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) 1254static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
1245{ 1255{
1246 return locks_lock_inode_wait(file_inode(filp), fl); 1256 return locks_lock_inode_wait(file_inode(filp), fl);
@@ -2067,7 +2077,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
2067/* /sys/fs */ 2077/* /sys/fs */
2068extern struct kobject *fs_kobj; 2078extern struct kobject *fs_kobj;
2069 2079
2070#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) 2080#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
2071 2081
2072#ifdef CONFIG_MANDATORY_FILE_LOCKING 2082#ifdef CONFIG_MANDATORY_FILE_LOCKING
2073extern int locks_mandatory_locked(struct file *); 2083extern int locks_mandatory_locked(struct file *);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 79b0ef6aaa14..7008623e24b1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
127 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) 127 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
128 return __pmd_trans_huge_lock(pmd, vma); 128 return __pmd_trans_huge_lock(pmd, vma);
129 else 129 else
130 return false; 130 return NULL;
131} 131}
132static inline int hpage_nr_pages(struct page *page) 132static inline int hpage_nr_pages(struct page *page)
133{ 133{
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a5c539fa5d2b..ef7a6ecd8584 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -195,9 +195,7 @@ struct iommu_ops {
195 /* Get the number of windows per domain */ 195 /* Get the number of windows per domain */
196 u32 (*domain_get_windows)(struct iommu_domain *domain); 196 u32 (*domain_get_windows)(struct iommu_domain *domain);
197 197
198#ifdef CONFIG_OF_IOMMU
199 int (*of_xlate)(struct device *dev, struct of_phandle_args *args); 198 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
200#endif
201 199
202 unsigned long pgsize_bitmap; 200 unsigned long pgsize_bitmap;
203 void *priv; 201 void *priv;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..ffcff53e3b2b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
623 * 623 *
624 * A page may belong to an inode's memory mapping. In this case, page->mapping 624 * A page may belong to an inode's memory mapping. In this case, page->mapping
625 * is the pointer to the inode, and page->index is the file offset of the page, 625 * is the pointer to the inode, and page->index is the file offset of the page,
626 * in units of PAGE_CACHE_SIZE. 626 * in units of PAGE_SIZE.
627 * 627 *
628 * If pagecache pages are not associated with an inode, they are said to be 628 * If pagecache pages are not associated with an inode, they are said to be
629 * anonymous pages. These may become associated with the swapcache, and in that 629 * anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
341 341
342 /* Information about our backing store: */ 342 /* Information about our backing store: */
343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE 343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
344 units, *not* PAGE_CACHE_SIZE */ 344 units */
345 struct file * vm_file; /* File we map to (can be NULL). */ 345 struct file * vm_file; /* File we map to (can be NULL). */
346 void * vm_private_data; /* was vm_pte (shared mem) */ 346 void * vm_private_data; /* was vm_pte (shared mem) */
347 347
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb0d5d09c2e4..8395308a2445 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2120,7 +2120,10 @@ struct napi_gro_cb {
2120 /* Used in foo-over-udp, set in udp[46]_gro_receive */ 2120 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2121 u8 is_ipv6:1; 2121 u8 is_ipv6:1;
2122 2122
2123 /* 7 bit hole */ 2123 /* Used in GRE, set in fou/gue_gro_receive */
2124 u8 is_fou:1;
2125
2126 /* 6 bit hole */
2124 2127
2125 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 2128 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2126 __wsum csum; 2129 __wsum csum;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 0e1f433cc4b7..f48b8a664b0f 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -234,6 +234,10 @@ struct ip_set {
234 spinlock_t lock; 234 spinlock_t lock;
235 /* References to the set */ 235 /* References to the set */
236 u32 ref; 236 u32 ref;
237 /* References to the set for netlink events like dump,
238 * ref can be swapped out by ip_set_swap
239 */
240 u32 ref_netlink;
237 /* The core set type */ 241 /* The core set type */
238 struct ip_set_type *type; 242 struct ip_set_type *type;
239 /* The type variant doing the real job */ 243 /* The type variant doing the real job */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
41 struct page *wb_page; /* page to read in/write out */ 41 struct page *wb_page; /* page to read in/write out */
42 struct nfs_open_context *wb_context; /* File state context info */ 42 struct nfs_open_context *wb_context; /* File state context info */
43 struct nfs_lock_context *wb_lock_context; /* lock context info */ 43 struct nfs_lock_context *wb_lock_context; /* lock context info */
44 pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ 44 pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
45 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 45 unsigned int wb_offset, /* Offset & ~PAGE_MASK */
46 wb_pgbase, /* Start of page data */ 46 wb_pgbase, /* Start of page data */
47 wb_bytes; /* Length of request */ 47 wb_bytes; /* Length of request */
48 struct kref wb_kref; /* reference count */ 48 struct kref wb_kref; /* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
184static inline 184static inline
185loff_t req_offset(struct nfs_page *req) 185loff_t req_offset(struct nfs_page *req)
186{ 186{
187 return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset; 187 return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
188} 188}
189 189
190#endif /* _LINUX_NFS_PAGE_H */ 190#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
331{ 331{
332 unsigned len = le16_to_cpu(dlen); 332 unsigned len = le16_to_cpu(dlen);
333 333
334#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 334#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
335 if (len == NILFS_MAX_REC_LEN) 335 if (len == NILFS_MAX_REC_LEN)
336 return 1 << 16; 336 return 1 << 16;
337#endif 337#endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
340 340
341static inline __le16 nilfs_rec_len_to_disk(unsigned len) 341static inline __le16 nilfs_rec_len_to_disk(unsigned len)
342{ 342{
343#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 343#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
344 if (len == (1 << 16)) 344 if (len == (1 << 16))
345 return cpu_to_le16(NILFS_MAX_REC_LEN); 345 return cpu_to_le16(NILFS_MAX_REC_LEN);
346 else if (len > (1 << 16)) 346 else if (len > (1 << 16))
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
86 (__force unsigned long)mask; 86 (__force unsigned long)mask;
87} 87}
88 88
89/*
90 * The page cache can be done in larger chunks than
91 * one page, because it allows for more efficient
92 * throughput (it can then be mapped into user
93 * space in smaller chunks for same flexibility).
94 *
95 * Or rather, it _will_ be done in larger chunks.
96 */
97#define PAGE_CACHE_SHIFT PAGE_SHIFT
98#define PAGE_CACHE_SIZE PAGE_SIZE
99#define PAGE_CACHE_MASK PAGE_MASK
100#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
101
102#define page_cache_get(page) get_page(page)
103#define page_cache_release(page) put_page(page)
104void release_pages(struct page **pages, int nr, bool cold); 89void release_pages(struct page **pages, int nr, bool cold);
105 90
106/* 91/*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
390 return page->index << compound_order(page); 375 return page->index << compound_order(page);
391 376
392 if (likely(!PageTransTail(page))) 377 if (likely(!PageTransTail(page)))
393 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 378 return page->index;
394 379
395 /* 380 /*
396 * We don't initialize ->index for tail pages: calculate based on 381 * We don't initialize ->index for tail pages: calculate based on
397 * head page 382 * head page
398 */ 383 */
399 pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 384 pgoff = compound_head(page)->index;
400 pgoff += page - compound_head(page); 385 pgoff += page - compound_head(page);
401 return pgoff; 386 return pgoff;
402} 387}
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
406 */ 391 */
407static inline loff_t page_offset(struct page *page) 392static inline loff_t page_offset(struct page *page)
408{ 393{
409 return ((loff_t)page->index) << PAGE_CACHE_SHIFT; 394 return ((loff_t)page->index) << PAGE_SHIFT;
410} 395}
411 396
412static inline loff_t page_file_offset(struct page *page) 397static inline loff_t page_file_offset(struct page *page)
413{ 398{
414 return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; 399 return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
415} 400}
416 401
417extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, 402extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
425 return linear_hugepage_index(vma, address); 410 return linear_hugepage_index(vma, address);
426 pgoff = (address - vma->vm_start) >> PAGE_SHIFT; 411 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
427 pgoff += vma->vm_pgoff; 412 pgoff += vma->vm_pgoff;
428 return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); 413 return pgoff;
429} 414}
430 415
431extern void __lock_page(struct page *page); 416extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
535/* 520/*
536 * Fault a userspace page into pagetables. Return non-zero on a fault. 521 * Fault a userspace page into pagetables. Return non-zero on a fault.
537 * 522 *
538 * This assumes that two userspace pages are always sufficient. That's 523 * This assumes that two userspace pages are always sufficient.
539 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
540 */ 524 */
541static inline int fault_in_pages_writeable(char __user *uaddr, int size) 525static inline int fault_in_pages_writeable(char __user *uaddr, int size)
542{ 526{
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
671 655
672static inline unsigned long dir_pages(struct inode *inode) 656static inline unsigned long dir_pages(struct inode *inode)
673{ 657{
674 return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> 658 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
675 PAGE_CACHE_SHIFT; 659 PAGE_SHIFT;
676} 660}
677 661
678#endif /* _LINUX_PAGEMAP_H */ 662#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 3ec5309e29f3..ac6d872ce067 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -42,6 +42,13 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
42 BUG(); 42 BUG();
43} 43}
44 44
45static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
46 size_t n)
47{
48 BUG();
49 return -EFAULT;
50}
51
45static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, 52static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
46 struct iov_iter *i) 53 struct iov_iter *i)
47{ 54{
@@ -66,14 +73,17 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
66#endif 73#endif
67 74
68/* 75/*
69 * Architectures that define ARCH_HAS_PMEM_API must provide 76 * memcpy_from_pmem - read from persistent memory with error handling
70 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), 77 * @dst: destination buffer
71 * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem() 78 * @src: source buffer
72 * and arch_has_wmb_pmem(). 79 * @size: transfer length
80 *
81 * Returns 0 on success negative error code on failure.
73 */ 82 */
74static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) 83static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
84 size_t size)
75{ 85{
76 memcpy(dst, (void __force const *) src, size); 86 return arch_memcpy_from_pmem(dst, src, size);
77} 87}
78 88
79static inline bool arch_has_pmem_api(void) 89static inline bool arch_has_pmem_api(void)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9dff190e6a0a..0450831d5a40 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -720,7 +720,7 @@ struct signal_struct {
720 struct task_cputime cputime_expires; 720 struct task_cputime cputime_expires;
721 721
722#ifdef CONFIG_NO_HZ_FULL 722#ifdef CONFIG_NO_HZ_FULL
723 unsigned long tick_dep_mask; 723 atomic_t tick_dep_mask;
724#endif 724#endif
725 725
726 struct list_head cpu_timers[3]; 726 struct list_head cpu_timers[3];
@@ -1549,7 +1549,7 @@ struct task_struct {
1549#endif 1549#endif
1550 1550
1551#ifdef CONFIG_NO_HZ_FULL 1551#ifdef CONFIG_NO_HZ_FULL
1552 unsigned long tick_dep_mask; 1552 atomic_t tick_dep_mask;
1553#endif 1553#endif
1554 unsigned long nvcsw, nivcsw; /* context switch counts */ 1554 unsigned long nvcsw, nivcsw; /* context switch counts */
1555 u64 start_time; /* monotonic time in nsec */ 1555 u64 start_time; /* monotonic time in nsec */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 4bcf5a61aada..e6bc30a42a74 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -108,7 +108,6 @@ struct stmmac_axi {
108}; 108};
109 109
110struct plat_stmmacenet_data { 110struct plat_stmmacenet_data {
111 char *phy_bus_name;
112 int bus_id; 111 int bus_id;
113 int phy_addr; 112 int phy_addr;
114 int interface; 113 int interface;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
129 * 129 *
130 * These happen to all be powers of 2, which is not strictly 130 * These happen to all be powers of 2, which is not strictly
131 * necessary but helps enforce the real limitation, which is 131 * necessary but helps enforce the real limitation, which is
132 * that they should be multiples of PAGE_CACHE_SIZE. 132 * that they should be multiples of PAGE_SIZE.
133 * 133 *
134 * For UDP transports, a block plus NFS,RPC, and UDP headers 134 * For UDP transports, a block plus NFS,RPC, and UDP headers
135 * has to fit into the IP datagram limit of 64K. The largest 135 * has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..2b83359c19ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,9 +433,9 @@ struct backing_dev_info;
433#define si_swapinfo(val) \ 433#define si_swapinfo(val) \
434 do { (val)->freeswap = (val)->totalswap = 0; } while (0) 434 do { (val)->freeswap = (val)->totalswap = 0; } while (0)
435/* only sparc can not include linux/pagemap.h in this file 435/* only sparc can not include linux/pagemap.h in this file
436 * so leave page_cache_release and release_pages undeclared... */ 436 * so leave put_page and release_pages undeclared... */
437#define free_page_and_swap_cache(page) \ 437#define free_page_and_swap_cache(page) \
438 page_cache_release(page) 438 put_page(page)
439#define free_pages_and_swap_cache(pages, nr) \ 439#define free_pages_and_swap_cache(pages, nr) \
440 release_pages((pages), (nr), false); 440 release_pages((pages), (nr), false);
441 441
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2a19fe111c78..03e322b30218 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -135,6 +135,7 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
135static inline void tc_action_net_exit(struct tc_action_net *tn) 135static inline void tc_action_net_exit(struct tc_action_net *tn)
136{ 136{
137 tcf_hashinfo_destroy(tn->ops, tn->hinfo); 137 tcf_hashinfo_destroy(tn->ops, tn->hinfo);
138 kfree(tn->hinfo);
138} 139}
139 140
140int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, 141int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0c09da34b67a..e385eb3076a1 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1001,6 +1001,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
1001 * flag indicates that the PN was verified for replay protection. 1001 * flag indicates that the PN was verified for replay protection.
1002 * Note that this flag is also currently only supported when a frame 1002 * Note that this flag is also currently only supported when a frame
1003 * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set) 1003 * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
1004 * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
1005 * de-duplication by itself.
1004 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on 1006 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
1005 * the frame. 1007 * the frame.
1006 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on 1008 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 65521cfdcade..03fb33efcae2 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -386,11 +386,9 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list)
386{ 386{
387 struct list_head *result = NULL; 387 struct list_head *result = NULL;
388 388
389 if (list->next != list) { 389 if (!list_empty(list)) {
390 result = list->next; 390 result = list->next;
391 list->next = result->next; 391 list_del_init(result);
392 list->next->prev = list;
393 INIT_LIST_HEAD(result);
394 } 392 }
395 return result; 393 return result;
396} 394}
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c067019ed12a..74d79bde7075 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -516,6 +516,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
516 return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0; 516 return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
517} 517}
518 518
519/**
520 * scsi_device_supports_vpd - test if a device supports VPD pages
521 * @sdev: the &struct scsi_device to test
522 *
523 * If the 'try_vpd_pages' flag is set it takes precedence.
524 * Otherwise we will assume VPD pages are supported if the
525 * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
526 */
527static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
528{
529 /* Attempt VPD inquiry if the device blacklist explicitly calls
530 * for it.
531 */
532 if (sdev->try_vpd_pages)
533 return 1;
534 /*
535 * Although VPD inquiries can go to SCSI-2 type devices,
536 * some USB ones crash on receiving them, and the pages
537 * we currently ask for are for SPC-3 and beyond
538 */
539 if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
540 return 1;
541 return 0;
542}
543
519#define MODULE_ALIAS_SCSI_DEVICE(type) \ 544#define MODULE_ALIAS_SCSI_DEVICE(type) \
520 MODULE_ALIAS("scsi:t-" __stringify(type) "*") 545 MODULE_ALIAS("scsi:t-" __stringify(type) "*")
521#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" 546#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 685a51aa98cc..8ff6d40a294f 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -76,6 +76,7 @@ struct target_core_fabric_ops {
76 struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, 76 struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
77 struct config_group *, const char *); 77 struct config_group *, const char *);
78 void (*fabric_drop_wwn)(struct se_wwn *); 78 void (*fabric_drop_wwn)(struct se_wwn *);
79 void (*add_wwn_groups)(struct se_wwn *);
79 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, 80 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
80 struct config_group *, const char *); 81 struct config_group *, const char *);
81 void (*fabric_drop_tpg)(struct se_portal_group *); 82 void (*fabric_drop_tpg)(struct se_portal_group *);
@@ -87,7 +88,6 @@ struct target_core_fabric_ops {
87 struct config_group *, const char *); 88 struct config_group *, const char *);
88 void (*fabric_drop_np)(struct se_tpg_np *); 89 void (*fabric_drop_np)(struct se_tpg_np *);
89 int (*fabric_init_nodeacl)(struct se_node_acl *, const char *); 90 int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
90 void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
91 91
92 struct configfs_attribute **tfc_discovery_attrs; 92 struct configfs_attribute **tfc_discovery_attrs;
93 struct configfs_attribute **tfc_wwn_attrs; 93 struct configfs_attribute **tfc_wwn_attrs;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 677807f29a1c..e90e82ad6875 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,7 +23,7 @@ struct map_lookup;
23struct extent_buffer; 23struct extent_buffer;
24struct btrfs_work; 24struct btrfs_work;
25struct __btrfs_workqueue; 25struct __btrfs_workqueue;
26struct btrfs_qgroup_operation; 26struct btrfs_qgroup_extent_record;
27 27
28#define show_ref_type(type) \ 28#define show_ref_type(type) \
29 __print_symbolic(type, \ 29 __print_symbolic(type, \
@@ -1231,6 +1231,93 @@ DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
1231 1231
1232 TP_ARGS(ref_root, reserved) 1232 TP_ARGS(ref_root, reserved)
1233); 1233);
1234
1235DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
1236 TP_PROTO(struct btrfs_qgroup_extent_record *rec),
1237
1238 TP_ARGS(rec),
1239
1240 TP_STRUCT__entry(
1241 __field( u64, bytenr )
1242 __field( u64, num_bytes )
1243 ),
1244
1245 TP_fast_assign(
1246 __entry->bytenr = rec->bytenr,
1247 __entry->num_bytes = rec->num_bytes;
1248 ),
1249
1250 TP_printk("bytenr = %llu, num_bytes = %llu",
1251 (unsigned long long)__entry->bytenr,
1252 (unsigned long long)__entry->num_bytes)
1253);
1254
1255DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
1256
1257 TP_PROTO(struct btrfs_qgroup_extent_record *rec),
1258
1259 TP_ARGS(rec)
1260);
1261
1262DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
1263
1264 TP_PROTO(struct btrfs_qgroup_extent_record *rec),
1265
1266 TP_ARGS(rec)
1267);
1268
1269TRACE_EVENT(btrfs_qgroup_account_extent,
1270
1271 TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
1272
1273 TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
1274
1275 TP_STRUCT__entry(
1276 __field( u64, bytenr )
1277 __field( u64, num_bytes )
1278 __field( u64, nr_old_roots )
1279 __field( u64, nr_new_roots )
1280 ),
1281
1282 TP_fast_assign(
1283 __entry->bytenr = bytenr;
1284 __entry->num_bytes = num_bytes;
1285 __entry->nr_old_roots = nr_old_roots;
1286 __entry->nr_new_roots = nr_new_roots;
1287 ),
1288
1289 TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
1290 "nr_new_roots = %llu",
1291 __entry->bytenr,
1292 __entry->num_bytes,
1293 __entry->nr_old_roots,
1294 __entry->nr_new_roots)
1295);
1296
1297TRACE_EVENT(qgroup_update_counters,
1298
1299 TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
1300
1301 TP_ARGS(qgid, cur_old_count, cur_new_count),
1302
1303 TP_STRUCT__entry(
1304 __field( u64, qgid )
1305 __field( u64, cur_old_count )
1306 __field( u64, cur_new_count )
1307 ),
1308
1309 TP_fast_assign(
1310 __entry->qgid = qgid;
1311 __entry->cur_old_count = cur_old_count;
1312 __entry->cur_new_count = cur_new_count;
1313 ),
1314
1315 TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
1316 __entry->qgid,
1317 __entry->cur_old_count,
1318 __entry->cur_new_count)
1319);
1320
1234#endif /* _TRACE_BTRFS_H */ 1321#endif /* _TRACE_BTRFS_H */
1235 1322
1236/* This part must be outside protection */ 1323/* This part must be outside protection */
diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h
index 6fb644029c80..8738a78e6bf4 100644
--- a/include/trace/events/page_isolation.h
+++ b/include/trace/events/page_isolation.h
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,
29 29
30 TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s", 30 TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
31 __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn, 31 __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
32 __entry->end_pfn == __entry->fin_pfn ? "success" : "fail") 32 __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
33); 33);
34 34
35#endif /* _TRACE_PAGE_ISOLATION_H */ 35#endif /* _TRACE_PAGE_ISOLATION_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 924f537183fd..23917bb47bf3 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -375,6 +375,7 @@ struct bpf_tunnel_key {
375 }; 375 };
376 __u8 tunnel_tos; 376 __u8 tunnel_tos;
377 __u8 tunnel_ttl; 377 __u8 tunnel_ttl;
378 __u16 tunnel_ext;
378 __u32 tunnel_label; 379 __u32 tunnel_label;
379}; 380};
380 381
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index aa9f10428743..621fa8ac4425 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1 +1,5 @@
1#include <linux/compiler.h> 1#include <linux/compiler.h>
2
3#ifndef __always_inline
4#define __always_inline inline
5#endif
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 06d6c6228a7a..d5ce71607972 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -899,7 +899,7 @@ struct usb_ssp_cap_descriptor {
899 __le32 bmAttributes; 899 __le32 bmAttributes;
900#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */ 900#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */
901#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */ 901#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */
902 __u16 wFunctionalitySupport; 902 __le16 wFunctionalitySupport;
903#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf) 903#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf)
904#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8) 904#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
905#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12) 905#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index c18264df9504..4cb65bbfa654 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -40,6 +40,8 @@
40#define VIRTIO_CONFIG_S_DRIVER_OK 4 40#define VIRTIO_CONFIG_S_DRIVER_OK 4
41/* Driver has finished configuring features */ 41/* Driver has finished configuring features */
42#define VIRTIO_CONFIG_S_FEATURES_OK 8 42#define VIRTIO_CONFIG_S_FEATURES_OK 8
43/* Device entered invalid state, driver must reset it */
44#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40
43/* We've given up on this device. */ 45/* We've given up on this device. */
44#define VIRTIO_CONFIG_S_FAILED 0x80 46#define VIRTIO_CONFIG_S_FAILED 0x80
45 47
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index eeba75395f7d..ad66589f2ae6 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -194,8 +194,9 @@ int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
194int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width); 194int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
195void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format); 195void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
196void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, 196void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
197 u32 pixel_format, int stride, 197 unsigned int uv_stride,
198 int u_offset, int v_offset); 198 unsigned int u_offset,
199 unsigned int v_offset);
199void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, 200void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
200 u32 pixel_format, int stride, int height); 201 u32 pixel_format, int stride, int height);
201int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc); 202int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
@@ -236,7 +237,7 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
236int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc, 237int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
237 unsigned long bandwidth_mbs, int burstsize); 238 unsigned long bandwidth_mbs, int burstsize);
238void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc); 239void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
239int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width); 240void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
240struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel); 241struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
241void ipu_dmfc_put(struct dmfc_channel *dmfc); 242void ipu_dmfc_put(struct dmfc_channel *dmfc);
242 243
diff --git a/init/Kconfig b/init/Kconfig
index e0d26162432e..0dfd09d54c65 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -272,8 +272,9 @@ config CROSS_MEMORY_ATTACH
272 See the man page for more details. 272 See the man page for more details.
273 273
274config FHANDLE 274config FHANDLE
275 bool "open by fhandle syscalls" 275 bool "open by fhandle syscalls" if EXPERT
276 select EXPORTFS 276 select EXPORTFS
277 default y
277 help 278 help
278 If you say Y here, a user level program will be able to map 279 If you say Y here, a user level program will be able to map
279 file names to handle and then later use the handle for 280 file names to handle and then later use the handle for
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 781c1399c6a3..ade739f67f1d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -307,8 +307,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
307 struct inode *inode; 307 struct inode *inode;
308 struct ipc_namespace *ns = data; 308 struct ipc_namespace *ns = data;
309 309
310 sb->s_blocksize = PAGE_CACHE_SIZE; 310 sb->s_blocksize = PAGE_SIZE;
311 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 311 sb->s_blocksize_bits = PAGE_SHIFT;
312 sb->s_magic = MQUEUE_MAGIC; 312 sb->s_magic = MQUEUE_MAGIC;
313 sb->s_op = &mqueue_super_ops; 313 sb->s_op = &mqueue_super_ops;
314 314
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2a2efe1bc76c..adc5e4bd74f8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -137,11 +137,13 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
137 "map_type:\t%u\n" 137 "map_type:\t%u\n"
138 "key_size:\t%u\n" 138 "key_size:\t%u\n"
139 "value_size:\t%u\n" 139 "value_size:\t%u\n"
140 "max_entries:\t%u\n", 140 "max_entries:\t%u\n"
141 "map_flags:\t%#x\n",
141 map->map_type, 142 map->map_type,
142 map->key_size, 143 map->key_size,
143 map->value_size, 144 map->value_size,
144 map->max_entries); 145 map->max_entries,
146 map->map_flags);
145} 147}
146#endif 148#endif
147 149
diff --git a/kernel/events/core.c b/kernel/events/core.c
index de24fbce5277..52bedc5a5aaa 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2417,14 +2417,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
2417 cpuctx->task_ctx = NULL; 2417 cpuctx->task_ctx = NULL;
2418 } 2418 }
2419 2419
2420 is_active ^= ctx->is_active; /* changed bits */ 2420 /*
2421 2421 * Always update time if it was set; not only when it changes.
2422 * Otherwise we can 'forget' to update time for any but the last
2423 * context we sched out. For example:
2424 *
2425 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2426 * ctx_sched_out(.event_type = EVENT_PINNED)
2427 *
2428 * would only update time for the pinned events.
2429 */
2422 if (is_active & EVENT_TIME) { 2430 if (is_active & EVENT_TIME) {
2423 /* update (and stop) ctx time */ 2431 /* update (and stop) ctx time */
2424 update_context_time(ctx); 2432 update_context_time(ctx);
2425 update_cgrp_time_from_cpuctx(cpuctx); 2433 update_cgrp_time_from_cpuctx(cpuctx);
2426 } 2434 }
2427 2435
2436 is_active ^= ctx->is_active; /* changed bits */
2437
2428 if (!ctx->nr_active || !(is_active & EVENT_ALL)) 2438 if (!ctx->nr_active || !(is_active & EVENT_ALL))
2429 return; 2439 return;
2430 2440
@@ -8532,6 +8542,7 @@ SYSCALL_DEFINE5(perf_event_open,
8532 f_flags); 8542 f_flags);
8533 if (IS_ERR(event_file)) { 8543 if (IS_ERR(event_file)) {
8534 err = PTR_ERR(event_file); 8544 err = PTR_ERR(event_file);
8545 event_file = NULL;
8535 goto err_context; 8546 goto err_context;
8536 } 8547 }
8537 8548
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 220fc17b9718..7edc95edfaee 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -321,7 +321,7 @@ retry:
321 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); 321 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
322 322
323 ret = __replace_page(vma, vaddr, old_page, new_page); 323 ret = __replace_page(vma, vaddr, old_page, new_page);
324 page_cache_release(new_page); 324 put_page(new_page);
325put_old: 325put_old:
326 put_page(old_page); 326 put_page(old_page);
327 327
@@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
539 * see uprobe_register(). 539 * see uprobe_register().
540 */ 540 */
541 if (mapping->a_ops->readpage) 541 if (mapping->a_ops->readpage)
542 page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); 542 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
543 else 543 else
544 page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT); 544 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
545 if (IS_ERR(page)) 545 if (IS_ERR(page))
546 return PTR_ERR(page); 546 return PTR_ERR(page);
547 547
548 copy_from_page(page, offset, insn, nbytes); 548 copy_from_page(page, offset, insn, nbytes);
549 page_cache_release(page); 549 put_page(page);
550 550
551 return 0; 551 return 0;
552} 552}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 53ab2f85d77e..2324ba5310db 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2000,6 +2000,77 @@ static inline int get_first_held_lock(struct task_struct *curr,
2000} 2000}
2001 2001
2002/* 2002/*
2003 * Returns the next chain_key iteration
2004 */
2005static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2006{
2007 u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2008
2009 printk(" class_idx:%d -> chain_key:%016Lx",
2010 class_idx,
2011 (unsigned long long)new_chain_key);
2012 return new_chain_key;
2013}
2014
2015static void
2016print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2017{
2018 struct held_lock *hlock;
2019 u64 chain_key = 0;
2020 int depth = curr->lockdep_depth;
2021 int i;
2022
2023 printk("depth: %u\n", depth + 1);
2024 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2025 hlock = curr->held_locks + i;
2026 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2027
2028 print_lock(hlock);
2029 }
2030
2031 print_chain_key_iteration(hlock_next->class_idx, chain_key);
2032 print_lock(hlock_next);
2033}
2034
2035static void print_chain_keys_chain(struct lock_chain *chain)
2036{
2037 int i;
2038 u64 chain_key = 0;
2039 int class_id;
2040
2041 printk("depth: %u\n", chain->depth);
2042 for (i = 0; i < chain->depth; i++) {
2043 class_id = chain_hlocks[chain->base + i];
2044 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2045
2046 print_lock_name(lock_classes + class_id);
2047 printk("\n");
2048 }
2049}
2050
2051static void print_collision(struct task_struct *curr,
2052 struct held_lock *hlock_next,
2053 struct lock_chain *chain)
2054{
2055 printk("\n");
2056 printk("======================\n");
2057 printk("[chain_key collision ]\n");
2058 print_kernel_ident();
2059 printk("----------------------\n");
2060 printk("%s/%d: ", current->comm, task_pid_nr(current));
2061 printk("Hash chain already cached but the contents don't match!\n");
2062
2063 printk("Held locks:");
2064 print_chain_keys_held_locks(curr, hlock_next);
2065
2066 printk("Locks in cached chain:");
2067 print_chain_keys_chain(chain);
2068
2069 printk("\nstack backtrace:\n");
2070 dump_stack();
2071}
2072
2073/*
2003 * Checks whether the chain and the current held locks are consistent 2074 * Checks whether the chain and the current held locks are consistent
2004 * in depth and also in content. If they are not it most likely means 2075 * in depth and also in content. If they are not it most likely means
2005 * that there was a collision during the calculation of the chain_key. 2076 * that there was a collision during the calculation of the chain_key.
@@ -2014,14 +2085,18 @@ static int check_no_collision(struct task_struct *curr,
2014 2085
2015 i = get_first_held_lock(curr, hlock); 2086 i = get_first_held_lock(curr, hlock);
2016 2087
2017 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) 2088 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2089 print_collision(curr, hlock, chain);
2018 return 0; 2090 return 0;
2091 }
2019 2092
2020 for (j = 0; j < chain->depth - 1; j++, i++) { 2093 for (j = 0; j < chain->depth - 1; j++, i++) {
2021 id = curr->held_locks[i].class_idx - 1; 2094 id = curr->held_locks[i].class_idx - 1;
2022 2095
2023 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) 2096 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2097 print_collision(curr, hlock, chain);
2024 return 0; 2098 return 0;
2099 }
2025 } 2100 }
2026#endif 2101#endif
2027 return 1; 2102 return 1;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465eeab8b3..8b489fcac37b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
321} 321}
322#endif /* CONFIG_SCHED_HRTICK */ 322#endif /* CONFIG_SCHED_HRTICK */
323 323
324/*
325 * cmpxchg based fetch_or, macro so it works for different integer types
326 */
327#define fetch_or(ptr, mask) \
328 ({ \
329 typeof(ptr) _ptr = (ptr); \
330 typeof(mask) _mask = (mask); \
331 typeof(*_ptr) _old, _val = *_ptr; \
332 \
333 for (;;) { \
334 _old = cmpxchg(_ptr, _val, _val | _mask); \
335 if (_old == _val) \
336 break; \
337 _val = _old; \
338 } \
339 _old; \
340})
341
324#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 342#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
325/* 343/*
326 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 344 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 084b79f5917e..58e3310c9b21 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
157cpumask_var_t tick_nohz_full_mask; 157cpumask_var_t tick_nohz_full_mask;
158cpumask_var_t housekeeping_mask; 158cpumask_var_t housekeeping_mask;
159bool tick_nohz_full_running; 159bool tick_nohz_full_running;
160static unsigned long tick_dep_mask; 160static atomic_t tick_dep_mask;
161 161
162static void trace_tick_dependency(unsigned long dep) 162static bool check_tick_dependency(atomic_t *dep)
163{ 163{
164 if (dep & TICK_DEP_MASK_POSIX_TIMER) { 164 int val = atomic_read(dep);
165
166 if (val & TICK_DEP_MASK_POSIX_TIMER) {
165 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); 167 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
166 return; 168 return true;
167 } 169 }
168 170
169 if (dep & TICK_DEP_MASK_PERF_EVENTS) { 171 if (val & TICK_DEP_MASK_PERF_EVENTS) {
170 trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); 172 trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
171 return; 173 return true;
172 } 174 }
173 175
174 if (dep & TICK_DEP_MASK_SCHED) { 176 if (val & TICK_DEP_MASK_SCHED) {
175 trace_tick_stop(0, TICK_DEP_MASK_SCHED); 177 trace_tick_stop(0, TICK_DEP_MASK_SCHED);
176 return; 178 return true;
177 } 179 }
178 180
179 if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE) 181 if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
180 trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); 182 trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
183 return true;
184 }
185
186 return false;
181} 187}
182 188
183static bool can_stop_full_tick(struct tick_sched *ts) 189static bool can_stop_full_tick(struct tick_sched *ts)
184{ 190{
185 WARN_ON_ONCE(!irqs_disabled()); 191 WARN_ON_ONCE(!irqs_disabled());
186 192
187 if (tick_dep_mask) { 193 if (check_tick_dependency(&tick_dep_mask))
188 trace_tick_dependency(tick_dep_mask);
189 return false; 194 return false;
190 }
191 195
192 if (ts->tick_dep_mask) { 196 if (check_tick_dependency(&ts->tick_dep_mask))
193 trace_tick_dependency(ts->tick_dep_mask);
194 return false; 197 return false;
195 }
196 198
197 if (current->tick_dep_mask) { 199 if (check_tick_dependency(&current->tick_dep_mask))
198 trace_tick_dependency(current->tick_dep_mask);
199 return false; 200 return false;
200 }
201 201
202 if (current->signal->tick_dep_mask) { 202 if (check_tick_dependency(&current->signal->tick_dep_mask))
203 trace_tick_dependency(current->signal->tick_dep_mask);
204 return false; 203 return false;
205 }
206 204
207 return true; 205 return true;
208} 206}
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
259 preempt_enable(); 257 preempt_enable();
260} 258}
261 259
262static void tick_nohz_dep_set_all(unsigned long *dep, 260static void tick_nohz_dep_set_all(atomic_t *dep,
263 enum tick_dep_bits bit) 261 enum tick_dep_bits bit)
264{ 262{
265 unsigned long prev; 263 int prev;
266 264
267 prev = fetch_or(dep, BIT_MASK(bit)); 265 prev = atomic_fetch_or(dep, BIT(bit));
268 if (!prev) 266 if (!prev)
269 tick_nohz_full_kick_all(); 267 tick_nohz_full_kick_all();
270} 268}
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
280 278
281void tick_nohz_dep_clear(enum tick_dep_bits bit) 279void tick_nohz_dep_clear(enum tick_dep_bits bit)
282{ 280{
283 clear_bit(bit, &tick_dep_mask); 281 atomic_andnot(BIT(bit), &tick_dep_mask);
284} 282}
285 283
286/* 284/*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
289 */ 287 */
290void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) 288void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
291{ 289{
292 unsigned long prev; 290 int prev;
293 struct tick_sched *ts; 291 struct tick_sched *ts;
294 292
295 ts = per_cpu_ptr(&tick_cpu_sched, cpu); 293 ts = per_cpu_ptr(&tick_cpu_sched, cpu);
296 294
297 prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit)); 295 prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
298 if (!prev) { 296 if (!prev) {
299 preempt_disable(); 297 preempt_disable();
300 /* Perf needs local kick that is NMI safe */ 298 /* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
313{ 311{
314 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); 312 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
315 313
316 clear_bit(bit, &ts->tick_dep_mask); 314 atomic_andnot(BIT(bit), &ts->tick_dep_mask);
317} 315}
318 316
319/* 317/*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
331 329
332void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) 330void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
333{ 331{
334 clear_bit(bit, &tsk->tick_dep_mask); 332 atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
335} 333}
336 334
337/* 335/*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
345 343
346void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) 344void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
347{ 345{
348 clear_bit(bit, &sig->tick_dep_mask); 346 atomic_andnot(BIT(bit), &sig->tick_dep_mask);
349} 347}
350 348
351/* 349/*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
366 ts = this_cpu_ptr(&tick_cpu_sched); 364 ts = this_cpu_ptr(&tick_cpu_sched);
367 365
368 if (ts->tick_stopped) { 366 if (ts->tick_stopped) {
369 if (current->tick_dep_mask || current->signal->tick_dep_mask) 367 if (atomic_read(&current->tick_dep_mask) ||
368 atomic_read(&current->signal->tick_dep_mask))
370 tick_nohz_full_kick(); 369 tick_nohz_full_kick();
371 } 370 }
372out: 371out:
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index eb4e32566a83..bf38226e5c17 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -60,7 +60,7 @@ struct tick_sched {
60 u64 next_timer; 60 u64 next_timer;
61 ktime_t idle_expires; 61 ktime_t idle_expires;
62 int do_timer_last; 62 int do_timer_last;
63 unsigned long tick_dep_mask; 63 atomic_t tick_dep_mask;
64}; 64};
65 65
66extern struct tick_sched *tick_get_tick_sched(int cpu); 66extern struct tick_sched *tick_get_tick_sched(int cpu);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 27a7a26b1ece..8f22fbedc3a6 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2444,6 +2444,22 @@ static struct bpf_test tests[] = {
2444 { { 0, 4294967295U } }, 2444 { { 0, 4294967295U } },
2445 }, 2445 },
2446 { 2446 {
2447 "ALU_ADD_X: 2 + 4294967294 = 0",
2448 .u.insns_int = {
2449 BPF_LD_IMM64(R0, 2),
2450 BPF_LD_IMM64(R1, 4294967294U),
2451 BPF_ALU32_REG(BPF_ADD, R0, R1),
2452 BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
2453 BPF_ALU32_IMM(BPF_MOV, R0, 0),
2454 BPF_EXIT_INSN(),
2455 BPF_ALU32_IMM(BPF_MOV, R0, 1),
2456 BPF_EXIT_INSN(),
2457 },
2458 INTERNAL,
2459 { },
2460 { { 0, 1 } },
2461 },
2462 {
2447 "ALU64_ADD_X: 1 + 2 = 3", 2463 "ALU64_ADD_X: 1 + 2 = 3",
2448 .u.insns_int = { 2464 .u.insns_int = {
2449 BPF_LD_IMM64(R0, 1), 2465 BPF_LD_IMM64(R0, 1),
@@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = {
2467 { }, 2483 { },
2468 { { 0, 4294967295U } }, 2484 { { 0, 4294967295U } },
2469 }, 2485 },
2486 {
2487 "ALU64_ADD_X: 2 + 4294967294 = 4294967296",
2488 .u.insns_int = {
2489 BPF_LD_IMM64(R0, 2),
2490 BPF_LD_IMM64(R1, 4294967294U),
2491 BPF_LD_IMM64(R2, 4294967296ULL),
2492 BPF_ALU64_REG(BPF_ADD, R0, R1),
2493 BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
2494 BPF_MOV32_IMM(R0, 0),
2495 BPF_EXIT_INSN(),
2496 BPF_MOV32_IMM(R0, 1),
2497 BPF_EXIT_INSN(),
2498 },
2499 INTERNAL,
2500 { },
2501 { { 0, 1 } },
2502 },
2470 /* BPF_ALU | BPF_ADD | BPF_K */ 2503 /* BPF_ALU | BPF_ADD | BPF_K */
2471 { 2504 {
2472 "ALU_ADD_K: 1 + 2 = 3", 2505 "ALU_ADD_K: 1 + 2 = 3",
@@ -2502,6 +2535,21 @@ static struct bpf_test tests[] = {
2502 { { 0, 4294967295U } }, 2535 { { 0, 4294967295U } },
2503 }, 2536 },
2504 { 2537 {
2538 "ALU_ADD_K: 4294967294 + 2 = 0",
2539 .u.insns_int = {
2540 BPF_LD_IMM64(R0, 4294967294U),
2541 BPF_ALU32_IMM(BPF_ADD, R0, 2),
2542 BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
2543 BPF_ALU32_IMM(BPF_MOV, R0, 0),
2544 BPF_EXIT_INSN(),
2545 BPF_ALU32_IMM(BPF_MOV, R0, 1),
2546 BPF_EXIT_INSN(),
2547 },
2548 INTERNAL,
2549 { },
2550 { { 0, 1 } },
2551 },
2552 {
2505 "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", 2553 "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
2506 .u.insns_int = { 2554 .u.insns_int = {
2507 BPF_LD_IMM64(R2, 0x0), 2555 BPF_LD_IMM64(R2, 0x0),
@@ -2518,6 +2566,70 @@ static struct bpf_test tests[] = {
2518 { { 0, 0x1 } }, 2566 { { 0, 0x1 } },
2519 }, 2567 },
2520 { 2568 {
2569 "ALU_ADD_K: 0 + 0xffff = 0xffff",
2570 .u.insns_int = {
2571 BPF_LD_IMM64(R2, 0x0),
2572 BPF_LD_IMM64(R3, 0xffff),
2573 BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
2574 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2575 BPF_MOV32_IMM(R0, 2),
2576 BPF_EXIT_INSN(),
2577 BPF_MOV32_IMM(R0, 1),
2578 BPF_EXIT_INSN(),
2579 },
2580 INTERNAL,
2581 { },
2582 { { 0, 0x1 } },
2583 },
2584 {
2585 "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
2586 .u.insns_int = {
2587 BPF_LD_IMM64(R2, 0x0),
2588 BPF_LD_IMM64(R3, 0x7fffffff),
2589 BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
2590 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2591 BPF_MOV32_IMM(R0, 2),
2592 BPF_EXIT_INSN(),
2593 BPF_MOV32_IMM(R0, 1),
2594 BPF_EXIT_INSN(),
2595 },
2596 INTERNAL,
2597 { },
2598 { { 0, 0x1 } },
2599 },
2600 {
2601 "ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
2602 .u.insns_int = {
2603 BPF_LD_IMM64(R2, 0x0),
2604 BPF_LD_IMM64(R3, 0x80000000),
2605 BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
2606 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2607 BPF_MOV32_IMM(R0, 2),
2608 BPF_EXIT_INSN(),
2609 BPF_MOV32_IMM(R0, 1),
2610 BPF_EXIT_INSN(),
2611 },
2612 INTERNAL,
2613 { },
2614 { { 0, 0x1 } },
2615 },
2616 {
2617 "ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
2618 .u.insns_int = {
2619 BPF_LD_IMM64(R2, 0x0),
2620 BPF_LD_IMM64(R3, 0x80008000),
2621 BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
2622 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2623 BPF_MOV32_IMM(R0, 2),
2624 BPF_EXIT_INSN(),
2625 BPF_MOV32_IMM(R0, 1),
2626 BPF_EXIT_INSN(),
2627 },
2628 INTERNAL,
2629 { },
2630 { { 0, 0x1 } },
2631 },
2632 {
2521 "ALU64_ADD_K: 1 + 2 = 3", 2633 "ALU64_ADD_K: 1 + 2 = 3",
2522 .u.insns_int = { 2634 .u.insns_int = {
2523 BPF_LD_IMM64(R0, 1), 2635 BPF_LD_IMM64(R0, 1),
@@ -2551,6 +2663,22 @@ static struct bpf_test tests[] = {
2551 { { 0, 2147483647 } }, 2663 { { 0, 2147483647 } },
2552 }, 2664 },
2553 { 2665 {
2666 "ALU64_ADD_K: 4294967294 + 2 = 4294967296",
2667 .u.insns_int = {
2668 BPF_LD_IMM64(R0, 4294967294U),
2669 BPF_LD_IMM64(R1, 4294967296ULL),
2670 BPF_ALU64_IMM(BPF_ADD, R0, 2),
2671 BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
2672 BPF_ALU32_IMM(BPF_MOV, R0, 0),
2673 BPF_EXIT_INSN(),
2674 BPF_ALU32_IMM(BPF_MOV, R0, 1),
2675 BPF_EXIT_INSN(),
2676 },
2677 INTERNAL,
2678 { },
2679 { { 0, 1 } },
2680 },
2681 {
2554 "ALU64_ADD_K: 2147483646 + -2147483647 = -1", 2682 "ALU64_ADD_K: 2147483646 + -2147483647 = -1",
2555 .u.insns_int = { 2683 .u.insns_int = {
2556 BPF_LD_IMM64(R0, 2147483646), 2684 BPF_LD_IMM64(R0, 2147483646),
@@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = {
2593 { }, 2721 { },
2594 { { 0, 0x1 } }, 2722 { { 0, 0x1 } },
2595 }, 2723 },
2724 {
2725 "ALU64_ADD_K: 0 + 0xffff = 0xffff",
2726 .u.insns_int = {
2727 BPF_LD_IMM64(R2, 0x0),
2728 BPF_LD_IMM64(R3, 0xffff),
2729 BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
2730 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2731 BPF_MOV32_IMM(R0, 2),
2732 BPF_EXIT_INSN(),
2733 BPF_MOV32_IMM(R0, 1),
2734 BPF_EXIT_INSN(),
2735 },
2736 INTERNAL,
2737 { },
2738 { { 0, 0x1 } },
2739 },
2740 {
2741 "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
2742 .u.insns_int = {
2743 BPF_LD_IMM64(R2, 0x0),
2744 BPF_LD_IMM64(R3, 0x7fffffff),
2745 BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
2746 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2747 BPF_MOV32_IMM(R0, 2),
2748 BPF_EXIT_INSN(),
2749 BPF_MOV32_IMM(R0, 1),
2750 BPF_EXIT_INSN(),
2751 },
2752 INTERNAL,
2753 { },
2754 { { 0, 0x1 } },
2755 },
2756 {
2757 "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
2758 .u.insns_int = {
2759 BPF_LD_IMM64(R2, 0x0),
2760 BPF_LD_IMM64(R3, 0xffffffff80000000LL),
2761 BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
2762 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2763 BPF_MOV32_IMM(R0, 2),
2764 BPF_EXIT_INSN(),
2765 BPF_MOV32_IMM(R0, 1),
2766 BPF_EXIT_INSN(),
2767 },
2768 INTERNAL,
2769 { },
2770 { { 0, 0x1 } },
2771 },
2772 {
2773 "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
2774 .u.insns_int = {
2775 BPF_LD_IMM64(R2, 0x0),
2776 BPF_LD_IMM64(R3, 0xffffffff80008000LL),
2777 BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
2778 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2779 BPF_MOV32_IMM(R0, 2),
2780 BPF_EXIT_INSN(),
2781 BPF_MOV32_IMM(R0, 1),
2782 BPF_EXIT_INSN(),
2783 },
2784 INTERNAL,
2785 { },
2786 { { 0, 0x1 } },
2787 },
2596 /* BPF_ALU | BPF_SUB | BPF_X */ 2788 /* BPF_ALU | BPF_SUB | BPF_X */
2597 { 2789 {
2598 "ALU_SUB_X: 3 - 1 = 2", 2790 "ALU_SUB_X: 3 - 1 = 2",
@@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = {
4222 { }, 4414 { },
4223 { { 0, 1 } }, 4415 { { 0, 1 } },
4224 }, 4416 },
4417 {
4418 "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
4419 .u.insns_int = {
4420 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4421 BPF_LD_IMM64(R1, -1),
4422 BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
4423 BPF_EXIT_INSN(),
4424 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4425 BPF_EXIT_INSN(),
4426 },
4427 INTERNAL,
4428 { },
4429 { { 0, 1 } },
4430 },
4225 /* BPF_JMP | BPF_JGE | BPF_K */ 4431 /* BPF_JMP | BPF_JGE | BPF_K */
4226 { 4432 {
4227 "JMP_JGE_K: if (3 >= 2) return 1", 4433 "JMP_JGE_K: if (3 >= 2) return 1",
@@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = {
4303 .u.insns_int = { 4509 .u.insns_int = {
4304 BPF_ALU32_IMM(BPF_MOV, R0, 0), 4510 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4305 BPF_LD_IMM64(R1, 3), 4511 BPF_LD_IMM64(R1, 3),
4306 BPF_JMP_IMM(BPF_JNE, R1, 2, 1), 4512 BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
4307 BPF_EXIT_INSN(), 4513 BPF_EXIT_INSN(),
4308 BPF_ALU32_IMM(BPF_MOV, R0, 1), 4514 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4309 BPF_EXIT_INSN(), 4515 BPF_EXIT_INSN(),
@@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = {
4317 .u.insns_int = { 4523 .u.insns_int = {
4318 BPF_ALU32_IMM(BPF_MOV, R0, 0), 4524 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4319 BPF_LD_IMM64(R1, 3), 4525 BPF_LD_IMM64(R1, 3),
4320 BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1), 4526 BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
4321 BPF_EXIT_INSN(), 4527 BPF_EXIT_INSN(),
4322 BPF_ALU32_IMM(BPF_MOV, R0, 1), 4528 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4323 BPF_EXIT_INSN(), 4529 BPF_EXIT_INSN(),
@@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = {
4404 { }, 4610 { },
4405 { { 0, 1 } }, 4611 { { 0, 1 } },
4406 }, 4612 },
4613 {
4614 "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
4615 .u.insns_int = {
4616 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4617 BPF_LD_IMM64(R1, -1),
4618 BPF_LD_IMM64(R2, 1),
4619 BPF_JMP_REG(BPF_JGT, R1, R2, 1),
4620 BPF_EXIT_INSN(),
4621 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4622 BPF_EXIT_INSN(),
4623 },
4624 INTERNAL,
4625 { },
4626 { { 0, 1 } },
4627 },
4407 /* BPF_JMP | BPF_JGE | BPF_X */ 4628 /* BPF_JMP | BPF_JGE | BPF_X */
4408 { 4629 {
4409 "JMP_JGE_X: if (3 >= 2) return 1", 4630 "JMP_JGE_X: if (3 >= 2) return 1",
@@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = {
4474 BPF_ALU32_IMM(BPF_MOV, R0, 0), 4695 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4475 BPF_LD_IMM64(R1, 3), 4696 BPF_LD_IMM64(R1, 3),
4476 BPF_LD_IMM64(R2, 2), 4697 BPF_LD_IMM64(R2, 2),
4477 BPF_JMP_REG(BPF_JNE, R1, R2, 1), 4698 BPF_JMP_REG(BPF_JSET, R1, R2, 1),
4478 BPF_EXIT_INSN(), 4699 BPF_EXIT_INSN(),
4479 BPF_ALU32_IMM(BPF_MOV, R0, 1), 4700 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4480 BPF_EXIT_INSN(), 4701 BPF_EXIT_INSN(),
@@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = {
4489 BPF_ALU32_IMM(BPF_MOV, R0, 0), 4710 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4490 BPF_LD_IMM64(R1, 3), 4711 BPF_LD_IMM64(R1, 3),
4491 BPF_LD_IMM64(R2, 0xffffffff), 4712 BPF_LD_IMM64(R2, 0xffffffff),
4492 BPF_JMP_REG(BPF_JNE, R1, R2, 1), 4713 BPF_JMP_REG(BPF_JSET, R1, R2, 1),
4493 BPF_EXIT_INSN(), 4714 BPF_EXIT_INSN(),
4494 BPF_ALU32_IMM(BPF_MOV, R0, 1), 4715 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4495 BPF_EXIT_INSN(), 4716 BPF_EXIT_INSN(),
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8a5bc66b0c0..b8024fa7101d 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -97,8 +97,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
97 break; 97 break;
98 case POSIX_FADV_WILLNEED: 98 case POSIX_FADV_WILLNEED:
99 /* First and last PARTIAL page! */ 99 /* First and last PARTIAL page! */
100 start_index = offset >> PAGE_CACHE_SHIFT; 100 start_index = offset >> PAGE_SHIFT;
101 end_index = endbyte >> PAGE_CACHE_SHIFT; 101 end_index = endbyte >> PAGE_SHIFT;
102 102
103 /* Careful about overflow on the "+1" */ 103 /* Careful about overflow on the "+1" */
104 nrpages = end_index - start_index + 1; 104 nrpages = end_index - start_index + 1;
@@ -124,8 +124,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
124 * preserved on the expectation that it is better to preserve 124 * preserved on the expectation that it is better to preserve
125 * needed memory than to discard unneeded memory. 125 * needed memory than to discard unneeded memory.
126 */ 126 */
127 start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; 127 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
128 end_index = (endbyte >> PAGE_CACHE_SHIFT); 128 end_index = (endbyte >> PAGE_SHIFT);
129 129
130 if (end_index >= start_index) { 130 if (end_index >= start_index) {
131 unsigned long count = invalidate_mapping_pages(mapping, 131 unsigned long count = invalidate_mapping_pages(mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index a8c69c8c0a90..f2479af09da9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -265,7 +265,7 @@ void delete_from_page_cache(struct page *page)
265 265
266 if (freepage) 266 if (freepage)
267 freepage(page); 267 freepage(page);
268 page_cache_release(page); 268 put_page(page);
269} 269}
270EXPORT_SYMBOL(delete_from_page_cache); 270EXPORT_SYMBOL(delete_from_page_cache);
271 271
@@ -352,8 +352,8 @@ EXPORT_SYMBOL(filemap_flush);
352static int __filemap_fdatawait_range(struct address_space *mapping, 352static int __filemap_fdatawait_range(struct address_space *mapping,
353 loff_t start_byte, loff_t end_byte) 353 loff_t start_byte, loff_t end_byte)
354{ 354{
355 pgoff_t index = start_byte >> PAGE_CACHE_SHIFT; 355 pgoff_t index = start_byte >> PAGE_SHIFT;
356 pgoff_t end = end_byte >> PAGE_CACHE_SHIFT; 356 pgoff_t end = end_byte >> PAGE_SHIFT;
357 struct pagevec pvec; 357 struct pagevec pvec;
358 int nr_pages; 358 int nr_pages;
359 int ret = 0; 359 int ret = 0;
@@ -550,7 +550,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
550 pgoff_t offset = old->index; 550 pgoff_t offset = old->index;
551 freepage = mapping->a_ops->freepage; 551 freepage = mapping->a_ops->freepage;
552 552
553 page_cache_get(new); 553 get_page(new);
554 new->mapping = mapping; 554 new->mapping = mapping;
555 new->index = offset; 555 new->index = offset;
556 556
@@ -572,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
572 radix_tree_preload_end(); 572 radix_tree_preload_end();
573 if (freepage) 573 if (freepage)
574 freepage(old); 574 freepage(old);
575 page_cache_release(old); 575 put_page(old);
576 } 576 }
577 577
578 return error; 578 return error;
@@ -651,7 +651,7 @@ static int __add_to_page_cache_locked(struct page *page,
651 return error; 651 return error;
652 } 652 }
653 653
654 page_cache_get(page); 654 get_page(page);
655 page->mapping = mapping; 655 page->mapping = mapping;
656 page->index = offset; 656 page->index = offset;
657 657
@@ -675,7 +675,7 @@ err_insert:
675 spin_unlock_irq(&mapping->tree_lock); 675 spin_unlock_irq(&mapping->tree_lock);
676 if (!huge) 676 if (!huge)
677 mem_cgroup_cancel_charge(page, memcg, false); 677 mem_cgroup_cancel_charge(page, memcg, false);
678 page_cache_release(page); 678 put_page(page);
679 return error; 679 return error;
680} 680}
681 681
@@ -1083,7 +1083,7 @@ repeat:
1083 * include/linux/pagemap.h for details. 1083 * include/linux/pagemap.h for details.
1084 */ 1084 */
1085 if (unlikely(page != *pagep)) { 1085 if (unlikely(page != *pagep)) {
1086 page_cache_release(page); 1086 put_page(page);
1087 goto repeat; 1087 goto repeat;
1088 } 1088 }
1089 } 1089 }
@@ -1121,7 +1121,7 @@ repeat:
1121 /* Has the page been truncated? */ 1121 /* Has the page been truncated? */
1122 if (unlikely(page->mapping != mapping)) { 1122 if (unlikely(page->mapping != mapping)) {
1123 unlock_page(page); 1123 unlock_page(page);
1124 page_cache_release(page); 1124 put_page(page);
1125 goto repeat; 1125 goto repeat;
1126 } 1126 }
1127 VM_BUG_ON_PAGE(page->index != offset, page); 1127 VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1168,7 +1168,7 @@ repeat:
1168 if (fgp_flags & FGP_LOCK) { 1168 if (fgp_flags & FGP_LOCK) {
1169 if (fgp_flags & FGP_NOWAIT) { 1169 if (fgp_flags & FGP_NOWAIT) {
1170 if (!trylock_page(page)) { 1170 if (!trylock_page(page)) {
1171 page_cache_release(page); 1171 put_page(page);
1172 return NULL; 1172 return NULL;
1173 } 1173 }
1174 } else { 1174 } else {
@@ -1178,7 +1178,7 @@ repeat:
1178 /* Has the page been truncated? */ 1178 /* Has the page been truncated? */
1179 if (unlikely(page->mapping != mapping)) { 1179 if (unlikely(page->mapping != mapping)) {
1180 unlock_page(page); 1180 unlock_page(page);
1181 page_cache_release(page); 1181 put_page(page);
1182 goto repeat; 1182 goto repeat;
1183 } 1183 }
1184 VM_BUG_ON_PAGE(page->index != offset, page); 1184 VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1209,7 +1209,7 @@ no_page:
1209 err = add_to_page_cache_lru(page, mapping, offset, 1209 err = add_to_page_cache_lru(page, mapping, offset,
1210 gfp_mask & GFP_RECLAIM_MASK); 1210 gfp_mask & GFP_RECLAIM_MASK);
1211 if (unlikely(err)) { 1211 if (unlikely(err)) {
1212 page_cache_release(page); 1212 put_page(page);
1213 page = NULL; 1213 page = NULL;
1214 if (err == -EEXIST) 1214 if (err == -EEXIST)
1215 goto repeat; 1215 goto repeat;
@@ -1278,7 +1278,7 @@ repeat:
1278 1278
1279 /* Has the page moved? */ 1279 /* Has the page moved? */
1280 if (unlikely(page != *slot)) { 1280 if (unlikely(page != *slot)) {
1281 page_cache_release(page); 1281 put_page(page);
1282 goto repeat; 1282 goto repeat;
1283 } 1283 }
1284export: 1284export:
@@ -1343,7 +1343,7 @@ repeat:
1343 1343
1344 /* Has the page moved? */ 1344 /* Has the page moved? */
1345 if (unlikely(page != *slot)) { 1345 if (unlikely(page != *slot)) {
1346 page_cache_release(page); 1346 put_page(page);
1347 goto repeat; 1347 goto repeat;
1348 } 1348 }
1349 1349
@@ -1405,7 +1405,7 @@ repeat:
1405 1405
1406 /* Has the page moved? */ 1406 /* Has the page moved? */
1407 if (unlikely(page != *slot)) { 1407 if (unlikely(page != *slot)) {
1408 page_cache_release(page); 1408 put_page(page);
1409 goto repeat; 1409 goto repeat;
1410 } 1410 }
1411 1411
@@ -1415,7 +1415,7 @@ repeat:
1415 * negatives, which is just confusing to the caller. 1415 * negatives, which is just confusing to the caller.
1416 */ 1416 */
1417 if (page->mapping == NULL || page->index != iter.index) { 1417 if (page->mapping == NULL || page->index != iter.index) {
1418 page_cache_release(page); 1418 put_page(page);
1419 break; 1419 break;
1420 } 1420 }
1421 1421
@@ -1482,7 +1482,7 @@ repeat:
1482 1482
1483 /* Has the page moved? */ 1483 /* Has the page moved? */
1484 if (unlikely(page != *slot)) { 1484 if (unlikely(page != *slot)) {
1485 page_cache_release(page); 1485 put_page(page);
1486 goto repeat; 1486 goto repeat;
1487 } 1487 }
1488 1488
@@ -1549,7 +1549,7 @@ repeat:
1549 1549
1550 /* Has the page moved? */ 1550 /* Has the page moved? */
1551 if (unlikely(page != *slot)) { 1551 if (unlikely(page != *slot)) {
1552 page_cache_release(page); 1552 put_page(page);
1553 goto repeat; 1553 goto repeat;
1554 } 1554 }
1555export: 1555export:
@@ -1610,11 +1610,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1610 unsigned int prev_offset; 1610 unsigned int prev_offset;
1611 int error = 0; 1611 int error = 0;
1612 1612
1613 index = *ppos >> PAGE_CACHE_SHIFT; 1613 index = *ppos >> PAGE_SHIFT;
1614 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT; 1614 prev_index = ra->prev_pos >> PAGE_SHIFT;
1615 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); 1615 prev_offset = ra->prev_pos & (PAGE_SIZE-1);
1616 last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; 1616 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
1617 offset = *ppos & ~PAGE_CACHE_MASK; 1617 offset = *ppos & ~PAGE_MASK;
1618 1618
1619 for (;;) { 1619 for (;;) {
1620 struct page *page; 1620 struct page *page;
@@ -1648,7 +1648,7 @@ find_page:
1648 if (PageUptodate(page)) 1648 if (PageUptodate(page))
1649 goto page_ok; 1649 goto page_ok;
1650 1650
1651 if (inode->i_blkbits == PAGE_CACHE_SHIFT || 1651 if (inode->i_blkbits == PAGE_SHIFT ||
1652 !mapping->a_ops->is_partially_uptodate) 1652 !mapping->a_ops->is_partially_uptodate)
1653 goto page_not_up_to_date; 1653 goto page_not_up_to_date;
1654 if (!trylock_page(page)) 1654 if (!trylock_page(page))
@@ -1672,18 +1672,18 @@ page_ok:
1672 */ 1672 */
1673 1673
1674 isize = i_size_read(inode); 1674 isize = i_size_read(inode);
1675 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1675 end_index = (isize - 1) >> PAGE_SHIFT;
1676 if (unlikely(!isize || index > end_index)) { 1676 if (unlikely(!isize || index > end_index)) {
1677 page_cache_release(page); 1677 put_page(page);
1678 goto out; 1678 goto out;
1679 } 1679 }
1680 1680
1681 /* nr is the maximum number of bytes to copy from this page */ 1681 /* nr is the maximum number of bytes to copy from this page */
1682 nr = PAGE_CACHE_SIZE; 1682 nr = PAGE_SIZE;
1683 if (index == end_index) { 1683 if (index == end_index) {
1684 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1684 nr = ((isize - 1) & ~PAGE_MASK) + 1;
1685 if (nr <= offset) { 1685 if (nr <= offset) {
1686 page_cache_release(page); 1686 put_page(page);
1687 goto out; 1687 goto out;
1688 } 1688 }
1689 } 1689 }
@@ -1711,11 +1711,11 @@ page_ok:
1711 1711
1712 ret = copy_page_to_iter(page, offset, nr, iter); 1712 ret = copy_page_to_iter(page, offset, nr, iter);
1713 offset += ret; 1713 offset += ret;
1714 index += offset >> PAGE_CACHE_SHIFT; 1714 index += offset >> PAGE_SHIFT;
1715 offset &= ~PAGE_CACHE_MASK; 1715 offset &= ~PAGE_MASK;
1716 prev_offset = offset; 1716 prev_offset = offset;
1717 1717
1718 page_cache_release(page); 1718 put_page(page);
1719 written += ret; 1719 written += ret;
1720 if (!iov_iter_count(iter)) 1720 if (!iov_iter_count(iter))
1721 goto out; 1721 goto out;
@@ -1735,7 +1735,7 @@ page_not_up_to_date_locked:
1735 /* Did it get truncated before we got the lock? */ 1735 /* Did it get truncated before we got the lock? */
1736 if (!page->mapping) { 1736 if (!page->mapping) {
1737 unlock_page(page); 1737 unlock_page(page);
1738 page_cache_release(page); 1738 put_page(page);
1739 continue; 1739 continue;
1740 } 1740 }
1741 1741
@@ -1757,7 +1757,7 @@ readpage:
1757 1757
1758 if (unlikely(error)) { 1758 if (unlikely(error)) {
1759 if (error == AOP_TRUNCATED_PAGE) { 1759 if (error == AOP_TRUNCATED_PAGE) {
1760 page_cache_release(page); 1760 put_page(page);
1761 error = 0; 1761 error = 0;
1762 goto find_page; 1762 goto find_page;
1763 } 1763 }
@@ -1774,7 +1774,7 @@ readpage:
1774 * invalidate_mapping_pages got it 1774 * invalidate_mapping_pages got it
1775 */ 1775 */
1776 unlock_page(page); 1776 unlock_page(page);
1777 page_cache_release(page); 1777 put_page(page);
1778 goto find_page; 1778 goto find_page;
1779 } 1779 }
1780 unlock_page(page); 1780 unlock_page(page);
@@ -1789,7 +1789,7 @@ readpage:
1789 1789
1790readpage_error: 1790readpage_error:
1791 /* UHHUH! A synchronous read error occurred. Report it */ 1791 /* UHHUH! A synchronous read error occurred. Report it */
1792 page_cache_release(page); 1792 put_page(page);
1793 goto out; 1793 goto out;
1794 1794
1795no_cached_page: 1795no_cached_page:
@@ -1805,7 +1805,7 @@ no_cached_page:
1805 error = add_to_page_cache_lru(page, mapping, index, 1805 error = add_to_page_cache_lru(page, mapping, index,
1806 mapping_gfp_constraint(mapping, GFP_KERNEL)); 1806 mapping_gfp_constraint(mapping, GFP_KERNEL));
1807 if (error) { 1807 if (error) {
1808 page_cache_release(page); 1808 put_page(page);
1809 if (error == -EEXIST) { 1809 if (error == -EEXIST) {
1810 error = 0; 1810 error = 0;
1811 goto find_page; 1811 goto find_page;
@@ -1817,10 +1817,10 @@ no_cached_page:
1817 1817
1818out: 1818out:
1819 ra->prev_pos = prev_index; 1819 ra->prev_pos = prev_index;
1820 ra->prev_pos <<= PAGE_CACHE_SHIFT; 1820 ra->prev_pos <<= PAGE_SHIFT;
1821 ra->prev_pos |= prev_offset; 1821 ra->prev_pos |= prev_offset;
1822 1822
1823 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; 1823 *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
1824 file_accessed(filp); 1824 file_accessed(filp);
1825 return written ? written : error; 1825 return written ? written : error;
1826} 1826}
@@ -1912,7 +1912,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
1912 else if (ret == -EEXIST) 1912 else if (ret == -EEXIST)
1913 ret = 0; /* losing race to add is OK */ 1913 ret = 0; /* losing race to add is OK */
1914 1914
1915 page_cache_release(page); 1915 put_page(page);
1916 1916
1917 } while (ret == AOP_TRUNCATED_PAGE); 1917 } while (ret == AOP_TRUNCATED_PAGE);
1918 1918
@@ -2022,8 +2022,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2022 loff_t size; 2022 loff_t size;
2023 int ret = 0; 2023 int ret = 0;
2024 2024
2025 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); 2025 size = round_up(i_size_read(inode), PAGE_SIZE);
2026 if (offset >= size >> PAGE_CACHE_SHIFT) 2026 if (offset >= size >> PAGE_SHIFT)
2027 return VM_FAULT_SIGBUS; 2027 return VM_FAULT_SIGBUS;
2028 2028
2029 /* 2029 /*
@@ -2049,7 +2049,7 @@ retry_find:
2049 } 2049 }
2050 2050
2051 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { 2051 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
2052 page_cache_release(page); 2052 put_page(page);
2053 return ret | VM_FAULT_RETRY; 2053 return ret | VM_FAULT_RETRY;
2054 } 2054 }
2055 2055
@@ -2072,10 +2072,10 @@ retry_find:
2072 * Found the page and have a reference on it. 2072 * Found the page and have a reference on it.
2073 * We must recheck i_size under page lock. 2073 * We must recheck i_size under page lock.
2074 */ 2074 */
2075 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); 2075 size = round_up(i_size_read(inode), PAGE_SIZE);
2076 if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) { 2076 if (unlikely(offset >= size >> PAGE_SHIFT)) {
2077 unlock_page(page); 2077 unlock_page(page);
2078 page_cache_release(page); 2078 put_page(page);
2079 return VM_FAULT_SIGBUS; 2079 return VM_FAULT_SIGBUS;
2080 } 2080 }
2081 2081
@@ -2120,7 +2120,7 @@ page_not_uptodate:
2120 if (!PageUptodate(page)) 2120 if (!PageUptodate(page))
2121 error = -EIO; 2121 error = -EIO;
2122 } 2122 }
2123 page_cache_release(page); 2123 put_page(page);
2124 2124
2125 if (!error || error == AOP_TRUNCATED_PAGE) 2125 if (!error || error == AOP_TRUNCATED_PAGE)
2126 goto retry_find; 2126 goto retry_find;
@@ -2164,7 +2164,7 @@ repeat:
2164 2164
2165 /* Has the page moved? */ 2165 /* Has the page moved? */
2166 if (unlikely(page != *slot)) { 2166 if (unlikely(page != *slot)) {
2167 page_cache_release(page); 2167 put_page(page);
2168 goto repeat; 2168 goto repeat;
2169 } 2169 }
2170 2170
@@ -2178,8 +2178,8 @@ repeat:
2178 if (page->mapping != mapping || !PageUptodate(page)) 2178 if (page->mapping != mapping || !PageUptodate(page))
2179 goto unlock; 2179 goto unlock;
2180 2180
2181 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); 2181 size = round_up(i_size_read(mapping->host), PAGE_SIZE);
2182 if (page->index >= size >> PAGE_CACHE_SHIFT) 2182 if (page->index >= size >> PAGE_SHIFT)
2183 goto unlock; 2183 goto unlock;
2184 2184
2185 pte = vmf->pte + page->index - vmf->pgoff; 2185 pte = vmf->pte + page->index - vmf->pgoff;
@@ -2195,7 +2195,7 @@ repeat:
2195unlock: 2195unlock:
2196 unlock_page(page); 2196 unlock_page(page);
2197skip: 2197skip:
2198 page_cache_release(page); 2198 put_page(page);
2199next: 2199next:
2200 if (iter.index == vmf->max_pgoff) 2200 if (iter.index == vmf->max_pgoff)
2201 break; 2201 break;
@@ -2278,7 +2278,7 @@ static struct page *wait_on_page_read(struct page *page)
2278 if (!IS_ERR(page)) { 2278 if (!IS_ERR(page)) {
2279 wait_on_page_locked(page); 2279 wait_on_page_locked(page);
2280 if (!PageUptodate(page)) { 2280 if (!PageUptodate(page)) {
2281 page_cache_release(page); 2281 put_page(page);
2282 page = ERR_PTR(-EIO); 2282 page = ERR_PTR(-EIO);
2283 } 2283 }
2284 } 2284 }
@@ -2301,7 +2301,7 @@ repeat:
2301 return ERR_PTR(-ENOMEM); 2301 return ERR_PTR(-ENOMEM);
2302 err = add_to_page_cache_lru(page, mapping, index, gfp); 2302 err = add_to_page_cache_lru(page, mapping, index, gfp);
2303 if (unlikely(err)) { 2303 if (unlikely(err)) {
2304 page_cache_release(page); 2304 put_page(page);
2305 if (err == -EEXIST) 2305 if (err == -EEXIST)
2306 goto repeat; 2306 goto repeat;
2307 /* Presumably ENOMEM for radix tree node */ 2307 /* Presumably ENOMEM for radix tree node */
@@ -2311,7 +2311,7 @@ repeat:
2311filler: 2311filler:
2312 err = filler(data, page); 2312 err = filler(data, page);
2313 if (err < 0) { 2313 if (err < 0) {
2314 page_cache_release(page); 2314 put_page(page);
2315 return ERR_PTR(err); 2315 return ERR_PTR(err);
2316 } 2316 }
2317 2317
@@ -2364,7 +2364,7 @@ filler:
2364 /* Case c or d, restart the operation */ 2364 /* Case c or d, restart the operation */
2365 if (!page->mapping) { 2365 if (!page->mapping) {
2366 unlock_page(page); 2366 unlock_page(page);
2367 page_cache_release(page); 2367 put_page(page);
2368 goto repeat; 2368 goto repeat;
2369 } 2369 }
2370 2370
@@ -2511,7 +2511,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2511 struct iov_iter data; 2511 struct iov_iter data;
2512 2512
2513 write_len = iov_iter_count(from); 2513 write_len = iov_iter_count(from);
2514 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; 2514 end = (pos + write_len - 1) >> PAGE_SHIFT;
2515 2515
2516 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2516 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2517 if (written) 2517 if (written)
@@ -2525,7 +2525,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2525 */ 2525 */
2526 if (mapping->nrpages) { 2526 if (mapping->nrpages) {
2527 written = invalidate_inode_pages2_range(mapping, 2527 written = invalidate_inode_pages2_range(mapping,
2528 pos >> PAGE_CACHE_SHIFT, end); 2528 pos >> PAGE_SHIFT, end);
2529 /* 2529 /*
2530 * If a page can not be invalidated, return 0 to fall back 2530 * If a page can not be invalidated, return 0 to fall back
2531 * to buffered write. 2531 * to buffered write.
@@ -2550,7 +2550,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2550 */ 2550 */
2551 if (mapping->nrpages) { 2551 if (mapping->nrpages) {
2552 invalidate_inode_pages2_range(mapping, 2552 invalidate_inode_pages2_range(mapping,
2553 pos >> PAGE_CACHE_SHIFT, end); 2553 pos >> PAGE_SHIFT, end);
2554 } 2554 }
2555 2555
2556 if (written > 0) { 2556 if (written > 0) {
@@ -2611,8 +2611,8 @@ ssize_t generic_perform_write(struct file *file,
2611 size_t copied; /* Bytes copied from user */ 2611 size_t copied; /* Bytes copied from user */
2612 void *fsdata; 2612 void *fsdata;
2613 2613
2614 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2614 offset = (pos & (PAGE_SIZE - 1));
2615 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2615 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2616 iov_iter_count(i)); 2616 iov_iter_count(i));
2617 2617
2618again: 2618again:
@@ -2665,7 +2665,7 @@ again:
2665 * because not all segments in the iov can be copied at 2665 * because not all segments in the iov can be copied at
2666 * once without a pagefault. 2666 * once without a pagefault.
2667 */ 2667 */
2668 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2668 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2669 iov_iter_single_seg_count(i)); 2669 iov_iter_single_seg_count(i));
2670 goto again; 2670 goto again;
2671 } 2671 }
@@ -2752,8 +2752,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2752 iocb->ki_pos = endbyte + 1; 2752 iocb->ki_pos = endbyte + 1;
2753 written += status; 2753 written += status;
2754 invalidate_mapping_pages(mapping, 2754 invalidate_mapping_pages(mapping,
2755 pos >> PAGE_CACHE_SHIFT, 2755 pos >> PAGE_SHIFT,
2756 endbyte >> PAGE_CACHE_SHIFT); 2756 endbyte >> PAGE_SHIFT);
2757 } else { 2757 } else {
2758 /* 2758 /*
2759 * We don't know how much we wrote, so just return 2759 * We don't know how much we wrote, so just return
diff --git a/mm/gup.c b/mm/gup.c
index 7f1c4fb77cfa..fb87aea9edc8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1107,7 +1107,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1107 * @addr: user address 1107 * @addr: user address
1108 * 1108 *
1109 * Returns struct page pointer of user page pinned for dump, 1109 * Returns struct page pointer of user page pinned for dump,
1110 * to be freed afterwards by page_cache_release() or put_page(). 1110 * to be freed afterwards by put_page().
1111 * 1111 *
1112 * Returns NULL on any kind of failure - a hole must then be inserted into 1112 * Returns NULL on any kind of failure - a hole must then be inserted into
1113 * the corefile, to preserve alignment with its headers; and also returns 1113 * the corefile, to preserve alignment with its headers; and also returns
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06058eaa173b..19d0d08b396f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3346,7 +3346,7 @@ retry_avoidcopy:
3346 old_page != pagecache_page) 3346 old_page != pagecache_page)
3347 outside_reserve = 1; 3347 outside_reserve = 1;
3348 3348
3349 page_cache_get(old_page); 3349 get_page(old_page);
3350 3350
3351 /* 3351 /*
3352 * Drop page table lock as buddy allocator may be called. It will 3352 * Drop page table lock as buddy allocator may be called. It will
@@ -3364,7 +3364,7 @@ retry_avoidcopy:
3364 * may get SIGKILLed if it later faults. 3364 * may get SIGKILLed if it later faults.
3365 */ 3365 */
3366 if (outside_reserve) { 3366 if (outside_reserve) {
3367 page_cache_release(old_page); 3367 put_page(old_page);
3368 BUG_ON(huge_pte_none(pte)); 3368 BUG_ON(huge_pte_none(pte));
3369 unmap_ref_private(mm, vma, old_page, address); 3369 unmap_ref_private(mm, vma, old_page, address);
3370 BUG_ON(huge_pte_none(pte)); 3370 BUG_ON(huge_pte_none(pte));
@@ -3425,9 +3425,9 @@ retry_avoidcopy:
3425 spin_unlock(ptl); 3425 spin_unlock(ptl);
3426 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 3426 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3427out_release_all: 3427out_release_all:
3428 page_cache_release(new_page); 3428 put_page(new_page);
3429out_release_old: 3429out_release_old:
3430 page_cache_release(old_page); 3430 put_page(old_page);
3431 3431
3432 spin_lock(ptl); /* Caller expects lock to be held */ 3432 spin_lock(ptl); /* Caller expects lock to be held */
3433 return ret; 3433 return ret;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index acb3b6c4dd89..38f1dd79acdb 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
498 struct kasan_alloc_meta *alloc_info = 498 struct kasan_alloc_meta *alloc_info =
499 get_alloc_info(cache, object); 499 get_alloc_info(cache, object);
500 alloc_info->state = KASAN_STATE_FREE; 500 alloc_info->state = KASAN_STATE_FREE;
501 set_track(&free_info->track); 501 set_track(&free_info->track, GFP_NOWAIT);
502 } 502 }
503#endif 503#endif
504 504
diff --git a/mm/madvise.c b/mm/madvise.c
index a01147359f3b..07427d3fcead 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -170,7 +170,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
170 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, 170 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
171 vma, index); 171 vma, index);
172 if (page) 172 if (page)
173 page_cache_release(page); 173 put_page(page);
174 } 174 }
175 175
176 return 0; 176 return 0;
@@ -204,14 +204,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
204 page = find_get_entry(mapping, index); 204 page = find_get_entry(mapping, index);
205 if (!radix_tree_exceptional_entry(page)) { 205 if (!radix_tree_exceptional_entry(page)) {
206 if (page) 206 if (page)
207 page_cache_release(page); 207 put_page(page);
208 continue; 208 continue;
209 } 209 }
210 swap = radix_to_swp_entry(page); 210 swap = radix_to_swp_entry(page);
211 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, 211 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
212 NULL, 0); 212 NULL, 0);
213 if (page) 213 if (page)
214 page_cache_release(page); 214 put_page(page);
215 } 215 }
216 216
217 lru_add_drain(); /* Push any new pages onto the LRU now */ 217 lru_add_drain(); /* Push any new pages onto the LRU now */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a544c6c0717..78f5f2641b91 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -538,7 +538,7 @@ static int delete_from_lru_cache(struct page *p)
538 /* 538 /*
539 * drop the page count elevated by isolate_lru_page() 539 * drop the page count elevated by isolate_lru_page()
540 */ 540 */
541 page_cache_release(p); 541 put_page(p);
542 return 0; 542 return 0;
543 } 543 }
544 return -EIO; 544 return -EIO;
diff --git a/mm/memory.c b/mm/memory.c
index 098f00d05461..93897f23cc11 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2054,7 +2054,7 @@ static inline int wp_page_reuse(struct mm_struct *mm,
2054 VM_BUG_ON_PAGE(PageAnon(page), page); 2054 VM_BUG_ON_PAGE(PageAnon(page), page);
2055 mapping = page->mapping; 2055 mapping = page->mapping;
2056 unlock_page(page); 2056 unlock_page(page);
2057 page_cache_release(page); 2057 put_page(page);
2058 2058
2059 if ((dirtied || page_mkwrite) && mapping) { 2059 if ((dirtied || page_mkwrite) && mapping) {
2060 /* 2060 /*
@@ -2188,7 +2188,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
2188 } 2188 }
2189 2189
2190 if (new_page) 2190 if (new_page)
2191 page_cache_release(new_page); 2191 put_page(new_page);
2192 2192
2193 pte_unmap_unlock(page_table, ptl); 2193 pte_unmap_unlock(page_table, ptl);
2194 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2194 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -2203,14 +2203,14 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
2203 munlock_vma_page(old_page); 2203 munlock_vma_page(old_page);
2204 unlock_page(old_page); 2204 unlock_page(old_page);
2205 } 2205 }
2206 page_cache_release(old_page); 2206 put_page(old_page);
2207 } 2207 }
2208 return page_copied ? VM_FAULT_WRITE : 0; 2208 return page_copied ? VM_FAULT_WRITE : 0;
2209oom_free_new: 2209oom_free_new:
2210 page_cache_release(new_page); 2210 put_page(new_page);
2211oom: 2211oom:
2212 if (old_page) 2212 if (old_page)
2213 page_cache_release(old_page); 2213 put_page(old_page);
2214 return VM_FAULT_OOM; 2214 return VM_FAULT_OOM;
2215} 2215}
2216 2216
@@ -2258,7 +2258,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2258{ 2258{
2259 int page_mkwrite = 0; 2259 int page_mkwrite = 0;
2260 2260
2261 page_cache_get(old_page); 2261 get_page(old_page);
2262 2262
2263 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 2263 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2264 int tmp; 2264 int tmp;
@@ -2267,7 +2267,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2267 tmp = do_page_mkwrite(vma, old_page, address); 2267 tmp = do_page_mkwrite(vma, old_page, address);
2268 if (unlikely(!tmp || (tmp & 2268 if (unlikely(!tmp || (tmp &
2269 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 2269 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
2270 page_cache_release(old_page); 2270 put_page(old_page);
2271 return tmp; 2271 return tmp;
2272 } 2272 }
2273 /* 2273 /*
@@ -2281,7 +2281,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2281 if (!pte_same(*page_table, orig_pte)) { 2281 if (!pte_same(*page_table, orig_pte)) {
2282 unlock_page(old_page); 2282 unlock_page(old_page);
2283 pte_unmap_unlock(page_table, ptl); 2283 pte_unmap_unlock(page_table, ptl);
2284 page_cache_release(old_page); 2284 put_page(old_page);
2285 return 0; 2285 return 0;
2286 } 2286 }
2287 page_mkwrite = 1; 2287 page_mkwrite = 1;
@@ -2341,7 +2341,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2341 */ 2341 */
2342 if (PageAnon(old_page) && !PageKsm(old_page)) { 2342 if (PageAnon(old_page) && !PageKsm(old_page)) {
2343 if (!trylock_page(old_page)) { 2343 if (!trylock_page(old_page)) {
2344 page_cache_get(old_page); 2344 get_page(old_page);
2345 pte_unmap_unlock(page_table, ptl); 2345 pte_unmap_unlock(page_table, ptl);
2346 lock_page(old_page); 2346 lock_page(old_page);
2347 page_table = pte_offset_map_lock(mm, pmd, address, 2347 page_table = pte_offset_map_lock(mm, pmd, address,
@@ -2349,10 +2349,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2349 if (!pte_same(*page_table, orig_pte)) { 2349 if (!pte_same(*page_table, orig_pte)) {
2350 unlock_page(old_page); 2350 unlock_page(old_page);
2351 pte_unmap_unlock(page_table, ptl); 2351 pte_unmap_unlock(page_table, ptl);
2352 page_cache_release(old_page); 2352 put_page(old_page);
2353 return 0; 2353 return 0;
2354 } 2354 }
2355 page_cache_release(old_page); 2355 put_page(old_page);
2356 } 2356 }
2357 if (reuse_swap_page(old_page)) { 2357 if (reuse_swap_page(old_page)) {
2358 /* 2358 /*
@@ -2375,7 +2375,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2375 /* 2375 /*
2376 * Ok, we need to copy. Oh, well.. 2376 * Ok, we need to copy. Oh, well..
2377 */ 2377 */
2378 page_cache_get(old_page); 2378 get_page(old_page);
2379 2379
2380 pte_unmap_unlock(page_table, ptl); 2380 pte_unmap_unlock(page_table, ptl);
2381 return wp_page_copy(mm, vma, address, page_table, pmd, 2381 return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2400,7 +2400,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
2400 2400
2401 vba = vma->vm_pgoff; 2401 vba = vma->vm_pgoff;
2402 vea = vba + vma_pages(vma) - 1; 2402 vea = vba + vma_pages(vma) - 1;
2403 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
2404 zba = details->first_index; 2403 zba = details->first_index;
2405 if (zba < vba) 2404 if (zba < vba)
2406 zba = vba; 2405 zba = vba;
@@ -2619,7 +2618,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2619 * parallel locked swapcache. 2618 * parallel locked swapcache.
2620 */ 2619 */
2621 unlock_page(swapcache); 2620 unlock_page(swapcache);
2622 page_cache_release(swapcache); 2621 put_page(swapcache);
2623 } 2622 }
2624 2623
2625 if (flags & FAULT_FLAG_WRITE) { 2624 if (flags & FAULT_FLAG_WRITE) {
@@ -2641,10 +2640,10 @@ out_nomap:
2641out_page: 2640out_page:
2642 unlock_page(page); 2641 unlock_page(page);
2643out_release: 2642out_release:
2644 page_cache_release(page); 2643 put_page(page);
2645 if (page != swapcache) { 2644 if (page != swapcache) {
2646 unlock_page(swapcache); 2645 unlock_page(swapcache);
2647 page_cache_release(swapcache); 2646 put_page(swapcache);
2648 } 2647 }
2649 return ret; 2648 return ret;
2650} 2649}
@@ -2752,7 +2751,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2752 if (userfaultfd_missing(vma)) { 2751 if (userfaultfd_missing(vma)) {
2753 pte_unmap_unlock(page_table, ptl); 2752 pte_unmap_unlock(page_table, ptl);
2754 mem_cgroup_cancel_charge(page, memcg, false); 2753 mem_cgroup_cancel_charge(page, memcg, false);
2755 page_cache_release(page); 2754 put_page(page);
2756 return handle_userfault(vma, address, flags, 2755 return handle_userfault(vma, address, flags,
2757 VM_UFFD_MISSING); 2756 VM_UFFD_MISSING);
2758 } 2757 }
@@ -2771,10 +2770,10 @@ unlock:
2771 return 0; 2770 return 0;
2772release: 2771release:
2773 mem_cgroup_cancel_charge(page, memcg, false); 2772 mem_cgroup_cancel_charge(page, memcg, false);
2774 page_cache_release(page); 2773 put_page(page);
2775 goto unlock; 2774 goto unlock;
2776oom_free_page: 2775oom_free_page:
2777 page_cache_release(page); 2776 put_page(page);
2778oom: 2777oom:
2779 return VM_FAULT_OOM; 2778 return VM_FAULT_OOM;
2780} 2779}
@@ -2807,7 +2806,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
2807 if (unlikely(PageHWPoison(vmf.page))) { 2806 if (unlikely(PageHWPoison(vmf.page))) {
2808 if (ret & VM_FAULT_LOCKED) 2807 if (ret & VM_FAULT_LOCKED)
2809 unlock_page(vmf.page); 2808 unlock_page(vmf.page);
2810 page_cache_release(vmf.page); 2809 put_page(vmf.page);
2811 return VM_FAULT_HWPOISON; 2810 return VM_FAULT_HWPOISON;
2812 } 2811 }
2813 2812
@@ -2996,7 +2995,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2996 if (unlikely(!pte_same(*pte, orig_pte))) { 2995 if (unlikely(!pte_same(*pte, orig_pte))) {
2997 pte_unmap_unlock(pte, ptl); 2996 pte_unmap_unlock(pte, ptl);
2998 unlock_page(fault_page); 2997 unlock_page(fault_page);
2999 page_cache_release(fault_page); 2998 put_page(fault_page);
3000 return ret; 2999 return ret;
3001 } 3000 }
3002 do_set_pte(vma, address, fault_page, pte, false, false); 3001 do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3024,7 +3023,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3024 return VM_FAULT_OOM; 3023 return VM_FAULT_OOM;
3025 3024
3026 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) { 3025 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
3027 page_cache_release(new_page); 3026 put_page(new_page);
3028 return VM_FAULT_OOM; 3027 return VM_FAULT_OOM;
3029 } 3028 }
3030 3029
@@ -3041,7 +3040,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3041 pte_unmap_unlock(pte, ptl); 3040 pte_unmap_unlock(pte, ptl);
3042 if (fault_page) { 3041 if (fault_page) {
3043 unlock_page(fault_page); 3042 unlock_page(fault_page);
3044 page_cache_release(fault_page); 3043 put_page(fault_page);
3045 } else { 3044 } else {
3046 /* 3045 /*
3047 * The fault handler has no page to lock, so it holds 3046 * The fault handler has no page to lock, so it holds
@@ -3057,7 +3056,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3057 pte_unmap_unlock(pte, ptl); 3056 pte_unmap_unlock(pte, ptl);
3058 if (fault_page) { 3057 if (fault_page) {
3059 unlock_page(fault_page); 3058 unlock_page(fault_page);
3060 page_cache_release(fault_page); 3059 put_page(fault_page);
3061 } else { 3060 } else {
3062 /* 3061 /*
3063 * The fault handler has no page to lock, so it holds 3062 * The fault handler has no page to lock, so it holds
@@ -3068,7 +3067,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3068 return ret; 3067 return ret;
3069uncharge_out: 3068uncharge_out:
3070 mem_cgroup_cancel_charge(new_page, memcg, false); 3069 mem_cgroup_cancel_charge(new_page, memcg, false);
3071 page_cache_release(new_page); 3070 put_page(new_page);
3072 return ret; 3071 return ret;
3073} 3072}
3074 3073
@@ -3096,7 +3095,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3096 tmp = do_page_mkwrite(vma, fault_page, address); 3095 tmp = do_page_mkwrite(vma, fault_page, address);
3097 if (unlikely(!tmp || 3096 if (unlikely(!tmp ||
3098 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3097 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3099 page_cache_release(fault_page); 3098 put_page(fault_page);
3100 return tmp; 3099 return tmp;
3101 } 3100 }
3102 } 3101 }
@@ -3105,7 +3104,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3105 if (unlikely(!pte_same(*pte, orig_pte))) { 3104 if (unlikely(!pte_same(*pte, orig_pte))) {
3106 pte_unmap_unlock(pte, ptl); 3105 pte_unmap_unlock(pte, ptl);
3107 unlock_page(fault_page); 3106 unlock_page(fault_page);
3108 page_cache_release(fault_page); 3107 put_page(fault_page);
3109 return ret; 3108 return ret;
3110 } 3109 }
3111 do_set_pte(vma, address, fault_page, pte, true, false); 3110 do_set_pte(vma, address, fault_page, pte, true, false);
@@ -3736,7 +3735,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3736 buf, maddr + offset, bytes); 3735 buf, maddr + offset, bytes);
3737 } 3736 }
3738 kunmap(page); 3737 kunmap(page);
3739 page_cache_release(page); 3738 put_page(page);
3740 } 3739 }
3741 len -= bytes; 3740 len -= bytes;
3742 buf += bytes; 3741 buf += bytes;
diff --git a/mm/mincore.c b/mm/mincore.c
index 563f32045490..c0b5ba965200 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -75,7 +75,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
75#endif 75#endif
76 if (page) { 76 if (page) {
77 present = PageUptodate(page); 77 present = PageUptodate(page);
78 page_cache_release(page); 78 put_page(page);
79 } 79 }
80 80
81 return present; 81 return present;
@@ -211,7 +211,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
211 * return values: 211 * return values:
212 * zero - success 212 * zero - success
213 * -EFAULT - vec points to an illegal address 213 * -EFAULT - vec points to an illegal address
214 * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE 214 * -EINVAL - addr is not a multiple of PAGE_SIZE
215 * -ENOMEM - Addresses in the range [addr, addr + len] are 215 * -ENOMEM - Addresses in the range [addr, addr + len] are
216 * invalid for the address space of this process, or 216 * invalid for the address space of this process, or
217 * specify one or more pages which are not currently 217 * specify one or more pages which are not currently
@@ -226,14 +226,14 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
226 unsigned char *tmp; 226 unsigned char *tmp;
227 227
228 /* Check the start address: needs to be page-aligned.. */ 228 /* Check the start address: needs to be page-aligned.. */
229 if (start & ~PAGE_CACHE_MASK) 229 if (start & ~PAGE_MASK)
230 return -EINVAL; 230 return -EINVAL;
231 231
232 /* ..and we need to be passed a valid user-space range */ 232 /* ..and we need to be passed a valid user-space range */
233 if (!access_ok(VERIFY_READ, (void __user *) start, len)) 233 if (!access_ok(VERIFY_READ, (void __user *) start, len))
234 return -ENOMEM; 234 return -ENOMEM;
235 235
236 /* This also avoids any overflows on PAGE_CACHE_ALIGN */ 236 /* This also avoids any overflows on PAGE_ALIGN */
237 pages = len >> PAGE_SHIFT; 237 pages = len >> PAGE_SHIFT;
238 pages += (offset_in_page(len)) != 0; 238 pages += (offset_in_page(len)) != 0;
239 239
diff --git a/mm/nommu.c b/mm/nommu.c
index de8b6b6580c1..102e257cc6c3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -141,7 +141,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
141 if (pages) { 141 if (pages) {
142 pages[i] = virt_to_page(start); 142 pages[i] = virt_to_page(start);
143 if (pages[i]) 143 if (pages[i])
144 page_cache_get(pages[i]); 144 get_page(pages[i]);
145 } 145 }
146 if (vmas) 146 if (vmas)
147 vmas[i] = vma; 147 vmas[i] = vma;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b34d279a7ee6..86349586eacb 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused)
547 547
548static void wake_oom_reaper(struct task_struct *tsk) 548static void wake_oom_reaper(struct task_struct *tsk)
549{ 549{
550 if (!oom_reaper_th || tsk->oom_reaper_list) 550 if (!oom_reaper_th)
551 return;
552
553 /* tsk is already queued? */
554 if (tsk == oom_reaper_list || tsk->oom_reaper_list)
551 return; 555 return;
552 556
553 get_task_struct(tsk); 557 get_task_struct(tsk);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 11ff8f758631..999792d35ccc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2176,8 +2176,8 @@ int write_cache_pages(struct address_space *mapping,
2176 cycled = 0; 2176 cycled = 0;
2177 end = -1; 2177 end = -1;
2178 } else { 2178 } else {
2179 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2179 index = wbc->range_start >> PAGE_SHIFT;
2180 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2180 end = wbc->range_end >> PAGE_SHIFT;
2181 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2181 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2182 range_whole = 1; 2182 range_whole = 1;
2183 cycled = 1; /* ignore range_cyclic tests */ 2183 cycled = 1; /* ignore range_cyclic tests */
@@ -2382,14 +2382,14 @@ int write_one_page(struct page *page, int wait)
2382 wait_on_page_writeback(page); 2382 wait_on_page_writeback(page);
2383 2383
2384 if (clear_page_dirty_for_io(page)) { 2384 if (clear_page_dirty_for_io(page)) {
2385 page_cache_get(page); 2385 get_page(page);
2386 ret = mapping->a_ops->writepage(page, &wbc); 2386 ret = mapping->a_ops->writepage(page, &wbc);
2387 if (ret == 0 && wait) { 2387 if (ret == 0 && wait) {
2388 wait_on_page_writeback(page); 2388 wait_on_page_writeback(page);
2389 if (PageError(page)) 2389 if (PageError(page))
2390 ret = -EIO; 2390 ret = -EIO;
2391 } 2391 }
2392 page_cache_release(page); 2392 put_page(page);
2393 } else { 2393 } else {
2394 unlock_page(page); 2394 unlock_page(page);
2395 } 2395 }
@@ -2431,7 +2431,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
2431 __inc_zone_page_state(page, NR_DIRTIED); 2431 __inc_zone_page_state(page, NR_DIRTIED);
2432 __inc_wb_stat(wb, WB_RECLAIMABLE); 2432 __inc_wb_stat(wb, WB_RECLAIMABLE);
2433 __inc_wb_stat(wb, WB_DIRTIED); 2433 __inc_wb_stat(wb, WB_DIRTIED);
2434 task_io_account_write(PAGE_CACHE_SIZE); 2434 task_io_account_write(PAGE_SIZE);
2435 current->nr_dirtied++; 2435 current->nr_dirtied++;
2436 this_cpu_inc(bdp_ratelimits); 2436 this_cpu_inc(bdp_ratelimits);
2437 } 2437 }
@@ -2450,7 +2450,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
2450 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); 2450 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
2451 dec_zone_page_state(page, NR_FILE_DIRTY); 2451 dec_zone_page_state(page, NR_FILE_DIRTY);
2452 dec_wb_stat(wb, WB_RECLAIMABLE); 2452 dec_wb_stat(wb, WB_RECLAIMABLE);
2453 task_io_account_cancelled_write(PAGE_CACHE_SIZE); 2453 task_io_account_cancelled_write(PAGE_SIZE);
2454 } 2454 }
2455} 2455}
2456 2456
diff --git a/mm/page_io.c b/mm/page_io.c
index 18aac7819cc9..cd92e3d67a32 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -252,7 +252,7 @@ out:
252 252
253static sector_t swap_page_sector(struct page *page) 253static sector_t swap_page_sector(struct page *page)
254{ 254{
255 return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9); 255 return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
256} 256}
257 257
258int __swap_writepage(struct page *page, struct writeback_control *wbc, 258int __swap_writepage(struct page *page, struct writeback_control *wbc,
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 92c4c36501e7..c4f568206544 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
215 * all pages in [start_pfn...end_pfn) must be in the same zone. 215 * all pages in [start_pfn...end_pfn) must be in the same zone.
216 * zone->lock must be held before call this. 216 * zone->lock must be held before call this.
217 * 217 *
218 * Returns 1 if all pages in the range are isolated. 218 * Returns the last tested pfn.
219 */ 219 */
220static unsigned long 220static unsigned long
221__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, 221__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
289 * now as a simple work-around, we use the next node for destination. 289 * now as a simple work-around, we use the next node for destination.
290 */ 290 */
291 if (PageHuge(page)) { 291 if (PageHuge(page)) {
292 nodemask_t src = nodemask_of_node(page_to_nid(page)); 292 int node = next_online_node(page_to_nid(page));
293 nodemask_t dst; 293 if (node == MAX_NUMNODES)
294 nodes_complement(dst, src); 294 node = first_online_node;
295 return alloc_huge_page_node(page_hstate(compound_head(page)), 295 return alloc_huge_page_node(page_hstate(compound_head(page)),
296 next_node(page_to_nid(page), dst)); 296 node);
297 } 297 }
298 298
299 if (PageHighMem(page)) 299 if (PageHighMem(page))
diff --git a/mm/readahead.c b/mm/readahead.c
index 20e58e820e44..40be3ae0afe3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -47,11 +47,11 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
47 if (!trylock_page(page)) 47 if (!trylock_page(page))
48 BUG(); 48 BUG();
49 page->mapping = mapping; 49 page->mapping = mapping;
50 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 50 do_invalidatepage(page, 0, PAGE_SIZE);
51 page->mapping = NULL; 51 page->mapping = NULL;
52 unlock_page(page); 52 unlock_page(page);
53 } 53 }
54 page_cache_release(page); 54 put_page(page);
55} 55}
56 56
57/* 57/*
@@ -93,14 +93,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
93 read_cache_pages_invalidate_page(mapping, page); 93 read_cache_pages_invalidate_page(mapping, page);
94 continue; 94 continue;
95 } 95 }
96 page_cache_release(page); 96 put_page(page);
97 97
98 ret = filler(data, page); 98 ret = filler(data, page);
99 if (unlikely(ret)) { 99 if (unlikely(ret)) {
100 read_cache_pages_invalidate_pages(mapping, pages); 100 read_cache_pages_invalidate_pages(mapping, pages);
101 break; 101 break;
102 } 102 }
103 task_io_account_read(PAGE_CACHE_SIZE); 103 task_io_account_read(PAGE_SIZE);
104 } 104 }
105 return ret; 105 return ret;
106} 106}
@@ -130,7 +130,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
130 mapping_gfp_constraint(mapping, GFP_KERNEL))) { 130 mapping_gfp_constraint(mapping, GFP_KERNEL))) {
131 mapping->a_ops->readpage(filp, page); 131 mapping->a_ops->readpage(filp, page);
132 } 132 }
133 page_cache_release(page); 133 put_page(page);
134 } 134 }
135 ret = 0; 135 ret = 0;
136 136
@@ -163,7 +163,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
163 if (isize == 0) 163 if (isize == 0)
164 goto out; 164 goto out;
165 165
166 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); 166 end_index = ((isize - 1) >> PAGE_SHIFT);
167 167
168 /* 168 /*
169 * Preallocate as many pages as we will need. 169 * Preallocate as many pages as we will need.
@@ -216,7 +216,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
216 while (nr_to_read) { 216 while (nr_to_read) {
217 int err; 217 int err;
218 218
219 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE; 219 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
220 220
221 if (this_chunk > nr_to_read) 221 if (this_chunk > nr_to_read)
222 this_chunk = nr_to_read; 222 this_chunk = nr_to_read;
@@ -425,7 +425,7 @@ ondemand_readahead(struct address_space *mapping,
425 * trivial case: (offset - prev_offset) == 1 425 * trivial case: (offset - prev_offset) == 1
426 * unaligned reads: (offset - prev_offset) == 0 426 * unaligned reads: (offset - prev_offset) == 0
427 */ 427 */
428 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT; 428 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
429 if (offset - prev_offset <= 1UL) 429 if (offset - prev_offset <= 1UL)
430 goto initial_readahead; 430 goto initial_readahead;
431 431
@@ -558,8 +558,8 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
558 if (f.file) { 558 if (f.file) {
559 if (f.file->f_mode & FMODE_READ) { 559 if (f.file->f_mode & FMODE_READ) {
560 struct address_space *mapping = f.file->f_mapping; 560 struct address_space *mapping = f.file->f_mapping;
561 pgoff_t start = offset >> PAGE_CACHE_SHIFT; 561 pgoff_t start = offset >> PAGE_SHIFT;
562 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT; 562 pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
563 unsigned long len = end - start + 1; 563 unsigned long len = end - start + 1;
564 ret = do_readahead(mapping, f.file, start, len); 564 ret = do_readahead(mapping, f.file, start, len);
565 } 565 }
diff --git a/mm/rmap.c b/mm/rmap.c
index c399a0d41b31..307b555024ef 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
569} 569}
570 570
571#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 571#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
572static void percpu_flush_tlb_batch_pages(void *data)
573{
574 /*
575 * All TLB entries are flushed on the assumption that it is
576 * cheaper to flush all TLBs and let them be refilled than
577 * flushing individual PFNs. Note that we do not track mm's
578 * to flush as that might simply be multiple full TLB flushes
579 * for no gain.
580 */
581 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
582 flush_tlb_local();
583}
584
585/* 572/*
586 * Flush TLB entries for recently unmapped pages from remote CPUs. It is 573 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
587 * important if a PTE was dirty when it was unmapped that it's flushed 574 * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
598 585
599 cpu = get_cpu(); 586 cpu = get_cpu();
600 587
601 trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL); 588 if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
602 589 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
603 if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) 590 local_flush_tlb();
604 percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask); 591 trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
605
606 if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
607 smp_call_function_many(&tlb_ubc->cpumask,
608 percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
609 } 592 }
593
594 if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
595 flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
610 cpumask_clear(&tlb_ubc->cpumask); 596 cpumask_clear(&tlb_ubc->cpumask);
611 tlb_ubc->flush_required = false; 597 tlb_ubc->flush_required = false;
612 tlb_ubc->writable = false; 598 tlb_ubc->writable = false;
@@ -1555,7 +1541,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1555 1541
1556discard: 1542discard:
1557 page_remove_rmap(page, PageHuge(page)); 1543 page_remove_rmap(page, PageHuge(page));
1558 page_cache_release(page); 1544 put_page(page);
1559 1545
1560out_unmap: 1546out_unmap:
1561 pte_unmap_unlock(pte, ptl); 1547 pte_unmap_unlock(pte, ptl);
diff --git a/mm/shmem.c b/mm/shmem.c
index 9428c51ab2d6..719bd6b88d98 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,8 +75,8 @@ static struct vfsmount *shm_mnt;
75 75
76#include "internal.h" 76#include "internal.h"
77 77
78#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) 78#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
79#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) 79#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
80 80
81/* Pretend that each entry is of this size in directory's i_size */ 81/* Pretend that each entry is of this size in directory's i_size */
82#define BOGO_DIRENT_SIZE 20 82#define BOGO_DIRENT_SIZE 20
@@ -176,13 +176,13 @@ static inline int shmem_reacct_size(unsigned long flags,
176static inline int shmem_acct_block(unsigned long flags) 176static inline int shmem_acct_block(unsigned long flags)
177{ 177{
178 return (flags & VM_NORESERVE) ? 178 return (flags & VM_NORESERVE) ?
179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0; 179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
180} 180}
181 181
182static inline void shmem_unacct_blocks(unsigned long flags, long pages) 182static inline void shmem_unacct_blocks(unsigned long flags, long pages)
183{ 183{
184 if (flags & VM_NORESERVE) 184 if (flags & VM_NORESERVE)
185 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); 185 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
186} 186}
187 187
188static const struct super_operations shmem_ops; 188static const struct super_operations shmem_ops;
@@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struct page *page,
300 VM_BUG_ON_PAGE(!PageLocked(page), page); 300 VM_BUG_ON_PAGE(!PageLocked(page), page);
301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
302 302
303 page_cache_get(page); 303 get_page(page);
304 page->mapping = mapping; 304 page->mapping = mapping;
305 page->index = index; 305 page->index = index;
306 306
@@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struct page *page,
318 } else { 318 } else {
319 page->mapping = NULL; 319 page->mapping = NULL;
320 spin_unlock_irq(&mapping->tree_lock); 320 spin_unlock_irq(&mapping->tree_lock);
321 page_cache_release(page); 321 put_page(page);
322 } 322 }
323 return error; 323 return error;
324} 324}
@@ -338,7 +338,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
338 __dec_zone_page_state(page, NR_FILE_PAGES); 338 __dec_zone_page_state(page, NR_FILE_PAGES);
339 __dec_zone_page_state(page, NR_SHMEM); 339 __dec_zone_page_state(page, NR_SHMEM);
340 spin_unlock_irq(&mapping->tree_lock); 340 spin_unlock_irq(&mapping->tree_lock);
341 page_cache_release(page); 341 put_page(page);
342 BUG_ON(error); 342 BUG_ON(error);
343} 343}
344 344
@@ -474,10 +474,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
474{ 474{
475 struct address_space *mapping = inode->i_mapping; 475 struct address_space *mapping = inode->i_mapping;
476 struct shmem_inode_info *info = SHMEM_I(inode); 476 struct shmem_inode_info *info = SHMEM_I(inode);
477 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 477 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
478 pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT; 478 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
479 unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1); 479 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
480 unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 480 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
481 struct pagevec pvec; 481 struct pagevec pvec;
482 pgoff_t indices[PAGEVEC_SIZE]; 482 pgoff_t indices[PAGEVEC_SIZE];
483 long nr_swaps_freed = 0; 483 long nr_swaps_freed = 0;
@@ -530,7 +530,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
530 struct page *page = NULL; 530 struct page *page = NULL;
531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); 531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
532 if (page) { 532 if (page) {
533 unsigned int top = PAGE_CACHE_SIZE; 533 unsigned int top = PAGE_SIZE;
534 if (start > end) { 534 if (start > end) {
535 top = partial_end; 535 top = partial_end;
536 partial_end = 0; 536 partial_end = 0;
@@ -538,7 +538,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
538 zero_user_segment(page, partial_start, top); 538 zero_user_segment(page, partial_start, top);
539 set_page_dirty(page); 539 set_page_dirty(page);
540 unlock_page(page); 540 unlock_page(page);
541 page_cache_release(page); 541 put_page(page);
542 } 542 }
543 } 543 }
544 if (partial_end) { 544 if (partial_end) {
@@ -548,7 +548,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
548 zero_user_segment(page, 0, partial_end); 548 zero_user_segment(page, 0, partial_end);
549 set_page_dirty(page); 549 set_page_dirty(page);
550 unlock_page(page); 550 unlock_page(page);
551 page_cache_release(page); 551 put_page(page);
552 } 552 }
553 } 553 }
554 if (start >= end) 554 if (start >= end)
@@ -833,7 +833,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
833 mem_cgroup_commit_charge(page, memcg, true, false); 833 mem_cgroup_commit_charge(page, memcg, true, false);
834out: 834out:
835 unlock_page(page); 835 unlock_page(page);
836 page_cache_release(page); 836 put_page(page);
837 return error; 837 return error;
838} 838}
839 839
@@ -1080,7 +1080,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1080 if (!newpage) 1080 if (!newpage)
1081 return -ENOMEM; 1081 return -ENOMEM;
1082 1082
1083 page_cache_get(newpage); 1083 get_page(newpage);
1084 copy_highpage(newpage, oldpage); 1084 copy_highpage(newpage, oldpage);
1085 flush_dcache_page(newpage); 1085 flush_dcache_page(newpage);
1086 1086
@@ -1120,8 +1120,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1120 set_page_private(oldpage, 0); 1120 set_page_private(oldpage, 0);
1121 1121
1122 unlock_page(oldpage); 1122 unlock_page(oldpage);
1123 page_cache_release(oldpage); 1123 put_page(oldpage);
1124 page_cache_release(oldpage); 1124 put_page(oldpage);
1125 return error; 1125 return error;
1126} 1126}
1127 1127
@@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1145 int once = 0; 1145 int once = 0;
1146 int alloced = 0; 1146 int alloced = 0;
1147 1147
1148 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) 1148 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1149 return -EFBIG; 1149 return -EFBIG;
1150repeat: 1150repeat:
1151 swap.val = 0; 1151 swap.val = 0;
@@ -1156,7 +1156,7 @@ repeat:
1156 } 1156 }
1157 1157
1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1159 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1159 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1160 error = -EINVAL; 1160 error = -EINVAL;
1161 goto unlock; 1161 goto unlock;
1162 } 1162 }
@@ -1169,7 +1169,7 @@ repeat:
1169 if (sgp != SGP_READ) 1169 if (sgp != SGP_READ)
1170 goto clear; 1170 goto clear;
1171 unlock_page(page); 1171 unlock_page(page);
1172 page_cache_release(page); 1172 put_page(page);
1173 page = NULL; 1173 page = NULL;
1174 } 1174 }
1175 if (page || (sgp == SGP_READ && !swap.val)) { 1175 if (page || (sgp == SGP_READ && !swap.val)) {
@@ -1327,7 +1327,7 @@ clear:
1327 1327
1328 /* Perhaps the file has been truncated since we checked */ 1328 /* Perhaps the file has been truncated since we checked */
1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1330 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1330 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1331 if (alloced) { 1331 if (alloced) {
1332 ClearPageDirty(page); 1332 ClearPageDirty(page);
1333 delete_from_page_cache(page); 1333 delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@ failed:
1355unlock: 1355unlock:
1356 if (page) { 1356 if (page) {
1357 unlock_page(page); 1357 unlock_page(page);
1358 page_cache_release(page); 1358 put_page(page);
1359 } 1359 }
1360 if (error == -ENOSPC && !once++) { 1360 if (error == -ENOSPC && !once++) {
1361 info = SHMEM_I(inode); 1361 info = SHMEM_I(inode);
@@ -1577,7 +1577,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
1577{ 1577{
1578 struct inode *inode = mapping->host; 1578 struct inode *inode = mapping->host;
1579 struct shmem_inode_info *info = SHMEM_I(inode); 1579 struct shmem_inode_info *info = SHMEM_I(inode);
1580 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1580 pgoff_t index = pos >> PAGE_SHIFT;
1581 1581
1582 /* i_mutex is held by caller */ 1582 /* i_mutex is held by caller */
1583 if (unlikely(info->seals)) { 1583 if (unlikely(info->seals)) {
@@ -1601,16 +1601,16 @@ shmem_write_end(struct file *file, struct address_space *mapping,
1601 i_size_write(inode, pos + copied); 1601 i_size_write(inode, pos + copied);
1602 1602
1603 if (!PageUptodate(page)) { 1603 if (!PageUptodate(page)) {
1604 if (copied < PAGE_CACHE_SIZE) { 1604 if (copied < PAGE_SIZE) {
1605 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1605 unsigned from = pos & (PAGE_SIZE - 1);
1606 zero_user_segments(page, 0, from, 1606 zero_user_segments(page, 0, from,
1607 from + copied, PAGE_CACHE_SIZE); 1607 from + copied, PAGE_SIZE);
1608 } 1608 }
1609 SetPageUptodate(page); 1609 SetPageUptodate(page);
1610 } 1610 }
1611 set_page_dirty(page); 1611 set_page_dirty(page);
1612 unlock_page(page); 1612 unlock_page(page);
1613 page_cache_release(page); 1613 put_page(page);
1614 1614
1615 return copied; 1615 return copied;
1616} 1616}
@@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1635 if (!iter_is_iovec(to)) 1635 if (!iter_is_iovec(to))
1636 sgp = SGP_DIRTY; 1636 sgp = SGP_DIRTY;
1637 1637
1638 index = *ppos >> PAGE_CACHE_SHIFT; 1638 index = *ppos >> PAGE_SHIFT;
1639 offset = *ppos & ~PAGE_CACHE_MASK; 1639 offset = *ppos & ~PAGE_MASK;
1640 1640
1641 for (;;) { 1641 for (;;) {
1642 struct page *page = NULL; 1642 struct page *page = NULL;
@@ -1644,11 +1644,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1644 unsigned long nr, ret; 1644 unsigned long nr, ret;
1645 loff_t i_size = i_size_read(inode); 1645 loff_t i_size = i_size_read(inode);
1646 1646
1647 end_index = i_size >> PAGE_CACHE_SHIFT; 1647 end_index = i_size >> PAGE_SHIFT;
1648 if (index > end_index) 1648 if (index > end_index)
1649 break; 1649 break;
1650 if (index == end_index) { 1650 if (index == end_index) {
1651 nr = i_size & ~PAGE_CACHE_MASK; 1651 nr = i_size & ~PAGE_MASK;
1652 if (nr <= offset) 1652 if (nr <= offset)
1653 break; 1653 break;
1654 } 1654 }
@@ -1666,14 +1666,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1666 * We must evaluate after, since reads (unlike writes) 1666 * We must evaluate after, since reads (unlike writes)
1667 * are called without i_mutex protection against truncate 1667 * are called without i_mutex protection against truncate
1668 */ 1668 */
1669 nr = PAGE_CACHE_SIZE; 1669 nr = PAGE_SIZE;
1670 i_size = i_size_read(inode); 1670 i_size = i_size_read(inode);
1671 end_index = i_size >> PAGE_CACHE_SHIFT; 1671 end_index = i_size >> PAGE_SHIFT;
1672 if (index == end_index) { 1672 if (index == end_index) {
1673 nr = i_size & ~PAGE_CACHE_MASK; 1673 nr = i_size & ~PAGE_MASK;
1674 if (nr <= offset) { 1674 if (nr <= offset) {
1675 if (page) 1675 if (page)
1676 page_cache_release(page); 1676 put_page(page);
1677 break; 1677 break;
1678 } 1678 }
1679 } 1679 }
@@ -1694,7 +1694,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1694 mark_page_accessed(page); 1694 mark_page_accessed(page);
1695 } else { 1695 } else {
1696 page = ZERO_PAGE(0); 1696 page = ZERO_PAGE(0);
1697 page_cache_get(page); 1697 get_page(page);
1698 } 1698 }
1699 1699
1700 /* 1700 /*
@@ -1704,10 +1704,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1704 ret = copy_page_to_iter(page, offset, nr, to); 1704 ret = copy_page_to_iter(page, offset, nr, to);
1705 retval += ret; 1705 retval += ret;
1706 offset += ret; 1706 offset += ret;
1707 index += offset >> PAGE_CACHE_SHIFT; 1707 index += offset >> PAGE_SHIFT;
1708 offset &= ~PAGE_CACHE_MASK; 1708 offset &= ~PAGE_MASK;
1709 1709
1710 page_cache_release(page); 1710 put_page(page);
1711 if (!iov_iter_count(to)) 1711 if (!iov_iter_count(to))
1712 break; 1712 break;
1713 if (ret < nr) { 1713 if (ret < nr) {
@@ -1717,7 +1717,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1717 cond_resched(); 1717 cond_resched();
1718 } 1718 }
1719 1719
1720 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 1720 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
1721 file_accessed(file); 1721 file_accessed(file);
1722 return retval ? retval : error; 1722 return retval ? retval : error;
1723} 1723}
@@ -1755,9 +1755,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1755 if (splice_grow_spd(pipe, &spd)) 1755 if (splice_grow_spd(pipe, &spd))
1756 return -ENOMEM; 1756 return -ENOMEM;
1757 1757
1758 index = *ppos >> PAGE_CACHE_SHIFT; 1758 index = *ppos >> PAGE_SHIFT;
1759 loff = *ppos & ~PAGE_CACHE_MASK; 1759 loff = *ppos & ~PAGE_MASK;
1760 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1760 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
1761 nr_pages = min(req_pages, spd.nr_pages_max); 1761 nr_pages = min(req_pages, spd.nr_pages_max);
1762 1762
1763 spd.nr_pages = find_get_pages_contig(mapping, index, 1763 spd.nr_pages = find_get_pages_contig(mapping, index,
@@ -1774,7 +1774,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1774 index++; 1774 index++;
1775 } 1775 }
1776 1776
1777 index = *ppos >> PAGE_CACHE_SHIFT; 1777 index = *ppos >> PAGE_SHIFT;
1778 nr_pages = spd.nr_pages; 1778 nr_pages = spd.nr_pages;
1779 spd.nr_pages = 0; 1779 spd.nr_pages = 0;
1780 1780
@@ -1784,7 +1784,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1784 if (!len) 1784 if (!len)
1785 break; 1785 break;
1786 1786
1787 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 1787 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
1788 page = spd.pages[page_nr]; 1788 page = spd.pages[page_nr];
1789 1789
1790 if (!PageUptodate(page) || page->mapping != mapping) { 1790 if (!PageUptodate(page) || page->mapping != mapping) {
@@ -1793,19 +1793,19 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1793 if (error) 1793 if (error)
1794 break; 1794 break;
1795 unlock_page(page); 1795 unlock_page(page);
1796 page_cache_release(spd.pages[page_nr]); 1796 put_page(spd.pages[page_nr]);
1797 spd.pages[page_nr] = page; 1797 spd.pages[page_nr] = page;
1798 } 1798 }
1799 1799
1800 isize = i_size_read(inode); 1800 isize = i_size_read(inode);
1801 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1801 end_index = (isize - 1) >> PAGE_SHIFT;
1802 if (unlikely(!isize || index > end_index)) 1802 if (unlikely(!isize || index > end_index))
1803 break; 1803 break;
1804 1804
1805 if (end_index == index) { 1805 if (end_index == index) {
1806 unsigned int plen; 1806 unsigned int plen;
1807 1807
1808 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1808 plen = ((isize - 1) & ~PAGE_MASK) + 1;
1809 if (plen <= loff) 1809 if (plen <= loff)
1810 break; 1810 break;
1811 1811
@@ -1822,7 +1822,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1822 } 1822 }
1823 1823
1824 while (page_nr < nr_pages) 1824 while (page_nr < nr_pages)
1825 page_cache_release(spd.pages[page_nr++]); 1825 put_page(spd.pages[page_nr++]);
1826 1826
1827 if (spd.nr_pages) 1827 if (spd.nr_pages)
1828 error = splice_to_pipe(pipe, &spd); 1828 error = splice_to_pipe(pipe, &spd);
@@ -1904,10 +1904,10 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1904 else if (offset >= inode->i_size) 1904 else if (offset >= inode->i_size)
1905 offset = -ENXIO; 1905 offset = -ENXIO;
1906 else { 1906 else {
1907 start = offset >> PAGE_CACHE_SHIFT; 1907 start = offset >> PAGE_SHIFT;
1908 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1908 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence); 1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1910 new_offset <<= PAGE_CACHE_SHIFT; 1910 new_offset <<= PAGE_SHIFT;
1911 if (new_offset > offset) { 1911 if (new_offset > offset) {
1912 if (new_offset < inode->i_size) 1912 if (new_offset < inode->i_size)
1913 offset = new_offset; 1913 offset = new_offset;
@@ -2203,8 +2203,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2203 goto out; 2203 goto out;
2204 } 2204 }
2205 2205
2206 start = offset >> PAGE_CACHE_SHIFT; 2206 start = offset >> PAGE_SHIFT;
2207 end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 2207 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2208 /* Try to avoid a swapstorm if len is impossible to satisfy */ 2208 /* Try to avoid a swapstorm if len is impossible to satisfy */
2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2210 error = -ENOSPC; 2210 error = -ENOSPC;
@@ -2237,8 +2237,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2237 if (error) { 2237 if (error) {
2238 /* Remove the !PageUptodate pages we added */ 2238 /* Remove the !PageUptodate pages we added */
2239 shmem_undo_range(inode, 2239 shmem_undo_range(inode,
2240 (loff_t)start << PAGE_CACHE_SHIFT, 2240 (loff_t)start << PAGE_SHIFT,
2241 (loff_t)index << PAGE_CACHE_SHIFT, true); 2241 (loff_t)index << PAGE_SHIFT, true);
2242 goto undone; 2242 goto undone;
2243 } 2243 }
2244 2244
@@ -2259,7 +2259,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2259 */ 2259 */
2260 set_page_dirty(page); 2260 set_page_dirty(page);
2261 unlock_page(page); 2261 unlock_page(page);
2262 page_cache_release(page); 2262 put_page(page);
2263 cond_resched(); 2263 cond_resched();
2264 } 2264 }
2265 2265
@@ -2280,7 +2280,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2281 2281
2282 buf->f_type = TMPFS_MAGIC; 2282 buf->f_type = TMPFS_MAGIC;
2283 buf->f_bsize = PAGE_CACHE_SIZE; 2283 buf->f_bsize = PAGE_SIZE;
2284 buf->f_namelen = NAME_MAX; 2284 buf->f_namelen = NAME_MAX;
2285 if (sbinfo->max_blocks) { 2285 if (sbinfo->max_blocks) {
2286 buf->f_blocks = sbinfo->max_blocks; 2286 buf->f_blocks = sbinfo->max_blocks;
@@ -2523,7 +2523,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2523 struct shmem_inode_info *info; 2523 struct shmem_inode_info *info;
2524 2524
2525 len = strlen(symname) + 1; 2525 len = strlen(symname) + 1;
2526 if (len > PAGE_CACHE_SIZE) 2526 if (len > PAGE_SIZE)
2527 return -ENAMETOOLONG; 2527 return -ENAMETOOLONG;
2528 2528
2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
@@ -2562,7 +2562,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2562 SetPageUptodate(page); 2562 SetPageUptodate(page);
2563 set_page_dirty(page); 2563 set_page_dirty(page);
2564 unlock_page(page); 2564 unlock_page(page);
2565 page_cache_release(page); 2565 put_page(page);
2566 } 2566 }
2567 dir->i_size += BOGO_DIRENT_SIZE; 2567 dir->i_size += BOGO_DIRENT_SIZE;
2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -2835,7 +2835,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2835 if (*rest) 2835 if (*rest)
2836 goto bad_val; 2836 goto bad_val;
2837 sbinfo->max_blocks = 2837 sbinfo->max_blocks =
2838 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2838 DIV_ROUND_UP(size, PAGE_SIZE);
2839 } else if (!strcmp(this_char,"nr_blocks")) { 2839 } else if (!strcmp(this_char,"nr_blocks")) {
2840 sbinfo->max_blocks = memparse(value, &rest); 2840 sbinfo->max_blocks = memparse(value, &rest);
2841 if (*rest) 2841 if (*rest)
@@ -2940,7 +2940,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2940 2940
2941 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2941 if (sbinfo->max_blocks != shmem_default_max_blocks())
2942 seq_printf(seq, ",size=%luk", 2942 seq_printf(seq, ",size=%luk",
2943 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2943 sbinfo->max_blocks << (PAGE_SHIFT - 10));
2944 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2944 if (sbinfo->max_inodes != shmem_default_max_inodes())
2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
@@ -3082,8 +3082,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
3082 sbinfo->free_inodes = sbinfo->max_inodes; 3082 sbinfo->free_inodes = sbinfo->max_inodes;
3083 3083
3084 sb->s_maxbytes = MAX_LFS_FILESIZE; 3084 sb->s_maxbytes = MAX_LFS_FILESIZE;
3085 sb->s_blocksize = PAGE_CACHE_SIZE; 3085 sb->s_blocksize = PAGE_SIZE;
3086 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 3086 sb->s_blocksize_bits = PAGE_SHIFT;
3087 sb->s_magic = TMPFS_MAGIC; 3087 sb->s_magic = TMPFS_MAGIC;
3088 sb->s_op = &shmem_ops; 3088 sb->s_op = &shmem_ops;
3089 sb->s_time_gran = 1; 3089 sb->s_time_gran = 1;
diff --git a/mm/swap.c b/mm/swap.c
index 09fe5e97714a..a0bc206b4ac6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -114,7 +114,7 @@ void put_pages_list(struct list_head *pages)
114 114
115 victim = list_entry(pages->prev, struct page, lru); 115 victim = list_entry(pages->prev, struct page, lru);
116 list_del(&victim->lru); 116 list_del(&victim->lru);
117 page_cache_release(victim); 117 put_page(victim);
118 } 118 }
119} 119}
120EXPORT_SYMBOL(put_pages_list); 120EXPORT_SYMBOL(put_pages_list);
@@ -142,7 +142,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
142 return seg; 142 return seg;
143 143
144 pages[seg] = kmap_to_page(kiov[seg].iov_base); 144 pages[seg] = kmap_to_page(kiov[seg].iov_base);
145 page_cache_get(pages[seg]); 145 get_page(pages[seg]);
146 } 146 }
147 147
148 return seg; 148 return seg;
@@ -236,7 +236,7 @@ void rotate_reclaimable_page(struct page *page)
236 struct pagevec *pvec; 236 struct pagevec *pvec;
237 unsigned long flags; 237 unsigned long flags;
238 238
239 page_cache_get(page); 239 get_page(page);
240 local_irq_save(flags); 240 local_irq_save(flags);
241 pvec = this_cpu_ptr(&lru_rotate_pvecs); 241 pvec = this_cpu_ptr(&lru_rotate_pvecs);
242 if (!pagevec_add(pvec, page)) 242 if (!pagevec_add(pvec, page))
@@ -294,7 +294,7 @@ void activate_page(struct page *page)
294 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 294 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
295 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); 295 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
296 296
297 page_cache_get(page); 297 get_page(page);
298 if (!pagevec_add(pvec, page)) 298 if (!pagevec_add(pvec, page))
299 pagevec_lru_move_fn(pvec, __activate_page, NULL); 299 pagevec_lru_move_fn(pvec, __activate_page, NULL);
300 put_cpu_var(activate_page_pvecs); 300 put_cpu_var(activate_page_pvecs);
@@ -389,7 +389,7 @@ static void __lru_cache_add(struct page *page)
389{ 389{
390 struct pagevec *pvec = &get_cpu_var(lru_add_pvec); 390 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
391 391
392 page_cache_get(page); 392 get_page(page);
393 if (!pagevec_space(pvec)) 393 if (!pagevec_space(pvec))
394 __pagevec_lru_add(pvec); 394 __pagevec_lru_add(pvec);
395 pagevec_add(pvec, page); 395 pagevec_add(pvec, page);
@@ -646,7 +646,7 @@ void deactivate_page(struct page *page)
646 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { 646 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); 647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
648 648
649 page_cache_get(page); 649 get_page(page);
650 if (!pagevec_add(pvec, page)) 650 if (!pagevec_add(pvec, page))
651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); 651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
652 put_cpu_var(lru_deactivate_pvecs); 652 put_cpu_var(lru_deactivate_pvecs);
@@ -698,7 +698,7 @@ void lru_add_drain_all(void)
698} 698}
699 699
700/** 700/**
701 * release_pages - batched page_cache_release() 701 * release_pages - batched put_page()
702 * @pages: array of pages to release 702 * @pages: array of pages to release
703 * @nr: number of pages 703 * @nr: number of pages
704 * @cold: whether the pages are cache cold 704 * @cold: whether the pages are cache cold
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 69cb2464e7dc..366ce3518703 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,7 +85,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
85 VM_BUG_ON_PAGE(PageSwapCache(page), page); 85 VM_BUG_ON_PAGE(PageSwapCache(page), page);
86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
87 87
88 page_cache_get(page); 88 get_page(page);
89 SetPageSwapCache(page); 89 SetPageSwapCache(page);
90 set_page_private(page, entry.val); 90 set_page_private(page, entry.val);
91 91
@@ -109,7 +109,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
109 VM_BUG_ON(error == -EEXIST); 109 VM_BUG_ON(error == -EEXIST);
110 set_page_private(page, 0UL); 110 set_page_private(page, 0UL);
111 ClearPageSwapCache(page); 111 ClearPageSwapCache(page);
112 page_cache_release(page); 112 put_page(page);
113 } 113 }
114 114
115 return error; 115 return error;
@@ -226,7 +226,7 @@ void delete_from_swap_cache(struct page *page)
226 spin_unlock_irq(&address_space->tree_lock); 226 spin_unlock_irq(&address_space->tree_lock);
227 227
228 swapcache_free(entry); 228 swapcache_free(entry);
229 page_cache_release(page); 229 put_page(page);
230} 230}
231 231
232/* 232/*
@@ -252,7 +252,7 @@ static inline void free_swap_cache(struct page *page)
252void free_page_and_swap_cache(struct page *page) 252void free_page_and_swap_cache(struct page *page)
253{ 253{
254 free_swap_cache(page); 254 free_swap_cache(page);
255 page_cache_release(page); 255 put_page(page);
256} 256}
257 257
258/* 258/*
@@ -380,7 +380,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
380 } while (err != -ENOMEM); 380 } while (err != -ENOMEM);
381 381
382 if (new_page) 382 if (new_page)
383 page_cache_release(new_page); 383 put_page(new_page);
384 return found_page; 384 return found_page;
385} 385}
386 386
@@ -495,7 +495,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
495 continue; 495 continue;
496 if (offset != entry_offset) 496 if (offset != entry_offset)
497 SetPageReadahead(page); 497 SetPageReadahead(page);
498 page_cache_release(page); 498 put_page(page);
499 } 499 }
500 blk_finish_plug(&plug); 500 blk_finish_plug(&plug);
501 501
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 560ad380634c..83874eced5bf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -119,7 +119,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
119 ret = try_to_free_swap(page); 119 ret = try_to_free_swap(page);
120 unlock_page(page); 120 unlock_page(page);
121 } 121 }
122 page_cache_release(page); 122 put_page(page);
123 return ret; 123 return ret;
124} 124}
125 125
@@ -1000,7 +1000,7 @@ int free_swap_and_cache(swp_entry_t entry)
1000 page = find_get_page(swap_address_space(entry), 1000 page = find_get_page(swap_address_space(entry),
1001 entry.val); 1001 entry.val);
1002 if (page && !trylock_page(page)) { 1002 if (page && !trylock_page(page)) {
1003 page_cache_release(page); 1003 put_page(page);
1004 page = NULL; 1004 page = NULL;
1005 } 1005 }
1006 } 1006 }
@@ -1017,7 +1017,7 @@ int free_swap_and_cache(swp_entry_t entry)
1017 SetPageDirty(page); 1017 SetPageDirty(page);
1018 } 1018 }
1019 unlock_page(page); 1019 unlock_page(page);
1020 page_cache_release(page); 1020 put_page(page);
1021 } 1021 }
1022 return p != NULL; 1022 return p != NULL;
1023} 1023}
@@ -1518,7 +1518,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
1518 } 1518 }
1519 if (retval) { 1519 if (retval) {
1520 unlock_page(page); 1520 unlock_page(page);
1521 page_cache_release(page); 1521 put_page(page);
1522 break; 1522 break;
1523 } 1523 }
1524 1524
@@ -1570,7 +1570,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
1570 */ 1570 */
1571 SetPageDirty(page); 1571 SetPageDirty(page);
1572 unlock_page(page); 1572 unlock_page(page);
1573 page_cache_release(page); 1573 put_page(page);
1574 1574
1575 /* 1575 /*
1576 * Make sure that we aren't completely killing 1576 * Make sure that we aren't completely killing
@@ -2574,7 +2574,7 @@ bad_swap:
2574out: 2574out:
2575 if (page && !IS_ERR(page)) { 2575 if (page && !IS_ERR(page)) {
2576 kunmap(page); 2576 kunmap(page);
2577 page_cache_release(page); 2577 put_page(page);
2578 } 2578 }
2579 if (name) 2579 if (name)
2580 putname(name); 2580 putname(name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7598b552ae03..b00272810871 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
118 return -EIO; 118 return -EIO;
119 119
120 if (page_has_private(page)) 120 if (page_has_private(page))
121 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 121 do_invalidatepage(page, 0, PAGE_SIZE);
122 122
123 /* 123 /*
124 * Some filesystems seem to re-dirty the page even after 124 * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
159{ 159{
160 if (page_mapped(page)) { 160 if (page_mapped(page)) {
161 unmap_mapping_range(mapping, 161 unmap_mapping_range(mapping,
162 (loff_t)page->index << PAGE_CACHE_SHIFT, 162 (loff_t)page->index << PAGE_SHIFT,
163 PAGE_CACHE_SIZE, 0); 163 PAGE_SIZE, 0);
164 } 164 }
165 return truncate_complete_page(mapping, page); 165 return truncate_complete_page(mapping, page);
166} 166}
@@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
241 return; 241 return;
242 242
243 /* Offsets within partial pages */ 243 /* Offsets within partial pages */
244 partial_start = lstart & (PAGE_CACHE_SIZE - 1); 244 partial_start = lstart & (PAGE_SIZE - 1);
245 partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 245 partial_end = (lend + 1) & (PAGE_SIZE - 1);
246 246
247 /* 247 /*
248 * 'start' and 'end' always covers the range of pages to be fully 248 * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
250 * start of the range and 'partial_end' at the end of the range. 250 * start of the range and 'partial_end' at the end of the range.
251 * Note that 'end' is exclusive while 'lend' is inclusive. 251 * Note that 'end' is exclusive while 'lend' is inclusive.
252 */ 252 */
253 start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 253 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
254 if (lend == -1) 254 if (lend == -1)
255 /* 255 /*
256 * lend == -1 indicates end-of-file so we have to set 'end' 256 * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
259 */ 259 */
260 end = -1; 260 end = -1;
261 else 261 else
262 end = (lend + 1) >> PAGE_CACHE_SHIFT; 262 end = (lend + 1) >> PAGE_SHIFT;
263 263
264 pagevec_init(&pvec, 0); 264 pagevec_init(&pvec, 0);
265 index = start; 265 index = start;
@@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
298 if (partial_start) { 298 if (partial_start) {
299 struct page *page = find_lock_page(mapping, start - 1); 299 struct page *page = find_lock_page(mapping, start - 1);
300 if (page) { 300 if (page) {
301 unsigned int top = PAGE_CACHE_SIZE; 301 unsigned int top = PAGE_SIZE;
302 if (start > end) { 302 if (start > end) {
303 /* Truncation within a single page */ 303 /* Truncation within a single page */
304 top = partial_end; 304 top = partial_end;
@@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
311 do_invalidatepage(page, partial_start, 311 do_invalidatepage(page, partial_start,
312 top - partial_start); 312 top - partial_start);
313 unlock_page(page); 313 unlock_page(page);
314 page_cache_release(page); 314 put_page(page);
315 } 315 }
316 } 316 }
317 if (partial_end) { 317 if (partial_end) {
@@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
324 do_invalidatepage(page, 0, 324 do_invalidatepage(page, 0,
325 partial_end); 325 partial_end);
326 unlock_page(page); 326 unlock_page(page);
327 page_cache_release(page); 327 put_page(page);
328 } 328 }
329 } 329 }
330 /* 330 /*
@@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
538 if (mapping->a_ops->freepage) 538 if (mapping->a_ops->freepage)
539 mapping->a_ops->freepage(page); 539 mapping->a_ops->freepage(page);
540 540
541 page_cache_release(page); /* pagecache ref */ 541 put_page(page); /* pagecache ref */
542 return 1; 542 return 1;
543failed: 543failed:
544 spin_unlock_irqrestore(&mapping->tree_lock, flags); 544 spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
608 * Zap the rest of the file in one hit. 608 * Zap the rest of the file in one hit.
609 */ 609 */
610 unmap_mapping_range(mapping, 610 unmap_mapping_range(mapping,
611 (loff_t)index << PAGE_CACHE_SHIFT, 611 (loff_t)index << PAGE_SHIFT,
612 (loff_t)(1 + end - index) 612 (loff_t)(1 + end - index)
613 << PAGE_CACHE_SHIFT, 613 << PAGE_SHIFT,
614 0); 614 0);
615 did_range_unmap = 1; 615 did_range_unmap = 1;
616 } else { 616 } else {
617 /* 617 /*
618 * Just zap this page 618 * Just zap this page
619 */ 619 */
620 unmap_mapping_range(mapping, 620 unmap_mapping_range(mapping,
621 (loff_t)index << PAGE_CACHE_SHIFT, 621 (loff_t)index << PAGE_SHIFT,
622 PAGE_CACHE_SIZE, 0); 622 PAGE_SIZE, 0);
623 } 623 }
624 } 624 }
625 BUG_ON(page_mapped(page)); 625 BUG_ON(page_mapped(page));
@@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
744 744
745 WARN_ON(to > inode->i_size); 745 WARN_ON(to > inode->i_size);
746 746
747 if (from >= to || bsize == PAGE_CACHE_SIZE) 747 if (from >= to || bsize == PAGE_SIZE)
748 return; 748 return;
749 /* Page straddling @from will not have any hole block created? */ 749 /* Page straddling @from will not have any hole block created? */
750 rounded_from = round_up(from, bsize); 750 rounded_from = round_up(from, bsize);
751 if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) 751 if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
752 return; 752 return;
753 753
754 index = from >> PAGE_CACHE_SHIFT; 754 index = from >> PAGE_SHIFT;
755 page = find_lock_page(inode->i_mapping, index); 755 page = find_lock_page(inode->i_mapping, index);
756 /* Page not cached? Nothing to do */ 756 /* Page not cached? Nothing to do */
757 if (!page) 757 if (!page)
@@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
763 if (page_mkclean(page)) 763 if (page_mkclean(page))
764 set_page_dirty(page); 764 set_page_dirty(page);
765 unlock_page(page); 765 unlock_page(page);
766 page_cache_release(page); 766 put_page(page);
767} 767}
768EXPORT_SYMBOL(pagecache_isize_extended); 768EXPORT_SYMBOL(pagecache_isize_extended);
769 769
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9f3a0290b273..af817e5060fb 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -93,7 +93,7 @@ out_release_uncharge_unlock:
93 pte_unmap_unlock(dst_pte, ptl); 93 pte_unmap_unlock(dst_pte, ptl);
94 mem_cgroup_cancel_charge(page, memcg, false); 94 mem_cgroup_cancel_charge(page, memcg, false);
95out_release: 95out_release:
96 page_cache_release(page); 96 put_page(page);
97 goto out; 97 goto out;
98} 98}
99 99
@@ -287,7 +287,7 @@ out_unlock:
287 up_read(&dst_mm->mmap_sem); 287 up_read(&dst_mm->mmap_sem);
288out: 288out:
289 if (page) 289 if (page)
290 page_cache_release(page); 290 put_page(page);
291 BUG_ON(copied < 0); 291 BUG_ON(copied < 0);
292 BUG_ON(err > 0); 292 BUG_ON(err > 0);
293 BUG_ON(!copied && !err); 293 BUG_ON(!copied && !err);
diff --git a/mm/zswap.c b/mm/zswap.c
index bf14508afd64..91dad80d068b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -869,7 +869,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
869 869
870 case ZSWAP_SWAPCACHE_EXIST: 870 case ZSWAP_SWAPCACHE_EXIST:
871 /* page is already in the swap cache, ignore for now */ 871 /* page is already in the swap cache, ignore for now */
872 page_cache_release(page); 872 put_page(page);
873 ret = -EEXIST; 873 ret = -EEXIST;
874 goto fail; 874 goto fail;
875 875
@@ -897,7 +897,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
897 897
898 /* start writeback */ 898 /* start writeback */
899 __swap_writepage(page, &wbc, end_swap_bio_write); 899 __swap_writepage(page, &wbc, end_swap_bio_write);
900 page_cache_release(page); 900 put_page(page);
901 zswap_written_back_pages++; 901 zswap_written_back_pages++;
902 902
903 spin_lock(&tree->lock); 903 spin_lock(&tree->lock);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index e23449094188..9cb7044d0801 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -582,7 +582,7 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
582 int err; 582 int err;
583 583
584 err = switchdev_port_attr_set(br->dev, &attr); 584 err = switchdev_port_attr_set(br->dev, &attr);
585 if (err) 585 if (err && err != -EOPNOTSUPP)
586 return err; 586 return err;
587 587
588 br->ageing_time = t; 588 br->ageing_time = t;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 67b2e27999aa..8570bc7744c2 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1521,6 +1521,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1521 if (copy_from_user(&tmp, user, sizeof(tmp))) 1521 if (copy_from_user(&tmp, user, sizeof(tmp)))
1522 return -EFAULT; 1522 return -EFAULT;
1523 1523
1524 tmp.name[sizeof(tmp.name) - 1] = '\0';
1525
1524 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 1526 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
1525 if (!t) 1527 if (!t)
1526 return ret; 1528 return ret;
@@ -2332,6 +2334,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2332 if (copy_from_user(&tmp, user, sizeof(tmp))) 2334 if (copy_from_user(&tmp, user, sizeof(tmp)))
2333 return -EFAULT; 2335 return -EFAULT;
2334 2336
2337 tmp.name[sizeof(tmp.name) - 1] = '\0';
2338
2335 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 2339 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2336 if (!t) 2340 if (!t)
2337 return ret; 2341 return ret;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index adc8d7221dbb..77f7e7a9ebe1 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -40,7 +40,8 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
40/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) 40/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
41 * or the bridge port (NF_BRIDGE PREROUTING). 41 * or the bridge port (NF_BRIDGE PREROUTING).
42 */ 42 */
43static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, 43static void nft_reject_br_send_v4_tcp_reset(struct net *net,
44 struct sk_buff *oldskb,
44 const struct net_device *dev, 45 const struct net_device *dev,
45 int hook) 46 int hook)
46{ 47{
@@ -48,7 +49,6 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
48 struct iphdr *niph; 49 struct iphdr *niph;
49 const struct tcphdr *oth; 50 const struct tcphdr *oth;
50 struct tcphdr _oth; 51 struct tcphdr _oth;
51 struct net *net = sock_net(oldskb->sk);
52 52
53 if (!nft_bridge_iphdr_validate(oldskb)) 53 if (!nft_bridge_iphdr_validate(oldskb))
54 return; 54 return;
@@ -75,7 +75,8 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
75 br_deliver(br_port_get_rcu(dev), nskb); 75 br_deliver(br_port_get_rcu(dev), nskb);
76} 76}
77 77
78static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, 78static void nft_reject_br_send_v4_unreach(struct net *net,
79 struct sk_buff *oldskb,
79 const struct net_device *dev, 80 const struct net_device *dev,
80 int hook, u8 code) 81 int hook, u8 code)
81{ 82{
@@ -86,7 +87,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
86 void *payload; 87 void *payload;
87 __wsum csum; 88 __wsum csum;
88 u8 proto; 89 u8 proto;
89 struct net *net = sock_net(oldskb->sk);
90 90
91 if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) 91 if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
92 return; 92 return;
@@ -273,17 +273,17 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
273 case htons(ETH_P_IP): 273 case htons(ETH_P_IP):
274 switch (priv->type) { 274 switch (priv->type) {
275 case NFT_REJECT_ICMP_UNREACH: 275 case NFT_REJECT_ICMP_UNREACH:
276 nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, 276 nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
277 pkt->hook, 277 pkt->in, pkt->hook,
278 priv->icmp_code); 278 priv->icmp_code);
279 break; 279 break;
280 case NFT_REJECT_TCP_RST: 280 case NFT_REJECT_TCP_RST:
281 nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in, 281 nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb,
282 pkt->hook); 282 pkt->in, pkt->hook);
283 break; 283 break;
284 case NFT_REJECT_ICMPX_UNREACH: 284 case NFT_REJECT_ICMPX_UNREACH:
285 nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, 285 nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
286 pkt->hook, 286 pkt->in, pkt->hook,
287 nft_reject_icmp_code(priv->icmp_code)); 287 nft_reject_icmp_code(priv->icmp_code));
288 break; 288 break;
289 } 289 }
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1831f6353622..a5502898ea33 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -269,7 +269,7 @@ static void _ceph_msgr_exit(void)
269 } 269 }
270 270
271 BUG_ON(zero_page == NULL); 271 BUG_ON(zero_page == NULL);
272 page_cache_release(zero_page); 272 put_page(zero_page);
273 zero_page = NULL; 273 zero_page = NULL;
274 274
275 ceph_msgr_slab_exit(); 275 ceph_msgr_slab_exit();
@@ -282,7 +282,7 @@ int ceph_msgr_init(void)
282 282
283 BUG_ON(zero_page != NULL); 283 BUG_ON(zero_page != NULL);
284 zero_page = ZERO_PAGE(0); 284 zero_page = ZERO_PAGE(0);
285 page_cache_get(zero_page); 285 get_page(zero_page);
286 286
287 /* 287 /*
288 * The number of active work items is limited by the number of 288 * The number of active work items is limited by the number of
@@ -1602,7 +1602,7 @@ static int write_partial_skip(struct ceph_connection *con)
1602 1602
1603 dout("%s %p %d left\n", __func__, con, con->out_skip); 1603 dout("%s %p %d left\n", __func__, con, con->out_skip);
1604 while (con->out_skip > 0) { 1604 while (con->out_skip > 0) {
1605 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); 1605 size_t size = min(con->out_skip, (int) PAGE_SIZE);
1606 1606
1607 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); 1607 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1608 if (ret <= 0) 1608 if (ret <= 0)
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index c7c220a736e5..6864007e64fc 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -56,7 +56,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
56 size_t bit = pl->room; 56 size_t bit = pl->room;
57 int ret; 57 int ret;
58 58
59 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), 59 memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
60 buf, bit); 60 buf, bit);
61 pl->length += bit; 61 pl->length += bit;
62 pl->room -= bit; 62 pl->room -= bit;
@@ -67,7 +67,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
67 return ret; 67 return ret;
68 } 68 }
69 69
70 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); 70 memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
71 pl->length += len; 71 pl->length += len;
72 pl->room -= len; 72 pl->room -= len;
73 return 0; 73 return 0;
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 10297f7a89ba..00d2601407c5 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -95,19 +95,19 @@ int ceph_copy_user_to_page_vector(struct page **pages,
95 loff_t off, size_t len) 95 loff_t off, size_t len)
96{ 96{
97 int i = 0; 97 int i = 0;
98 int po = off & ~PAGE_CACHE_MASK; 98 int po = off & ~PAGE_MASK;
99 int left = len; 99 int left = len;
100 int l, bad; 100 int l, bad;
101 101
102 while (left > 0) { 102 while (left > 0) {
103 l = min_t(int, PAGE_CACHE_SIZE-po, left); 103 l = min_t(int, PAGE_SIZE-po, left);
104 bad = copy_from_user(page_address(pages[i]) + po, data, l); 104 bad = copy_from_user(page_address(pages[i]) + po, data, l);
105 if (bad == l) 105 if (bad == l)
106 return -EFAULT; 106 return -EFAULT;
107 data += l - bad; 107 data += l - bad;
108 left -= l - bad; 108 left -= l - bad;
109 po += l - bad; 109 po += l - bad;
110 if (po == PAGE_CACHE_SIZE) { 110 if (po == PAGE_SIZE) {
111 po = 0; 111 po = 0;
112 i++; 112 i++;
113 } 113 }
@@ -121,17 +121,17 @@ void ceph_copy_to_page_vector(struct page **pages,
121 loff_t off, size_t len) 121 loff_t off, size_t len)
122{ 122{
123 int i = 0; 123 int i = 0;
124 size_t po = off & ~PAGE_CACHE_MASK; 124 size_t po = off & ~PAGE_MASK;
125 size_t left = len; 125 size_t left = len;
126 126
127 while (left > 0) { 127 while (left > 0) {
128 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); 128 size_t l = min_t(size_t, PAGE_SIZE-po, left);
129 129
130 memcpy(page_address(pages[i]) + po, data, l); 130 memcpy(page_address(pages[i]) + po, data, l);
131 data += l; 131 data += l;
132 left -= l; 132 left -= l;
133 po += l; 133 po += l;
134 if (po == PAGE_CACHE_SIZE) { 134 if (po == PAGE_SIZE) {
135 po = 0; 135 po = 0;
136 i++; 136 i++;
137 } 137 }
@@ -144,17 +144,17 @@ void ceph_copy_from_page_vector(struct page **pages,
144 loff_t off, size_t len) 144 loff_t off, size_t len)
145{ 145{
146 int i = 0; 146 int i = 0;
147 size_t po = off & ~PAGE_CACHE_MASK; 147 size_t po = off & ~PAGE_MASK;
148 size_t left = len; 148 size_t left = len;
149 149
150 while (left > 0) { 150 while (left > 0) {
151 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); 151 size_t l = min_t(size_t, PAGE_SIZE-po, left);
152 152
153 memcpy(data, page_address(pages[i]) + po, l); 153 memcpy(data, page_address(pages[i]) + po, l);
154 data += l; 154 data += l;
155 left -= l; 155 left -= l;
156 po += l; 156 po += l;
157 if (po == PAGE_CACHE_SIZE) { 157 if (po == PAGE_SIZE) {
158 po = 0; 158 po = 0;
159 i++; 159 i++;
160 } 160 }
@@ -168,25 +168,25 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
168 */ 168 */
169void ceph_zero_page_vector_range(int off, int len, struct page **pages) 169void ceph_zero_page_vector_range(int off, int len, struct page **pages)
170{ 170{
171 int i = off >> PAGE_CACHE_SHIFT; 171 int i = off >> PAGE_SHIFT;
172 172
173 off &= ~PAGE_CACHE_MASK; 173 off &= ~PAGE_MASK;
174 174
175 dout("zero_page_vector_page %u~%u\n", off, len); 175 dout("zero_page_vector_page %u~%u\n", off, len);
176 176
177 /* leading partial page? */ 177 /* leading partial page? */
178 if (off) { 178 if (off) {
179 int end = min((int)PAGE_CACHE_SIZE, off + len); 179 int end = min((int)PAGE_SIZE, off + len);
180 dout("zeroing %d %p head from %d\n", i, pages[i], 180 dout("zeroing %d %p head from %d\n", i, pages[i],
181 (int)off); 181 (int)off);
182 zero_user_segment(pages[i], off, end); 182 zero_user_segment(pages[i], off, end);
183 len -= (end - off); 183 len -= (end - off);
184 i++; 184 i++;
185 } 185 }
186 while (len >= PAGE_CACHE_SIZE) { 186 while (len >= PAGE_SIZE) {
187 dout("zeroing %d %p len=%d\n", i, pages[i], len); 187 dout("zeroing %d %p len=%d\n", i, pages[i], len);
188 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); 188 zero_user_segment(pages[i], 0, PAGE_SIZE);
189 len -= PAGE_CACHE_SIZE; 189 len -= PAGE_SIZE;
190 i++; 190 i++;
191 } 191 }
192 /* trailing partial page? */ 192 /* trailing partial page? */
diff --git a/net/core/dev.c b/net/core/dev.c
index b9bcbe77d913..77a71cd68535 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4439,6 +4439,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
4439 NAPI_GRO_CB(skb)->flush = 0; 4439 NAPI_GRO_CB(skb)->flush = 0;
4440 NAPI_GRO_CB(skb)->free = 0; 4440 NAPI_GRO_CB(skb)->free = 0;
4441 NAPI_GRO_CB(skb)->encap_mark = 0; 4441 NAPI_GRO_CB(skb)->encap_mark = 0;
4442 NAPI_GRO_CB(skb)->is_fou = 0;
4442 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 4443 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4443 4444
4444 /* Setup for GRO checksum validation */ 4445 /* Setup for GRO checksum validation */
diff --git a/net/core/filter.c b/net/core/filter.c
index b7177d01ecb0..ca7f832b2980 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1149,7 +1149,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
1149} 1149}
1150EXPORT_SYMBOL_GPL(bpf_prog_destroy); 1150EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1151 1151
1152static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) 1152static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
1153 bool locked)
1153{ 1154{
1154 struct sk_filter *fp, *old_fp; 1155 struct sk_filter *fp, *old_fp;
1155 1156
@@ -1165,10 +1166,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1165 return -ENOMEM; 1166 return -ENOMEM;
1166 } 1167 }
1167 1168
1168 old_fp = rcu_dereference_protected(sk->sk_filter, 1169 old_fp = rcu_dereference_protected(sk->sk_filter, locked);
1169 sock_owned_by_user(sk));
1170 rcu_assign_pointer(sk->sk_filter, fp); 1170 rcu_assign_pointer(sk->sk_filter, fp);
1171
1172 if (old_fp) 1171 if (old_fp)
1173 sk_filter_uncharge(sk, old_fp); 1172 sk_filter_uncharge(sk, old_fp);
1174 1173
@@ -1247,7 +1246,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1247 * occurs or there is insufficient memory for the filter a negative 1246 * occurs or there is insufficient memory for the filter a negative
1248 * errno code is returned. On success the return is zero. 1247 * errno code is returned. On success the return is zero.
1249 */ 1248 */
1250int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) 1249int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
1250 bool locked)
1251{ 1251{
1252 struct bpf_prog *prog = __get_filter(fprog, sk); 1252 struct bpf_prog *prog = __get_filter(fprog, sk);
1253 int err; 1253 int err;
@@ -1255,7 +1255,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1255 if (IS_ERR(prog)) 1255 if (IS_ERR(prog))
1256 return PTR_ERR(prog); 1256 return PTR_ERR(prog);
1257 1257
1258 err = __sk_attach_prog(prog, sk); 1258 err = __sk_attach_prog(prog, sk, locked);
1259 if (err < 0) { 1259 if (err < 0) {
1260 __bpf_prog_release(prog); 1260 __bpf_prog_release(prog);
1261 return err; 1261 return err;
@@ -1263,7 +1263,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1263 1263
1264 return 0; 1264 return 0;
1265} 1265}
1266EXPORT_SYMBOL_GPL(sk_attach_filter); 1266EXPORT_SYMBOL_GPL(__sk_attach_filter);
1267
1268int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1269{
1270 return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
1271}
1267 1272
1268int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) 1273int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1269{ 1274{
@@ -1309,7 +1314,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
1309 if (IS_ERR(prog)) 1314 if (IS_ERR(prog))
1310 return PTR_ERR(prog); 1315 return PTR_ERR(prog);
1311 1316
1312 err = __sk_attach_prog(prog, sk); 1317 err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
1313 if (err < 0) { 1318 if (err < 0) {
1314 bpf_prog_put(prog); 1319 bpf_prog_put(prog);
1315 return err; 1320 return err;
@@ -1764,6 +1769,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
1764 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 1769 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
1765 switch (size) { 1770 switch (size) {
1766 case offsetof(struct bpf_tunnel_key, tunnel_label): 1771 case offsetof(struct bpf_tunnel_key, tunnel_label):
1772 case offsetof(struct bpf_tunnel_key, tunnel_ext):
1767 goto set_compat; 1773 goto set_compat;
1768 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 1774 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
1769 /* Fixup deprecated structure layouts here, so we have 1775 /* Fixup deprecated structure layouts here, so we have
@@ -1849,6 +1855,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
1849 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 1855 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
1850 switch (size) { 1856 switch (size) {
1851 case offsetof(struct bpf_tunnel_key, tunnel_label): 1857 case offsetof(struct bpf_tunnel_key, tunnel_label):
1858 case offsetof(struct bpf_tunnel_key, tunnel_ext):
1852 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 1859 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
1853 /* Fixup deprecated structure layouts here, so we have 1860 /* Fixup deprecated structure layouts here, so we have
1854 * a common path later on. 1861 * a common path later on.
@@ -1861,7 +1868,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
1861 return -EINVAL; 1868 return -EINVAL;
1862 } 1869 }
1863 } 1870 }
1864 if (unlikely(!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label)) 1871 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
1872 from->tunnel_ext))
1865 return -EINVAL; 1873 return -EINVAL;
1866 1874
1867 skb_dst_drop(skb); 1875 skb_dst_drop(skb);
@@ -2247,7 +2255,7 @@ static int __init register_sk_filter_ops(void)
2247} 2255}
2248late_initcall(register_sk_filter_ops); 2256late_initcall(register_sk_filter_ops);
2249 2257
2250int sk_detach_filter(struct sock *sk) 2258int __sk_detach_filter(struct sock *sk, bool locked)
2251{ 2259{
2252 int ret = -ENOENT; 2260 int ret = -ENOENT;
2253 struct sk_filter *filter; 2261 struct sk_filter *filter;
@@ -2255,8 +2263,7 @@ int sk_detach_filter(struct sock *sk)
2255 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 2263 if (sock_flag(sk, SOCK_FILTER_LOCKED))
2256 return -EPERM; 2264 return -EPERM;
2257 2265
2258 filter = rcu_dereference_protected(sk->sk_filter, 2266 filter = rcu_dereference_protected(sk->sk_filter, locked);
2259 sock_owned_by_user(sk));
2260 if (filter) { 2267 if (filter) {
2261 RCU_INIT_POINTER(sk->sk_filter, NULL); 2268 RCU_INIT_POINTER(sk->sk_filter, NULL);
2262 sk_filter_uncharge(sk, filter); 2269 sk_filter_uncharge(sk, filter);
@@ -2265,7 +2272,12 @@ int sk_detach_filter(struct sock *sk)
2265 2272
2266 return ret; 2273 return ret;
2267} 2274}
2268EXPORT_SYMBOL_GPL(sk_detach_filter); 2275EXPORT_SYMBOL_GPL(__sk_detach_filter);
2276
2277int sk_detach_filter(struct sock *sk)
2278{
2279 return __sk_detach_filter(sk, sock_owned_by_user(sk));
2280}
2269 2281
2270int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, 2282int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
2271 unsigned int len) 2283 unsigned int len)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f2066772d0f3..a75f7e94b445 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -909,6 +909,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
909 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ 909 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
910 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ 910 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
911 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ 911 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
912 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
912 + nla_total_size(1); /* IFLA_PROTO_DOWN */ 913 + nla_total_size(1); /* IFLA_PROTO_DOWN */
913 914
914} 915}
diff --git a/net/core/sock.c b/net/core/sock.c
index b67b9aedb230..7e73c26b6bb4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -221,7 +221,8 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
221 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 221 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
222 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 222 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
223 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , 223 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
224 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX" 224 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
225 "sk_lock-AF_MAX"
225}; 226};
226static const char *const af_family_slock_key_strings[AF_MAX+1] = { 227static const char *const af_family_slock_key_strings[AF_MAX+1] = {
227 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 228 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -237,7 +238,8 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
237 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 238 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
238 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 239 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
239 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , 240 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
240 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX" 241 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
242 "slock-AF_MAX"
241}; 243};
242static const char *const af_family_clock_key_strings[AF_MAX+1] = { 244static const char *const af_family_clock_key_strings[AF_MAX+1] = {
243 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 245 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -253,7 +255,8 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
253 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 255 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
254 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 256 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
255 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , 257 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
256 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX" 258 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
259 "clock-AF_MAX"
257}; 260};
258 261
259/* 262/*
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index a0586b4a197d..a39068b4a4d9 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -195,6 +195,17 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
195 u8 proto = NAPI_GRO_CB(skb)->proto; 195 u8 proto = NAPI_GRO_CB(skb)->proto;
196 const struct net_offload **offloads; 196 const struct net_offload **offloads;
197 197
198 /* We can clear the encap_mark for FOU as we are essentially doing
199 * one of two possible things. We are either adding an L4 tunnel
200 * header to the outer L3 tunnel header, or we are are simply
201 * treating the GRE tunnel header as though it is a UDP protocol
202 * specific header such as VXLAN or GENEVE.
203 */
204 NAPI_GRO_CB(skb)->encap_mark = 0;
205
206 /* Flag this frame as already having an outer encap header */
207 NAPI_GRO_CB(skb)->is_fou = 1;
208
198 rcu_read_lock(); 209 rcu_read_lock();
199 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 210 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
200 ops = rcu_dereference(offloads[proto]); 211 ops = rcu_dereference(offloads[proto]);
@@ -352,6 +363,17 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
352 } 363 }
353 } 364 }
354 365
366 /* We can clear the encap_mark for GUE as we are essentially doing
367 * one of two possible things. We are either adding an L4 tunnel
368 * header to the outer L3 tunnel header, or we are are simply
369 * treating the GRE tunnel header as though it is a UDP protocol
370 * specific header such as VXLAN or GENEVE.
371 */
372 NAPI_GRO_CB(skb)->encap_mark = 0;
373
374 /* Flag this frame as already having an outer encap header */
375 NAPI_GRO_CB(skb)->is_fou = 1;
376
355 rcu_read_lock(); 377 rcu_read_lock();
356 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 378 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
357 ops = rcu_dereference(offloads[guehdr->proto_ctype]); 379 ops = rcu_dereference(offloads[guehdr->proto_ctype]);
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index c47539d04b88..6a5bd4317866 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -150,6 +150,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
150 if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0) 150 if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
151 goto out; 151 goto out;
152 152
153 /* We can only support GRE_CSUM if we can track the location of
154 * the GRE header. In the case of FOU/GUE we cannot because the
155 * outer UDP header displaces the GRE header leaving us in a state
156 * of limbo.
157 */
158 if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
159 goto out;
160
153 type = greh->protocol; 161 type = greh->protocol;
154 162
155 rcu_read_lock(); 163 rcu_read_lock();
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 31936d387cfd..af5d1f38217f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -862,9 +862,16 @@ static void __gre_tunnel_init(struct net_device *dev)
862 dev->hw_features |= GRE_FEATURES; 862 dev->hw_features |= GRE_FEATURES;
863 863
864 if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) { 864 if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
865 /* TCP offload with GRE SEQ is not supported. */ 865 /* TCP offload with GRE SEQ is not supported, nor
866 dev->features |= NETIF_F_GSO_SOFTWARE; 866 * can we support 2 levels of outer headers requiring
867 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 867 * an update.
868 */
869 if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
870 (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
871 dev->features |= NETIF_F_GSO_SOFTWARE;
872 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
873 }
874
868 /* Can use a lockless transmit, unless we generate 875 /* Can use a lockless transmit, unless we generate
869 * output sequences 876 * output sequences
870 */ 877 */
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 02dd990af542..6165f30c4d72 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -372,8 +372,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
372 if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) || 372 if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
373 nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) || 373 nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
374 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || 374 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
375 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || 375 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
376 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || 376 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
377 nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) 377 nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
378 return -ENOMEM; 378 return -ENOMEM;
379 379
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index bf081927e06b..4133b0f513af 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -359,11 +359,12 @@ unsigned int arpt_do_table(struct sk_buff *skb,
359} 359}
360 360
361/* All zeroes == unconditional rule. */ 361/* All zeroes == unconditional rule. */
362static inline bool unconditional(const struct arpt_arp *arp) 362static inline bool unconditional(const struct arpt_entry *e)
363{ 363{
364 static const struct arpt_arp uncond; 364 static const struct arpt_arp uncond;
365 365
366 return memcmp(arp, &uncond, sizeof(uncond)) == 0; 366 return e->target_offset == sizeof(struct arpt_entry) &&
367 memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
367} 368}
368 369
369/* Figures out from what hook each rule can be called: returns 0 if 370/* Figures out from what hook each rule can be called: returns 0 if
@@ -402,11 +403,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
402 |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); 403 |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
403 404
404 /* Unconditional return/END. */ 405 /* Unconditional return/END. */
405 if ((e->target_offset == sizeof(struct arpt_entry) && 406 if ((unconditional(e) &&
406 (strcmp(t->target.u.user.name, 407 (strcmp(t->target.u.user.name,
407 XT_STANDARD_TARGET) == 0) && 408 XT_STANDARD_TARGET) == 0) &&
408 t->verdict < 0 && unconditional(&e->arp)) || 409 t->verdict < 0) || visited) {
409 visited) {
410 unsigned int oldpos, size; 410 unsigned int oldpos, size;
411 411
412 if ((strcmp(t->target.u.user.name, 412 if ((strcmp(t->target.u.user.name,
@@ -474,14 +474,12 @@ next:
474 return 1; 474 return 1;
475} 475}
476 476
477static inline int check_entry(const struct arpt_entry *e, const char *name) 477static inline int check_entry(const struct arpt_entry *e)
478{ 478{
479 const struct xt_entry_target *t; 479 const struct xt_entry_target *t;
480 480
481 if (!arp_checkentry(&e->arp)) { 481 if (!arp_checkentry(&e->arp))
482 duprintf("arp_tables: arp check failed %p %s.\n", e, name);
483 return -EINVAL; 482 return -EINVAL;
484 }
485 483
486 if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) 484 if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
487 return -EINVAL; 485 return -EINVAL;
@@ -522,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
522 struct xt_target *target; 520 struct xt_target *target;
523 int ret; 521 int ret;
524 522
525 ret = check_entry(e, name);
526 if (ret)
527 return ret;
528
529 e->counters.pcnt = xt_percpu_counter_alloc(); 523 e->counters.pcnt = xt_percpu_counter_alloc();
530 if (IS_ERR_VALUE(e->counters.pcnt)) 524 if (IS_ERR_VALUE(e->counters.pcnt))
531 return -ENOMEM; 525 return -ENOMEM;
@@ -557,7 +551,7 @@ static bool check_underflow(const struct arpt_entry *e)
557 const struct xt_entry_target *t; 551 const struct xt_entry_target *t;
558 unsigned int verdict; 552 unsigned int verdict;
559 553
560 if (!unconditional(&e->arp)) 554 if (!unconditional(e))
561 return false; 555 return false;
562 t = arpt_get_target_c(e); 556 t = arpt_get_target_c(e);
563 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 557 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -576,9 +570,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
576 unsigned int valid_hooks) 570 unsigned int valid_hooks)
577{ 571{
578 unsigned int h; 572 unsigned int h;
573 int err;
579 574
580 if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || 575 if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
581 (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { 576 (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
577 (unsigned char *)e + e->next_offset > limit) {
582 duprintf("Bad offset %p\n", e); 578 duprintf("Bad offset %p\n", e);
583 return -EINVAL; 579 return -EINVAL;
584 } 580 }
@@ -590,6 +586,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
590 return -EINVAL; 586 return -EINVAL;
591 } 587 }
592 588
589 err = check_entry(e);
590 if (err)
591 return err;
592
593 /* Check hooks & underflows */ 593 /* Check hooks & underflows */
594 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 594 for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
595 if (!(valid_hooks & (1 << h))) 595 if (!(valid_hooks & (1 << h)))
@@ -598,9 +598,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
598 newinfo->hook_entry[h] = hook_entries[h]; 598 newinfo->hook_entry[h] = hook_entries[h];
599 if ((unsigned char *)e - base == underflows[h]) { 599 if ((unsigned char *)e - base == underflows[h]) {
600 if (!check_underflow(e)) { 600 if (!check_underflow(e)) {
601 pr_err("Underflows must be unconditional and " 601 pr_debug("Underflows must be unconditional and "
602 "use the STANDARD target with " 602 "use the STANDARD target with "
603 "ACCEPT/DROP\n"); 603 "ACCEPT/DROP\n");
604 return -EINVAL; 604 return -EINVAL;
605 } 605 }
606 newinfo->underflow[h] = underflows[h]; 606 newinfo->underflow[h] = underflows[h];
@@ -969,6 +969,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
969 sizeof(struct arpt_get_entries) + get.size); 969 sizeof(struct arpt_get_entries) + get.size);
970 return -EINVAL; 970 return -EINVAL;
971 } 971 }
972 get.name[sizeof(get.name) - 1] = '\0';
972 973
973 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 974 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
974 if (!IS_ERR_OR_NULL(t)) { 975 if (!IS_ERR_OR_NULL(t)) {
@@ -1233,7 +1234,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1233 1234
1234 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1235 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1235 if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || 1236 if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
1236 (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { 1237 (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
1238 (unsigned char *)e + e->next_offset > limit) {
1237 duprintf("Bad offset %p, limit = %p\n", e, limit); 1239 duprintf("Bad offset %p, limit = %p\n", e, limit);
1238 return -EINVAL; 1240 return -EINVAL;
1239 } 1241 }
@@ -1246,7 +1248,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1246 } 1248 }
1247 1249
1248 /* For purposes of check_entry casting the compat entry is fine */ 1250 /* For purposes of check_entry casting the compat entry is fine */
1249 ret = check_entry((struct arpt_entry *)e, name); 1251 ret = check_entry((struct arpt_entry *)e);
1250 if (ret) 1252 if (ret)
1251 return ret; 1253 return ret;
1252 1254
@@ -1662,6 +1664,7 @@ static int compat_get_entries(struct net *net,
1662 *len, sizeof(get) + get.size); 1664 *len, sizeof(get) + get.size);
1663 return -EINVAL; 1665 return -EINVAL;
1664 } 1666 }
1667 get.name[sizeof(get.name) - 1] = '\0';
1665 1668
1666 xt_compat_lock(NFPROTO_ARP); 1669 xt_compat_lock(NFPROTO_ARP);
1667 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 1670 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e53f8d6f326d..631c100a1338 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
168 168
169/* All zeroes == unconditional rule. */ 169/* All zeroes == unconditional rule. */
170/* Mildly perf critical (only if packet tracing is on) */ 170/* Mildly perf critical (only if packet tracing is on) */
171static inline bool unconditional(const struct ipt_ip *ip) 171static inline bool unconditional(const struct ipt_entry *e)
172{ 172{
173 static const struct ipt_ip uncond; 173 static const struct ipt_ip uncond;
174 174
175 return memcmp(ip, &uncond, sizeof(uncond)) == 0; 175 return e->target_offset == sizeof(struct ipt_entry) &&
176 memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
176#undef FWINV 177#undef FWINV
177} 178}
178 179
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
229 } else if (s == e) { 230 } else if (s == e) {
230 (*rulenum)++; 231 (*rulenum)++;
231 232
232 if (s->target_offset == sizeof(struct ipt_entry) && 233 if (unconditional(s) &&
233 strcmp(t->target.u.kernel.target->name, 234 strcmp(t->target.u.kernel.target->name,
234 XT_STANDARD_TARGET) == 0 && 235 XT_STANDARD_TARGET) == 0 &&
235 t->verdict < 0 && 236 t->verdict < 0) {
236 unconditional(&s->ip)) {
237 /* Tail of chains: STANDARD target (return/policy) */ 237 /* Tail of chains: STANDARD target (return/policy) */
238 *comment = *chainname == hookname 238 *comment = *chainname == hookname
239 ? comments[NF_IP_TRACE_COMMENT_POLICY] 239 ? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -476,11 +476,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
476 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); 476 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
477 477
478 /* Unconditional return/END. */ 478 /* Unconditional return/END. */
479 if ((e->target_offset == sizeof(struct ipt_entry) && 479 if ((unconditional(e) &&
480 (strcmp(t->target.u.user.name, 480 (strcmp(t->target.u.user.name,
481 XT_STANDARD_TARGET) == 0) && 481 XT_STANDARD_TARGET) == 0) &&
482 t->verdict < 0 && unconditional(&e->ip)) || 482 t->verdict < 0) || visited) {
483 visited) {
484 unsigned int oldpos, size; 483 unsigned int oldpos, size;
485 484
486 if ((strcmp(t->target.u.user.name, 485 if ((strcmp(t->target.u.user.name,
@@ -569,14 +568,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
569} 568}
570 569
571static int 570static int
572check_entry(const struct ipt_entry *e, const char *name) 571check_entry(const struct ipt_entry *e)
573{ 572{
574 const struct xt_entry_target *t; 573 const struct xt_entry_target *t;
575 574
576 if (!ip_checkentry(&e->ip)) { 575 if (!ip_checkentry(&e->ip))
577 duprintf("ip check failed %p %s.\n", e, name);
578 return -EINVAL; 576 return -EINVAL;
579 }
580 577
581 if (e->target_offset + sizeof(struct xt_entry_target) > 578 if (e->target_offset + sizeof(struct xt_entry_target) >
582 e->next_offset) 579 e->next_offset)
@@ -666,10 +663,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
666 struct xt_mtchk_param mtpar; 663 struct xt_mtchk_param mtpar;
667 struct xt_entry_match *ematch; 664 struct xt_entry_match *ematch;
668 665
669 ret = check_entry(e, name);
670 if (ret)
671 return ret;
672
673 e->counters.pcnt = xt_percpu_counter_alloc(); 666 e->counters.pcnt = xt_percpu_counter_alloc();
674 if (IS_ERR_VALUE(e->counters.pcnt)) 667 if (IS_ERR_VALUE(e->counters.pcnt))
675 return -ENOMEM; 668 return -ENOMEM;
@@ -721,7 +714,7 @@ static bool check_underflow(const struct ipt_entry *e)
721 const struct xt_entry_target *t; 714 const struct xt_entry_target *t;
722 unsigned int verdict; 715 unsigned int verdict;
723 716
724 if (!unconditional(&e->ip)) 717 if (!unconditional(e))
725 return false; 718 return false;
726 t = ipt_get_target_c(e); 719 t = ipt_get_target_c(e);
727 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 720 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -741,9 +734,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
741 unsigned int valid_hooks) 734 unsigned int valid_hooks)
742{ 735{
743 unsigned int h; 736 unsigned int h;
737 int err;
744 738
745 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || 739 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
746 (unsigned char *)e + sizeof(struct ipt_entry) >= limit) { 740 (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
741 (unsigned char *)e + e->next_offset > limit) {
747 duprintf("Bad offset %p\n", e); 742 duprintf("Bad offset %p\n", e);
748 return -EINVAL; 743 return -EINVAL;
749 } 744 }
@@ -755,6 +750,10 @@ check_entry_size_and_hooks(struct ipt_entry *e,
755 return -EINVAL; 750 return -EINVAL;
756 } 751 }
757 752
753 err = check_entry(e);
754 if (err)
755 return err;
756
758 /* Check hooks & underflows */ 757 /* Check hooks & underflows */
759 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 758 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
760 if (!(valid_hooks & (1 << h))) 759 if (!(valid_hooks & (1 << h)))
@@ -763,9 +762,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
763 newinfo->hook_entry[h] = hook_entries[h]; 762 newinfo->hook_entry[h] = hook_entries[h];
764 if ((unsigned char *)e - base == underflows[h]) { 763 if ((unsigned char *)e - base == underflows[h]) {
765 if (!check_underflow(e)) { 764 if (!check_underflow(e)) {
766 pr_err("Underflows must be unconditional and " 765 pr_debug("Underflows must be unconditional and "
767 "use the STANDARD target with " 766 "use the STANDARD target with "
768 "ACCEPT/DROP\n"); 767 "ACCEPT/DROP\n");
769 return -EINVAL; 768 return -EINVAL;
770 } 769 }
771 newinfo->underflow[h] = underflows[h]; 770 newinfo->underflow[h] = underflows[h];
@@ -1157,6 +1156,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1157 *len, sizeof(get) + get.size); 1156 *len, sizeof(get) + get.size);
1158 return -EINVAL; 1157 return -EINVAL;
1159 } 1158 }
1159 get.name[sizeof(get.name) - 1] = '\0';
1160 1160
1161 t = xt_find_table_lock(net, AF_INET, get.name); 1161 t = xt_find_table_lock(net, AF_INET, get.name);
1162 if (!IS_ERR_OR_NULL(t)) { 1162 if (!IS_ERR_OR_NULL(t)) {
@@ -1493,7 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1493 1493
1494 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1494 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1495 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || 1495 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1496 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { 1496 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
1497 (unsigned char *)e + e->next_offset > limit) {
1497 duprintf("Bad offset %p, limit = %p\n", e, limit); 1498 duprintf("Bad offset %p, limit = %p\n", e, limit);
1498 return -EINVAL; 1499 return -EINVAL;
1499 } 1500 }
@@ -1506,7 +1507,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1506 } 1507 }
1507 1508
1508 /* For purposes of check_entry casting the compat entry is fine */ 1509 /* For purposes of check_entry casting the compat entry is fine */
1509 ret = check_entry((struct ipt_entry *)e, name); 1510 ret = check_entry((struct ipt_entry *)e);
1510 if (ret) 1511 if (ret)
1511 return ret; 1512 return ret;
1512 1513
@@ -1935,6 +1936,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1935 *len, sizeof(get) + get.size); 1936 *len, sizeof(get) + get.size);
1936 return -EINVAL; 1937 return -EINVAL;
1937 } 1938 }
1939 get.name[sizeof(get.name) - 1] = '\0';
1938 1940
1939 xt_compat_lock(AF_INET); 1941 xt_compat_lock(AF_INET);
1940 t = xt_find_table_lock(net, AF_INET, get.name); 1942 t = xt_find_table_lock(net, AF_INET, get.name);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 7b8fbb352877..db5b87509446 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -18,10 +18,10 @@
18#include <net/netfilter/nf_conntrack_synproxy.h> 18#include <net/netfilter/nf_conntrack_synproxy.h>
19 19
20static struct iphdr * 20static struct iphdr *
21synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr) 21synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
22 __be32 daddr)
22{ 23{
23 struct iphdr *iph; 24 struct iphdr *iph;
24 struct net *net = sock_net(skb->sk);
25 25
26 skb_reset_network_header(skb); 26 skb_reset_network_header(skb);
27 iph = (struct iphdr *)skb_put(skb, sizeof(*iph)); 27 iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
@@ -40,14 +40,12 @@ synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
40} 40}
41 41
42static void 42static void
43synproxy_send_tcp(const struct synproxy_net *snet, 43synproxy_send_tcp(struct net *net,
44 const struct sk_buff *skb, struct sk_buff *nskb, 44 const struct sk_buff *skb, struct sk_buff *nskb,
45 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, 45 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
46 struct iphdr *niph, struct tcphdr *nth, 46 struct iphdr *niph, struct tcphdr *nth,
47 unsigned int tcp_hdr_size) 47 unsigned int tcp_hdr_size)
48{ 48{
49 struct net *net = nf_ct_net(snet->tmpl);
50
51 nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0); 49 nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
52 nskb->ip_summed = CHECKSUM_PARTIAL; 50 nskb->ip_summed = CHECKSUM_PARTIAL;
53 nskb->csum_start = (unsigned char *)nth - nskb->head; 51 nskb->csum_start = (unsigned char *)nth - nskb->head;
@@ -72,7 +70,7 @@ free_nskb:
72} 70}
73 71
74static void 72static void
75synproxy_send_client_synack(const struct synproxy_net *snet, 73synproxy_send_client_synack(struct net *net,
76 const struct sk_buff *skb, const struct tcphdr *th, 74 const struct sk_buff *skb, const struct tcphdr *th,
77 const struct synproxy_options *opts) 75 const struct synproxy_options *opts)
78{ 76{
@@ -91,7 +89,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
91 return; 89 return;
92 skb_reserve(nskb, MAX_TCP_HEADER); 90 skb_reserve(nskb, MAX_TCP_HEADER);
93 91
94 niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); 92 niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
95 93
96 skb_reset_transport_header(nskb); 94 skb_reset_transport_header(nskb);
97 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); 95 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -109,15 +107,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
109 107
110 synproxy_build_options(nth, opts); 108 synproxy_build_options(nth, opts);
111 109
112 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 110 synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
113 niph, nth, tcp_hdr_size); 111 niph, nth, tcp_hdr_size);
114} 112}
115 113
116static void 114static void
117synproxy_send_server_syn(const struct synproxy_net *snet, 115synproxy_send_server_syn(struct net *net,
118 const struct sk_buff *skb, const struct tcphdr *th, 116 const struct sk_buff *skb, const struct tcphdr *th,
119 const struct synproxy_options *opts, u32 recv_seq) 117 const struct synproxy_options *opts, u32 recv_seq)
120{ 118{
119 struct synproxy_net *snet = synproxy_pernet(net);
121 struct sk_buff *nskb; 120 struct sk_buff *nskb;
122 struct iphdr *iph, *niph; 121 struct iphdr *iph, *niph;
123 struct tcphdr *nth; 122 struct tcphdr *nth;
@@ -132,7 +131,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
132 return; 131 return;
133 skb_reserve(nskb, MAX_TCP_HEADER); 132 skb_reserve(nskb, MAX_TCP_HEADER);
134 133
135 niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); 134 niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
136 135
137 skb_reset_transport_header(nskb); 136 skb_reset_transport_header(nskb);
138 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); 137 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -153,12 +152,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
153 152
154 synproxy_build_options(nth, opts); 153 synproxy_build_options(nth, opts);
155 154
156 synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 155 synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
157 niph, nth, tcp_hdr_size); 156 niph, nth, tcp_hdr_size);
158} 157}
159 158
160static void 159static void
161synproxy_send_server_ack(const struct synproxy_net *snet, 160synproxy_send_server_ack(struct net *net,
162 const struct ip_ct_tcp *state, 161 const struct ip_ct_tcp *state,
163 const struct sk_buff *skb, const struct tcphdr *th, 162 const struct sk_buff *skb, const struct tcphdr *th,
164 const struct synproxy_options *opts) 163 const struct synproxy_options *opts)
@@ -177,7 +176,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
177 return; 176 return;
178 skb_reserve(nskb, MAX_TCP_HEADER); 177 skb_reserve(nskb, MAX_TCP_HEADER);
179 178
180 niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); 179 niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
181 180
182 skb_reset_transport_header(nskb); 181 skb_reset_transport_header(nskb);
183 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); 182 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -193,11 +192,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
193 192
194 synproxy_build_options(nth, opts); 193 synproxy_build_options(nth, opts);
195 194
196 synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 195 synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
197} 196}
198 197
199static void 198static void
200synproxy_send_client_ack(const struct synproxy_net *snet, 199synproxy_send_client_ack(struct net *net,
201 const struct sk_buff *skb, const struct tcphdr *th, 200 const struct sk_buff *skb, const struct tcphdr *th,
202 const struct synproxy_options *opts) 201 const struct synproxy_options *opts)
203{ 202{
@@ -215,7 +214,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
215 return; 214 return;
216 skb_reserve(nskb, MAX_TCP_HEADER); 215 skb_reserve(nskb, MAX_TCP_HEADER);
217 216
218 niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); 217 niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
219 218
220 skb_reset_transport_header(nskb); 219 skb_reset_transport_header(nskb);
221 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); 220 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -231,15 +230,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
231 230
232 synproxy_build_options(nth, opts); 231 synproxy_build_options(nth, opts);
233 232
234 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 233 synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
235 niph, nth, tcp_hdr_size); 234 niph, nth, tcp_hdr_size);
236} 235}
237 236
238static bool 237static bool
239synproxy_recv_client_ack(const struct synproxy_net *snet, 238synproxy_recv_client_ack(struct net *net,
240 const struct sk_buff *skb, const struct tcphdr *th, 239 const struct sk_buff *skb, const struct tcphdr *th,
241 struct synproxy_options *opts, u32 recv_seq) 240 struct synproxy_options *opts, u32 recv_seq)
242{ 241{
242 struct synproxy_net *snet = synproxy_pernet(net);
243 int mss; 243 int mss;
244 244
245 mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); 245 mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -255,7 +255,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
255 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) 255 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
256 synproxy_check_timestamp_cookie(opts); 256 synproxy_check_timestamp_cookie(opts);
257 257
258 synproxy_send_server_syn(snet, skb, th, opts, recv_seq); 258 synproxy_send_server_syn(net, skb, th, opts, recv_seq);
259 return true; 259 return true;
260} 260}
261 261
@@ -263,7 +263,8 @@ static unsigned int
263synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) 263synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
264{ 264{
265 const struct xt_synproxy_info *info = par->targinfo; 265 const struct xt_synproxy_info *info = par->targinfo;
266 struct synproxy_net *snet = synproxy_pernet(par->net); 266 struct net *net = par->net;
267 struct synproxy_net *snet = synproxy_pernet(net);
267 struct synproxy_options opts = {}; 268 struct synproxy_options opts = {};
268 struct tcphdr *th, _th; 269 struct tcphdr *th, _th;
269 270
@@ -292,12 +293,12 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
292 XT_SYNPROXY_OPT_SACK_PERM | 293 XT_SYNPROXY_OPT_SACK_PERM |
293 XT_SYNPROXY_OPT_ECN); 294 XT_SYNPROXY_OPT_ECN);
294 295
295 synproxy_send_client_synack(snet, skb, th, &opts); 296 synproxy_send_client_synack(net, skb, th, &opts);
296 return NF_DROP; 297 return NF_DROP;
297 298
298 } else if (th->ack && !(th->fin || th->rst || th->syn)) { 299 } else if (th->ack && !(th->fin || th->rst || th->syn)) {
299 /* ACK from client */ 300 /* ACK from client */
300 synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); 301 synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
301 return NF_DROP; 302 return NF_DROP;
302 } 303 }
303 304
@@ -308,7 +309,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
308 struct sk_buff *skb, 309 struct sk_buff *skb,
309 const struct nf_hook_state *nhs) 310 const struct nf_hook_state *nhs)
310{ 311{
311 struct synproxy_net *snet = synproxy_pernet(nhs->net); 312 struct net *net = nhs->net;
313 struct synproxy_net *snet = synproxy_pernet(net);
312 enum ip_conntrack_info ctinfo; 314 enum ip_conntrack_info ctinfo;
313 struct nf_conn *ct; 315 struct nf_conn *ct;
314 struct nf_conn_synproxy *synproxy; 316 struct nf_conn_synproxy *synproxy;
@@ -365,7 +367,7 @@ static unsigned int ipv4_synproxy_hook(void *priv,
365 * therefore we need to add 1 to make the SYN sequence 367 * therefore we need to add 1 to make the SYN sequence
366 * number match the one of first SYN. 368 * number match the one of first SYN.
367 */ 369 */
368 if (synproxy_recv_client_ack(snet, skb, th, &opts, 370 if (synproxy_recv_client_ack(net, skb, th, &opts,
369 ntohl(th->seq) + 1)) 371 ntohl(th->seq) + 1))
370 this_cpu_inc(snet->stats->cookie_retrans); 372 this_cpu_inc(snet->stats->cookie_retrans);
371 373
@@ -391,12 +393,12 @@ static unsigned int ipv4_synproxy_hook(void *priv,
391 XT_SYNPROXY_OPT_SACK_PERM); 393 XT_SYNPROXY_OPT_SACK_PERM);
392 394
393 swap(opts.tsval, opts.tsecr); 395 swap(opts.tsval, opts.tsecr);
394 synproxy_send_server_ack(snet, state, skb, th, &opts); 396 synproxy_send_server_ack(net, state, skb, th, &opts);
395 397
396 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); 398 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
397 399
398 swap(opts.tsval, opts.tsecr); 400 swap(opts.tsval, opts.tsecr);
399 synproxy_send_client_ack(snet, skb, th, &opts); 401 synproxy_send_client_ack(net, skb, th, &opts);
400 402
401 consume_skb(skb); 403 consume_skb(skb);
402 return NF_STOLEN; 404 return NF_STOLEN;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9428345d3a07..bc972e7152c7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1090,8 +1090,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1090 int getfrag(void *from, char *to, int offset, int len, 1090 int getfrag(void *from, char *to, int offset, int len,
1091 int odd, struct sk_buff *skb), 1091 int odd, struct sk_buff *skb),
1092 void *from, int length, int hh_len, int fragheaderlen, 1092 void *from, int length, int hh_len, int fragheaderlen,
1093 int transhdrlen, int mtu, unsigned int flags, 1093 int exthdrlen, int transhdrlen, int mtu,
1094 const struct flowi6 *fl6) 1094 unsigned int flags, const struct flowi6 *fl6)
1095 1095
1096{ 1096{
1097 struct sk_buff *skb; 1097 struct sk_buff *skb;
@@ -1116,7 +1116,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1116 skb_put(skb, fragheaderlen + transhdrlen); 1116 skb_put(skb, fragheaderlen + transhdrlen);
1117 1117
1118 /* initialize network header pointer */ 1118 /* initialize network header pointer */
1119 skb_reset_network_header(skb); 1119 skb_set_network_header(skb, exthdrlen);
1120 1120
1121 /* initialize protocol header pointer */ 1121 /* initialize protocol header pointer */
1122 skb->transport_header = skb->network_header + fragheaderlen; 1122 skb->transport_header = skb->network_header + fragheaderlen;
@@ -1358,7 +1358,7 @@ emsgsize:
1358 (rt->dst.dev->features & NETIF_F_UFO) && 1358 (rt->dst.dev->features & NETIF_F_UFO) &&
1359 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1359 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1360 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1360 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1361 hh_len, fragheaderlen, 1361 hh_len, fragheaderlen, exthdrlen,
1362 transhdrlen, mtu, flags, fl6); 1362 transhdrlen, mtu, flags, fl6);
1363 if (err) 1363 if (err)
1364 goto error; 1364 goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eb2ac4bb09ce..1f20345cbc97 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -252,12 +252,12 @@ static int ip6_tnl_create2(struct net_device *dev)
252 252
253 t = netdev_priv(dev); 253 t = netdev_priv(dev);
254 254
255 dev->rtnl_link_ops = &ip6_link_ops;
255 err = register_netdevice(dev); 256 err = register_netdevice(dev);
256 if (err < 0) 257 if (err < 0)
257 goto out; 258 goto out;
258 259
259 strcpy(t->parms.name, dev->name); 260 strcpy(t->parms.name, dev->name);
260 dev->rtnl_link_ops = &ip6_link_ops;
261 261
262 dev_hold(dev); 262 dev_hold(dev);
263 ip6_tnl_link(ip6n, t); 263 ip6_tnl_link(ip6n, t);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 84f9baf7aee8..86b67b70b626 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
198 198
199/* All zeroes == unconditional rule. */ 199/* All zeroes == unconditional rule. */
200/* Mildly perf critical (only if packet tracing is on) */ 200/* Mildly perf critical (only if packet tracing is on) */
201static inline bool unconditional(const struct ip6t_ip6 *ipv6) 201static inline bool unconditional(const struct ip6t_entry *e)
202{ 202{
203 static const struct ip6t_ip6 uncond; 203 static const struct ip6t_ip6 uncond;
204 204
205 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; 205 return e->target_offset == sizeof(struct ip6t_entry) &&
206 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
206} 207}
207 208
208static inline const struct xt_entry_target * 209static inline const struct xt_entry_target *
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
258 } else if (s == e) { 259 } else if (s == e) {
259 (*rulenum)++; 260 (*rulenum)++;
260 261
261 if (s->target_offset == sizeof(struct ip6t_entry) && 262 if (unconditional(s) &&
262 strcmp(t->target.u.kernel.target->name, 263 strcmp(t->target.u.kernel.target->name,
263 XT_STANDARD_TARGET) == 0 && 264 XT_STANDARD_TARGET) == 0 &&
264 t->verdict < 0 && 265 t->verdict < 0) {
265 unconditional(&s->ipv6)) {
266 /* Tail of chains: STANDARD target (return/policy) */ 266 /* Tail of chains: STANDARD target (return/policy) */
267 *comment = *chainname == hookname 267 *comment = *chainname == hookname
268 ? comments[NF_IP6_TRACE_COMMENT_POLICY] 268 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -488,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
488 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); 488 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
489 489
490 /* Unconditional return/END. */ 490 /* Unconditional return/END. */
491 if ((e->target_offset == sizeof(struct ip6t_entry) && 491 if ((unconditional(e) &&
492 (strcmp(t->target.u.user.name, 492 (strcmp(t->target.u.user.name,
493 XT_STANDARD_TARGET) == 0) && 493 XT_STANDARD_TARGET) == 0) &&
494 t->verdict < 0 && 494 t->verdict < 0) || visited) {
495 unconditional(&e->ipv6)) || visited) {
496 unsigned int oldpos, size; 495 unsigned int oldpos, size;
497 496
498 if ((strcmp(t->target.u.user.name, 497 if ((strcmp(t->target.u.user.name,
@@ -581,14 +580,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
581} 580}
582 581
583static int 582static int
584check_entry(const struct ip6t_entry *e, const char *name) 583check_entry(const struct ip6t_entry *e)
585{ 584{
586 const struct xt_entry_target *t; 585 const struct xt_entry_target *t;
587 586
588 if (!ip6_checkentry(&e->ipv6)) { 587 if (!ip6_checkentry(&e->ipv6))
589 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
590 return -EINVAL; 588 return -EINVAL;
591 }
592 589
593 if (e->target_offset + sizeof(struct xt_entry_target) > 590 if (e->target_offset + sizeof(struct xt_entry_target) >
594 e->next_offset) 591 e->next_offset)
@@ -679,10 +676,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
679 struct xt_mtchk_param mtpar; 676 struct xt_mtchk_param mtpar;
680 struct xt_entry_match *ematch; 677 struct xt_entry_match *ematch;
681 678
682 ret = check_entry(e, name);
683 if (ret)
684 return ret;
685
686 e->counters.pcnt = xt_percpu_counter_alloc(); 679 e->counters.pcnt = xt_percpu_counter_alloc();
687 if (IS_ERR_VALUE(e->counters.pcnt)) 680 if (IS_ERR_VALUE(e->counters.pcnt))
688 return -ENOMEM; 681 return -ENOMEM;
@@ -733,7 +726,7 @@ static bool check_underflow(const struct ip6t_entry *e)
733 const struct xt_entry_target *t; 726 const struct xt_entry_target *t;
734 unsigned int verdict; 727 unsigned int verdict;
735 728
736 if (!unconditional(&e->ipv6)) 729 if (!unconditional(e))
737 return false; 730 return false;
738 t = ip6t_get_target_c(e); 731 t = ip6t_get_target_c(e);
739 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 732 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -753,9 +746,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
753 unsigned int valid_hooks) 746 unsigned int valid_hooks)
754{ 747{
755 unsigned int h; 748 unsigned int h;
749 int err;
756 750
757 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || 751 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
758 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { 752 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
753 (unsigned char *)e + e->next_offset > limit) {
759 duprintf("Bad offset %p\n", e); 754 duprintf("Bad offset %p\n", e);
760 return -EINVAL; 755 return -EINVAL;
761 } 756 }
@@ -767,6 +762,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
767 return -EINVAL; 762 return -EINVAL;
768 } 763 }
769 764
765 err = check_entry(e);
766 if (err)
767 return err;
768
770 /* Check hooks & underflows */ 769 /* Check hooks & underflows */
771 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 770 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
772 if (!(valid_hooks & (1 << h))) 771 if (!(valid_hooks & (1 << h)))
@@ -775,9 +774,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
775 newinfo->hook_entry[h] = hook_entries[h]; 774 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h]) { 775 if ((unsigned char *)e - base == underflows[h]) {
777 if (!check_underflow(e)) { 776 if (!check_underflow(e)) {
778 pr_err("Underflows must be unconditional and " 777 pr_debug("Underflows must be unconditional and "
779 "use the STANDARD target with " 778 "use the STANDARD target with "
780 "ACCEPT/DROP\n"); 779 "ACCEPT/DROP\n");
781 return -EINVAL; 780 return -EINVAL;
782 } 781 }
783 newinfo->underflow[h] = underflows[h]; 782 newinfo->underflow[h] = underflows[h];
@@ -1169,6 +1168,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1169 *len, sizeof(get) + get.size); 1168 *len, sizeof(get) + get.size);
1170 return -EINVAL; 1169 return -EINVAL;
1171 } 1170 }
1171 get.name[sizeof(get.name) - 1] = '\0';
1172 1172
1173 t = xt_find_table_lock(net, AF_INET6, get.name); 1173 t = xt_find_table_lock(net, AF_INET6, get.name);
1174 if (!IS_ERR_OR_NULL(t)) { 1174 if (!IS_ERR_OR_NULL(t)) {
@@ -1505,7 +1505,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1505 1505
1506 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1506 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1507 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || 1507 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1508 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { 1508 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
1509 (unsigned char *)e + e->next_offset > limit) {
1509 duprintf("Bad offset %p, limit = %p\n", e, limit); 1510 duprintf("Bad offset %p, limit = %p\n", e, limit);
1510 return -EINVAL; 1511 return -EINVAL;
1511 } 1512 }
@@ -1518,7 +1519,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1518 } 1519 }
1519 1520
1520 /* For purposes of check_entry casting the compat entry is fine */ 1521 /* For purposes of check_entry casting the compat entry is fine */
1521 ret = check_entry((struct ip6t_entry *)e, name); 1522 ret = check_entry((struct ip6t_entry *)e);
1522 if (ret) 1523 if (ret)
1523 return ret; 1524 return ret;
1524 1525
@@ -1944,6 +1945,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1944 *len, sizeof(get) + get.size); 1945 *len, sizeof(get) + get.size);
1945 return -EINVAL; 1946 return -EINVAL;
1946 } 1947 }
1948 get.name[sizeof(get.name) - 1] = '\0';
1947 1949
1948 xt_compat_lock(AF_INET6); 1950 xt_compat_lock(AF_INET6);
1949 t = xt_find_table_lock(net, AF_INET6, get.name); 1951 t = xt_find_table_lock(net, AF_INET6, get.name);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fd25e447a5fa..8125931106be 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -843,8 +843,8 @@ start_lookup:
843 flush_stack(stack, count, skb, count - 1); 843 flush_stack(stack, count, skb, count - 1);
844 } else { 844 } else {
845 if (!inner_flushed) 845 if (!inner_flushed)
846 UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, 846 UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
847 proto == IPPROTO_UDPLITE); 847 proto == IPPROTO_UDPLITE);
848 consume_skb(skb); 848 consume_skb(skb);
849 } 849 }
850 return 0; 850 return 0;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ec22078b0914..42de4ccd159f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
123 struct l2tp_tunnel *tunnel = NULL; 123 struct l2tp_tunnel *tunnel = NULL;
124 int length; 124 int length;
125 125
126 /* Point to L2TP header */
127 optr = ptr = skb->data;
128
129 if (!pskb_may_pull(skb, 4)) 126 if (!pskb_may_pull(skb, 4))
130 goto discard; 127 goto discard;
131 128
129 /* Point to L2TP header */
130 optr = ptr = skb->data;
132 session_id = ntohl(*((__be32 *) ptr)); 131 session_id = ntohl(*((__be32 *) ptr));
133 ptr += 4; 132 ptr += 4;
134 133
@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
156 if (!pskb_may_pull(skb, length)) 155 if (!pskb_may_pull(skb, length))
157 goto discard; 156 goto discard;
158 157
158 /* Point to L2TP header */
159 optr = ptr = skb->data;
160 ptr += 4;
159 pr_debug("%s: ip recv\n", tunnel->name); 161 pr_debug("%s: ip recv\n", tunnel->name);
160 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 162 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
161 } 163 }
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 6b54ff3ff4cb..cd479903d943 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -136,12 +136,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
136 struct l2tp_tunnel *tunnel = NULL; 136 struct l2tp_tunnel *tunnel = NULL;
137 int length; 137 int length;
138 138
139 /* Point to L2TP header */
140 optr = ptr = skb->data;
141
142 if (!pskb_may_pull(skb, 4)) 139 if (!pskb_may_pull(skb, 4))
143 goto discard; 140 goto discard;
144 141
142 /* Point to L2TP header */
143 optr = ptr = skb->data;
145 session_id = ntohl(*((__be32 *) ptr)); 144 session_id = ntohl(*((__be32 *) ptr));
146 ptr += 4; 145 ptr += 4;
147 146
@@ -169,6 +168,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
169 if (!pskb_may_pull(skb, length)) 168 if (!pskb_may_pull(skb, length))
170 goto discard; 169 goto discard;
171 170
171 /* Point to L2TP header */
172 optr = ptr = skb->data;
173 ptr += 4;
172 pr_debug("%s: ip recv\n", tunnel->name); 174 pr_debug("%s: ip recv\n", tunnel->name);
173 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 175 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
174 } 176 }
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 283981108ca8..74142d07ad31 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -343,8 +343,10 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
343 struct ieee80211_chanctx *ctx, 343 struct ieee80211_chanctx *ctx,
344 const struct cfg80211_chan_def *chandef) 344 const struct cfg80211_chan_def *chandef)
345{ 345{
346 if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) 346 if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
347 ieee80211_recalc_chanctx_min_def(local, ctx);
347 return; 348 return;
349 }
348 350
349 WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); 351 WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
350 352
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 804575ff7af5..422003540169 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1719,6 +1719,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
1719enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta); 1719enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
1720enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta); 1720enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
1721void ieee80211_sta_set_rx_nss(struct sta_info *sta); 1721void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1722enum ieee80211_sta_rx_bandwidth
1723ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
1724enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
1725void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1722void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, 1726void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
1723 struct ieee80211_mgmt *mgmt); 1727 struct ieee80211_mgmt *mgmt);
1724u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1728u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5b6aec1a0630..002244bca948 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
530 const u8 *target_addr, *orig_addr; 530 const u8 *target_addr, *orig_addr;
531 const u8 *da; 531 const u8 *da;
532 u8 target_flags, ttl, flags; 532 u8 target_flags, ttl, flags;
533 u32 orig_sn, target_sn, lifetime, target_metric; 533 u32 orig_sn, target_sn, lifetime, target_metric = 0;
534 bool reply = false; 534 bool reply = false;
535 bool forward = true; 535 bool forward = true;
536 bool root_is_gate; 536 bool root_is_gate;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d20bab5c146c..861b93ffbe92 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -67,6 +67,7 @@
67 67
68static const struct rhashtable_params sta_rht_params = { 68static const struct rhashtable_params sta_rht_params = {
69 .nelem_hint = 3, /* start small */ 69 .nelem_hint = 3, /* start small */
70 .insecure_elasticity = true, /* Disable chain-length checks. */
70 .automatic_shrinking = true, 71 .automatic_shrinking = true,
71 .head_offset = offsetof(struct sta_info, hash_node), 72 .head_offset = offsetof(struct sta_info, hash_node),
72 .key_offset = offsetof(struct sta_info, addr), 73 .key_offset = offsetof(struct sta_info, addr),
@@ -258,11 +259,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
258} 259}
259 260
260/* Caller must hold local->sta_mtx */ 261/* Caller must hold local->sta_mtx */
261static void sta_info_hash_add(struct ieee80211_local *local, 262static int sta_info_hash_add(struct ieee80211_local *local,
262 struct sta_info *sta) 263 struct sta_info *sta)
263{ 264{
264 rhashtable_insert_fast(&local->sta_hash, &sta->hash_node, 265 return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
265 sta_rht_params); 266 sta_rht_params);
266} 267}
267 268
268static void sta_deliver_ps_frames(struct work_struct *wk) 269static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -524,7 +525,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
524 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 525 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
525 526
526 /* make the station visible */ 527 /* make the station visible */
527 sta_info_hash_add(local, sta); 528 err = sta_info_hash_add(local, sta);
529 if (err)
530 goto out_drop_sta;
528 531
529 list_add_tail_rcu(&sta->list, &local->sta_list); 532 list_add_tail_rcu(&sta->list, &local->sta_list);
530 533
@@ -557,6 +560,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
557 out_remove: 560 out_remove:
558 sta_info_hash_del(local, sta); 561 sta_info_hash_del(local, sta);
559 list_del_rcu(&sta->list); 562 list_del_rcu(&sta->list);
563 out_drop_sta:
560 local->num_sta--; 564 local->num_sta--;
561 synchronize_net(); 565 synchronize_net();
562 __cleanup_single_sta(sta); 566 __cleanup_single_sta(sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 053f5c4fa495..62193f4bc37b 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -377,7 +377,6 @@ DECLARE_EWMA(signal, 1024, 8)
377 * @uploaded: set to true when sta is uploaded to the driver 377 * @uploaded: set to true when sta is uploaded to the driver
378 * @sta: station information we share with the driver 378 * @sta: station information we share with the driver
379 * @sta_state: duplicates information about station state (for debug) 379 * @sta_state: duplicates information about station state (for debug)
380 * @beacon_loss_count: number of times beacon loss has triggered
381 * @rcu_head: RCU head used for freeing this station struct 380 * @rcu_head: RCU head used for freeing this station struct
382 * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, 381 * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
383 * taken from HT/VHT capabilities or VHT operating mode notification 382 * taken from HT/VHT capabilities or VHT operating mode notification
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index c9eeb3f12808..a29ea813b7d5 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,7 +4,7 @@
4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 * Copyright 2014, Intel Corporation 5 * Copyright 2014, Intel Corporation
6 * Copyright 2014 Intel Mobile Communications GmbH 6 * Copyright 2014 Intel Mobile Communications GmbH
7 * Copyright 2015 Intel Deutschland GmbH 7 * Copyright 2015 - 2016 Intel Deutschland GmbH
8 * 8 *
9 * This file is GPLv2 as found in COPYING. 9 * This file is GPLv2 as found in COPYING.
10 */ 10 */
@@ -15,6 +15,7 @@
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include "ieee80211_i.h" 16#include "ieee80211_i.h"
17#include "driver-ops.h" 17#include "driver-ops.h"
18#include "rate.h"
18 19
19/* give usermode some time for retries in setting up the TDLS session */ 20/* give usermode some time for retries in setting up the TDLS session */
20#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) 21#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
@@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
302 /* IEEE802.11ac-2013 Table E-4 */ 303 /* IEEE802.11ac-2013 Table E-4 */
303 u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; 304 u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
304 struct cfg80211_chan_def uc = sta->tdls_chandef; 305 struct cfg80211_chan_def uc = sta->tdls_chandef;
305 enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta); 306 enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
306 int i; 307 int i;
307 308
308 /* only support upgrading non-narrow channels up to 80Mhz */ 309 /* only support upgrading non-narrow channels up to 80Mhz */
@@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
313 if (max_width > NL80211_CHAN_WIDTH_80) 314 if (max_width > NL80211_CHAN_WIDTH_80)
314 max_width = NL80211_CHAN_WIDTH_80; 315 max_width = NL80211_CHAN_WIDTH_80;
315 316
316 if (uc.width == max_width) 317 if (uc.width >= max_width)
317 return; 318 return;
318 /* 319 /*
319 * Channel usage constrains in the IEEE802.11ac-2013 specification only 320 * Channel usage constrains in the IEEE802.11ac-2013 specification only
@@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
324 for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) 325 for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
325 if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { 326 if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
326 uc.center_freq1 = centers_80mhz[i]; 327 uc.center_freq1 = centers_80mhz[i];
328 uc.center_freq2 = 0;
327 uc.width = NL80211_CHAN_WIDTH_80; 329 uc.width = NL80211_CHAN_WIDTH_80;
328 break; 330 break;
329 } 331 }
@@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
332 return; 334 return;
333 335
334 /* proceed to downgrade the chandef until usable or the same */ 336 /* proceed to downgrade the chandef until usable or the same */
335 while (uc.width > max_width && 337 while (uc.width > max_width ||
336 !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, 338 !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
337 sdata->wdev.iftype)) 339 sdata->wdev.iftype))
338 ieee80211_chandef_downgrade(&uc); 340 ieee80211_chandef_downgrade(&uc);
@@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
1242 return ret; 1244 return ret;
1243} 1245}
1244 1246
1245static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata) 1247static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
1248 struct sta_info *sta)
1246{ 1249{
1247 struct ieee80211_local *local = sdata->local; 1250 struct ieee80211_local *local = sdata->local;
1248 struct ieee80211_chanctx_conf *conf; 1251 struct ieee80211_chanctx_conf *conf;
1249 struct ieee80211_chanctx *ctx; 1252 struct ieee80211_chanctx *ctx;
1253 enum nl80211_chan_width width;
1254 struct ieee80211_supported_band *sband;
1250 1255
1251 mutex_lock(&local->chanctx_mtx); 1256 mutex_lock(&local->chanctx_mtx);
1252 conf = rcu_dereference_protected(sdata->vif.chanctx_conf, 1257 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1253 lockdep_is_held(&local->chanctx_mtx)); 1258 lockdep_is_held(&local->chanctx_mtx));
1254 if (conf) { 1259 if (conf) {
1260 width = conf->def.width;
1261 sband = local->hw.wiphy->bands[conf->def.chan->band];
1255 ctx = container_of(conf, struct ieee80211_chanctx, conf); 1262 ctx = container_of(conf, struct ieee80211_chanctx, conf);
1256 ieee80211_recalc_chanctx_chantype(local, ctx); 1263 ieee80211_recalc_chanctx_chantype(local, ctx);
1264
1265 /* if width changed and a peer is given, update its BW */
1266 if (width != conf->def.width && sta &&
1267 test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
1268 enum ieee80211_sta_rx_bandwidth bw;
1269
1270 bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
1271 bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
1272 if (bw != sta->sta.bandwidth) {
1273 sta->sta.bandwidth = bw;
1274 rate_control_rate_update(local, sband, sta,
1275 IEEE80211_RC_BW_CHANGED);
1276 /*
1277 * if a TDLS peer BW was updated, we need to
1278 * recalc the chandef width again, to get the
1279 * correct chanctx min_def
1280 */
1281 ieee80211_recalc_chanctx_chantype(local, ctx);
1282 }
1283 }
1284
1257 } 1285 }
1258 mutex_unlock(&local->chanctx_mtx); 1286 mutex_unlock(&local->chanctx_mtx);
1259} 1287}
@@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
1350 break; 1378 break;
1351 } 1379 }
1352 1380
1353 iee80211_tdls_recalc_chanctx(sdata);
1354
1355 mutex_lock(&local->sta_mtx); 1381 mutex_lock(&local->sta_mtx);
1356 sta = sta_info_get(sdata, peer); 1382 sta = sta_info_get(sdata, peer);
1357 if (!sta) { 1383 if (!sta) {
@@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
1360 break; 1386 break;
1361 } 1387 }
1362 1388
1389 iee80211_tdls_recalc_chanctx(sdata, sta);
1363 iee80211_tdls_recalc_ht_protection(sdata, sta); 1390 iee80211_tdls_recalc_ht_protection(sdata, sta);
1364 1391
1365 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); 1392 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
@@ -1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
1390 iee80211_tdls_recalc_ht_protection(sdata, NULL); 1417 iee80211_tdls_recalc_ht_protection(sdata, NULL);
1391 mutex_unlock(&local->sta_mtx); 1418 mutex_unlock(&local->sta_mtx);
1392 1419
1393 iee80211_tdls_recalc_chanctx(sdata); 1420 iee80211_tdls_recalc_chanctx(sdata, NULL);
1394 break; 1421 break;
1395 default: 1422 default:
1396 ret = -ENOTSUPP; 1423 ret = -ENOTSUPP;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 62ad5321257d..21f6602395f7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1116,11 +1116,15 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1116 reset_agg_timer = true; 1116 reset_agg_timer = true;
1117 } else { 1117 } else {
1118 queued = true; 1118 queued = true;
1119 if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
1120 clear_sta_flag(tx->sta, WLAN_STA_SP);
1121 ps_dbg(tx->sta->sdata,
1122 "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
1123 tx->sta->sta.addr, tx->sta->sta.aid);
1124 }
1119 info->control.vif = &tx->sdata->vif; 1125 info->control.vif = &tx->sdata->vif;
1120 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1126 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1121 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS | 1127 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
1122 IEEE80211_TX_CTL_NO_PS_BUFFER |
1123 IEEE80211_TX_STATUS_EOSP;
1124 __skb_queue_tail(&tid_tx->pending, skb); 1128 __skb_queue_tail(&tid_tx->pending, skb);
1125 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1129 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1126 purge_skb = __skb_dequeue(&tid_tx->pending); 1130 purge_skb = __skb_dequeue(&tid_tx->pending);
@@ -1247,7 +1251,8 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
1247 struct txq_info *txqi; 1251 struct txq_info *txqi;
1248 u8 ac; 1252 u8 ac;
1249 1253
1250 if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE) 1254 if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
1255 (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
1251 goto tx_normal; 1256 goto tx_normal;
1252 1257
1253 if (!ieee80211_is_data(hdr->frame_control)) 1258 if (!ieee80211_is_data(hdr->frame_control))
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 89e04d55aa18..e590e2ef9eaf 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -319,7 +319,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
319 return IEEE80211_STA_RX_BW_80; 319 return IEEE80211_STA_RX_BW_80;
320} 320}
321 321
322static enum ieee80211_sta_rx_bandwidth 322enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
323{
324 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
325 u32 cap_width;
326
327 if (!vht_cap->vht_supported) {
328 if (!sta->sta.ht_cap.ht_supported)
329 return NL80211_CHAN_WIDTH_20_NOHT;
330
331 return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
332 NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
333 }
334
335 cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
336
337 if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
338 return NL80211_CHAN_WIDTH_160;
339 else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
340 return NL80211_CHAN_WIDTH_80P80;
341
342 return NL80211_CHAN_WIDTH_80;
343}
344
345enum ieee80211_sta_rx_bandwidth
323ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) 346ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
324{ 347{
325 switch (width) { 348 switch (width) {
@@ -347,10 +370,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
347 370
348 bw = ieee80211_sta_cap_rx_bw(sta); 371 bw = ieee80211_sta_cap_rx_bw(sta);
349 bw = min(bw, sta->cur_max_bandwidth); 372 bw = min(bw, sta->cur_max_bandwidth);
350 373 bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
351 /* do not cap the BW of TDLS WIDER_BW peers by the bss */
352 if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
353 bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
354 374
355 return bw; 375 return bw;
356} 376}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index b18c5ed42d95..0b80a7140cc4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
543 if (!dev) 543 if (!dev)
544 return ERR_PTR(-ENODEV); 544 return ERR_PTR(-ENODEV);
545 545
546 if (IS_ERR(dev))
547 return dev;
548
546 /* The caller is holding rtnl anyways, so release the dev reference */ 549 /* The caller is holding rtnl anyways, so release the dev reference */
547 dev_put(dev); 550 dev_put(dev);
548 551
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index b0bc475f641e..2e8e7e5fb4a6 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -95,7 +95,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
95 if (!nested) 95 if (!nested)
96 goto nla_put_failure; 96 goto nla_put_failure;
97 if (mtype_do_head(skb, map) || 97 if (mtype_do_head(skb, map) ||
98 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || 98 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
99 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) 99 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
100 goto nla_put_failure; 100 goto nla_put_failure;
101 if (unlikely(ip_set_put_flags(skb, set))) 101 if (unlikely(ip_set_put_flags(skb, set)))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 7e6568cad494..a748b0c2c981 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -497,6 +497,26 @@ __ip_set_put(struct ip_set *set)
497 write_unlock_bh(&ip_set_ref_lock); 497 write_unlock_bh(&ip_set_ref_lock);
498} 498}
499 499
500/* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
501 * a separate reference counter
502 */
503static inline void
504__ip_set_get_netlink(struct ip_set *set)
505{
506 write_lock_bh(&ip_set_ref_lock);
507 set->ref_netlink++;
508 write_unlock_bh(&ip_set_ref_lock);
509}
510
511static inline void
512__ip_set_put_netlink(struct ip_set *set)
513{
514 write_lock_bh(&ip_set_ref_lock);
515 BUG_ON(set->ref_netlink == 0);
516 set->ref_netlink--;
517 write_unlock_bh(&ip_set_ref_lock);
518}
519
500/* Add, del and test set entries from kernel. 520/* Add, del and test set entries from kernel.
501 * 521 *
502 * The set behind the index must exist and must be referenced 522 * The set behind the index must exist and must be referenced
@@ -1002,7 +1022,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
1002 if (!attr[IPSET_ATTR_SETNAME]) { 1022 if (!attr[IPSET_ATTR_SETNAME]) {
1003 for (i = 0; i < inst->ip_set_max; i++) { 1023 for (i = 0; i < inst->ip_set_max; i++) {
1004 s = ip_set(inst, i); 1024 s = ip_set(inst, i);
1005 if (s && s->ref) { 1025 if (s && (s->ref || s->ref_netlink)) {
1006 ret = -IPSET_ERR_BUSY; 1026 ret = -IPSET_ERR_BUSY;
1007 goto out; 1027 goto out;
1008 } 1028 }
@@ -1024,7 +1044,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
1024 if (!s) { 1044 if (!s) {
1025 ret = -ENOENT; 1045 ret = -ENOENT;
1026 goto out; 1046 goto out;
1027 } else if (s->ref) { 1047 } else if (s->ref || s->ref_netlink) {
1028 ret = -IPSET_ERR_BUSY; 1048 ret = -IPSET_ERR_BUSY;
1029 goto out; 1049 goto out;
1030 } 1050 }
@@ -1171,6 +1191,9 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1171 from->family == to->family)) 1191 from->family == to->family))
1172 return -IPSET_ERR_TYPE_MISMATCH; 1192 return -IPSET_ERR_TYPE_MISMATCH;
1173 1193
1194 if (from->ref_netlink || to->ref_netlink)
1195 return -EBUSY;
1196
1174 strncpy(from_name, from->name, IPSET_MAXNAMELEN); 1197 strncpy(from_name, from->name, IPSET_MAXNAMELEN);
1175 strncpy(from->name, to->name, IPSET_MAXNAMELEN); 1198 strncpy(from->name, to->name, IPSET_MAXNAMELEN);
1176 strncpy(to->name, from_name, IPSET_MAXNAMELEN); 1199 strncpy(to->name, from_name, IPSET_MAXNAMELEN);
@@ -1206,7 +1229,7 @@ ip_set_dump_done(struct netlink_callback *cb)
1206 if (set->variant->uref) 1229 if (set->variant->uref)
1207 set->variant->uref(set, cb, false); 1230 set->variant->uref(set, cb, false);
1208 pr_debug("release set %s\n", set->name); 1231 pr_debug("release set %s\n", set->name);
1209 __ip_set_put_byindex(inst, index); 1232 __ip_set_put_netlink(set);
1210 } 1233 }
1211 return 0; 1234 return 0;
1212} 1235}
@@ -1328,7 +1351,7 @@ dump_last:
1328 if (!cb->args[IPSET_CB_ARG0]) { 1351 if (!cb->args[IPSET_CB_ARG0]) {
1329 /* Start listing: make sure set won't be destroyed */ 1352 /* Start listing: make sure set won't be destroyed */
1330 pr_debug("reference set\n"); 1353 pr_debug("reference set\n");
1331 set->ref++; 1354 set->ref_netlink++;
1332 } 1355 }
1333 write_unlock_bh(&ip_set_ref_lock); 1356 write_unlock_bh(&ip_set_ref_lock);
1334 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, 1357 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
@@ -1396,7 +1419,7 @@ release_refcount:
1396 if (set->variant->uref) 1419 if (set->variant->uref)
1397 set->variant->uref(set, cb, false); 1420 set->variant->uref(set, cb, false);
1398 pr_debug("release set %s\n", set->name); 1421 pr_debug("release set %s\n", set->name);
1399 __ip_set_put_byindex(inst, index); 1422 __ip_set_put_netlink(set);
1400 cb->args[IPSET_CB_ARG0] = 0; 1423 cb->args[IPSET_CB_ARG0] = 0;
1401 } 1424 }
1402out: 1425out:
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index e5336ab36d67..d32fd6b036bf 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -1082,7 +1082,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
1082 if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask)) 1082 if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
1083 goto nla_put_failure; 1083 goto nla_put_failure;
1084#endif 1084#endif
1085 if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || 1085 if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
1086 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) 1086 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
1087 goto nla_put_failure; 1087 goto nla_put_failure;
1088 if (unlikely(ip_set_put_flags(skb, set))) 1088 if (unlikely(ip_set_put_flags(skb, set)))
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 24c6c1962aea..a2a89e4e0a14 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -458,7 +458,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
458 if (!nested) 458 if (!nested)
459 goto nla_put_failure; 459 goto nla_put_failure;
460 if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || 460 if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
461 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || 461 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
462 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, 462 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
463 htonl(sizeof(*map) + n * set->dsize))) 463 htonl(sizeof(*map) + n * set->dsize)))
464 goto nla_put_failure; 464 goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 75429997ed41..cb5b630a645b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -582,7 +582,12 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
582 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 582 /* nfnetlink_unicast will either free the nskb or add it to a socket */
583 err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); 583 err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
584 if (err < 0) { 584 if (err < 0) {
585 queue->queue_user_dropped++; 585 if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
586 failopen = 1;
587 err = 0;
588 } else {
589 queue->queue_user_dropped++;
590 }
586 goto err_out_unlock; 591 goto err_out_unlock;
587 } 592 }
588 593
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 234a73344c6e..ce947292ae77 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -7,7 +7,9 @@ config OPENVSWITCH
7 depends on INET 7 depends on INET
8 depends on !NF_CONNTRACK || \ 8 depends on !NF_CONNTRACK || \
9 (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \ 9 (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
10 (!NF_NAT || NF_NAT))) 10 (!NF_NAT || NF_NAT) && \
11 (!NF_NAT_IPV4 || NF_NAT_IPV4) && \
12 (!NF_NAT_IPV6 || NF_NAT_IPV6)))
11 select LIBCRC32C 13 select LIBCRC32C
12 select MPLS 14 select MPLS
13 select NET_MPLS_GSO 15 select NET_MPLS_GSO
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index dc5eb29fe7d6..1b9d286756be 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -535,14 +535,15 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
535 switch (ctinfo) { 535 switch (ctinfo) {
536 case IP_CT_RELATED: 536 case IP_CT_RELATED:
537 case IP_CT_RELATED_REPLY: 537 case IP_CT_RELATED_REPLY:
538 if (skb->protocol == htons(ETH_P_IP) && 538 if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
539 skb->protocol == htons(ETH_P_IP) &&
539 ip_hdr(skb)->protocol == IPPROTO_ICMP) { 540 ip_hdr(skb)->protocol == IPPROTO_ICMP) {
540 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, 541 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
541 hooknum)) 542 hooknum))
542 err = NF_DROP; 543 err = NF_DROP;
543 goto push; 544 goto push;
544#if IS_ENABLED(CONFIG_NF_NAT_IPV6) 545 } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
545 } else if (skb->protocol == htons(ETH_P_IPV6)) { 546 skb->protocol == htons(ETH_P_IPV6)) {
546 __be16 frag_off; 547 __be16 frag_off;
547 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 548 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
548 int hdrlen = ipv6_skip_exthdr(skb, 549 int hdrlen = ipv6_skip_exthdr(skb,
@@ -557,7 +558,6 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
557 err = NF_DROP; 558 err = NF_DROP;
558 goto push; 559 goto push;
559 } 560 }
560#endif
561 } 561 }
562 /* Non-ICMP, fall thru to initialize if needed. */ 562 /* Non-ICMP, fall thru to initialize if needed. */
563 case IP_CT_NEW: 563 case IP_CT_NEW:
@@ -664,11 +664,12 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
664 664
665 /* Determine NAT type. 665 /* Determine NAT type.
666 * Check if the NAT type can be deduced from the tracked connection. 666 * Check if the NAT type can be deduced from the tracked connection.
667 * Make sure expected traffic is NATted only when committing. 667 * Make sure new expected connections (IP_CT_RELATED) are NATted only
668 * when committing.
668 */ 669 */
669 if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW && 670 if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
670 ct->status & IPS_NAT_MASK && 671 ct->status & IPS_NAT_MASK &&
671 (!(ct->status & IPS_EXPECTED_BIT) || info->commit)) { 672 (ctinfo != IP_CT_RELATED || info->commit)) {
672 /* NAT an established or related connection like before. */ 673 /* NAT an established or related connection like before. */
673 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) 674 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
674 /* This is the REPLY direction for a connection 675 /* This is the REPLY direction for a connection
@@ -968,7 +969,8 @@ static int parse_nat(const struct nlattr *attr,
968 break; 969 break;
969 970
970 case OVS_NAT_ATTR_IP_MIN: 971 case OVS_NAT_ATTR_IP_MIN:
971 nla_memcpy(&info->range.min_addr, a, nla_len(a)); 972 nla_memcpy(&info->range.min_addr, a,
973 sizeof(info->range.min_addr));
972 info->range.flags |= NF_NAT_RANGE_MAP_IPS; 974 info->range.flags |= NF_NAT_RANGE_MAP_IPS;
973 break; 975 break;
974 976
@@ -1238,7 +1240,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
1238 } 1240 }
1239 1241
1240 if (info->range.flags & NF_NAT_RANGE_MAP_IPS) { 1242 if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
1241 if (info->family == NFPROTO_IPV4) { 1243 if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
1244 info->family == NFPROTO_IPV4) {
1242 if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN, 1245 if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
1243 info->range.min_addr.ip) || 1246 info->range.min_addr.ip) ||
1244 (info->range.max_addr.ip 1247 (info->range.max_addr.ip
@@ -1246,8 +1249,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
1246 (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX, 1249 (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
1247 info->range.max_addr.ip)))) 1250 info->range.max_addr.ip))))
1248 return false; 1251 return false;
1249#if IS_ENABLED(CONFIG_NF_NAT_IPV6) 1252 } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
1250 } else if (info->family == NFPROTO_IPV6) { 1253 info->family == NFPROTO_IPV6) {
1251 if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN, 1254 if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
1252 &info->range.min_addr.in6) || 1255 &info->range.min_addr.in6) ||
1253 (memcmp(&info->range.max_addr.in6, 1256 (memcmp(&info->range.max_addr.in6,
@@ -1256,7 +1259,6 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
1256 (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX, 1259 (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
1257 &info->range.max_addr.in6)))) 1260 &info->range.max_addr.in6))))
1258 return false; 1261 return false;
1259#endif
1260 } else { 1262 } else {
1261 return false; 1263 return false;
1262 } 1264 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1ecfa710ca98..f12c17f355d9 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4151,7 +4151,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4151 4151
4152 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 4152 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4153 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 4153 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4154 WARN(1, "Tx-ring is not supported.\n"); 4154 net_warn_ratelimited("Tx-ring is not supported.\n");
4155 goto out; 4155 goto out;
4156 } 4156 }
4157 4157
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 977fb86065b7..abc8cc805e8d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -796,7 +796,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
796 796
797 addr = kmap_atomic(sg_page(&frag->f_sg)); 797 addr = kmap_atomic(sg_page(&frag->f_sg));
798 798
799 src = addr + frag_off; 799 src = addr + frag->f_sg.offset + frag_off;
800 dst = (void *)map->m_page_addrs[map_page] + map_off; 800 dst = (void *)map->m_page_addrs[map_page] + map_off;
801 for (k = 0; k < to_copy; k += 8) { 801 for (k = 0; k < to_copy; k += 8) {
802 /* Record ports that became uncongested, ie 802 /* Record ports that became uncongested, ie
diff --git a/net/rds/page.c b/net/rds/page.c
index 616f21f4e7d7..e2b5a5832d3d 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -135,8 +135,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
135 if (rem->r_offset != 0) 135 if (rem->r_offset != 0)
136 rds_stats_inc(s_page_remainder_hit); 136 rds_stats_inc(s_page_remainder_hit);
137 137
138 rem->r_offset += bytes; 138 rem->r_offset += ALIGN(bytes, 8);
139 if (rem->r_offset == PAGE_SIZE) { 139 if (rem->r_offset >= PAGE_SIZE) {
140 __free_page(rem->r_page); 140 __free_page(rem->r_page);
141 rem->r_page = NULL; 141 rem->r_page = NULL;
142 } 142 }
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 736c004abfbc..9844fe573029 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -401,7 +401,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
401 sk = chunk->skb->sk; 401 sk = chunk->skb->sk;
402 402
403 /* Allocate the new skb. */ 403 /* Allocate the new skb. */
404 nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC); 404 nskb = alloc_skb(packet->size + MAX_HEADER, gfp);
405 if (!nskb) 405 if (!nskb)
406 goto nomem; 406 goto nomem;
407 407
@@ -523,8 +523,8 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
523 */ 523 */
524 if (auth) 524 if (auth)
525 sctp_auth_calculate_hmac(asoc, nskb, 525 sctp_auth_calculate_hmac(asoc, nskb,
526 (struct sctp_auth_chunk *)auth, 526 (struct sctp_auth_chunk *)auth,
527 GFP_ATOMIC); 527 gfp);
528 528
529 /* 2) Calculate the Adler-32 checksum of the whole packet, 529 /* 2) Calculate the Adler-32 checksum of the whole packet,
530 * including the SCTP common header and all the 530 * including the SCTP common header and all the
@@ -705,7 +705,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
705 /* Check whether this chunk and all the rest of pending data will fit 705 /* Check whether this chunk and all the rest of pending data will fit
706 * or delay in hopes of bundling a full sized packet. 706 * or delay in hopes of bundling a full sized packet.
707 */ 707 */
708 if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead) 708 if (chunk->skb->len + q->out_qlen >
709 transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
709 /* Enough data queued to fill a packet */ 710 /* Enough data queued to fill a packet */
710 return SCTP_XMIT_OK; 711 return SCTP_XMIT_OK;
711 712
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8c6bc795f060..15612ffa8d57 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1728,8 +1728,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
1728 return 0; 1728 return 0;
1729 } 1729 }
1730 1730
1731 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1731 first = snd_buf->page_base >> PAGE_SHIFT;
1732 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; 1732 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
1733 rqstp->rq_enc_pages_num = last - first + 1 + 1; 1733 rqstp->rq_enc_pages_num = last - first + 1 + 1;
1734 rqstp->rq_enc_pages 1734 rqstp->rq_enc_pages
1735 = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), 1735 = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
@@ -1775,10 +1775,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1775 status = alloc_enc_pages(rqstp); 1775 status = alloc_enc_pages(rqstp);
1776 if (status) 1776 if (status)
1777 return status; 1777 return status;
1778 first = snd_buf->page_base >> PAGE_CACHE_SHIFT; 1778 first = snd_buf->page_base >> PAGE_SHIFT;
1779 inpages = snd_buf->pages + first; 1779 inpages = snd_buf->pages + first;
1780 snd_buf->pages = rqstp->rq_enc_pages; 1780 snd_buf->pages = rqstp->rq_enc_pages;
1781 snd_buf->page_base -= first << PAGE_CACHE_SHIFT; 1781 snd_buf->page_base -= first << PAGE_SHIFT;
1782 /* 1782 /*
1783 * Give the tail its own page, in case we need extra space in the 1783 * Give the tail its own page, in case we need extra space in the
1784 * head when wrapping: 1784 * head when wrapping:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d94a8e1e9f05..045e11ecd332 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -465,7 +465,7 @@ encryptor(struct scatterlist *sg, void *data)
465 page_pos = desc->pos - outbuf->head[0].iov_len; 465 page_pos = desc->pos - outbuf->head[0].iov_len;
466 if (page_pos >= 0 && page_pos < outbuf->page_len) { 466 if (page_pos >= 0 && page_pos < outbuf->page_len) {
467 /* pages are not in place: */ 467 /* pages are not in place: */
468 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; 468 int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
469 in_page = desc->pages[i]; 469 in_page = desc->pages[i];
470 } else { 470 } else {
471 in_page = sg_page(sg); 471 in_page = sg_page(sg);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 765088e4ad84..a737c2da0837 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -79,9 +79,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
79 len -= buf->head[0].iov_len; 79 len -= buf->head[0].iov_len;
80 if (len <= buf->page_len) { 80 if (len <= buf->page_len) {
81 unsigned int last = (buf->page_base + len - 1) 81 unsigned int last = (buf->page_base + len - 1)
82 >>PAGE_CACHE_SHIFT; 82 >>PAGE_SHIFT;
83 unsigned int offset = (buf->page_base + len - 1) 83 unsigned int offset = (buf->page_base + len - 1)
84 & (PAGE_CACHE_SIZE - 1); 84 & (PAGE_SIZE - 1);
85 ptr = kmap_atomic(buf->pages[last]); 85 ptr = kmap_atomic(buf->pages[last]);
86 pad = *(ptr + offset); 86 pad = *(ptr + offset);
87 kunmap_atomic(ptr); 87 kunmap_atomic(ptr);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 008c25d1b9f9..553bf95f7003 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -881,7 +881,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
881 char *kaddr; 881 char *kaddr;
882 ssize_t ret = -ENOMEM; 882 ssize_t ret = -ENOMEM;
883 883
884 if (count >= PAGE_CACHE_SIZE) 884 if (count >= PAGE_SIZE)
885 goto out_slow; 885 goto out_slow;
886 886
887 page = find_or_create_page(mapping, 0, GFP_KERNEL); 887 page = find_or_create_page(mapping, 0, GFP_KERNEL);
@@ -892,7 +892,7 @@ static ssize_t cache_downcall(struct address_space *mapping,
892 ret = cache_do_downcall(kaddr, buf, count, cd); 892 ret = cache_do_downcall(kaddr, buf, count, cd);
893 kunmap(page); 893 kunmap(page);
894 unlock_page(page); 894 unlock_page(page);
895 page_cache_release(page); 895 put_page(page);
896 return ret; 896 return ret;
897out_slow: 897out_slow:
898 return cache_slow_downcall(buf, count, cd); 898 return cache_slow_downcall(buf, count, cd);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 31789ef3e614..fc48eca21fd2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1390,8 +1390,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
1390 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1390 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1391 int err; 1391 int err;
1392 1392
1393 sb->s_blocksize = PAGE_CACHE_SIZE; 1393 sb->s_blocksize = PAGE_SIZE;
1394 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1394 sb->s_blocksize_bits = PAGE_SHIFT;
1395 sb->s_magic = RPCAUTH_GSSMAGIC; 1395 sb->s_magic = RPCAUTH_GSSMAGIC;
1396 sb->s_op = &s_ops; 1396 sb->s_op = &s_ops;
1397 sb->s_d_op = &simple_dentry_operations; 1397 sb->s_d_op = &simple_dentry_operations;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 2df87f78e518..de70c78025d7 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -96,8 +96,8 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
96 if (base || xdr->page_base) { 96 if (base || xdr->page_base) {
97 pglen -= base; 97 pglen -= base;
98 base += xdr->page_base; 98 base += xdr->page_base;
99 ppage += base >> PAGE_CACHE_SHIFT; 99 ppage += base >> PAGE_SHIFT;
100 base &= ~PAGE_CACHE_MASK; 100 base &= ~PAGE_MASK;
101 } 101 }
102 do { 102 do {
103 char *kaddr; 103 char *kaddr;
@@ -113,7 +113,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
113 } 113 }
114 } 114 }
115 115
116 len = PAGE_CACHE_SIZE; 116 len = PAGE_SIZE;
117 kaddr = kmap_atomic(*ppage); 117 kaddr = kmap_atomic(*ppage);
118 if (base) { 118 if (base) {
119 len -= base; 119 len -= base;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4c1b53..6bdb3865212d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
164 * Note: the addresses pgto_base and pgfrom_base are both calculated in 164 * Note: the addresses pgto_base and pgfrom_base are both calculated in
165 * the same way: 165 * the same way:
166 * if a memory area starts at byte 'base' in page 'pages[i]', 166 * if a memory area starts at byte 'base' in page 'pages[i]',
167 * then its address is given as (i << PAGE_CACHE_SHIFT) + base 167 * then its address is given as (i << PAGE_SHIFT) + base
168 * Also note: pgfrom_base must be < pgto_base, but the memory areas 168 * Also note: pgfrom_base must be < pgto_base, but the memory areas
169 * they point to may overlap. 169 * they point to may overlap.
170 */ 170 */
@@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
181 pgto_base += len; 181 pgto_base += len;
182 pgfrom_base += len; 182 pgfrom_base += len;
183 183
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); 184 pgto = pages + (pgto_base >> PAGE_SHIFT);
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); 185 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
186 186
187 pgto_base &= ~PAGE_CACHE_MASK; 187 pgto_base &= ~PAGE_MASK;
188 pgfrom_base &= ~PAGE_CACHE_MASK; 188 pgfrom_base &= ~PAGE_MASK;
189 189
190 do { 190 do {
191 /* Are any pointers crossing a page boundary? */ 191 /* Are any pointers crossing a page boundary? */
192 if (pgto_base == 0) { 192 if (pgto_base == 0) {
193 pgto_base = PAGE_CACHE_SIZE; 193 pgto_base = PAGE_SIZE;
194 pgto--; 194 pgto--;
195 } 195 }
196 if (pgfrom_base == 0) { 196 if (pgfrom_base == 0) {
197 pgfrom_base = PAGE_CACHE_SIZE; 197 pgfrom_base = PAGE_SIZE;
198 pgfrom--; 198 pgfrom--;
199 } 199 }
200 200
@@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
236 char *vto; 236 char *vto;
237 size_t copy; 237 size_t copy;
238 238
239 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); 239 pgto = pages + (pgbase >> PAGE_SHIFT);
240 pgbase &= ~PAGE_CACHE_MASK; 240 pgbase &= ~PAGE_MASK;
241 241
242 for (;;) { 242 for (;;) {
243 copy = PAGE_CACHE_SIZE - pgbase; 243 copy = PAGE_SIZE - pgbase;
244 if (copy > len) 244 if (copy > len)
245 copy = len; 245 copy = len;
246 246
@@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
253 break; 253 break;
254 254
255 pgbase += copy; 255 pgbase += copy;
256 if (pgbase == PAGE_CACHE_SIZE) { 256 if (pgbase == PAGE_SIZE) {
257 flush_dcache_page(*pgto); 257 flush_dcache_page(*pgto);
258 pgbase = 0; 258 pgbase = 0;
259 pgto++; 259 pgto++;
@@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
280 char *vfrom; 280 char *vfrom;
281 size_t copy; 281 size_t copy;
282 282
283 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); 283 pgfrom = pages + (pgbase >> PAGE_SHIFT);
284 pgbase &= ~PAGE_CACHE_MASK; 284 pgbase &= ~PAGE_MASK;
285 285
286 do { 286 do {
287 copy = PAGE_CACHE_SIZE - pgbase; 287 copy = PAGE_SIZE - pgbase;
288 if (copy > len) 288 if (copy > len)
289 copy = len; 289 copy = len;
290 290
@@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
293 kunmap_atomic(vfrom); 293 kunmap_atomic(vfrom);
294 294
295 pgbase += copy; 295 pgbase += copy;
296 if (pgbase == PAGE_CACHE_SIZE) { 296 if (pgbase == PAGE_SIZE) {
297 pgbase = 0; 297 pgbase = 0;
298 pgfrom++; 298 pgfrom++;
299 } 299 }
@@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1038 if (base < buf->page_len) { 1038 if (base < buf->page_len) {
1039 subbuf->page_len = min(buf->page_len - base, len); 1039 subbuf->page_len = min(buf->page_len - base, len);
1040 base += buf->page_base; 1040 base += buf->page_base;
1041 subbuf->page_base = base & ~PAGE_CACHE_MASK; 1041 subbuf->page_base = base & ~PAGE_MASK;
1042 subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; 1042 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1043 len -= subbuf->page_len; 1043 len -= subbuf->page_len;
1044 base = 0; 1044 base = 0;
1045 } else { 1045 } else {
@@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1297 todo -= avail_here; 1297 todo -= avail_here;
1298 1298
1299 base += buf->page_base; 1299 base += buf->page_base;
1300 ppages = buf->pages + (base >> PAGE_CACHE_SHIFT); 1300 ppages = buf->pages + (base >> PAGE_SHIFT);
1301 base &= ~PAGE_CACHE_MASK; 1301 base &= ~PAGE_MASK;
1302 avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base, 1302 avail_page = min_t(unsigned int, PAGE_SIZE - base,
1303 avail_here); 1303 avail_here);
1304 c = kmap(*ppages) + base; 1304 c = kmap(*ppages) + base;
1305 1305
@@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1383 } 1383 }
1384 1384
1385 avail_page = min(avail_here, 1385 avail_page = min(avail_here,
1386 (unsigned int) PAGE_CACHE_SIZE); 1386 (unsigned int) PAGE_SIZE);
1387 } 1387 }
1388 base = buf->page_len; /* align to start of tail */ 1388 base = buf->page_len; /* align to start of tail */
1389 } 1389 }
@@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1479 if (page_len > len) 1479 if (page_len > len)
1480 page_len = len; 1480 page_len = len;
1481 len -= page_len; 1481 len -= page_len;
1482 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); 1482 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
1483 i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; 1483 i = (offset + buf->page_base) >> PAGE_SHIFT;
1484 thislen = PAGE_CACHE_SIZE - page_offset; 1484 thislen = PAGE_SIZE - page_offset;
1485 do { 1485 do {
1486 if (thislen > page_len) 1486 if (thislen > page_len)
1487 thislen = page_len; 1487 thislen = page_len;
@@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1492 page_len -= thislen; 1492 page_len -= thislen;
1493 i++; 1493 i++;
1494 page_offset = 0; 1494 page_offset = 0;
1495 thislen = PAGE_CACHE_SIZE; 1495 thislen = PAGE_SIZE;
1496 } while (page_len != 0); 1496 } while (page_len != 0);
1497 offset = 0; 1497 offset = 0;
1498 } 1498 }
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 8b5833c1ff2e..2b9b98f1c2ff 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -1079,7 +1079,7 @@ nla_put_failure:
1079 * @filter_dev: filter device 1079 * @filter_dev: filter device
1080 * @idx: 1080 * @idx:
1081 * 1081 *
1082 * Delete FDB entry from switch device. 1082 * Dump FDB entries from switch device.
1083 */ 1083 */
1084int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 1084int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
1085 struct net_device *dev, 1085 struct net_device *dev,
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb440e7..662bdd20a748 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
842 * qp_handle. 842 * qp_handle.
843 */ 843 */
844 if (vmci_handle_is_invalid(e_payload->handle) || 844 if (vmci_handle_is_invalid(e_payload->handle) ||
845 vmci_handle_is_equal(trans->qp_handle, e_payload->handle)) 845 !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
846 return; 846 return;
847 847
848 /* We don't ask for delayed CBs when we subscribe to this event (we 848 /* We don't ask for delayed CBs when we subscribe to this event (we
@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit);
2154 2154
2155MODULE_AUTHOR("VMware, Inc."); 2155MODULE_AUTHOR("VMware, Inc.");
2156MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2156MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2157MODULE_VERSION("1.0.2.0-k"); 2157MODULE_VERSION("1.0.3.0-k");
2158MODULE_LICENSE("GPL v2"); 2158MODULE_LICENSE("GPL v2");
2159MODULE_ALIAS("vmware_vsock"); 2159MODULE_ALIAS("vmware_vsock");
2160MODULE_ALIAS_NETPROTO(PF_VSOCK); 2160MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index ad7f5b3f9b61..1c4ad477ce93 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
292 XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; 292 XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
293 293
294 skb_dst_force(skb); 294 skb_dst_force(skb);
295 dev_hold(skb->dev);
295 296
296 nexthdr = x->type->input(x, skb); 297 nexthdr = x->type->input(x, skb);
297 298
298 if (nexthdr == -EINPROGRESS) 299 if (nexthdr == -EINPROGRESS)
299 return 0; 300 return 0;
300resume: 301resume:
302 dev_put(skb->dev);
303
301 spin_lock(&x->lock); 304 spin_lock(&x->lock);
302 if (nexthdr <= 0) { 305 if (nexthdr <= 0) {
303 if (nexthdr == -EBADMSG) { 306 if (nexthdr == -EBADMSG) {
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 502c9fc8db85..b820cc96a3bc 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -76,16 +76,10 @@ HOSTLOADLIBES_offwaketime += -lelf
76HOSTLOADLIBES_spintest += -lelf 76HOSTLOADLIBES_spintest += -lelf
77HOSTLOADLIBES_map_perf_test += -lelf -lrt 77HOSTLOADLIBES_map_perf_test += -lelf -lrt
78 78
79# point this to your LLVM backend with bpf support 79# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
80LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc 80# But, there is no easy way to fix it, so just exclude it since it is
81
82# asm/sysreg.h inline assmbly used by it is incompatible with llvm.
83# But, ehere is not easy way to fix it, so just exclude it since it is
84# useless for BPF samples. 81# useless for BPF samples.
85$(obj)/%.o: $(src)/%.c 82$(obj)/%.o: $(src)/%.c
86 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ 83 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
87 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ 84 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
88 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ 85 -O2 -emit-llvm -c $< -o -| llc -march=bpf -filetype=obj -o $@
89 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
90 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
91 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 9363500131a7..7904a2a493de 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -82,6 +82,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
82#define PT_REGS_FP(x) ((x)->bp) 82#define PT_REGS_FP(x) ((x)->bp)
83#define PT_REGS_RC(x) ((x)->ax) 83#define PT_REGS_RC(x) ((x)->ax)
84#define PT_REGS_SP(x) ((x)->sp) 84#define PT_REGS_SP(x) ((x)->sp)
85#define PT_REGS_IP(x) ((x)->ip)
85 86
86#elif defined(__s390x__) 87#elif defined(__s390x__)
87 88
@@ -94,6 +95,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
94#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */ 95#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
95#define PT_REGS_RC(x) ((x)->gprs[2]) 96#define PT_REGS_RC(x) ((x)->gprs[2])
96#define PT_REGS_SP(x) ((x)->gprs[15]) 97#define PT_REGS_SP(x) ((x)->gprs[15])
98#define PT_REGS_IP(x) ((x)->ip)
97 99
98#elif defined(__aarch64__) 100#elif defined(__aarch64__)
99 101
@@ -106,6 +108,30 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
106#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */ 108#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
107#define PT_REGS_RC(x) ((x)->regs[0]) 109#define PT_REGS_RC(x) ((x)->regs[0])
108#define PT_REGS_SP(x) ((x)->sp) 110#define PT_REGS_SP(x) ((x)->sp)
111#define PT_REGS_IP(x) ((x)->pc)
112
113#elif defined(__powerpc__)
114
115#define PT_REGS_PARM1(x) ((x)->gpr[3])
116#define PT_REGS_PARM2(x) ((x)->gpr[4])
117#define PT_REGS_PARM3(x) ((x)->gpr[5])
118#define PT_REGS_PARM4(x) ((x)->gpr[6])
119#define PT_REGS_PARM5(x) ((x)->gpr[7])
120#define PT_REGS_RC(x) ((x)->gpr[3])
121#define PT_REGS_SP(x) ((x)->sp)
122#define PT_REGS_IP(x) ((x)->nip)
109 123
110#endif 124#endif
125
126#ifdef __powerpc__
127#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
128#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
129#else
130#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
131 bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
132#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
133 bpf_probe_read(&(ip), sizeof(ip), \
134 (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
135#endif
136
111#endif 137#endif
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 95af56ec5739..3147377e8fd3 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -17,6 +17,7 @@
17#include <linux/bpf.h> 17#include <linux/bpf.h>
18#include <string.h> 18#include <string.h>
19#include <time.h> 19#include <time.h>
20#include <sys/resource.h>
20#include "libbpf.h" 21#include "libbpf.h"
21#include "bpf_load.h" 22#include "bpf_load.h"
22 23
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
index 4b27619d91a4..ce0167d09cdc 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest_kern.c
@@ -34,7 +34,7 @@ struct bpf_map_def SEC("maps") stackmap = {
34#define PROG(foo) \ 34#define PROG(foo) \
35int foo(struct pt_regs *ctx) \ 35int foo(struct pt_regs *ctx) \
36{ \ 36{ \
37 long v = ctx->ip, *val; \ 37 long v = PT_REGS_IP(ctx), *val; \
38\ 38\
39 val = bpf_map_lookup_elem(&my_map, &v); \ 39 val = bpf_map_lookup_elem(&my_map, &v); \
40 bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \ 40 bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index 09c1adc27d42..6d6eefd0d465 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
27 long init_val = 1; 27 long init_val = 1;
28 long *value; 28 long *value;
29 29
30 /* x64/s390x specific: read ip of kfree_skb caller. 30 /* read ip of kfree_skb caller.
31 * non-portable version of __builtin_return_address(0) 31 * non-portable version of __builtin_return_address(0)
32 */ 32 */
33 bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx)); 33 BPF_KPROBE_READ_RET_IP(loc, ctx);
34 34
35 value = bpf_map_lookup_elem(&my_map, &loc); 35 value = bpf_map_lookup_elem(&my_map, &loc);
36 if (value) 36 if (value)
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index ac4671420cf1..6dd8e384de96 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -40,7 +40,7 @@ int bpf_prog2(struct pt_regs *ctx)
40 long ip = 0; 40 long ip = 0;
41 41
42 /* get ip address of kmem_cache_alloc_node() caller */ 42 /* get ip address of kmem_cache_alloc_node() caller */
43 bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); 43 BPF_KRETPROBE_READ_RET_IP(ip, ctx);
44 44
45 struct pair v = { 45 struct pair v = {
46 .val = bpf_ktime_get_ns(), 46 .val = bpf_ktime_get_ns(),
diff --git a/sound/core/timer.c b/sound/core/timer.c
index aa1b15c155d1..6469bedda2f3 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1019,8 +1019,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
1019 njiff += timer->sticks - priv->correction; 1019 njiff += timer->sticks - priv->correction;
1020 priv->correction = 0; 1020 priv->correction = 0;
1021 } 1021 }
1022 priv->last_expires = priv->tlist.expires = njiff; 1022 priv->last_expires = njiff;
1023 add_timer(&priv->tlist); 1023 mod_timer(&priv->tlist, njiff);
1024 return 0; 1024 return 0;
1025} 1025}
1026 1026
@@ -1502,17 +1502,13 @@ static int snd_timer_user_ginfo(struct file *file,
1502 return err; 1502 return err;
1503} 1503}
1504 1504
1505static int snd_timer_user_gparams(struct file *file, 1505static int timer_set_gparams(struct snd_timer_gparams *gparams)
1506 struct snd_timer_gparams __user *_gparams)
1507{ 1506{
1508 struct snd_timer_gparams gparams;
1509 struct snd_timer *t; 1507 struct snd_timer *t;
1510 int err; 1508 int err;
1511 1509
1512 if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
1513 return -EFAULT;
1514 mutex_lock(&register_mutex); 1510 mutex_lock(&register_mutex);
1515 t = snd_timer_find(&gparams.tid); 1511 t = snd_timer_find(&gparams->tid);
1516 if (!t) { 1512 if (!t) {
1517 err = -ENODEV; 1513 err = -ENODEV;
1518 goto _error; 1514 goto _error;
@@ -1525,12 +1521,22 @@ static int snd_timer_user_gparams(struct file *file,
1525 err = -ENOSYS; 1521 err = -ENOSYS;
1526 goto _error; 1522 goto _error;
1527 } 1523 }
1528 err = t->hw.set_period(t, gparams.period_num, gparams.period_den); 1524 err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
1529_error: 1525_error:
1530 mutex_unlock(&register_mutex); 1526 mutex_unlock(&register_mutex);
1531 return err; 1527 return err;
1532} 1528}
1533 1529
1530static int snd_timer_user_gparams(struct file *file,
1531 struct snd_timer_gparams __user *_gparams)
1532{
1533 struct snd_timer_gparams gparams;
1534
1535 if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
1536 return -EFAULT;
1537 return timer_set_gparams(&gparams);
1538}
1539
1534static int snd_timer_user_gstatus(struct file *file, 1540static int snd_timer_user_gstatus(struct file *file,
1535 struct snd_timer_gstatus __user *_gstatus) 1541 struct snd_timer_gstatus __user *_gstatus)
1536{ 1542{
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 2e908225d754..6a437eb66115 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -22,6 +22,19 @@
22 22
23#include <linux/compat.h> 23#include <linux/compat.h>
24 24
25/*
26 * ILP32/LP64 has different size for 'long' type. Additionally, the size
27 * of storage alignment differs depending on architectures. Here, '__packed'
28 * qualifier is used so that the size of this structure is multiple of 4 and
29 * it fits to any architectures with 32 bit storage alignment.
30 */
31struct snd_timer_gparams32 {
32 struct snd_timer_id tid;
33 u32 period_num;
34 u32 period_den;
35 unsigned char reserved[32];
36} __packed;
37
25struct snd_timer_info32 { 38struct snd_timer_info32 {
26 u32 flags; 39 u32 flags;
27 s32 card; 40 s32 card;
@@ -32,6 +45,19 @@ struct snd_timer_info32 {
32 unsigned char reserved[64]; 45 unsigned char reserved[64];
33}; 46};
34 47
48static int snd_timer_user_gparams_compat(struct file *file,
49 struct snd_timer_gparams32 __user *user)
50{
51 struct snd_timer_gparams gparams;
52
53 if (copy_from_user(&gparams.tid, &user->tid, sizeof(gparams.tid)) ||
54 get_user(gparams.period_num, &user->period_num) ||
55 get_user(gparams.period_den, &user->period_den))
56 return -EFAULT;
57
58 return timer_set_gparams(&gparams);
59}
60
35static int snd_timer_user_info_compat(struct file *file, 61static int snd_timer_user_info_compat(struct file *file,
36 struct snd_timer_info32 __user *_info) 62 struct snd_timer_info32 __user *_info)
37{ 63{
@@ -99,6 +125,7 @@ static int snd_timer_user_status_compat(struct file *file,
99 */ 125 */
100 126
101enum { 127enum {
128 SNDRV_TIMER_IOCTL_GPARAMS32 = _IOW('T', 0x04, struct snd_timer_gparams32),
102 SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), 129 SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
103 SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), 130 SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
104#ifdef CONFIG_X86_X32 131#ifdef CONFIG_X86_X32
@@ -114,7 +141,6 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
114 case SNDRV_TIMER_IOCTL_PVERSION: 141 case SNDRV_TIMER_IOCTL_PVERSION:
115 case SNDRV_TIMER_IOCTL_TREAD: 142 case SNDRV_TIMER_IOCTL_TREAD:
116 case SNDRV_TIMER_IOCTL_GINFO: 143 case SNDRV_TIMER_IOCTL_GINFO:
117 case SNDRV_TIMER_IOCTL_GPARAMS:
118 case SNDRV_TIMER_IOCTL_GSTATUS: 144 case SNDRV_TIMER_IOCTL_GSTATUS:
119 case SNDRV_TIMER_IOCTL_SELECT: 145 case SNDRV_TIMER_IOCTL_SELECT:
120 case SNDRV_TIMER_IOCTL_PARAMS: 146 case SNDRV_TIMER_IOCTL_PARAMS:
@@ -128,6 +154,8 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
128 case SNDRV_TIMER_IOCTL_PAUSE_OLD: 154 case SNDRV_TIMER_IOCTL_PAUSE_OLD:
129 case SNDRV_TIMER_IOCTL_NEXT_DEVICE: 155 case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
130 return snd_timer_user_ioctl(file, cmd, (unsigned long)argp); 156 return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
157 case SNDRV_TIMER_IOCTL_GPARAMS32:
158 return snd_timer_user_gparams_compat(file, argp);
131 case SNDRV_TIMER_IOCTL_INFO32: 159 case SNDRV_TIMER_IOCTL_INFO32:
132 return snd_timer_user_info_compat(file, argp); 160 return snd_timer_user_info_compat(file, argp);
133 case SNDRV_TIMER_IOCTL_STATUS32: 161 case SNDRV_TIMER_IOCTL_STATUS32:
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 845d5e5884a4..ec4db3a514fc 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -446,18 +446,12 @@ end:
446 446
447void snd_dice_stream_destroy_duplex(struct snd_dice *dice) 447void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
448{ 448{
449 struct reg_params tx_params, rx_params; 449 unsigned int i;
450
451 snd_dice_transaction_clear_enable(dice);
452 450
453 if (get_register_params(dice, &tx_params, &rx_params) == 0) { 451 for (i = 0; i < MAX_STREAMS; i++) {
454 stop_streams(dice, AMDTP_IN_STREAM, &tx_params); 452 destroy_stream(dice, AMDTP_IN_STREAM, i);
455 stop_streams(dice, AMDTP_OUT_STREAM, &rx_params); 453 destroy_stream(dice, AMDTP_OUT_STREAM, i);
456 } 454 }
457
458 release_resources(dice);
459
460 dice->substreams_counter = 0;
461} 455}
462 456
463void snd_dice_stream_update_duplex(struct snd_dice *dice) 457void snd_dice_stream_update_duplex(struct snd_dice *dice)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2624cfe98884..b680be0e937d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2361,6 +2361,10 @@ static const struct pci_device_id azx_ids[] = {
2361 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2361 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2362 { PCI_DEVICE(0x1002, 0xaae8), 2362 { PCI_DEVICE(0x1002, 0xaae8),
2363 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2363 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2364 { PCI_DEVICE(0x1002, 0xaae0),
2365 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2366 { PCI_DEVICE(0x1002, 0xaaf0),
2367 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2364 /* VIA VT8251/VT8237A */ 2368 /* VIA VT8251/VT8237A */
2365 { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA }, 2369 { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
2366 /* VIA GFX VT7122/VX900 */ 2370 /* VIA GFX VT7122/VX900 */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4f5ca0b9ce27..fefe83f2beab 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4759,6 +4759,7 @@ enum {
4759 ALC255_FIXUP_DELL_SPK_NOISE, 4759 ALC255_FIXUP_DELL_SPK_NOISE,
4760 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 4760 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
4761 ALC280_FIXUP_HP_HEADSET_MIC, 4761 ALC280_FIXUP_HP_HEADSET_MIC,
4762 ALC221_FIXUP_HP_FRONT_MIC,
4762}; 4763};
4763 4764
4764static const struct hda_fixup alc269_fixups[] = { 4765static const struct hda_fixup alc269_fixups[] = {
@@ -5401,6 +5402,13 @@ static const struct hda_fixup alc269_fixups[] = {
5401 .chained = true, 5402 .chained = true,
5402 .chain_id = ALC269_FIXUP_HEADSET_MIC, 5403 .chain_id = ALC269_FIXUP_HEADSET_MIC,
5403 }, 5404 },
5405 [ALC221_FIXUP_HP_FRONT_MIC] = {
5406 .type = HDA_FIXUP_PINS,
5407 .v.pins = (const struct hda_pintbl[]) {
5408 { 0x19, 0x02a19020 }, /* Front Mic */
5409 { }
5410 },
5411 },
5404}; 5412};
5405 5413
5406static const struct snd_pci_quirk alc269_fixup_tbl[] = { 5414static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5506,6 +5514,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5506 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5514 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5507 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5515 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5508 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), 5516 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
5517 SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
5509 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5518 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5510 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5519 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5511 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5520 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6406,6 +6415,7 @@ enum {
6406 ALC668_FIXUP_AUTO_MUTE, 6415 ALC668_FIXUP_AUTO_MUTE,
6407 ALC668_FIXUP_DELL_DISABLE_AAMIX, 6416 ALC668_FIXUP_DELL_DISABLE_AAMIX,
6408 ALC668_FIXUP_DELL_XPS13, 6417 ALC668_FIXUP_DELL_XPS13,
6418 ALC662_FIXUP_ASUS_Nx50,
6409}; 6419};
6410 6420
6411static const struct hda_fixup alc662_fixups[] = { 6421static const struct hda_fixup alc662_fixups[] = {
@@ -6646,6 +6656,12 @@ static const struct hda_fixup alc662_fixups[] = {
6646 .type = HDA_FIXUP_FUNC, 6656 .type = HDA_FIXUP_FUNC,
6647 .v.func = alc_fixup_bass_chmap, 6657 .v.func = alc_fixup_bass_chmap,
6648 }, 6658 },
6659 [ALC662_FIXUP_ASUS_Nx50] = {
6660 .type = HDA_FIXUP_FUNC,
6661 .v.func = alc_fixup_auto_mute_via_amp,
6662 .chained = true,
6663 .chain_id = ALC662_FIXUP_BASS_1A
6664 },
6649}; 6665};
6650 6666
6651static const struct snd_pci_quirk alc662_fixup_tbl[] = { 6667static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6668,8 +6684,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6668 SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 6684 SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
6669 SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 6685 SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
6670 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 6686 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
6671 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A), 6687 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
6672 SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A), 6688 SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
6689 SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
6673 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), 6690 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
6674 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16), 6691 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
6675 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16), 6692 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index d14bf411515b..a452ad7cec40 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -15,7 +15,6 @@ config SND_USB_AUDIO
15 select SND_RAWMIDI 15 select SND_RAWMIDI
16 select SND_PCM 16 select SND_PCM
17 select BITREVERSE 17 select BITREVERSE
18 select SND_USB_AUDIO_USE_MEDIA_CONTROLLER if MEDIA_CONTROLLER && (MEDIA_SUPPORT=y || MEDIA_SUPPORT=SND_USB_AUDIO)
19 help 18 help
20 Say Y here to include support for USB audio and USB MIDI 19 Say Y here to include support for USB audio and USB MIDI
21 devices. 20 devices.
@@ -23,9 +22,6 @@ config SND_USB_AUDIO
23 To compile this driver as a module, choose M here: the module 22 To compile this driver as a module, choose M here: the module
24 will be called snd-usb-audio. 23 will be called snd-usb-audio.
25 24
26config SND_USB_AUDIO_USE_MEDIA_CONTROLLER
27 bool
28
29config SND_USB_UA101 25config SND_USB_UA101
30 tristate "Edirol UA-101/UA-1000 driver" 26 tristate "Edirol UA-101/UA-1000 driver"
31 select SND_PCM 27 select SND_PCM
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 8dca3c407f5a..2d2d122b069f 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -15,8 +15,6 @@ snd-usb-audio-objs := card.o \
15 quirks.o \ 15 quirks.o \
16 stream.o 16 stream.o
17 17
18snd-usb-audio-$(CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER) += media.o
19
20snd-usbmidi-lib-objs := midi.o 18snd-usbmidi-lib-objs := midi.o
21 19
22# Toplevel Module Dependency 20# Toplevel Module Dependency
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 63244bbba8c7..3fc63583a537 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -66,7 +66,6 @@
66#include "format.h" 66#include "format.h"
67#include "power.h" 67#include "power.h"
68#include "stream.h" 68#include "stream.h"
69#include "media.h"
70 69
71MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); 70MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
72MODULE_DESCRIPTION("USB Audio"); 71MODULE_DESCRIPTION("USB Audio");
@@ -612,11 +611,6 @@ static int usb_audio_probe(struct usb_interface *intf,
612 if (err < 0) 611 if (err < 0)
613 goto __error; 612 goto __error;
614 613
615 if (quirk->media_device) {
616 /* don't want to fail when media_snd_device_create() fails */
617 media_snd_device_create(chip, intf);
618 }
619
620 usb_chip[chip->index] = chip; 614 usb_chip[chip->index] = chip;
621 chip->num_interfaces++; 615 chip->num_interfaces++;
622 usb_set_intfdata(intf, chip); 616 usb_set_intfdata(intf, chip);
@@ -673,14 +667,6 @@ static void usb_audio_disconnect(struct usb_interface *intf)
673 list_for_each(p, &chip->midi_list) { 667 list_for_each(p, &chip->midi_list) {
674 snd_usbmidi_disconnect(p); 668 snd_usbmidi_disconnect(p);
675 } 669 }
676 /*
677 * Nice to check quirk && quirk->media_device
678 * need some special handlings. Doesn't look like
679 * we have access to quirk here
680 * Acceses mixer_list
681 */
682 media_snd_device_delete(chip);
683
684 /* release mixer resources */ 670 /* release mixer resources */
685 list_for_each_entry(mixer, &chip->mixer_list, list) { 671 list_for_each_entry(mixer, &chip->mixer_list, list) {
686 snd_usb_mixer_disconnect(mixer); 672 snd_usb_mixer_disconnect(mixer);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 34a0898e2238..71778ca4b26a 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -105,8 +105,6 @@ struct snd_usb_endpoint {
105 struct list_head list; 105 struct list_head list;
106}; 106};
107 107
108struct media_ctl;
109
110struct snd_usb_substream { 108struct snd_usb_substream {
111 struct snd_usb_stream *stream; 109 struct snd_usb_stream *stream;
112 struct usb_device *dev; 110 struct usb_device *dev;
@@ -158,7 +156,6 @@ struct snd_usb_substream {
158 } dsd_dop; 156 } dsd_dop;
159 157
160 bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */ 158 bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
161 struct media_ctl *media_ctl;
162}; 159};
163 160
164struct snd_usb_stream { 161struct snd_usb_stream {
diff --git a/sound/usb/media.c b/sound/usb/media.c
deleted file mode 100644
index 93a50d01490c..000000000000
--- a/sound/usb/media.c
+++ /dev/null
@@ -1,318 +0,0 @@
1/*
2 * media.c - Media Controller specific ALSA driver code
3 *
4 * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
5 * Copyright (c) 2016 Samsung Electronics Co., Ltd.
6 *
7 * This file is released under the GPLv2.
8 */
9
10/*
11 * This file adds Media Controller support to ALSA driver
12 * to use the Media Controller API to share tuner with DVB
13 * and V4L2 drivers that control media device. Media device
14 * is created based on existing quirks framework. Using this
15 * approach, the media controller API usage can be added for
16 * a specific device.
17*/
18
19#include <linux/init.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/slab.h>
23#include <linux/usb.h>
24
25#include <sound/pcm.h>
26#include <sound/core.h>
27
28#include "usbaudio.h"
29#include "card.h"
30#include "mixer.h"
31#include "media.h"
32
33static int media_snd_enable_source(struct media_ctl *mctl)
34{
35 if (mctl && mctl->media_dev->enable_source)
36 return mctl->media_dev->enable_source(&mctl->media_entity,
37 &mctl->media_pipe);
38 return 0;
39}
40
41static void media_snd_disable_source(struct media_ctl *mctl)
42{
43 if (mctl && mctl->media_dev->disable_source)
44 mctl->media_dev->disable_source(&mctl->media_entity);
45}
46
47int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
48 int stream)
49{
50 struct media_device *mdev;
51 struct media_ctl *mctl;
52 struct device *pcm_dev = &pcm->streams[stream].dev;
53 u32 intf_type;
54 int ret = 0;
55 u16 mixer_pad;
56 struct media_entity *entity;
57
58 mdev = subs->stream->chip->media_dev;
59 if (!mdev)
60 return -ENODEV;
61
62 if (subs->media_ctl)
63 return 0;
64
65 /* allocate media_ctl */
66 mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
67 if (!mctl)
68 return -ENOMEM;
69
70 mctl->media_dev = mdev;
71 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
72 intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK;
73 mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK;
74 mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE;
75 mixer_pad = 1;
76 } else {
77 intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE;
78 mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE;
79 mctl->media_pad.flags = MEDIA_PAD_FL_SINK;
80 mixer_pad = 2;
81 }
82 mctl->media_entity.name = pcm->name;
83 media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad);
84 ret = media_device_register_entity(mctl->media_dev,
85 &mctl->media_entity);
86 if (ret)
87 goto free_mctl;
88
89 mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0,
90 MAJOR(pcm_dev->devt),
91 MINOR(pcm_dev->devt));
92 if (!mctl->intf_devnode) {
93 ret = -ENOMEM;
94 goto unregister_entity;
95 }
96 mctl->intf_link = media_create_intf_link(&mctl->media_entity,
97 &mctl->intf_devnode->intf,
98 MEDIA_LNK_FL_ENABLED);
99 if (!mctl->intf_link) {
100 ret = -ENOMEM;
101 goto devnode_remove;
102 }
103
104 /* create link between mixer and audio */
105 media_device_for_each_entity(entity, mdev) {
106 switch (entity->function) {
107 case MEDIA_ENT_F_AUDIO_MIXER:
108 ret = media_create_pad_link(entity, mixer_pad,
109 &mctl->media_entity, 0,
110 MEDIA_LNK_FL_ENABLED);
111 if (ret)
112 goto remove_intf_link;
113 break;
114 }
115 }
116
117 subs->media_ctl = mctl;
118 return 0;
119
120remove_intf_link:
121 media_remove_intf_link(mctl->intf_link);
122devnode_remove:
123 media_devnode_remove(mctl->intf_devnode);
124unregister_entity:
125 media_device_unregister_entity(&mctl->media_entity);
126free_mctl:
127 kfree(mctl);
128 return ret;
129}
130
131void media_snd_stream_delete(struct snd_usb_substream *subs)
132{
133 struct media_ctl *mctl = subs->media_ctl;
134
135 if (mctl && mctl->media_dev) {
136 struct media_device *mdev;
137
138 mdev = subs->stream->chip->media_dev;
139 if (mdev && media_devnode_is_registered(&mdev->devnode)) {
140 media_devnode_remove(mctl->intf_devnode);
141 media_device_unregister_entity(&mctl->media_entity);
142 media_entity_cleanup(&mctl->media_entity);
143 }
144 kfree(mctl);
145 subs->media_ctl = NULL;
146 }
147}
148
149int media_snd_start_pipeline(struct snd_usb_substream *subs)
150{
151 struct media_ctl *mctl = subs->media_ctl;
152
153 if (mctl)
154 return media_snd_enable_source(mctl);
155 return 0;
156}
157
158void media_snd_stop_pipeline(struct snd_usb_substream *subs)
159{
160 struct media_ctl *mctl = subs->media_ctl;
161
162 if (mctl)
163 media_snd_disable_source(mctl);
164}
165
166int media_snd_mixer_init(struct snd_usb_audio *chip)
167{
168 struct device *ctl_dev = &chip->card->ctl_dev;
169 struct media_intf_devnode *ctl_intf;
170 struct usb_mixer_interface *mixer;
171 struct media_device *mdev = chip->media_dev;
172 struct media_mixer_ctl *mctl;
173 u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL;
174 int ret;
175
176 if (!mdev)
177 return -ENODEV;
178
179 ctl_intf = chip->ctl_intf_media_devnode;
180 if (!ctl_intf) {
181 ctl_intf = media_devnode_create(mdev, intf_type, 0,
182 MAJOR(ctl_dev->devt),
183 MINOR(ctl_dev->devt));
184 if (!ctl_intf)
185 return -ENOMEM;
186 chip->ctl_intf_media_devnode = ctl_intf;
187 }
188
189 list_for_each_entry(mixer, &chip->mixer_list, list) {
190
191 if (mixer->media_mixer_ctl)
192 continue;
193
194 /* allocate media_mixer_ctl */
195 mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
196 if (!mctl)
197 return -ENOMEM;
198
199 mctl->media_dev = mdev;
200 mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER;
201 mctl->media_entity.name = chip->card->mixername;
202 mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK;
203 mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE;
204 mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE;
205 media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX,
206 mctl->media_pad);
207 ret = media_device_register_entity(mctl->media_dev,
208 &mctl->media_entity);
209 if (ret) {
210 kfree(mctl);
211 return ret;
212 }
213
214 mctl->intf_link = media_create_intf_link(&mctl->media_entity,
215 &ctl_intf->intf,
216 MEDIA_LNK_FL_ENABLED);
217 if (!mctl->intf_link) {
218 media_device_unregister_entity(&mctl->media_entity);
219 media_entity_cleanup(&mctl->media_entity);
220 kfree(mctl);
221 return -ENOMEM;
222 }
223 mctl->intf_devnode = ctl_intf;
224 mixer->media_mixer_ctl = mctl;
225 }
226 return 0;
227}
228
229static void media_snd_mixer_delete(struct snd_usb_audio *chip)
230{
231 struct usb_mixer_interface *mixer;
232 struct media_device *mdev = chip->media_dev;
233
234 if (!mdev)
235 return;
236
237 list_for_each_entry(mixer, &chip->mixer_list, list) {
238 struct media_mixer_ctl *mctl;
239
240 mctl = mixer->media_mixer_ctl;
241 if (!mixer->media_mixer_ctl)
242 continue;
243
244 if (media_devnode_is_registered(&mdev->devnode)) {
245 media_device_unregister_entity(&mctl->media_entity);
246 media_entity_cleanup(&mctl->media_entity);
247 }
248 kfree(mctl);
249 mixer->media_mixer_ctl = NULL;
250 }
251 if (media_devnode_is_registered(&mdev->devnode))
252 media_devnode_remove(chip->ctl_intf_media_devnode);
253 chip->ctl_intf_media_devnode = NULL;
254}
255
256int media_snd_device_create(struct snd_usb_audio *chip,
257 struct usb_interface *iface)
258{
259 struct media_device *mdev;
260 struct usb_device *usbdev = interface_to_usbdev(iface);
261 int ret;
262
263 mdev = media_device_get_devres(&usbdev->dev);
264 if (!mdev)
265 return -ENOMEM;
266 if (!mdev->dev) {
267 /* register media device */
268 mdev->dev = &usbdev->dev;
269 if (usbdev->product)
270 strlcpy(mdev->model, usbdev->product,
271 sizeof(mdev->model));
272 if (usbdev->serial)
273 strlcpy(mdev->serial, usbdev->serial,
274 sizeof(mdev->serial));
275 strcpy(mdev->bus_info, usbdev->devpath);
276 mdev->hw_revision = le16_to_cpu(usbdev->descriptor.bcdDevice);
277 media_device_init(mdev);
278 }
279 if (!media_devnode_is_registered(&mdev->devnode)) {
280 ret = media_device_register(mdev);
281 if (ret) {
282 dev_err(&usbdev->dev,
283 "Couldn't register media device. Error: %d\n",
284 ret);
285 return ret;
286 }
287 }
288
289 /* save media device - avoid lookups */
290 chip->media_dev = mdev;
291
292 /* Create media entities for mixer and control dev */
293 ret = media_snd_mixer_init(chip);
294 if (ret) {
295 dev_err(&usbdev->dev,
296 "Couldn't create media mixer entities. Error: %d\n",
297 ret);
298
299 /* clear saved media_dev */
300 chip->media_dev = NULL;
301
302 return ret;
303 }
304 return 0;
305}
306
307void media_snd_device_delete(struct snd_usb_audio *chip)
308{
309 struct media_device *mdev = chip->media_dev;
310
311 media_snd_mixer_delete(chip);
312
313 if (mdev) {
314 if (media_devnode_is_registered(&mdev->devnode))
315 media_device_unregister(mdev);
316 chip->media_dev = NULL;
317 }
318}
diff --git a/sound/usb/media.h b/sound/usb/media.h
deleted file mode 100644
index 1dcdcdc5f7aa..000000000000
--- a/sound/usb/media.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * media.h - Media Controller specific ALSA driver code
3 *
4 * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
5 * Copyright (c) 2016 Samsung Electronics Co., Ltd.
6 *
7 * This file is released under the GPLv2.
8 */
9
10/*
11 * This file adds Media Controller support to ALSA driver
12 * to use the Media Controller API to share tuner with DVB
13 * and V4L2 drivers that control media device. Media device
14 * is created based on existing quirks framework. Using this
15 * approach, the media controller API usage can be added for
16 * a specific device.
17*/
18#ifndef __MEDIA_H
19
20#ifdef CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER
21
22#include <media/media-device.h>
23#include <media/media-entity.h>
24#include <sound/asound.h>
25
26struct media_ctl {
27 struct media_device *media_dev;
28 struct media_entity media_entity;
29 struct media_intf_devnode *intf_devnode;
30 struct media_link *intf_link;
31 struct media_pad media_pad;
32 struct media_pipeline media_pipe;
33};
34
35/*
36 * One source pad each for SNDRV_PCM_STREAM_CAPTURE and
37 * SNDRV_PCM_STREAM_PLAYBACK. One for sink pad to link
38 * to AUDIO Source
39*/
40#define MEDIA_MIXER_PAD_MAX (SNDRV_PCM_STREAM_LAST + 2)
41
42struct media_mixer_ctl {
43 struct media_device *media_dev;
44 struct media_entity media_entity;
45 struct media_intf_devnode *intf_devnode;
46 struct media_link *intf_link;
47 struct media_pad media_pad[MEDIA_MIXER_PAD_MAX];
48 struct media_pipeline media_pipe;
49};
50
51int media_snd_device_create(struct snd_usb_audio *chip,
52 struct usb_interface *iface);
53void media_snd_device_delete(struct snd_usb_audio *chip);
54int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
55 int stream);
56void media_snd_stream_delete(struct snd_usb_substream *subs);
57int media_snd_start_pipeline(struct snd_usb_substream *subs);
58void media_snd_stop_pipeline(struct snd_usb_substream *subs);
59#else
60static inline int media_snd_device_create(struct snd_usb_audio *chip,
61 struct usb_interface *iface)
62 { return 0; }
63static inline void media_snd_device_delete(struct snd_usb_audio *chip) { }
64static inline int media_snd_stream_init(struct snd_usb_substream *subs,
65 struct snd_pcm *pcm, int stream)
66 { return 0; }
67static inline void media_snd_stream_delete(struct snd_usb_substream *subs) { }
68static inline int media_snd_start_pipeline(struct snd_usb_substream *subs)
69 { return 0; }
70static inline void media_snd_stop_pipeline(struct snd_usb_substream *subs) { }
71#endif
72#endif /* __MEDIA_H */
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index f3789446ab9c..3417ef347e40 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -3,8 +3,6 @@
3 3
4#include <sound/info.h> 4#include <sound/info.h>
5 5
6struct media_mixer_ctl;
7
8struct usb_mixer_interface { 6struct usb_mixer_interface {
9 struct snd_usb_audio *chip; 7 struct snd_usb_audio *chip;
10 struct usb_host_interface *hostif; 8 struct usb_host_interface *hostif;
@@ -24,7 +22,6 @@ struct usb_mixer_interface {
24 struct urb *rc_urb; 22 struct urb *rc_urb;
25 struct usb_ctrlrequest *rc_setup_packet; 23 struct usb_ctrlrequest *rc_setup_packet;
26 u8 rc_buffer[6]; 24 u8 rc_buffer[6];
27 struct media_mixer_ctl *media_mixer_ctl;
28}; 25};
29 26
30#define MAX_CHANNELS 16 /* max logical channels */ 27#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 0e4e0640c504..44d178ee9177 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -35,7 +35,6 @@
35#include "pcm.h" 35#include "pcm.h"
36#include "clock.h" 36#include "clock.h"
37#include "power.h" 37#include "power.h"
38#include "media.h"
39 38
40#define SUBSTREAM_FLAG_DATA_EP_STARTED 0 39#define SUBSTREAM_FLAG_DATA_EP_STARTED 0
41#define SUBSTREAM_FLAG_SYNC_EP_STARTED 1 40#define SUBSTREAM_FLAG_SYNC_EP_STARTED 1
@@ -718,14 +717,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
718 struct audioformat *fmt; 717 struct audioformat *fmt;
719 int ret; 718 int ret;
720 719
721 ret = media_snd_start_pipeline(subs);
722 if (ret)
723 return ret;
724
725 ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, 720 ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
726 params_buffer_bytes(hw_params)); 721 params_buffer_bytes(hw_params));
727 if (ret < 0) 722 if (ret < 0)
728 goto err_ret; 723 return ret;
729 724
730 subs->pcm_format = params_format(hw_params); 725 subs->pcm_format = params_format(hw_params);
731 subs->period_bytes = params_period_bytes(hw_params); 726 subs->period_bytes = params_period_bytes(hw_params);
@@ -739,27 +734,22 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
739 dev_dbg(&subs->dev->dev, 734 dev_dbg(&subs->dev->dev,
740 "cannot set format: format = %#x, rate = %d, channels = %d\n", 735 "cannot set format: format = %#x, rate = %d, channels = %d\n",
741 subs->pcm_format, subs->cur_rate, subs->channels); 736 subs->pcm_format, subs->cur_rate, subs->channels);
742 ret = -EINVAL; 737 return -EINVAL;
743 goto err_ret;
744 } 738 }
745 739
746 ret = snd_usb_lock_shutdown(subs->stream->chip); 740 ret = snd_usb_lock_shutdown(subs->stream->chip);
747 if (ret < 0) 741 if (ret < 0)
748 goto err_ret; 742 return ret;
749 ret = set_format(subs, fmt); 743 ret = set_format(subs, fmt);
750 snd_usb_unlock_shutdown(subs->stream->chip); 744 snd_usb_unlock_shutdown(subs->stream->chip);
751 if (ret < 0) 745 if (ret < 0)
752 goto err_ret; 746 return ret;
753 747
754 subs->interface = fmt->iface; 748 subs->interface = fmt->iface;
755 subs->altset_idx = fmt->altset_idx; 749 subs->altset_idx = fmt->altset_idx;
756 subs->need_setup_ep = true; 750 subs->need_setup_ep = true;
757 751
758 return 0; 752 return 0;
759
760err_ret:
761 media_snd_stop_pipeline(subs);
762 return ret;
763} 753}
764 754
765/* 755/*
@@ -771,7 +761,6 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
771{ 761{
772 struct snd_usb_substream *subs = substream->runtime->private_data; 762 struct snd_usb_substream *subs = substream->runtime->private_data;
773 763
774 media_snd_stop_pipeline(subs);
775 subs->cur_audiofmt = NULL; 764 subs->cur_audiofmt = NULL;
776 subs->cur_rate = 0; 765 subs->cur_rate = 0;
777 subs->period_bytes = 0; 766 subs->period_bytes = 0;
@@ -1232,7 +1221,6 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
1232 struct snd_usb_stream *as = snd_pcm_substream_chip(substream); 1221 struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
1233 struct snd_pcm_runtime *runtime = substream->runtime; 1222 struct snd_pcm_runtime *runtime = substream->runtime;
1234 struct snd_usb_substream *subs = &as->substream[direction]; 1223 struct snd_usb_substream *subs = &as->substream[direction];
1235 int ret;
1236 1224
1237 subs->interface = -1; 1225 subs->interface = -1;
1238 subs->altset_idx = 0; 1226 subs->altset_idx = 0;
@@ -1246,12 +1234,7 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
1246 subs->dsd_dop.channel = 0; 1234 subs->dsd_dop.channel = 0;
1247 subs->dsd_dop.marker = 1; 1235 subs->dsd_dop.marker = 1;
1248 1236
1249 ret = setup_hw_info(runtime, subs); 1237 return setup_hw_info(runtime, subs);
1250 if (ret == 0)
1251 ret = media_snd_stream_init(subs, as->pcm, direction);
1252 if (ret)
1253 snd_usb_autosuspend(subs->stream->chip);
1254 return ret;
1255} 1238}
1256 1239
1257static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) 1240static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
@@ -1260,7 +1243,6 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
1260 struct snd_usb_substream *subs = &as->substream[direction]; 1243 struct snd_usb_substream *subs = &as->substream[direction];
1261 1244
1262 stop_endpoints(subs, true); 1245 stop_endpoints(subs, true);
1263 media_snd_stop_pipeline(subs);
1264 1246
1265 if (subs->interface >= 0 && 1247 if (subs->interface >= 0 &&
1266 !snd_usb_lock_shutdown(subs->stream->chip)) { 1248 !snd_usb_lock_shutdown(subs->stream->chip)) {
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 9d087b19c70c..c60a776e815d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2886,7 +2886,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2886 .product_name = pname, \ 2886 .product_name = pname, \
2887 .ifnum = QUIRK_ANY_INTERFACE, \ 2887 .ifnum = QUIRK_ANY_INTERFACE, \
2888 .type = QUIRK_AUDIO_ALIGN_TRANSFER, \ 2888 .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
2889 .media_device = 1, \
2890 } \ 2889 } \
2891} 2890}
2892 2891
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index fb62bce2435c..6178bb5d0731 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
150 usb_audio_err(chip, "cannot memdup\n"); 150 usb_audio_err(chip, "cannot memdup\n");
151 return -ENOMEM; 151 return -ENOMEM;
152 } 152 }
153 INIT_LIST_HEAD(&fp->list);
153 if (fp->nr_rates > MAX_NR_RATES) { 154 if (fp->nr_rates > MAX_NR_RATES) {
154 kfree(fp); 155 kfree(fp);
155 return -EINVAL; 156 return -EINVAL;
@@ -193,6 +194,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
193 return 0; 194 return 0;
194 195
195 error: 196 error:
197 list_del(&fp->list); /* unlink for avoiding double-free */
196 kfree(fp); 198 kfree(fp);
197 kfree(rate_table); 199 kfree(rate_table);
198 return err; 200 return err;
@@ -469,6 +471,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
469 fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; 471 fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
470 fp->datainterval = 0; 472 fp->datainterval = 0;
471 fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); 473 fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
474 INIT_LIST_HEAD(&fp->list);
472 475
473 switch (fp->maxpacksize) { 476 switch (fp->maxpacksize) {
474 case 0x120: 477 case 0x120:
@@ -492,6 +495,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
492 ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; 495 ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
493 err = snd_usb_add_audio_stream(chip, stream, fp); 496 err = snd_usb_add_audio_stream(chip, stream, fp);
494 if (err < 0) { 497 if (err < 0) {
498 list_del(&fp->list); /* unlink for avoiding double-free */
495 kfree(fp); 499 kfree(fp);
496 return err; 500 return err;
497 } 501 }
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 51258a15f653..8e9548bc1f1a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -36,7 +36,6 @@
36#include "format.h" 36#include "format.h"
37#include "clock.h" 37#include "clock.h"
38#include "stream.h" 38#include "stream.h"
39#include "media.h"
40 39
41/* 40/*
42 * free a substream 41 * free a substream
@@ -53,7 +52,6 @@ static void free_substream(struct snd_usb_substream *subs)
53 kfree(fp); 52 kfree(fp);
54 } 53 }
55 kfree(subs->rate_list.list); 54 kfree(subs->rate_list.list);
56 media_snd_stream_delete(subs);
57} 55}
58 56
59 57
@@ -316,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
316/* 314/*
317 * add this endpoint to the chip instance. 315 * add this endpoint to the chip instance.
318 * if a stream with the same endpoint already exists, append to it. 316 * if a stream with the same endpoint already exists, append to it.
319 * if not, create a new pcm stream. 317 * if not, create a new pcm stream. note, fp is added to the substream
318 * fmt_list and will be freed on the chip instance release. do not free
319 * fp or do remove it from the substream fmt_list to avoid double-free.
320 */ 320 */
321int snd_usb_add_audio_stream(struct snd_usb_audio *chip, 321int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
322 int stream, 322 int stream,
@@ -677,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
677 * (fp->maxpacksize & 0x7ff); 677 * (fp->maxpacksize & 0x7ff);
678 fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no); 678 fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
679 fp->clock = clock; 679 fp->clock = clock;
680 INIT_LIST_HEAD(&fp->list);
680 681
681 /* some quirks for attributes here */ 682 /* some quirks for attributes here */
682 683
@@ -725,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
725 dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint); 726 dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
726 err = snd_usb_add_audio_stream(chip, stream, fp); 727 err = snd_usb_add_audio_stream(chip, stream, fp);
727 if (err < 0) { 728 if (err < 0) {
729 list_del(&fp->list); /* unlink for avoiding double-free */
728 kfree(fp->rate_table); 730 kfree(fp->rate_table);
729 kfree(fp->chmap); 731 kfree(fp->chmap);
730 kfree(fp); 732 kfree(fp);
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index a161c7c1b126..b665d85555cb 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -30,9 +30,6 @@
30 * 30 *
31 */ 31 */
32 32
33struct media_device;
34struct media_intf_devnode;
35
36struct snd_usb_audio { 33struct snd_usb_audio {
37 int index; 34 int index;
38 struct usb_device *dev; 35 struct usb_device *dev;
@@ -63,8 +60,6 @@ struct snd_usb_audio {
63 bool autoclock; /* from the 'autoclock' module param */ 60 bool autoclock; /* from the 'autoclock' module param */
64 61
65 struct usb_host_interface *ctrl_intf; /* the audio control interface */ 62 struct usb_host_interface *ctrl_intf; /* the audio control interface */
66 struct media_device *media_dev;
67 struct media_intf_devnode *ctl_intf_media_devnode;
68}; 63};
69 64
70#define usb_audio_err(chip, fmt, args...) \ 65#define usb_audio_err(chip, fmt, args...) \
@@ -115,7 +110,6 @@ struct snd_usb_audio_quirk {
115 const char *product_name; 110 const char *product_name;
116 int16_t ifnum; 111 int16_t ifnum;
117 uint16_t type; 112 uint16_t type;
118 bool media_device;
119 const void *data; 113 const void *data;
120}; 114};
121 115
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 5334ad9d39b7..1069d96248c1 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -3,7 +3,7 @@
3make &> /dev/null 3make &> /dev/null
4 4
5for i in `ls tests/*.c`; do 5for i in `ls tests/*.c`; do
6 testname=$(basename -s .c "$i") 6 testname=$(basename "$i" .c)
7 gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null 7 gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
8 echo -ne "$testname... " 8 echo -ne "$testname... "
9 if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then 9 if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
@@ -11,11 +11,13 @@ for i in `ls tests/*.c`; do
11 else 11 else
12 echo "FAILED!" 12 echo "FAILED!"
13 fi 13 fi
14 rm tests/$testname 14 if [ -f "tests/$testname" ]; then
15 rm tests/$testname
16 fi
15done 17done
16 18
17for i in `ls tests/*.c`; do 19for i in `ls tests/*.c`; do
18 testname=$(basename -s .c "$i") 20 testname=$(basename "$i" .c)
19 gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null 21 gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
20 echo -ne "(PRELOAD) $testname... " 22 echo -ne "(PRELOAD) $testname... "
21 if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then 23 if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
@@ -23,5 +25,7 @@ for i in `ls tests/*.c`; do
23 else 25 else
24 echo "FAILED!" 26 echo "FAILED!"
25 fi 27 fi
26 rm tests/$testname 28 if [ -f "tests/$testname" ]; then
29 rm tests/$testname
30 fi
27done 31done
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 2e1fa2357528..8c8c6b9ce915 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -74,6 +74,7 @@ arch/*/include/uapi/asm/unistd*.h
74arch/*/include/uapi/asm/perf_regs.h 74arch/*/include/uapi/asm/perf_regs.h
75arch/*/lib/memcpy*.S 75arch/*/lib/memcpy*.S
76arch/*/lib/memset*.S 76arch/*/lib/memset*.S
77arch/*/include/asm/*features.h
77include/linux/poison.h 78include/linux/poison.h
78include/linux/hw_breakpoint.h 79include/linux/hw_breakpoint.h
79include/uapi/linux/perf_event.h 80include/uapi/linux/perf_event.h
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 6138bdef6e63..f8ccee132867 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -4,6 +4,8 @@
4#include <stdlib.h> 4#include <stdlib.h>
5#include <string.h> 5#include <string.h>
6#include <linux/stringify.h> 6#include <linux/stringify.h>
7#include "header.h"
8#include "util.h"
7 9
8#define mfspr(rn) ({unsigned long rval; \ 10#define mfspr(rn) ({unsigned long rval; \
9 asm volatile("mfspr %0," __stringify(rn) \ 11 asm volatile("mfspr %0," __stringify(rn) \
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg
index 238aa3927c71..f2d9c5fe58e0 100755
--- a/tools/perf/tests/perf-targz-src-pkg
+++ b/tools/perf/tests/perf-targz-src-pkg
@@ -15,7 +15,7 @@ TMP_DEST=$(mktemp -d)
15tar xf ${TARBALL} -C $TMP_DEST 15tar xf ${TARBALL} -C $TMP_DEST
16rm -f ${TARBALL} 16rm -f ${TARBALL}
17cd - > /dev/null 17cd - > /dev/null
18make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1 18make -C $TMP_DEST/perf*/tools/perf > /dev/null
19RC=$? 19RC=$?
20rm -rf ${TMP_DEST} 20rm -rf ${TMP_DEST}
21exit $RC 21exit $RC
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4b9816555946..2a83414159a6 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -337,7 +337,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
337 chain = list_entry(node->val.next, struct callchain_list, list); 337 chain = list_entry(node->val.next, struct callchain_list, list);
338 chain->has_children = has_sibling; 338 chain->has_children = has_sibling;
339 339
340 if (node->val.next != node->val.prev) { 340 if (!list_empty(&node->val)) {
341 chain = list_entry(node->val.prev, struct callchain_list, list); 341 chain = list_entry(node->val.prev, struct callchain_list, list);
342 chain->has_children = !RB_EMPTY_ROOT(&node->rb_root); 342 chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
343 } 343 }
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 52cf479bc593..dad55d04ffdd 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -56,13 +56,22 @@ const char *perf_event__name(unsigned int id)
56 return perf_event__names[id]; 56 return perf_event__names[id];
57} 57}
58 58
59static struct perf_sample synth_sample = { 59static int perf_tool__process_synth_event(struct perf_tool *tool,
60 union perf_event *event,
61 struct machine *machine,
62 perf_event__handler_t process)
63{
64 struct perf_sample synth_sample = {
60 .pid = -1, 65 .pid = -1,
61 .tid = -1, 66 .tid = -1,
62 .time = -1, 67 .time = -1,
63 .stream_id = -1, 68 .stream_id = -1,
64 .cpu = -1, 69 .cpu = -1,
65 .period = 1, 70 .period = 1,
71 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
72 };
73
74 return process(tool, event, &synth_sample, machine);
66}; 75};
67 76
68/* 77/*
@@ -186,7 +195,7 @@ pid_t perf_event__synthesize_comm(struct perf_tool *tool,
186 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) 195 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
187 return -1; 196 return -1;
188 197
189 if (process(tool, event, &synth_sample, machine) != 0) 198 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
190 return -1; 199 return -1;
191 200
192 return tgid; 201 return tgid;
@@ -218,7 +227,7 @@ static int perf_event__synthesize_fork(struct perf_tool *tool,
218 227
219 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 228 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
220 229
221 if (process(tool, event, &synth_sample, machine) != 0) 230 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
222 return -1; 231 return -1;
223 232
224 return 0; 233 return 0;
@@ -344,7 +353,7 @@ out:
344 event->mmap2.pid = tgid; 353 event->mmap2.pid = tgid;
345 event->mmap2.tid = pid; 354 event->mmap2.tid = pid;
346 355
347 if (process(tool, event, &synth_sample, machine) != 0) { 356 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
348 rc = -1; 357 rc = -1;
349 break; 358 break;
350 } 359 }
@@ -402,7 +411,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
402 411
403 memcpy(event->mmap.filename, pos->dso->long_name, 412 memcpy(event->mmap.filename, pos->dso->long_name,
404 pos->dso->long_name_len + 1); 413 pos->dso->long_name_len + 1);
405 if (process(tool, event, &synth_sample, machine) != 0) { 414 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
406 rc = -1; 415 rc = -1;
407 break; 416 break;
408 } 417 }
@@ -472,7 +481,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
472 /* 481 /*
473 * Send the prepared comm event 482 * Send the prepared comm event
474 */ 483 */
475 if (process(tool, comm_event, &synth_sample, machine) != 0) 484 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
476 break; 485 break;
477 486
478 rc = 0; 487 rc = 0;
@@ -701,7 +710,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
701 event->mmap.len = map->end - event->mmap.start; 710 event->mmap.len = map->end - event->mmap.start;
702 event->mmap.pid = machine->pid; 711 event->mmap.pid = machine->pid;
703 712
704 err = process(tool, event, &synth_sample, machine); 713 err = perf_tool__process_synth_event(tool, event, machine, process);
705 free(event); 714 free(event);
706 715
707 return err; 716 return err;
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index cd67e64a0494..2fbeb59c4bdd 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -9,36 +9,32 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
9 9
10#if defined(__arm__) 10#if defined(__arm__)
11#define GEN_ELF_ARCH EM_ARM 11#define GEN_ELF_ARCH EM_ARM
12#define GEN_ELF_ENDIAN ELFDATA2LSB
13#define GEN_ELF_CLASS ELFCLASS32 12#define GEN_ELF_CLASS ELFCLASS32
14#elif defined(__aarch64__) 13#elif defined(__aarch64__)
15#define GEN_ELF_ARCH EM_AARCH64 14#define GEN_ELF_ARCH EM_AARCH64
16#define GEN_ELF_ENDIAN ELFDATA2LSB
17#define GEN_ELF_CLASS ELFCLASS64 15#define GEN_ELF_CLASS ELFCLASS64
18#elif defined(__x86_64__) 16#elif defined(__x86_64__)
19#define GEN_ELF_ARCH EM_X86_64 17#define GEN_ELF_ARCH EM_X86_64
20#define GEN_ELF_ENDIAN ELFDATA2LSB
21#define GEN_ELF_CLASS ELFCLASS64 18#define GEN_ELF_CLASS ELFCLASS64
22#elif defined(__i386__) 19#elif defined(__i386__)
23#define GEN_ELF_ARCH EM_386 20#define GEN_ELF_ARCH EM_386
24#define GEN_ELF_ENDIAN ELFDATA2LSB
25#define GEN_ELF_CLASS ELFCLASS32 21#define GEN_ELF_CLASS ELFCLASS32
26#elif defined(__ppcle__) 22#elif defined(__powerpc64__)
27#define GEN_ELF_ARCH EM_PPC
28#define GEN_ELF_ENDIAN ELFDATA2LSB
29#define GEN_ELF_CLASS ELFCLASS64
30#elif defined(__powerpc__)
31#define GEN_ELF_ARCH EM_PPC64
32#define GEN_ELF_ENDIAN ELFDATA2MSB
33#define GEN_ELF_CLASS ELFCLASS64
34#elif defined(__powerpcle__)
35#define GEN_ELF_ARCH EM_PPC64 23#define GEN_ELF_ARCH EM_PPC64
36#define GEN_ELF_ENDIAN ELFDATA2LSB
37#define GEN_ELF_CLASS ELFCLASS64 24#define GEN_ELF_CLASS ELFCLASS64
25#elif defined(__powerpc__)
26#define GEN_ELF_ARCH EM_PPC
27#define GEN_ELF_CLASS ELFCLASS32
38#else 28#else
39#error "unsupported architecture" 29#error "unsupported architecture"
40#endif 30#endif
41 31
32#if __BYTE_ORDER == __BIG_ENDIAN
33#define GEN_ELF_ENDIAN ELFDATA2MSB
34#else
35#define GEN_ELF_ENDIAN ELFDATA2LSB
36#endif
37
42#if GEN_ELF_CLASS == ELFCLASS64 38#if GEN_ELF_CLASS == ELFCLASS64
43#define elf_newehdr elf64_newehdr 39#define elf_newehdr elf64_newehdr
44#define elf_getshdr elf64_getshdr 40#define elf_getshdr elf64_getshdr
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 6bc3ecd2e7ca..abf1366e2a24 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -279,6 +279,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
279 event.sample.header.misc = PERF_RECORD_MISC_USER; 279 event.sample.header.misc = PERF_RECORD_MISC_USER;
280 event.sample.header.size = sizeof(struct perf_event_header); 280 event.sample.header.size = sizeof(struct perf_event_header);
281 281
282 sample.cpumode = PERF_RECORD_MISC_USER;
282 sample.ip = le64_to_cpu(branch->from); 283 sample.ip = le64_to_cpu(branch->from);
283 sample.pid = btsq->pid; 284 sample.pid = btsq->pid;
284 sample.tid = btsq->tid; 285 sample.tid = btsq->tid;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 05d815851be1..407f11b97c8d 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -979,6 +979,7 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
979 if (!pt->timeless_decoding) 979 if (!pt->timeless_decoding)
980 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 980 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
981 981
982 sample.cpumode = PERF_RECORD_MISC_USER;
982 sample.ip = ptq->state->from_ip; 983 sample.ip = ptq->state->from_ip;
983 sample.pid = ptq->pid; 984 sample.pid = ptq->pid;
984 sample.tid = ptq->tid; 985 sample.tid = ptq->tid;
@@ -1035,6 +1036,7 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1035 if (!pt->timeless_decoding) 1036 if (!pt->timeless_decoding)
1036 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 1037 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1037 1038
1039 sample.cpumode = PERF_RECORD_MISC_USER;
1038 sample.ip = ptq->state->from_ip; 1040 sample.ip = ptq->state->from_ip;
1039 sample.pid = ptq->pid; 1041 sample.pid = ptq->pid;
1040 sample.tid = ptq->tid; 1042 sample.tid = ptq->tid;
@@ -1092,6 +1094,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1092 if (!pt->timeless_decoding) 1094 if (!pt->timeless_decoding)
1093 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 1095 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1094 1096
1097 sample.cpumode = PERF_RECORD_MISC_USER;
1095 sample.ip = ptq->state->from_ip; 1098 sample.ip = ptq->state->from_ip;
1096 sample.pid = ptq->pid; 1099 sample.pid = ptq->pid;
1097 sample.tid = ptq->tid; 1100 sample.tid = ptq->tid;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index cd272cc21e05..ad0c0bb1fbc7 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -417,6 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
417 * use first address as sample address 417 * use first address as sample address
418 */ 418 */
419 memset(&sample, 0, sizeof(sample)); 419 memset(&sample, 0, sizeof(sample));
420 sample.cpumode = PERF_RECORD_MISC_USER;
420 sample.pid = pid; 421 sample.pid = pid;
421 sample.tid = tid; 422 sample.tid = tid;
422 sample.time = id->time; 423 sample.time = id->time;
@@ -505,6 +506,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
505 * use first address as sample address 506 * use first address as sample address
506 */ 507 */
507 memset(&sample, 0, sizeof(sample)); 508 memset(&sample, 0, sizeof(sample));
509 sample.cpumode = PERF_RECORD_MISC_USER;
508 sample.pid = pid; 510 sample.pid = pid;
509 sample.tid = tid; 511 sample.tid = tid;
510 sample.time = id->time; 512 sample.time = id->time;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 20a257a12ea5..acbf7ff2ee6e 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -66,6 +66,8 @@ unsigned int do_slm_cstates;
66unsigned int use_c1_residency_msr; 66unsigned int use_c1_residency_msr;
67unsigned int has_aperf; 67unsigned int has_aperf;
68unsigned int has_epb; 68unsigned int has_epb;
69unsigned int do_irtl_snb;
70unsigned int do_irtl_hsw;
69unsigned int units = 1000000; /* MHz etc */ 71unsigned int units = 1000000; /* MHz etc */
70unsigned int genuine_intel; 72unsigned int genuine_intel;
71unsigned int has_invariant_tsc; 73unsigned int has_invariant_tsc;
@@ -187,7 +189,7 @@ struct pkg_data {
187 unsigned long long pkg_any_core_c0; 189 unsigned long long pkg_any_core_c0;
188 unsigned long long pkg_any_gfxe_c0; 190 unsigned long long pkg_any_gfxe_c0;
189 unsigned long long pkg_both_core_gfxe_c0; 191 unsigned long long pkg_both_core_gfxe_c0;
190 unsigned long long gfx_rc6_ms; 192 long long gfx_rc6_ms;
191 unsigned int gfx_mhz; 193 unsigned int gfx_mhz;
192 unsigned int package_id; 194 unsigned int package_id;
193 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ 195 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
@@ -621,8 +623,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
621 outp += sprintf(outp, "%8d", p->pkg_temp_c); 623 outp += sprintf(outp, "%8d", p->pkg_temp_c);
622 624
623 /* GFXrc6 */ 625 /* GFXrc6 */
624 if (do_gfx_rc6_ms) 626 if (do_gfx_rc6_ms) {
625 outp += sprintf(outp, "%8.2f", 100.0 * p->gfx_rc6_ms / 1000.0 / interval_float); 627 if (p->gfx_rc6_ms == -1) { /* detect counter reset */
628 outp += sprintf(outp, " ***.**");
629 } else {
630 outp += sprintf(outp, "%8.2f",
631 p->gfx_rc6_ms / 10.0 / interval_float);
632 }
633 }
626 634
627 /* GFXMHz */ 635 /* GFXMHz */
628 if (do_gfx_mhz) 636 if (do_gfx_mhz)
@@ -766,7 +774,12 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
766 old->pc10 = new->pc10 - old->pc10; 774 old->pc10 = new->pc10 - old->pc10;
767 old->pkg_temp_c = new->pkg_temp_c; 775 old->pkg_temp_c = new->pkg_temp_c;
768 776
769 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; 777 /* flag an error when rc6 counter resets/wraps */
778 if (old->gfx_rc6_ms > new->gfx_rc6_ms)
779 old->gfx_rc6_ms = -1;
780 else
781 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
782
770 old->gfx_mhz = new->gfx_mhz; 783 old->gfx_mhz = new->gfx_mhz;
771 784
772 DELTA_WRAP32(new->energy_pkg, old->energy_pkg); 785 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -1296,6 +1309,7 @@ int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S,
1296int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1309int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1297int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1310int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1298int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1311int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1312int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1299 1313
1300 1314
1301static void 1315static void
@@ -1579,6 +1593,47 @@ dump_config_tdp(void)
1579 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); 1593 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
1580 fprintf(outf, ")\n"); 1594 fprintf(outf, ")\n");
1581} 1595}
1596
1597unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
1598
1599void print_irtl(void)
1600{
1601 unsigned long long msr;
1602
1603 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
1604 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
1605 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1606 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1607
1608 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
1609 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
1610 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1611 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1612
1613 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
1614 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
1615 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1616 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1617
1618 if (!do_irtl_hsw)
1619 return;
1620
1621 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
1622 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
1623 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1624 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1625
1626 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
1627 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
1628 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1629 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1630
1631 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
1632 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
1633 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
1634 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
1635
1636}
1582void free_fd_percpu(void) 1637void free_fd_percpu(void)
1583{ 1638{
1584 int i; 1639 int i;
@@ -2144,6 +2199,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
2144 case 0x56: /* BDX-DE */ 2199 case 0x56: /* BDX-DE */
2145 case 0x4E: /* SKL */ 2200 case 0x4E: /* SKL */
2146 case 0x5E: /* SKL */ 2201 case 0x5E: /* SKL */
2202 case 0x8E: /* KBL */
2203 case 0x9E: /* KBL */
2204 case 0x55: /* SKX */
2147 pkg_cstate_limits = hsw_pkg_cstate_limits; 2205 pkg_cstate_limits = hsw_pkg_cstate_limits;
2148 break; 2206 break;
2149 case 0x37: /* BYT */ 2207 case 0x37: /* BYT */
@@ -2156,6 +2214,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
2156 case 0x57: /* PHI */ 2214 case 0x57: /* PHI */
2157 pkg_cstate_limits = phi_pkg_cstate_limits; 2215 pkg_cstate_limits = phi_pkg_cstate_limits;
2158 break; 2216 break;
2217 case 0x5C: /* BXT */
2218 pkg_cstate_limits = bxt_pkg_cstate_limits;
2219 break;
2159 default: 2220 default:
2160 return 0; 2221 return 0;
2161 } 2222 }
@@ -2248,6 +2309,9 @@ int has_config_tdp(unsigned int family, unsigned int model)
2248 case 0x56: /* BDX-DE */ 2309 case 0x56: /* BDX-DE */
2249 case 0x4E: /* SKL */ 2310 case 0x4E: /* SKL */
2250 case 0x5E: /* SKL */ 2311 case 0x5E: /* SKL */
2312 case 0x8E: /* KBL */
2313 case 0x9E: /* KBL */
2314 case 0x55: /* SKX */
2251 2315
2252 case 0x57: /* Knights Landing */ 2316 case 0x57: /* Knights Landing */
2253 return 1; 2317 return 1;
@@ -2585,13 +2649,19 @@ void rapl_probe(unsigned int family, unsigned int model)
2585 case 0x47: /* BDW */ 2649 case 0x47: /* BDW */
2586 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; 2650 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
2587 break; 2651 break;
2652 case 0x5C: /* BXT */
2653 do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
2654 break;
2588 case 0x4E: /* SKL */ 2655 case 0x4E: /* SKL */
2589 case 0x5E: /* SKL */ 2656 case 0x5E: /* SKL */
2657 case 0x8E: /* KBL */
2658 case 0x9E: /* KBL */
2590 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2659 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2591 break; 2660 break;
2592 case 0x3F: /* HSX */ 2661 case 0x3F: /* HSX */
2593 case 0x4F: /* BDX */ 2662 case 0x4F: /* BDX */
2594 case 0x56: /* BDX-DE */ 2663 case 0x56: /* BDX-DE */
2664 case 0x55: /* SKX */
2595 case 0x57: /* KNL */ 2665 case 0x57: /* KNL */
2596 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2666 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2597 break; 2667 break;
@@ -2871,6 +2941,10 @@ int has_snb_msrs(unsigned int family, unsigned int model)
2871 case 0x56: /* BDX-DE */ 2941 case 0x56: /* BDX-DE */
2872 case 0x4E: /* SKL */ 2942 case 0x4E: /* SKL */
2873 case 0x5E: /* SKL */ 2943 case 0x5E: /* SKL */
2944 case 0x8E: /* KBL */
2945 case 0x9E: /* KBL */
2946 case 0x55: /* SKX */
2947 case 0x5C: /* BXT */
2874 return 1; 2948 return 1;
2875 } 2949 }
2876 return 0; 2950 return 0;
@@ -2879,9 +2953,14 @@ int has_snb_msrs(unsigned int family, unsigned int model)
2879/* 2953/*
2880 * HSW adds support for additional MSRs: 2954 * HSW adds support for additional MSRs:
2881 * 2955 *
2882 * MSR_PKG_C8_RESIDENCY 0x00000630 2956 * MSR_PKG_C8_RESIDENCY 0x00000630
2883 * MSR_PKG_C9_RESIDENCY 0x00000631 2957 * MSR_PKG_C9_RESIDENCY 0x00000631
2884 * MSR_PKG_C10_RESIDENCY 0x00000632 2958 * MSR_PKG_C10_RESIDENCY 0x00000632
2959 *
2960 * MSR_PKGC8_IRTL 0x00000633
2961 * MSR_PKGC9_IRTL 0x00000634
2962 * MSR_PKGC10_IRTL 0x00000635
2963 *
2885 */ 2964 */
2886int has_hsw_msrs(unsigned int family, unsigned int model) 2965int has_hsw_msrs(unsigned int family, unsigned int model)
2887{ 2966{
@@ -2893,6 +2972,9 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
2893 case 0x3D: /* BDW */ 2972 case 0x3D: /* BDW */
2894 case 0x4E: /* SKL */ 2973 case 0x4E: /* SKL */
2895 case 0x5E: /* SKL */ 2974 case 0x5E: /* SKL */
2975 case 0x8E: /* KBL */
2976 case 0x9E: /* KBL */
2977 case 0x5C: /* BXT */
2896 return 1; 2978 return 1;
2897 } 2979 }
2898 return 0; 2980 return 0;
@@ -2914,6 +2996,8 @@ int has_skl_msrs(unsigned int family, unsigned int model)
2914 switch (model) { 2996 switch (model) {
2915 case 0x4E: /* SKL */ 2997 case 0x4E: /* SKL */
2916 case 0x5E: /* SKL */ 2998 case 0x5E: /* SKL */
2999 case 0x8E: /* KBL */
3000 case 0x9E: /* KBL */
2917 return 1; 3001 return 1;
2918 } 3002 }
2919 return 0; 3003 return 0;
@@ -3187,7 +3271,7 @@ void process_cpuid()
3187 if (debug) 3271 if (debug)
3188 decode_misc_enable_msr(); 3272 decode_misc_enable_msr();
3189 3273
3190 if (max_level >= 0x7) { 3274 if (max_level >= 0x7 && debug) {
3191 int has_sgx; 3275 int has_sgx;
3192 3276
3193 ecx = 0; 3277 ecx = 0;
@@ -3221,7 +3305,15 @@ void process_cpuid()
3221 switch(model) { 3305 switch(model) {
3222 case 0x4E: /* SKL */ 3306 case 0x4E: /* SKL */
3223 case 0x5E: /* SKL */ 3307 case 0x5E: /* SKL */
3224 crystal_hz = 24000000; /* 24 MHz */ 3308 case 0x8E: /* KBL */
3309 case 0x9E: /* KBL */
3310 crystal_hz = 24000000; /* 24.0 MHz */
3311 break;
3312 case 0x55: /* SKX */
3313 crystal_hz = 25000000; /* 25.0 MHz */
3314 break;
3315 case 0x5C: /* BXT */
3316 crystal_hz = 19200000; /* 19.2 MHz */
3225 break; 3317 break;
3226 default: 3318 default:
3227 crystal_hz = 0; 3319 crystal_hz = 0;
@@ -3254,11 +3346,13 @@ void process_cpuid()
3254 3346
3255 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); 3347 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
3256 do_snb_cstates = has_snb_msrs(family, model); 3348 do_snb_cstates = has_snb_msrs(family, model);
3349 do_irtl_snb = has_snb_msrs(family, model);
3257 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); 3350 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
3258 do_pc3 = (pkg_cstate_limit >= PCL__3); 3351 do_pc3 = (pkg_cstate_limit >= PCL__3);
3259 do_pc6 = (pkg_cstate_limit >= PCL__6); 3352 do_pc6 = (pkg_cstate_limit >= PCL__6);
3260 do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); 3353 do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
3261 do_c8_c9_c10 = has_hsw_msrs(family, model); 3354 do_c8_c9_c10 = has_hsw_msrs(family, model);
3355 do_irtl_hsw = has_hsw_msrs(family, model);
3262 do_skl_residency = has_skl_msrs(family, model); 3356 do_skl_residency = has_skl_msrs(family, model);
3263 do_slm_cstates = is_slm(family, model); 3357 do_slm_cstates = is_slm(family, model);
3264 do_knl_cstates = is_knl(family, model); 3358 do_knl_cstates = is_knl(family, model);
@@ -3564,6 +3658,9 @@ void turbostat_init()
3564 3658
3565 if (debug) 3659 if (debug)
3566 for_all_cpus(print_thermal, ODD_COUNTERS); 3660 for_all_cpus(print_thermal, ODD_COUNTERS);
3661
3662 if (debug && do_irtl_snb)
3663 print_irtl();
3567} 3664}
3568 3665
3569int fork_it(char **argv) 3666int fork_it(char **argv)
@@ -3629,7 +3726,7 @@ int get_and_dump_counters(void)
3629} 3726}
3630 3727
3631void print_version() { 3728void print_version() {
3632 fprintf(outf, "turbostat version 4.11 27 Feb 2016" 3729 fprintf(outf, "turbostat version 4.12 5 Apr 2016"
3633 " - Len Brown <lenb@kernel.org>\n"); 3730 " - Len Brown <lenb@kernel.org>\n");
3634} 3731}
3635 3732
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index b9453b838162..150829dd7998 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1497,15 +1497,15 @@ TEST_F(TRACE_syscall, syscall_dropped)
1497#define SECCOMP_SET_MODE_FILTER 1 1497#define SECCOMP_SET_MODE_FILTER 1
1498#endif 1498#endif
1499 1499
1500#ifndef SECCOMP_FLAG_FILTER_TSYNC 1500#ifndef SECCOMP_FILTER_FLAG_TSYNC
1501#define SECCOMP_FLAG_FILTER_TSYNC 1 1501#define SECCOMP_FILTER_FLAG_TSYNC 1
1502#endif 1502#endif
1503 1503
1504#ifndef seccomp 1504#ifndef seccomp
1505int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter) 1505int seccomp(unsigned int op, unsigned int flags, void *args)
1506{ 1506{
1507 errno = 0; 1507 errno = 0;
1508 return syscall(__NR_seccomp, op, flags, filter); 1508 return syscall(__NR_seccomp, op, flags, args);
1509} 1509}
1510#endif 1510#endif
1511 1511
@@ -1613,7 +1613,7 @@ TEST(TSYNC_first)
1613 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 1613 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1614 } 1614 }
1615 1615
1616 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1616 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1617 &prog); 1617 &prog);
1618 ASSERT_NE(ENOSYS, errno) { 1618 ASSERT_NE(ENOSYS, errno) {
1619 TH_LOG("Kernel does not support seccomp syscall!"); 1619 TH_LOG("Kernel does not support seccomp syscall!");
@@ -1831,7 +1831,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
1831 self->sibling_count++; 1831 self->sibling_count++;
1832 } 1832 }
1833 1833
1834 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1834 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1835 &self->apply_prog); 1835 &self->apply_prog);
1836 ASSERT_EQ(0, ret) { 1836 ASSERT_EQ(0, ret) {
1837 TH_LOG("Could install filter on all threads!"); 1837 TH_LOG("Could install filter on all threads!");
@@ -1892,7 +1892,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
1892 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 1892 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1893 } 1893 }
1894 1894
1895 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1895 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1896 &self->apply_prog); 1896 &self->apply_prog);
1897 ASSERT_NE(ENOSYS, errno) { 1897 ASSERT_NE(ENOSYS, errno) {
1898 TH_LOG("Kernel does not support seccomp syscall!"); 1898 TH_LOG("Kernel does not support seccomp syscall!");
@@ -1940,7 +1940,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
1940 self->sibling_count++; 1940 self->sibling_count++;
1941 } 1941 }
1942 1942
1943 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1943 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1944 &self->apply_prog); 1944 &self->apply_prog);
1945 ASSERT_EQ(self->sibling[0].system_tid, ret) { 1945 ASSERT_EQ(self->sibling[0].system_tid, ret) {
1946 TH_LOG("Did not fail on diverged sibling."); 1946 TH_LOG("Did not fail on diverged sibling.");
@@ -1992,7 +1992,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
1992 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 1992 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1993 } 1993 }
1994 1994
1995 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1995 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1996 &self->apply_prog); 1996 &self->apply_prog);
1997 ASSERT_EQ(ret, self->sibling[0].system_tid) { 1997 ASSERT_EQ(ret, self->sibling[0].system_tid) {
1998 TH_LOG("Did not fail on diverged sibling."); 1998 TH_LOG("Did not fail on diverged sibling.");
@@ -2021,7 +2021,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
2021 /* Switch to the remaining sibling */ 2021 /* Switch to the remaining sibling */
2022 sib = !sib; 2022 sib = !sib;
2023 2023
2024 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 2024 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2025 &self->apply_prog); 2025 &self->apply_prog);
2026 ASSERT_EQ(0, ret) { 2026 ASSERT_EQ(0, ret) {
2027 TH_LOG("Expected the remaining sibling to sync"); 2027 TH_LOG("Expected the remaining sibling to sync");
@@ -2044,7 +2044,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
2044 while (!kill(self->sibling[sib].system_tid, 0)) 2044 while (!kill(self->sibling[sib].system_tid, 0))
2045 sleep(0.1); 2045 sleep(0.1);
2046 2046
2047 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 2047 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2048 &self->apply_prog); 2048 &self->apply_prog);
2049 ASSERT_EQ(0, ret); /* just us chickens */ 2049 ASSERT_EQ(0, ret); /* just us chickens */
2050} 2050}