aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-05-08 21:17:56 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-05-08 21:17:56 -0400
commitbf5f89463f5b3109a72ed13ca62b57e90213387d (patch)
treef9f288a341dd86efa996f7a08fb425eae34eb446
parent2d3e4866dea96b0506395b47bfefb234f2088dac (diff)
parent4d2b5bcab53f1c76a86279339561c9a36109a93b (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton: - the rest of MM - various misc things - procfs updates - lib/ updates - checkpatch updates - kdump/kexec updates - add kvmalloc helpers, use them - time helper updates for Y2038 issues. We're almost ready to remove current_fs_time() but that awaits a btrfs merge. - add tracepoints to DAX * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits) drivers/staging/ccree/ssi_hash.c: fix build with gcc-4.4.4 selftests/vm: add a test for virtual address range mapping dax: add tracepoint to dax_insert_mapping() dax: add tracepoint to dax_writeback_one() dax: add tracepoints to dax_writeback_mapping_range() dax: add tracepoints to dax_load_hole() dax: add tracepoints to dax_pfn_mkwrite() dax: add tracepoints to dax_iomap_pte_fault() mtd: nand: nandsim: convert to memalloc_noreclaim_*() treewide: convert PF_MEMALLOC manipulations to new helpers mm: introduce memalloc_noreclaim_{save,restore} mm: prevent potential recursive reclaim due to clearing PF_MEMALLOC mm/huge_memory.c: deposit a pgtable for DAX PMD faults when required mm/huge_memory.c: use zap_deposited_table() more time: delete CURRENT_TIME_SEC and CURRENT_TIME gfs2: replace CURRENT_TIME with current_time apparmorfs: replace CURRENT_TIME with current_time() lustre: replace CURRENT_TIME macro fs: ubifs: replace CURRENT_TIME_SEC with current_time fs: ufs: use ktime_get_real_ts64() for birthtime ...
-rw-r--r--Documentation/filesystems/vfs.txt3
-rw-r--r--Documentation/powerpc/firmware-assisted-dump.txt23
-rw-r--r--Documentation/vm/transhuge.txt10
-rw-r--r--Makefile8
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/arc/kernel/unwind.c2
-rw-r--r--arch/arm/include/asm/cacheflush.h20
-rw-r--r--arch/arm/include/asm/set_memory.h32
-rw-r--r--arch/arm/kernel/ftrace.c1
-rw-r--r--arch/arm/kernel/kgdb.c2
-rw-r--r--arch/arm/kernel/machine_kexec.c1
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c4
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c2
-rw-r--r--arch/arm/mm/pageattr.c1
-rw-r--r--arch/arm/net/bpf_jit_32.c1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/cacheflush.h4
-rw-r--r--arch/arm64/mm/pageattr.c1
-rw-r--r--arch/arm64/net/bpf_jit_comp.c1
-rw-r--r--arch/ia64/kernel/crash.c22
-rw-r--r--arch/m68k/ifpsp060/src/ilsp.S2
-rw-r--r--arch/m68k/ifpsp060/src/isp.S2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c2
-rw-r--r--arch/mips/dec/prom/init.c6
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-rgmii.h2
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/mips/kernel/traps.c4
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/powerpc/Kconfig10
-rw-r--r--arch/powerpc/include/asm/fadump.h2
-rw-r--r--arch/powerpc/kernel/crash.c2
-rw-r--r--arch/powerpc/kernel/fadump.c57
-rw-r--r--arch/powerpc/kernel/setup-common.c5
-rw-r--r--arch/powerpc/mm/icswx.c2
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/set_memory.h (renamed from arch/s390/include/asm/cacheflush.h)9
-rw-r--r--arch/s390/kernel/ftrace.c1
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c10
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/pageattr.c1
-rw-r--r--arch/s390/mm/vmem.c1
-rw-r--r--arch/s390/net/bpf_jit_comp.c1
-rw-r--r--arch/x86/include/asm/cacheflush.h85
-rw-r--r--arch/x86/include/asm/set_memory.h87
-rw-r--r--arch/x86/kernel/amd_gart_64.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/ftrace.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c1
-rw-r--r--arch/x86/kernel/kprobes/opt.c1
-rw-r--r--arch/x86/kernel/machine_kexec_32.c2
-rw-r--r--arch/x86/kernel/machine_kexec_64.c1
-rw-r--r--arch/x86/kernel/module.c2
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/page_track.c4
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/init_32.c2
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/pageattr.c1
-rw-r--r--arch/x86/net/bpf_jit_comp.c1
-rw-r--r--arch/x86/pci/pcbios.c2
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/realmode/init.c2
-rw-r--r--certs/blacklist.c2
-rw-r--r--crypto/lzo.c4
-rw-r--r--drivers/acpi/apei/erst.c8
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/nbd.c7
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/char/agp/amd-k7-agp.c1
-rw-r--r--drivers/char/agp/ati-agp.c1
-rw-r--r--drivers/char/agp/generic.c12
-rw-r--r--drivers/char/agp/intel-gtt.c1
-rw-r--r--drivers/char/agp/sworks-agp.c1
-rw-r--r--drivers/char/dsp56k.c2
-rw-r--r--drivers/clk/qcom/common.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c3
-rw-r--r--drivers/cpufreq/sti-cpufreq.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c4
-rw-r--r--drivers/gpu/drm/gma500/gtt.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/hwtracing/intel_th/msu.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c2
-rw-r--r--drivers/leds/leds-lp5521.c2
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/leds/leds-lp5562.c2
-rw-r--r--drivers/md/bcache/super.c8
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-ioctl.c13
-rw-r--r--drivers/md/dm-stats.c7
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h2
-rw-r--r--drivers/misc/c2port/c2port-duramar2150.c4
-rw-r--r--drivers/misc/sram-exec.c3
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mtd/nand/nandsim.c29
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h4
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c12
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c9
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c8
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/hippi/rrunner.c3
-rw-r--r--drivers/nvdimm/dimm_devs.c5
-rw-r--r--drivers/scsi/isci/registers.h4
-rw-r--r--drivers/scsi/iscsi_tcp.c7
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h2
-rw-r--r--drivers/staging/ccree/ssi_hash.c236
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c11
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c5
-rw-r--r--drivers/staging/most/mostcore/core.c2
-rw-r--r--drivers/tty/n_hdlc.c10
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/vhost.c15
-rw-r--r--drivers/vhost/vsock.c9
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c7
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/xen/evtchn.c14
-rw-r--r--firmware/Makefile3
-rw-r--r--fs/btrfs/ctree.c9
-rw-r--r--fs/btrfs/free-space-tree.c3
-rw-r--r--fs/btrfs/ioctl.c9
-rw-r--r--fs/btrfs/send.c27
-rw-r--r--fs/buffer.c13
-rw-r--r--fs/ceph/file.c9
-rw-r--r--fs/ceph/mds_client.c4
-rw-r--r--fs/cifs/cifsencrypt.c4
-rw-r--r--fs/cifs/cifssmb.c10
-rw-r--r--fs/cifs/inode.c28
-rw-r--r--fs/dax.c48
-rw-r--r--fs/exofs/dir.c3
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/f2fs/f2fs.h20
-rw-r--r--fs/f2fs/file.c4
-rw-r--r--fs/f2fs/node.c6
-rw-r--r--fs/f2fs/segment.c16
-rw-r--r--fs/f2fs/segment.h5
-rw-r--r--fs/file.c2
-rw-r--r--fs/gfs2/bmap.c4
-rw-r--r--fs/hfs/extent.c4
-rw-r--r--fs/hfsplus/extents.c5
-rw-r--r--fs/inode.c2
-rw-r--r--fs/iomap.c13
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nsfs.c4
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/namespaces.c1
-rw-r--r--fs/reiserfs/item_ops.c24
-rw-r--r--fs/select.c5
-rw-r--r--fs/seq_file.c16
-rw-r--r--fs/ubifs/dir.c12
-rw-r--r--fs/ubifs/file.c12
-rw-r--r--fs/ubifs/ioctl.c2
-rw-r--r--fs/ubifs/misc.h10
-rw-r--r--fs/ubifs/sb.c14
-rw-r--r--fs/ubifs/xattr.c6
-rw-r--r--fs/ufs/ialloc.c6
-rw-r--r--fs/xattr.c27
-rw-r--r--fs/xfs/kmem.c2
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--include/asm-generic/set_memory.h12
-rw-r--r--include/drm/drm_mem_util.h9
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/cpumask.h4
-rw-r--r--include/linux/crash_core.h69
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/fs.h5
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/ipc.h7
-rw-r--r--include/linux/jiffies.h11
-rw-r--r--include/linux/kexec.h57
-rw-r--r--include/linux/kref.h6
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mm.h22
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/page-isolation.h5
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/sched/mm.h12
-rw-r--r--include/linux/sem.h3
-rw-r--r--include/linux/time.h3
-rw-r--r--include/linux/uaccess.h1
-rw-r--r--include/linux/vmalloc.h20
-rw-r--r--include/trace/events/fs_dax.h130
-rw-r--r--include/uapi/linux/ipmi.h2
-rw-r--r--init/do_mounts.h22
-rw-r--r--init/initramfs.c14
-rw-r--r--ipc/shm.c16
-rw-r--r--ipc/util.c7
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/bpf/core.c9
-rw-r--r--kernel/bpf/syscall.c3
-rw-r--r--kernel/crash_core.c439
-rw-r--r--kernel/fork.c25
-rw-r--r--kernel/groups.c2
-rw-r--r--kernel/hung_task.c8
-rw-r--r--kernel/kcov.c9
-rw-r--r--kernel/kexec_core.c431
-rw-r--r--kernel/ksysfs.c8
-rw-r--r--kernel/module.c5
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/pid_namespace.c34
-rw-r--r--kernel/power/snapshot.c3
-rw-r--r--kernel/printk/printk.c6
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/taskstats.c14
-rw-r--r--kernel/trace/trace_entries.h6
-rw-r--r--kernel/trace/trace_hwlat.c14
-rw-r--r--kernel/trace/trace_output.c9
-rw-r--r--lib/Kconfig.debug14
-rw-r--r--lib/Makefile1
-rw-r--r--lib/fault-inject.c2
-rw-r--r--lib/iov_iter.c5
-rw-r--r--lib/list_sort.c149
-rw-r--r--lib/rhashtable.c13
-rw-r--r--lib/test_list_sort.c150
-rw-r--r--lib/test_sort.c11
-rw-r--r--lib/vsprintf.c3
-rw-r--r--lib/zlib_inflate/inftrees.c2
-rw-r--r--mm/compaction.c83
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/frame_vector.c5
-rw-r--r--mm/huge_memory.c28
-rw-r--r--mm/internal.h12
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/nommu.c8
-rw-r--r--mm/page_alloc.c162
-rw-r--r--mm/page_isolation.c5
-rw-r--r--mm/swap_slots.c19
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/swapfile.c10
-rw-r--r--mm/util.c57
-rw-r--r--mm/vmalloc.c33
-rw-r--r--mm/vmscan.c17
-rw-r--r--net/ceph/ceph_common.c2
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/core/dev.c31
-rw-r--r--net/core/sock.c7
-rw-r--r--net/decnet/af_decnet.c3
-rw-r--r--net/ipv4/inet_hashtables.c6
-rw-r--r--net/ipv4/tcp_metrics.c5
-rw-r--r--net/ipv6/ila/ila_xlat.c8
-rw-r--r--net/mpls/af_mpls.c5
-rw-r--r--net/netfilter/x_tables.c24
-rw-r--r--net/netfilter/xt_recent.c5
-rw-r--r--net/sched/sch_choke.c5
-rw-r--r--net/sched/sch_fq.c12
-rw-r--r--net/sched/sch_fq_codel.c26
-rw-r--r--net/sched/sch_hhf.c33
-rw-r--r--net/sched/sch_netem.c6
-rw-r--r--net/sched/sch_sfq.c6
-rwxr-xr-xscripts/checkpatch.pl165
-rw-r--r--scripts/spelling.txt8
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/apparmor/include/lib.h11
-rw-r--r--security/apparmor/lib.c30
-rw-r--r--security/apparmor/match.c2
-rw-r--r--security/apparmor/policy_unpack.c2
-rw-r--r--security/keys/keyctl.c22
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/intel8x0.c4
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/x86/intel_hdmi_audio.c2
-rw-r--r--tools/testing/selftests/vm/Makefile1
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests11
-rw-r--r--tools/testing/selftests/vm/virtual_address_range.c122
-rw-r--r--usr/Kconfig10
-rw-r--r--virt/kvm/kvm_main.c18
323 files changed, 2448 insertions, 2106 deletions
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 94dd27ef4a76..f42b90687d40 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -694,8 +694,7 @@ struct address_space_operations {
694 694
695 write_end: After a successful write_begin, and data copy, write_end must 695 write_end: After a successful write_begin, and data copy, write_end must
696 be called. len is the original len passed to write_begin, and copied 696 be called. len is the original len passed to write_begin, and copied
697 is the amount that was able to be copied (copied == len is always true 697 is the amount that was able to be copied.
698 if write_begin was called with the AOP_FLAG_UNINTERRUPTIBLE flag).
699 698
700 The filesystem must take care of unlocking the page and releasing it 699 The filesystem must take care of unlocking the page and releasing it
701 refcount, and updating i_size. 700 refcount, and updating i_size.
diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.txt
index 19b1e3d09a19..9cabaf8a207e 100644
--- a/Documentation/powerpc/firmware-assisted-dump.txt
+++ b/Documentation/powerpc/firmware-assisted-dump.txt
@@ -55,10 +55,14 @@ as follows:
55 booted with restricted memory. By default, the boot memory 55 booted with restricted memory. By default, the boot memory
56 size will be the larger of 5% of system RAM or 256MB. 56 size will be the larger of 5% of system RAM or 256MB.
57 Alternatively, user can also specify boot memory size 57 Alternatively, user can also specify boot memory size
58 through boot parameter 'fadump_reserve_mem=' which will 58 through boot parameter 'crashkernel=' which will override
59 override the default calculated size. Use this option 59 the default calculated size. Use this option if default
60 if default boot memory size is not sufficient for second 60 boot memory size is not sufficient for second kernel to
61 kernel to boot successfully. 61 boot successfully. For syntax of crashkernel= parameter,
62 refer to Documentation/kdump/kdump.txt. If any offset is
63 provided in crashkernel= parameter, it will be ignored
64 as fadump reserves memory at end of RAM for boot memory
65 dump preservation in case of a crash.
62 66
63-- After the low memory (boot memory) area has been saved, the 67-- After the low memory (boot memory) area has been saved, the
64 firmware will reset PCI and other hardware state. It will 68 firmware will reset PCI and other hardware state. It will
@@ -158,13 +162,16 @@ How to enable firmware-assisted dump (fadump):
158 162
1591. Set config option CONFIG_FA_DUMP=y and build kernel. 1631. Set config option CONFIG_FA_DUMP=y and build kernel.
1602. Boot into linux kernel with 'fadump=on' kernel cmdline option. 1642. Boot into linux kernel with 'fadump=on' kernel cmdline option.
1613. Optionally, user can also set 'fadump_reserve_mem=' kernel cmdline 1653. Optionally, user can also set 'crashkernel=' kernel cmdline
162 to specify size of the memory to reserve for boot memory dump 166 to specify size of the memory to reserve for boot memory dump
163 preservation. 167 preservation.
164 168
165NOTE: If firmware-assisted dump fails to reserve memory then it will 169NOTE: 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead
166 fallback to existing kdump mechanism if 'crashkernel=' option 170 use 'crashkernel=' to specify size of the memory to reserve
167 is set at kernel cmdline. 171 for boot memory dump preservation.
172 2. If firmware-assisted dump fails to reserve memory then it
173 will fallback to existing kdump mechanism if 'crashkernel='
174 option is set at kernel cmdline.
168 175
169Sysfs/debugfs files: 176Sysfs/debugfs files:
170------------ 177------------
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index cd28d5ee5273..4dde03b44ad1 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -266,7 +266,7 @@ for each mapping.
266 266
267The number of file transparent huge pages mapped to userspace is available 267The number of file transparent huge pages mapped to userspace is available
268by reading ShmemPmdMapped and ShmemHugePages fields in /proc/meminfo. 268by reading ShmemPmdMapped and ShmemHugePages fields in /proc/meminfo.
269To identify what applications are mapping file transparent huge pages, it 269To identify what applications are mapping file transparent huge pages, it
270is necessary to read /proc/PID/smaps and count the FileHugeMapped fields 270is necessary to read /proc/PID/smaps and count the FileHugeMapped fields
271for each mapping. 271for each mapping.
272 272
@@ -292,7 +292,7 @@ thp_collapse_alloc_failed is incremented if khugepaged found a range
292 the allocation. 292 the allocation.
293 293
294thp_file_alloc is incremented every time a file huge page is successfully 294thp_file_alloc is incremented every time a file huge page is successfully
295i allocated. 295 allocated.
296 296
297thp_file_mapped is incremented every time a file huge page is mapped into 297thp_file_mapped is incremented every time a file huge page is mapped into
298 user address space. 298 user address space.
@@ -501,7 +501,7 @@ scanner can get reference to a page is get_page_unless_zero().
501 501
502All tail pages have zero ->_refcount until atomic_add(). This prevents the 502All tail pages have zero ->_refcount until atomic_add(). This prevents the
503scanner from getting a reference to the tail page up to that point. After the 503scanner from getting a reference to the tail page up to that point. After the
504atomic_add() we don't care about the ->_refcount value. We already known how 504atomic_add() we don't care about the ->_refcount value. We already known how
505many references should be uncharged from the head page. 505many references should be uncharged from the head page.
506 506
507For head page get_page_unless_zero() will succeed and we don't mind. It's 507For head page get_page_unless_zero() will succeed and we don't mind. It's
@@ -519,8 +519,8 @@ comes. Splitting will free up unused subpages.
519 519
520Splitting the page right away is not an option due to locking context in 520Splitting the page right away is not an option due to locking context in
521the place where we can detect partial unmap. It's also might be 521the place where we can detect partial unmap. It's also might be
522counterproductive since in many cases partial unmap unmap happens during 522counterproductive since in many cases partial unmap happens during exit(2) if
523exit(2) if an THP crosses VMA boundary. 523a THP crosses a VMA boundary.
524 524
525Function deferred_split_huge_page() is used to queue page for splitting. 525Function deferred_split_huge_page() is used to queue page for splitting.
526The splitting itself will happen when we get memory pressure via shrinker 526The splitting itself will happen when we get memory pressure via shrinker
diff --git a/Makefile b/Makefile
index 43534cca1de9..220121fdca4d 100644
--- a/Makefile
+++ b/Makefile
@@ -1374,7 +1374,7 @@ help:
1374 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \ 1374 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
1375 echo ' (default: $(INSTALL_HDR_PATH))'; \ 1375 echo ' (default: $(INSTALL_HDR_PATH))'; \
1376 echo '' 1376 echo ''
1377 @echo 'Static analysers' 1377 @echo 'Static analysers:'
1378 @echo ' checkstack - Generate a list of stack hogs' 1378 @echo ' checkstack - Generate a list of stack hogs'
1379 @echo ' namespacecheck - Name space analysis on compiled kernel' 1379 @echo ' namespacecheck - Name space analysis on compiled kernel'
1380 @echo ' versioncheck - Sanity check on version.h usage' 1380 @echo ' versioncheck - Sanity check on version.h usage'
@@ -1384,7 +1384,7 @@ help:
1384 @echo ' headerdep - Detect inclusion cycles in headers' 1384 @echo ' headerdep - Detect inclusion cycles in headers'
1385 @$(MAKE) -f $(srctree)/scripts/Makefile.help checker-help 1385 @$(MAKE) -f $(srctree)/scripts/Makefile.help checker-help
1386 @echo '' 1386 @echo ''
1387 @echo 'Kernel selftest' 1387 @echo 'Kernel selftest:'
1388 @echo ' kselftest - Build and run kernel selftest (run as root)' 1388 @echo ' kselftest - Build and run kernel selftest (run as root)'
1389 @echo ' Build, install, and boot kernel before' 1389 @echo ' Build, install, and boot kernel before'
1390 @echo ' running kselftest on it' 1390 @echo ' running kselftest on it'
@@ -1392,6 +1392,10 @@ help:
1392 @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed' 1392 @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed'
1393 @echo ' .config.' 1393 @echo ' .config.'
1394 @echo '' 1394 @echo ''
1395 @echo 'Userspace tools targets:'
1396 @echo ' use "make tools/help"'
1397 @echo ' or "cd tools; make help"'
1398 @echo ''
1395 @echo 'Kernel packaging:' 1399 @echo 'Kernel packaging:'
1396 @$(MAKE) $(build)=$(package-dir) help 1400 @$(MAKE) $(build)=$(package-dir) help
1397 @echo '' 1401 @echo ''
diff --git a/arch/Kconfig b/arch/Kconfig
index 640999412d11..dcbd462b68b1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -2,7 +2,11 @@
2# General architecture dependent options 2# General architecture dependent options
3# 3#
4 4
5config CRASH_CORE
6 bool
7
5config KEXEC_CORE 8config KEXEC_CORE
9 select CRASH_CORE
6 bool 10 bool
7 11
8config HAVE_IMA_KEXEC 12config HAVE_IMA_KEXEC
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index b6e4f7a7419b..333daab7def0 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -845,7 +845,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
845 * state->dataAlign; 845 * state->dataAlign;
846 break; 846 break;
847 case DW_CFA_def_cfa_register: 847 case DW_CFA_def_cfa_register:
848 unw_debug("cfa_def_cfa_regsiter: "); 848 unw_debug("cfa_def_cfa_register: ");
849 state->cfa.reg = get_uleb128(&ptr.p8, end); 849 state->cfa.reg = get_uleb128(&ptr.p8, end);
850 break; 850 break;
851 /*todo case DW_CFA_def_cfa_expression: */ 851 /*todo case DW_CFA_def_cfa_expression: */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 02454fa15d2c..d69bebf697e7 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -478,26 +478,6 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
478 : : : "r0","r1","r2","r3","r4","r5","r6","r7", \ 478 : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
479 "r9","r10","lr","memory" ) 479 "r9","r10","lr","memory" )
480 480
481#ifdef CONFIG_MMU
482int set_memory_ro(unsigned long addr, int numpages);
483int set_memory_rw(unsigned long addr, int numpages);
484int set_memory_x(unsigned long addr, int numpages);
485int set_memory_nx(unsigned long addr, int numpages);
486#else
487static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
488static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
489static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
490static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
491#endif
492
493#ifdef CONFIG_STRICT_KERNEL_RWX
494void set_kernel_text_rw(void);
495void set_kernel_text_ro(void);
496#else
497static inline void set_kernel_text_rw(void) { }
498static inline void set_kernel_text_ro(void) { }
499#endif
500
501void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, 481void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
502 void *kaddr, unsigned long len); 482 void *kaddr, unsigned long len);
503 483
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
new file mode 100644
index 000000000000..5aa4315abe91
--- /dev/null
+++ b/arch/arm/include/asm/set_memory.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 1999-2002 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASMARM_SET_MEMORY_H
10#define _ASMARM_SET_MEMORY_H
11
12#ifdef CONFIG_MMU
13int set_memory_ro(unsigned long addr, int numpages);
14int set_memory_rw(unsigned long addr, int numpages);
15int set_memory_x(unsigned long addr, int numpages);
16int set_memory_nx(unsigned long addr, int numpages);
17#else
18static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
19static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
20static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
21static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
22#endif
23
24#ifdef CONFIG_STRICT_KERNEL_RWX
25void set_kernel_text_rw(void);
26void set_kernel_text_ro(void);
27#else
28static inline void set_kernel_text_rw(void) { }
29static inline void set_kernel_text_ro(void) { }
30#endif
31
32#endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 414e60ed0257..833c991075a1 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -21,6 +21,7 @@
21#include <asm/opcodes.h> 21#include <asm/opcodes.h>
22#include <asm/ftrace.h> 22#include <asm/ftrace.h>
23#include <asm/insn.h> 23#include <asm/insn.h>
24#include <asm/set_memory.h>
24 25
25#ifdef CONFIG_THUMB2_KERNEL 26#ifdef CONFIG_THUMB2_KERNEL
26#define NOP 0xf85deb04 /* pop.w {lr} */ 27#define NOP 0xf85deb04 /* pop.w {lr} */
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 9232caee7060..1bb4c40a3135 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -269,7 +269,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
269 269
270/* 270/*
271 * Register our undef instruction hooks with ARM undef core. 271 * Register our undef instruction hooks with ARM undef core.
272 * We regsiter a hook specifically looking for the KGB break inst 272 * We register a hook specifically looking for the KGB break inst
273 * and we handle the normal undef case within the do_undefinstr 273 * and we handle the normal undef case within the do_undefinstr
274 * handler. 274 * handler.
275 */ 275 */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index b18c1ea56bed..15495887ca14 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
18#include <asm/mach-types.h> 18#include <asm/mach-types.h>
19#include <asm/smp_plat.h> 19#include <asm/smp_plat.h>
20#include <asm/system_misc.h> 20#include <asm/system_misc.h>
21#include <asm/set_memory.h>
21 22
22extern void relocate_new_kernel(void); 23extern void relocate_new_kernel(void);
23extern const unsigned int relocate_new_kernel_size; 24extern const unsigned int relocate_new_kernel_size;
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 4977296f0c78..bcf3df59f71b 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -43,14 +43,14 @@
43int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data); 43int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
44 44
45/* 45/*
46 * Base address for PCI regsiter region 46 * Base address for PCI register region
47 */ 47 */
48unsigned long ixp4xx_pci_reg_base = 0; 48unsigned long ixp4xx_pci_reg_base = 0;
49 49
50/* 50/*
51 * PCI cfg an I/O routines are done by programming a 51 * PCI cfg an I/O routines are done by programming a
52 * command/byte enable register, and then read/writing 52 * command/byte enable register, and then read/writing
53 * the data from a data regsiter. We need to ensure 53 * the data from a data register. We need to ensure
54 * these transactions are atomic or we will end up 54 * these transactions are atomic or we will end up
55 * with corrupt data on the bus or in a driver. 55 * with corrupt data on the bus or in a driver.
56 */ 56 */
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 6b6fda65fb3b..91272db09fa3 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -117,7 +117,7 @@ static struct musb_hdrc_platform_data tusb_data = {
117static void __init n8x0_usb_init(void) 117static void __init n8x0_usb_init(void)
118{ 118{
119 int ret = 0; 119 int ret = 0;
120 static char announce[] __initdata = KERN_INFO "TUSB 6010\n"; 120 static const char announce[] __initconst = KERN_INFO "TUSB 6010\n";
121 121
122 /* PM companion chip power control pin */ 122 /* PM companion chip power control pin */
123 ret = gpio_request_one(TUSB6010_GPIO_ENABLE, GPIOF_OUT_INIT_LOW, 123 ret = gpio_request_one(TUSB6010_GPIO_ENABLE, GPIOF_OUT_INIT_LOW,
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 3b69f2642513..1403cb4a0c3d 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/set_memory.h>
18 19
19struct page_change_data { 20struct page_change_data {
20 pgprot_t set_mask; 21 pgprot_t set_mask;
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 93d0b6d0b63e..d5b9fa19b684 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -18,6 +18,7 @@
18#include <linux/if_vlan.h> 18#include <linux/if_vlan.h>
19 19
20#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
21#include <asm/set_memory.h>
21#include <asm/hwcap.h> 22#include <asm/hwcap.h>
22#include <asm/opcodes.h> 23#include <asm/opcodes.h>
23 24
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index a12f1afc95a3..a7a97a608033 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += rwsem.h
29generic-y += segment.h 29generic-y += segment.h
30generic-y += sembuf.h 30generic-y += sembuf.h
31generic-y += serial.h 31generic-y += serial.h
32generic-y += set_memory.h
32generic-y += shmbuf.h 33generic-y += shmbuf.h
33generic-y += simd.h 34generic-y += simd.h
34generic-y += sizes.h 35generic-y += sizes.h
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 728f933cef8c..d74a284abdc2 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -150,10 +150,6 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
150{ 150{
151} 151}
152 152
153int set_memory_ro(unsigned long addr, int numpages);
154int set_memory_rw(unsigned long addr, int numpages);
155int set_memory_x(unsigned long addr, int numpages);
156int set_memory_nx(unsigned long addr, int numpages);
157int set_memory_valid(unsigned long addr, unsigned long size, int enable); 153int set_memory_valid(unsigned long addr, unsigned long size, int enable);
158 154
159#endif 155#endif
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 3212ee0558f6..a682a0a2a0fa 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -17,6 +17,7 @@
17#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18 18
19#include <asm/pgtable.h> 19#include <asm/pgtable.h>
20#include <asm/set_memory.h>
20#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
21 22
22struct page_change_data { 23struct page_change_data {
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index d68abde52740..c6e53580aefe 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -27,6 +27,7 @@
27#include <asm/byteorder.h> 27#include <asm/byteorder.h>
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/debug-monitors.h> 29#include <asm/debug-monitors.h>
30#include <asm/set_memory.h>
30 31
31#include "bpf_jit.h" 32#include "bpf_jit.h"
32 33
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 2955f359e2a7..75859a07d75b 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -27,28 +27,6 @@ static int kdump_freeze_monarch;
27static int kdump_on_init = 1; 27static int kdump_on_init = 1;
28static int kdump_on_fatal_mca = 1; 28static int kdump_on_fatal_mca = 1;
29 29
30static inline Elf64_Word
31*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
32 size_t data_len)
33{
34 struct elf_note *note = (struct elf_note *)buf;
35 note->n_namesz = strlen(name) + 1;
36 note->n_descsz = data_len;
37 note->n_type = type;
38 buf += (sizeof(*note) + 3)/4;
39 memcpy(buf, name, note->n_namesz);
40 buf += (note->n_namesz + 3)/4;
41 memcpy(buf, data, data_len);
42 buf += (data_len + 3)/4;
43 return buf;
44}
45
46static void
47final_note(void *buf)
48{
49 memset(buf, 0, sizeof(struct elf_note));
50}
51
52extern void ia64_dump_cpu_regs(void *); 30extern void ia64_dump_cpu_regs(void *);
53 31
54static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus); 32static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S
index 970abaf3303e..dd5b2c357e95 100644
--- a/arch/m68k/ifpsp060/src/ilsp.S
+++ b/arch/m68k/ifpsp060/src/ilsp.S
@@ -776,7 +776,7 @@ muls64_zero:
776# ALGORITHM *********************************************************** # 776# ALGORITHM *********************************************************** #
777# In the interest of simplicity, all operands are converted to # 777# In the interest of simplicity, all operands are converted to #
778# longword size whether the operation is byte, word, or long. The # 778# longword size whether the operation is byte, word, or long. The #
779# bounds are sign extended accordingly. If Rn is a data regsiter, Rn is # 779# bounds are sign extended accordingly. If Rn is a data register, Rn is #
780# also sign extended. If Rn is an address register, it need not be sign # 780# also sign extended. If Rn is an address register, it need not be sign #
781# extended since the full register is always used. # 781# extended since the full register is always used. #
782# The condition codes are set correctly before the final "rts". # 782# The condition codes are set correctly before the final "rts". #
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
index b865c1a052ba..29a9f8629b9d 100644
--- a/arch/m68k/ifpsp060/src/isp.S
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -1876,7 +1876,7 @@ movp_read_err:
1876# word, or longword sized operands. Then, in the interest of # 1876# word, or longword sized operands. Then, in the interest of #
1877# simplicity, all operands are converted to longword size whether the # 1877# simplicity, all operands are converted to longword size whether the #
1878# operation is byte, word, or long. The bounds are sign extended # 1878# operation is byte, word, or long. The bounds are sign extended #
1879# accordingly. If Rn is a data regsiter, Rn is also sign extended. If # 1879# accordingly. If Rn is a data register, Rn is also sign extended. If #
1880# Rn is an address register, it need not be sign extended since the # 1880# Rn is an address register, it need not be sign extended since the #
1881# full register is always used. # 1881# full register is always used. #
1882# The comparisons are made and the condition codes calculated. # 1882# The comparisons are made and the condition codes calculated. #
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index ba4753c23b03..d18ed5af62f4 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -152,7 +152,7 @@ static int __cvmx_helper_errata_asx_pass1(int interface, int port,
152} 152}
153 153
154/** 154/**
155 * Configure all of the ASX, GMX, and PKO regsiters required 155 * Configure all of the ASX, GMX, and PKO registers required
156 * to get RGMII to function on the supplied interface. 156 * to get RGMII to function on the supplied interface.
157 * 157 *
158 * @interface: PKO Interface to configure (0 or 1) 158 * @interface: PKO Interface to configure (0 or 1)
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index 4e1761e0a09a..d88eb7a6662b 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -88,7 +88,7 @@ void __init which_prom(s32 magic, s32 *prom_vec)
88void __init prom_init(void) 88void __init prom_init(void)
89{ 89{
90 extern void dec_machine_halt(void); 90 extern void dec_machine_halt(void);
91 static char cpu_msg[] __initdata = 91 static const char cpu_msg[] __initconst =
92 "Sorry, this kernel is compiled for a wrong CPU type!\n"; 92 "Sorry, this kernel is compiled for a wrong CPU type!\n";
93 s32 argc = fw_arg0; 93 s32 argc = fw_arg0;
94 s32 *argv = (void *)fw_arg1; 94 s32 *argv = (void *)fw_arg1;
@@ -111,7 +111,7 @@ void __init prom_init(void)
111#if defined(CONFIG_CPU_R3000) 111#if defined(CONFIG_CPU_R3000)
112 if ((current_cpu_type() == CPU_R4000SC) || 112 if ((current_cpu_type() == CPU_R4000SC) ||
113 (current_cpu_type() == CPU_R4400SC)) { 113 (current_cpu_type() == CPU_R4400SC)) {
114 static char r4k_msg[] __initdata = 114 static const char r4k_msg[] __initconst =
115 "Please recompile with \"CONFIG_CPU_R4x00 = y\".\n"; 115 "Please recompile with \"CONFIG_CPU_R4x00 = y\".\n";
116 printk(cpu_msg); 116 printk(cpu_msg);
117 printk(r4k_msg); 117 printk(r4k_msg);
@@ -122,7 +122,7 @@ void __init prom_init(void)
122#if defined(CONFIG_CPU_R4X00) 122#if defined(CONFIG_CPU_R4X00)
123 if ((current_cpu_type() == CPU_R3000) || 123 if ((current_cpu_type() == CPU_R3000) ||
124 (current_cpu_type() == CPU_R3000A)) { 124 (current_cpu_type() == CPU_R3000A)) {
125 static char r3k_msg[] __initdata = 125 static const char r3k_msg[] __initconst =
126 "Please recompile with \"CONFIG_CPU_R3000 = y\".\n"; 126 "Please recompile with \"CONFIG_CPU_R3000 = y\".\n";
127 printk(cpu_msg); 127 printk(cpu_msg);
128 printk(r3k_msg); 128 printk(r3k_msg);
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
index f89775be7654..f7a95d7de140 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -55,7 +55,7 @@ extern int __cvmx_helper_rgmii_probe(int interface);
55extern void cvmx_helper_rgmii_internal_loopback(int port); 55extern void cvmx_helper_rgmii_internal_loopback(int port);
56 56
57/** 57/**
58 * Configure all of the ASX, GMX, and PKO regsiters required 58 * Configure all of the ASX, GMX, and PKO registers required
59 * to get RGMII to function on the supplied interface. 59 * to get RGMII to function on the supplied interface.
60 * 60 *
61 * @interface: PKO Interface to configure (0 or 1) 61 * @interface: PKO Interface to configure (0 or 1)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 9452b02ce079..313a88b2973f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -618,7 +618,7 @@ static int mipspmu_event_init(struct perf_event *event)
618 return -ENOENT; 618 return -ENOENT;
619 } 619 }
620 620
621 if (event->cpu >= nr_cpumask_bits || 621 if ((unsigned int)event->cpu >= nr_cpumask_bits ||
622 (event->cpu >= 0 && !cpu_online(event->cpu))) 622 (event->cpu >= 0 && !cpu_online(event->cpu)))
623 return -ENODEV; 623 return -ENODEV;
624 624
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b49e7bf9f950..9681b5877140 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2256,8 +2256,8 @@ void set_handler(unsigned long offset, void *addr, unsigned long size)
2256 local_flush_icache_range(ebase + offset, ebase + offset + size); 2256 local_flush_icache_range(ebase + offset, ebase + offset + size);
2257} 2257}
2258 2258
2259static char panic_null_cerr[] = 2259static const char panic_null_cerr[] =
2260 "Trying to set NULL cache error exception handler"; 2260 "Trying to set NULL cache error exception handler\n";
2261 2261
2262/* 2262/*
2263 * Install uncached CPU exception handler. 2263 * Install uncached CPU exception handler.
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ad4cb1613c57..a4fd296c958e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1369,7 +1369,7 @@ nadtlb_nullify:
1369 1369
1370 /* 1370 /*
1371 When there is no translation for the probe address then we 1371 When there is no translation for the probe address then we
1372 must nullify the insn and return zero in the target regsiter. 1372 must nullify the insn and return zero in the target register.
1373 This will indicate to the calling code that it does not have 1373 This will indicate to the calling code that it does not have
1374 write/read privileges to this address. 1374 write/read privileges to this address.
1375 1375
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index c66c943d9322..f1a76935a314 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -218,7 +218,7 @@ void *module_alloc(unsigned long size)
218 * easier than trying to map the text, data, init_text and 218 * easier than trying to map the text, data, init_text and
219 * init_data correctly */ 219 * init_data correctly */
220 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 220 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
221 GFP_KERNEL | __GFP_HIGHMEM, 221 GFP_KERNEL,
222 PAGE_KERNEL_RWX, 0, NUMA_NO_NODE, 222 PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
223 __builtin_return_address(0)); 223 __builtin_return_address(0));
224} 224}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f07f727cbfd2..d8834e8bfb05 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -571,21 +571,23 @@ config RELOCATABLE_TEST
571 relocation code. 571 relocation code.
572 572
573config CRASH_DUMP 573config CRASH_DUMP
574 bool "Build a kdump crash kernel" 574 bool "Build a dump capture kernel"
575 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) 575 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
576 select RELOCATABLE if PPC64 || 44x || FSL_BOOKE 576 select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
577 help 577 help
578 Build a kernel suitable for use as a kdump capture kernel. 578 Build a kernel suitable for use as a dump capture kernel.
579 The same kernel binary can be used as production kernel and dump 579 The same kernel binary can be used as production kernel and dump
580 capture kernel. 580 capture kernel.
581 581
582config FA_DUMP 582config FA_DUMP
583 bool "Firmware-assisted dump" 583 bool "Firmware-assisted dump"
584 depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC_CORE 584 depends on PPC64 && PPC_RTAS
585 select CRASH_CORE
586 select CRASH_DUMP
585 help 587 help
586 A robust mechanism to get reliable kernel crash dump with 588 A robust mechanism to get reliable kernel crash dump with
587 assistance from firmware. This approach does not use kexec, 589 assistance from firmware. This approach does not use kexec,
588 instead firmware assists in booting the kdump kernel 590 instead firmware assists in booting the capture kernel
589 while preserving memory contents. Firmware-assisted dump 591 while preserving memory contents. Firmware-assisted dump
590 is meant to be a kdump replacement offering robustness and 592 is meant to be a kdump replacement offering robustness and
591 speed not possible without system firmware assistance. 593 speed not possible without system firmware assistance.
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 0031806475f0..60b91084f33c 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -73,6 +73,8 @@
73 reg_entry++; \ 73 reg_entry++; \
74}) 74})
75 75
76extern int crashing_cpu;
77
76/* Kernel Dump section info */ 78/* Kernel Dump section info */
77struct fadump_section { 79struct fadump_section {
78 __be32 request_flag; 80 __be32 request_flag;
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 47b63de81f9b..cbabb5adccd9 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -43,8 +43,6 @@
43#define IPI_TIMEOUT 10000 43#define IPI_TIMEOUT 10000
44#define REAL_MODE_TIMEOUT 10000 44#define REAL_MODE_TIMEOUT 10000
45 45
46/* This keeps a track of which one is the crashing cpu. */
47int crashing_cpu = -1;
48static int time_to_dump; 46static int time_to_dump;
49 47
50#define CRASH_HANDLER_MAX 3 48#define CRASH_HANDLER_MAX 3
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 243dbef7e926..466569e26278 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -209,14 +209,20 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
209 */ 209 */
210static inline unsigned long fadump_calculate_reserve_size(void) 210static inline unsigned long fadump_calculate_reserve_size(void)
211{ 211{
212 unsigned long size; 212 int ret;
213 unsigned long long base, size;
213 214
214 /* 215 /*
215 * Check if the size is specified through fadump_reserve_mem= cmdline 216 * Check if the size is specified through crashkernel= cmdline
216 * option. If yes, then use that. 217 * option. If yes, then use that but ignore base as fadump
218 * reserves memory at end of RAM.
217 */ 219 */
218 if (fw_dump.reserve_bootvar) 220 ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
221 &size, &base);
222 if (ret == 0 && size > 0) {
223 fw_dump.reserve_bootvar = (unsigned long)size;
219 return fw_dump.reserve_bootvar; 224 return fw_dump.reserve_bootvar;
225 }
220 226
221 /* divide by 20 to get 5% of value */ 227 /* divide by 20 to get 5% of value */
222 size = memblock_end_of_DRAM() / 20; 228 size = memblock_end_of_DRAM() / 20;
@@ -371,15 +377,6 @@ static int __init early_fadump_param(char *p)
371} 377}
372early_param("fadump", early_fadump_param); 378early_param("fadump", early_fadump_param);
373 379
374/* Look for fadump_reserve_mem= cmdline option */
375static int __init early_fadump_reserve_mem(char *p)
376{
377 if (p)
378 fw_dump.reserve_bootvar = memparse(p, &p);
379 return 0;
380}
381early_param("fadump_reserve_mem", early_fadump_reserve_mem);
382
383static void register_fw_dump(struct fadump_mem_struct *fdm) 380static void register_fw_dump(struct fadump_mem_struct *fdm)
384{ 381{
385 int rc; 382 int rc;
@@ -527,34 +524,6 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
527 return reg_entry; 524 return reg_entry;
528} 525}
529 526
530static u32 *fadump_append_elf_note(u32 *buf, char *name, unsigned type,
531 void *data, size_t data_len)
532{
533 struct elf_note note;
534
535 note.n_namesz = strlen(name) + 1;
536 note.n_descsz = data_len;
537 note.n_type = type;
538 memcpy(buf, &note, sizeof(note));
539 buf += (sizeof(note) + 3)/4;
540 memcpy(buf, name, note.n_namesz);
541 buf += (note.n_namesz + 3)/4;
542 memcpy(buf, data, note.n_descsz);
543 buf += (note.n_descsz + 3)/4;
544
545 return buf;
546}
547
548static void fadump_final_note(u32 *buf)
549{
550 struct elf_note note;
551
552 note.n_namesz = 0;
553 note.n_descsz = 0;
554 note.n_type = 0;
555 memcpy(buf, &note, sizeof(note));
556}
557
558static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) 527static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
559{ 528{
560 struct elf_prstatus prstatus; 529 struct elf_prstatus prstatus;
@@ -565,8 +534,8 @@ static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
565 * prstatus.pr_pid = ???? 534 * prstatus.pr_pid = ????
566 */ 535 */
567 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); 536 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
568 buf = fadump_append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, 537 buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
569 &prstatus, sizeof(prstatus)); 538 &prstatus, sizeof(prstatus));
570 return buf; 539 return buf;
571} 540}
572 541
@@ -707,7 +676,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
707 note_buf = fadump_regs_to_elf_notes(note_buf, &regs); 676 note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
708 } 677 }
709 } 678 }
710 fadump_final_note(note_buf); 679 final_note(note_buf);
711 680
712 if (fdh) { 681 if (fdh) {
713 pr_debug("Updating elfcore header (%llx) with cpu notes\n", 682 pr_debug("Updating elfcore header (%llx) with cpu notes\n",
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 5c10b5925ac2..69e077180db6 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -125,6 +125,11 @@ int ppc_do_canonicalize_irqs;
125EXPORT_SYMBOL(ppc_do_canonicalize_irqs); 125EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
126#endif 126#endif
127 127
128#ifdef CONFIG_CRASH_CORE
129/* This keeps a track of which one is the crashing cpu. */
130int crashing_cpu = -1;
131#endif
132
128/* also used by kexec */ 133/* also used by kexec */
129void machine_shutdown(void) 134void machine_shutdown(void)
130{ 135{
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 915412e4d5ba..1fa794d7d59f 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -186,7 +186,7 @@ static u32 acop_get_inst(struct pt_regs *regs)
186} 186}
187 187
188/** 188/**
189 * @regs: regsiters at time of interrupt 189 * @regs: registers at time of interrupt
190 * @address: storage address 190 * @address: storage address
191 * @error_code: Fault code, usually the DSISR or ESR depending on 191 * @error_code: Fault code, usually the DSISR or ESR depending on
192 * processor type 192 * processor type
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 7e3481eb2174..45092b12f54f 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,4 +1,5 @@
1generic-y += asm-offsets.h 1generic-y += asm-offsets.h
2generic-y += cacheflush.h
2generic-y += clkdev.h 3generic-y += clkdev.h
3generic-y += dma-contiguous.h 4generic-y += dma-contiguous.h
4generic-y += div64.h 5generic-y += div64.h
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/set_memory.h
index 0499334f9473..46a4db44c47a 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -1,8 +1,5 @@
1#ifndef _S390_CACHEFLUSH_H 1#ifndef _ASMS390_SET_MEMORY_H
2#define _S390_CACHEFLUSH_H 2#define _ASMS390_SET_MEMORY_H
3
4/* Caches aren't brain-dead on the s390. */
5#include <asm-generic/cacheflush.h>
6 3
7#define SET_MEMORY_RO 1UL 4#define SET_MEMORY_RO 1UL
8#define SET_MEMORY_RW 2UL 5#define SET_MEMORY_RW 2UL
@@ -31,4 +28,4 @@ static inline int set_memory_x(unsigned long addr, int numpages)
31 return __set_memory(addr, numpages, SET_MEMORY_X); 28 return __set_memory(addr, numpages, SET_MEMORY_X);
32} 29}
33 30
34#endif /* _S390_CACHEFLUSH_H */ 31#endif
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 60a8a4e207ed..27477f34cc0a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -17,6 +17,7 @@
17#include <trace/syscall.h> 17#include <trace/syscall.h>
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
20#include <asm/set_memory.h>
20#include "entry.h" 21#include "entry.h"
21 22
22/* 23/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 76f9eda1d7c0..3d6a99746454 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -31,7 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/hardirq.h> 32#include <linux/hardirq.h>
33#include <linux/ftrace.h> 33#include <linux/ftrace.h>
34#include <asm/cacheflush.h> 34#include <asm/set_memory.h>
35#include <asm/sections.h> 35#include <asm/sections.h>
36#include <linux/uaccess.h> 36#include <linux/uaccess.h>
37#include <asm/dis.h> 37#include <asm/dis.h>
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index db5658daf994..49a6bd45957b 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -26,6 +26,7 @@
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/os_info.h> 28#include <asm/os_info.h>
29#include <asm/set_memory.h>
29#include <asm/switch_to.h> 30#include <asm/switch_to.h>
30#include <asm/nmi.h> 31#include <asm/nmi.h>
31 32
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 9a4f279d25ca..ca960d0370d5 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -823,7 +823,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
823 } 823 }
824 824
825 /* Check online status of the CPU to which the event is pinned */ 825 /* Check online status of the CPU to which the event is pinned */
826 if (event->cpu >= nr_cpumask_bits || 826 if ((unsigned int)event->cpu >= nr_cpumask_bits ||
827 (event->cpu >= 0 && !cpu_online(event->cpu))) 827 (event->cpu >= 0 && !cpu_online(event->cpu)))
828 return -ENODEV; 828 return -ENODEV;
829 829
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index aeb3feb9de53..689ac48361c6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1204,10 +1204,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1204 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 1204 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1205 return -EINVAL; 1205 return -EINVAL;
1206 1206
1207 keys = kmalloc_array(args->count, sizeof(uint8_t), 1207 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1208 GFP_KERNEL | __GFP_NOWARN);
1209 if (!keys)
1210 keys = vmalloc(sizeof(uint8_t) * args->count);
1211 if (!keys) 1208 if (!keys)
1212 return -ENOMEM; 1209 return -ENOMEM;
1213 1210
@@ -1249,10 +1246,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1249 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 1246 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1250 return -EINVAL; 1247 return -EINVAL;
1251 1248
1252 keys = kmalloc_array(args->count, sizeof(uint8_t), 1249 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1253 GFP_KERNEL | __GFP_NOWARN);
1254 if (!keys)
1255 keys = vmalloc(sizeof(uint8_t) * args->count);
1256 if (!keys) 1250 if (!keys)
1257 return -ENOMEM; 1251 return -ENOMEM;
1258 1252
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ee5066718b21..ee6a1d3d4983 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -39,6 +39,7 @@
39#include <asm/sections.h> 39#include <asm/sections.h>
40#include <asm/ctl_reg.h> 40#include <asm/ctl_reg.h>
41#include <asm/sclp.h> 41#include <asm/sclp.h>
42#include <asm/set_memory.h>
42 43
43pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); 44pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
44 45
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index fc321c5ec30e..49e721f3645e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -8,6 +8,7 @@
8#include <asm/facility.h> 8#include <asm/facility.h>
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10#include <asm/page.h> 10#include <asm/page.h>
11#include <asm/set_memory.h>
11 12
12static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) 13static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
13{ 14{
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 60d38993f232..c33c94b4be60 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -17,6 +17,7 @@
17#include <asm/setup.h> 17#include <asm/setup.h>
18#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
19#include <asm/sections.h> 19#include <asm/sections.h>
20#include <asm/set_memory.h>
20 21
21static DEFINE_MUTEX(vmem_mutex); 22static DEFINE_MUTEX(vmem_mutex);
22 23
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 4ecf6d687509..6e97a2e3fd8d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -24,6 +24,7 @@
24#include <linux/bpf.h> 24#include <linux/bpf.h>
25#include <asm/cacheflush.h> 25#include <asm/cacheflush.h>
26#include <asm/dis.h> 26#include <asm/dis.h>
27#include <asm/set_memory.h>
27#include "bpf_jit.h" 28#include "bpf_jit.h"
28 29
29int bpf_jit_enable __read_mostly; 30int bpf_jit_enable __read_mostly;
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e7e1942edff7..8b4140f6724f 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -5,93 +5,8 @@
5#include <asm-generic/cacheflush.h> 5#include <asm-generic/cacheflush.h>
6#include <asm/special_insns.h> 6#include <asm/special_insns.h>
7 7
8/*
9 * The set_memory_* API can be used to change various attributes of a virtual
10 * address range. The attributes include:
11 * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
12 * Executability : eXeutable, NoteXecutable
13 * Read/Write : ReadOnly, ReadWrite
14 * Presence : NotPresent
15 *
16 * Within a category, the attributes are mutually exclusive.
17 *
18 * The implementation of this API will take care of various aspects that
19 * are associated with changing such attributes, such as:
20 * - Flushing TLBs
21 * - Flushing CPU caches
22 * - Making sure aliases of the memory behind the mapping don't violate
23 * coherency rules as defined by the CPU in the system.
24 *
25 * What this API does not do:
26 * - Provide exclusion between various callers - including callers that
27 * operation on other mappings of the same physical page
28 * - Restore default attributes when a page is freed
29 * - Guarantee that mappings other than the requested one are
30 * in any state, other than that these do not violate rules for
31 * the CPU you have. Do not depend on any effects on other mappings,
32 * CPUs other than the one you have may have more relaxed rules.
33 * The caller is required to take care of these.
34 */
35
36int _set_memory_uc(unsigned long addr, int numpages);
37int _set_memory_wc(unsigned long addr, int numpages);
38int _set_memory_wt(unsigned long addr, int numpages);
39int _set_memory_wb(unsigned long addr, int numpages);
40int set_memory_uc(unsigned long addr, int numpages);
41int set_memory_wc(unsigned long addr, int numpages);
42int set_memory_wt(unsigned long addr, int numpages);
43int set_memory_wb(unsigned long addr, int numpages);
44int set_memory_x(unsigned long addr, int numpages);
45int set_memory_nx(unsigned long addr, int numpages);
46int set_memory_ro(unsigned long addr, int numpages);
47int set_memory_rw(unsigned long addr, int numpages);
48int set_memory_np(unsigned long addr, int numpages);
49int set_memory_4k(unsigned long addr, int numpages);
50
51int set_memory_array_uc(unsigned long *addr, int addrinarray);
52int set_memory_array_wc(unsigned long *addr, int addrinarray);
53int set_memory_array_wt(unsigned long *addr, int addrinarray);
54int set_memory_array_wb(unsigned long *addr, int addrinarray);
55
56int set_pages_array_uc(struct page **pages, int addrinarray);
57int set_pages_array_wc(struct page **pages, int addrinarray);
58int set_pages_array_wt(struct page **pages, int addrinarray);
59int set_pages_array_wb(struct page **pages, int addrinarray);
60
61/*
62 * For legacy compatibility with the old APIs, a few functions
63 * are provided that work on a "struct page".
64 * These functions operate ONLY on the 1:1 kernel mapping of the
65 * memory that the struct page represents, and internally just
66 * call the set_memory_* function. See the description of the
67 * set_memory_* function for more details on conventions.
68 *
69 * These APIs should be considered *deprecated* and are likely going to
70 * be removed in the future.
71 * The reason for this is the implicit operation on the 1:1 mapping only,
72 * making this not a generally useful API.
73 *
74 * Specifically, many users of the old APIs had a virtual address,
75 * called virt_to_page() or vmalloc_to_page() on that address to
76 * get a struct page* that the old API required.
77 * To convert these cases, use set_memory_*() on the original
78 * virtual address, do not use these functions.
79 */
80
81int set_pages_uc(struct page *page, int numpages);
82int set_pages_wb(struct page *page, int numpages);
83int set_pages_x(struct page *page, int numpages);
84int set_pages_nx(struct page *page, int numpages);
85int set_pages_ro(struct page *page, int numpages);
86int set_pages_rw(struct page *page, int numpages);
87
88
89void clflush_cache_range(void *addr, unsigned int size); 8void clflush_cache_range(void *addr, unsigned int size);
90 9
91#define mmio_flush_range(addr, size) clflush_cache_range(addr, size) 10#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
92 11
93extern int kernel_set_to_readonly;
94void set_kernel_text_rw(void);
95void set_kernel_text_ro(void);
96
97#endif /* _ASM_X86_CACHEFLUSH_H */ 12#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
new file mode 100644
index 000000000000..eaec6c364e42
--- /dev/null
+++ b/arch/x86/include/asm/set_memory.h
@@ -0,0 +1,87 @@
1#ifndef _ASM_X86_SET_MEMORY_H
2#define _ASM_X86_SET_MEMORY_H
3
4#include <asm/page.h>
5#include <asm-generic/set_memory.h>
6
7/*
8 * The set_memory_* API can be used to change various attributes of a virtual
9 * address range. The attributes include:
10 * Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack
11 * Executability : eXecutable, NoteXecutable
12 * Read/Write : ReadOnly, ReadWrite
13 * Presence : NotPresent
14 *
15 * Within a category, the attributes are mutually exclusive.
16 *
17 * The implementation of this API will take care of various aspects that
18 * are associated with changing such attributes, such as:
19 * - Flushing TLBs
20 * - Flushing CPU caches
21 * - Making sure aliases of the memory behind the mapping don't violate
22 * coherency rules as defined by the CPU in the system.
23 *
24 * What this API does not do:
25 * - Provide exclusion between various callers - including callers that
26 * operate on other mappings of the same physical page
27 * - Restore default attributes when a page is freed
28 * - Guarantee that mappings other than the requested one are
29 * in any state, other than that these do not violate rules for
30 * the CPU you have. Do not depend on any effects on other mappings,
31 * CPUs other than the one you have may have more relaxed rules.
32 * The caller is required to take care of these.
33 */
34
35int _set_memory_uc(unsigned long addr, int numpages);
36int _set_memory_wc(unsigned long addr, int numpages);
37int _set_memory_wt(unsigned long addr, int numpages);
38int _set_memory_wb(unsigned long addr, int numpages);
39int set_memory_uc(unsigned long addr, int numpages);
40int set_memory_wc(unsigned long addr, int numpages);
41int set_memory_wt(unsigned long addr, int numpages);
42int set_memory_wb(unsigned long addr, int numpages);
43int set_memory_np(unsigned long addr, int numpages);
44int set_memory_4k(unsigned long addr, int numpages);
45
46int set_memory_array_uc(unsigned long *addr, int addrinarray);
47int set_memory_array_wc(unsigned long *addr, int addrinarray);
48int set_memory_array_wt(unsigned long *addr, int addrinarray);
49int set_memory_array_wb(unsigned long *addr, int addrinarray);
50
51int set_pages_array_uc(struct page **pages, int addrinarray);
52int set_pages_array_wc(struct page **pages, int addrinarray);
53int set_pages_array_wt(struct page **pages, int addrinarray);
54int set_pages_array_wb(struct page **pages, int addrinarray);
55
56/*
57 * For legacy compatibility with the old APIs, a few functions
58 * are provided that work on a "struct page".
59 * These functions operate ONLY on the 1:1 kernel mapping of the
60 * memory that the struct page represents, and internally just
61 * call the set_memory_* function. See the description of the
62 * set_memory_* function for more details on conventions.
63 *
64 * These APIs should be considered *deprecated* and are likely going to
65 * be removed in the future.
66 * The reason for this is the implicit operation on the 1:1 mapping only,
67 * making this not a generally useful API.
68 *
69 * Specifically, many users of the old APIs had a virtual address,
70 * called virt_to_page() or vmalloc_to_page() on that address to
71 * get a struct page* that the old API required.
72 * To convert these cases, use set_memory_*() on the original
73 * virtual address, do not use these functions.
74 */
75
76int set_pages_uc(struct page *page, int numpages);
77int set_pages_wb(struct page *page, int numpages);
78int set_pages_x(struct page *page, int numpages);
79int set_pages_nx(struct page *page, int numpages);
80int set_pages_ro(struct page *page, int numpages);
81int set_pages_rw(struct page *page, int numpages);
82
83extern int kernel_set_to_readonly;
84void set_kernel_text_rw(void);
85void set_kernel_text_ro(void);
86
87#endif /* _ASM_X86_SET_MEMORY_H */
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index df083efe6ee0..815dd63f49d0 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -36,7 +36,7 @@
36#include <asm/proto.h> 36#include <asm/proto.h>
37#include <asm/iommu.h> 37#include <asm/iommu.h>
38#include <asm/gart.h> 38#include <asm/gart.h>
39#include <asm/cacheflush.h> 39#include <asm/set_memory.h>
40#include <asm/swiotlb.h> 40#include <asm/swiotlb.h>
41#include <asm/dma.h> 41#include <asm/dma.h>
42#include <asm/amd_nb.h> 42#include <asm/amd_nb.h>
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c36140d788fe..ee8f11800295 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -16,7 +16,7 @@
16 16
17#ifdef CONFIG_X86_64 17#ifdef CONFIG_X86_64
18# include <asm/mmconfig.h> 18# include <asm/mmconfig.h>
19# include <asm/cacheflush.h> 19# include <asm/set_memory.h>
20#endif 20#endif
21 21
22#include "cpu.h" 22#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a44ef52184df..0af86d9242da 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,7 +17,7 @@
17#include <asm/paravirt.h> 17#include <asm/paravirt.h>
18#include <asm/alternative.h> 18#include <asm/alternative.h>
19#include <asm/pgtable.h> 19#include <asm/pgtable.h>
20#include <asm/cacheflush.h> 20#include <asm/set_memory.h>
21 21
22void __init check_bugs(void) 22void __init check_bugs(void)
23{ 23{
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8ee76dce9140..0651e974dcb3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -24,7 +24,7 @@
24 24
25#include <trace/syscall.h> 25#include <trace/syscall.h>
26 26
27#include <asm/cacheflush.h> 27#include <asm/set_memory.h>
28#include <asm/kprobes.h> 28#include <asm/kprobes.h>
29#include <asm/ftrace.h> 29#include <asm/ftrace.h>
30#include <asm/nops.h> 30#include <asm/nops.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 19e1f2a6d7b0..5b2bbfbb3712 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -61,6 +61,7 @@
61#include <asm/alternative.h> 61#include <asm/alternative.h>
62#include <asm/insn.h> 62#include <asm/insn.h>
63#include <asm/debugreg.h> 63#include <asm/debugreg.h>
64#include <asm/set_memory.h>
64 65
65#include "common.h" 66#include "common.h"
66 67
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 9aadff3d0902..901c640d152f 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -37,6 +37,7 @@
37#include <asm/alternative.h> 37#include <asm/alternative.h>
38#include <asm/insn.h> 38#include <asm/insn.h>
39#include <asm/debugreg.h> 39#include <asm/debugreg.h>
40#include <asm/set_memory.h>
40 41
41#include "common.h" 42#include "common.h"
42 43
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 5f43cec296c5..8c53c5d7a1bc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,7 +23,7 @@
23#include <asm/io_apic.h> 23#include <asm/io_apic.h>
24#include <asm/cpufeature.h> 24#include <asm/cpufeature.h>
25#include <asm/desc.h> 25#include <asm/desc.h>
26#include <asm/cacheflush.h> 26#include <asm/set_memory.h>
27#include <asm/debugreg.h> 27#include <asm/debugreg.h>
28 28
29static void set_idt(void *newidt, __u16 limit) 29static void set_idt(void *newidt, __u16 limit)
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 085c3b300d32..ce640428d6fe 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -27,6 +27,7 @@
27#include <asm/debugreg.h> 27#include <asm/debugreg.h>
28#include <asm/kexec-bzimage64.h> 28#include <asm/kexec-bzimage64.h>
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/set_memory.h>
30 31
31#ifdef CONFIG_KEXEC_FILE 32#ifdef CONFIG_KEXEC_FILE
32static struct kexec_file_ops *kexec_file_loaders[] = { 33static struct kexec_file_ops *kexec_file_loaders[] = {
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 477ae806c2fa..f67bd3205df7 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -85,7 +85,7 @@ void *module_alloc(unsigned long size)
85 85
86 p = __vmalloc_node_range(size, MODULE_ALIGN, 86 p = __vmalloc_node_range(size, MODULE_ALIGN,
87 MODULES_VADDR + get_module_load_offset(), 87 MODULES_VADDR + get_module_load_offset(),
88 MODULES_END, GFP_KERNEL | __GFP_HIGHMEM, 88 MODULES_END, GFP_KERNEL,
89 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, 89 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
90 __builtin_return_address(0)); 90 __builtin_return_address(0));
91 if (p && (kasan_module_alloc(p, size) < 0)) { 91 if (p && (kasan_module_alloc(p, size) < 0)) {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9fa5b8164961..c329d2894905 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -177,8 +177,8 @@ static void recalculate_apic_map(struct kvm *kvm)
177 if (kvm_apic_present(vcpu)) 177 if (kvm_apic_present(vcpu))
178 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic)); 178 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
179 179
180 new = kvm_kvzalloc(sizeof(struct kvm_apic_map) + 180 new = kvzalloc(sizeof(struct kvm_apic_map) +
181 sizeof(struct kvm_lapic *) * ((u64)max_id + 1)); 181 sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);
182 182
183 if (!new) 183 if (!new)
184 goto out; 184 goto out;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 60168cdd0546..ea67dc876316 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -40,8 +40,8 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
40 int i; 40 int i;
41 41
42 for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { 42 for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
43 slot->arch.gfn_track[i] = kvm_kvzalloc(npages * 43 slot->arch.gfn_track[i] = kvzalloc(npages *
44 sizeof(*slot->arch.gfn_track[i])); 44 sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL);
45 if (!slot->arch.gfn_track[i]) 45 if (!slot->arch.gfn_track[i])
46 goto track_free; 46 goto track_free;
47 } 47 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b38a302858a0..464da936c53d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8190,13 +8190,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
8190 slot->base_gfn, level) + 1; 8190 slot->base_gfn, level) + 1;
8191 8191
8192 slot->arch.rmap[i] = 8192 slot->arch.rmap[i] =
8193 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); 8193 kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
8194 if (!slot->arch.rmap[i]) 8194 if (!slot->arch.rmap[i])
8195 goto out_free; 8195 goto out_free;
8196 if (i == 0) 8196 if (i == 0)
8197 continue; 8197 continue;
8198 8198
8199 linfo = kvm_kvzalloc(lpages * sizeof(*linfo)); 8199 linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
8200 if (!linfo) 8200 if (!linfo)
8201 goto out_free; 8201 goto out_free;
8202 8202
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 138bad2fb6bc..cbc87ea98751 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -5,7 +5,7 @@
5#include <linux/memblock.h> 5#include <linux/memblock.h>
6#include <linux/bootmem.h> /* for max_low_pfn */ 6#include <linux/bootmem.h> /* for max_low_pfn */
7 7
8#include <asm/cacheflush.h> 8#include <asm/set_memory.h>
9#include <asm/e820/api.h> 9#include <asm/e820/api.h>
10#include <asm/init.h> 10#include <asm/init.h>
11#include <asm/page.h> 11#include <asm/page.h>
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f34d275ee201..99fb83819a5f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -48,7 +48,7 @@
48#include <asm/sections.h> 48#include <asm/sections.h>
49#include <asm/paravirt.h> 49#include <asm/paravirt.h>
50#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/cacheflush.h> 51#include <asm/set_memory.h>
52#include <asm/page_types.h> 52#include <asm/page_types.h>
53#include <asm/init.h> 53#include <asm/init.h>
54 54
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 745e5e183169..41270b96403d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -50,7 +50,7 @@
50#include <asm/sections.h> 50#include <asm/sections.h>
51#include <asm/kdebug.h> 51#include <asm/kdebug.h>
52#include <asm/numa.h> 52#include <asm/numa.h>
53#include <asm/cacheflush.h> 53#include <asm/set_memory.h>
54#include <asm/init.h> 54#include <asm/init.h>
55#include <asm/uv/uv.h> 55#include <asm/uv/uv.h>
56#include <asm/setup.h> 56#include <asm/setup.h>
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e4f7b25df18e..bbc558b88a88 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -14,7 +14,7 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/mmiotrace.h> 15#include <linux/mmiotrace.h>
16 16
17#include <asm/cacheflush.h> 17#include <asm/set_memory.h>
18#include <asm/e820/api.h> 18#include <asm/e820/api.h>
19#include <asm/fixmap.h> 19#include <asm/fixmap.h>
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 56b22fa504df..1dcd2be4cce4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -24,6 +24,7 @@
24#include <asm/pgalloc.h> 24#include <asm/pgalloc.h>
25#include <asm/proto.h> 25#include <asm/proto.h>
26#include <asm/pat.h> 26#include <asm/pat.h>
27#include <asm/set_memory.h>
27 28
28/* 29/*
29 * The current flushing context - we pass it instead of 5 arguments: 30 * The current flushing context - we pass it instead of 5 arguments:
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 14f840df1d95..f58939393eef 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
12#include <linux/filter.h> 12#include <linux/filter.h>
13#include <linux/if_vlan.h> 13#include <linux/if_vlan.h>
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/set_memory.h>
15#include <linux/bpf.h> 16#include <linux/bpf.h>
16 17
17int bpf_jit_enable __read_mostly; 18int bpf_jit_enable __read_mostly;
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 29e9ba6ace9d..c1bdb9edcae7 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -11,7 +11,7 @@
11#include <asm/pci_x86.h> 11#include <asm/pci_x86.h>
12#include <asm/e820/types.h> 12#include <asm/e820/types.h>
13#include <asm/pci-functions.h> 13#include <asm/pci-functions.h>
14#include <asm/cacheflush.h> 14#include <asm/set_memory.h>
15 15
16/* BIOS32 signature: "_32_" */ 16/* BIOS32 signature: "_32_" */
17#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) 17#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index a15cf815ac4e..7e76a4d8304b 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -49,7 +49,7 @@
49#include <asm/efi.h> 49#include <asm/efi.h>
50#include <asm/e820/api.h> 50#include <asm/e820/api.h>
51#include <asm/time.h> 51#include <asm/time.h>
52#include <asm/cacheflush.h> 52#include <asm/set_memory.h>
53#include <asm/tlbflush.h> 53#include <asm/tlbflush.h>
54#include <asm/x86_init.h> 54#include <asm/x86_init.h>
55#include <asm/uv/uv.h> 55#include <asm/uv/uv.h>
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 5db706f14111..a163a90af4aa 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -2,7 +2,7 @@
2#include <linux/slab.h> 2#include <linux/slab.h>
3#include <linux/memblock.h> 3#include <linux/memblock.h>
4 4
5#include <asm/cacheflush.h> 5#include <asm/set_memory.h>
6#include <asm/pgtable.h> 6#include <asm/pgtable.h>
7#include <asm/realmode.h> 7#include <asm/realmode.h>
8#include <asm/tlbflush.h> 8#include <asm/tlbflush.h>
diff --git a/certs/blacklist.c b/certs/blacklist.c
index 3eddce0e307a..3a507b9e2568 100644
--- a/certs/blacklist.c
+++ b/certs/blacklist.c
@@ -140,7 +140,7 @@ int is_hash_blacklisted(const u8 *hash, size_t hash_len, const char *type)
140EXPORT_SYMBOL_GPL(is_hash_blacklisted); 140EXPORT_SYMBOL_GPL(is_hash_blacklisted);
141 141
142/* 142/*
143 * Intialise the blacklist 143 * Initialise the blacklist
144 */ 144 */
145static int __init blacklist_init(void) 145static int __init blacklist_init(void)
146{ 146{
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 168df784da84..218567d717d6 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -32,9 +32,7 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
32{ 32{
33 void *ctx; 33 void *ctx;
34 34
35 ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN); 35 ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
36 if (!ctx)
37 ctx = vmalloc(LZO1X_MEM_COMPRESS);
38 if (!ctx) 36 if (!ctx)
39 return ERR_PTR(-ENOMEM); 37 return ERR_PTR(-ENOMEM);
40 38
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 7207e5fc9d3d..2c462beee551 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -513,7 +513,7 @@ retry:
513 if (i < erst_record_id_cache.len) 513 if (i < erst_record_id_cache.len)
514 goto retry; 514 goto retry;
515 if (erst_record_id_cache.len >= erst_record_id_cache.size) { 515 if (erst_record_id_cache.len >= erst_record_id_cache.size) {
516 int new_size, alloc_size; 516 int new_size;
517 u64 *new_entries; 517 u64 *new_entries;
518 518
519 new_size = erst_record_id_cache.size * 2; 519 new_size = erst_record_id_cache.size * 2;
@@ -524,11 +524,7 @@ retry:
524 pr_warn(FW_WARN "too many record IDs!\n"); 524 pr_warn(FW_WARN "too many record IDs!\n");
525 return 0; 525 return 0;
526 } 526 }
527 alloc_size = new_size * sizeof(entries[0]); 527 new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL);
528 if (alloc_size < PAGE_SIZE)
529 new_entries = kmalloc(alloc_size, GFP_KERNEL);
530 else
531 new_entries = vmalloc(alloc_size);
532 if (!new_entries) 528 if (!new_entries)
533 return -ENOMEM; 529 return -ENOMEM;
534 memcpy(new_entries, entries, 530 memcpy(new_entries, entries,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6cbe6036da99..e5b47f032d9a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -95,7 +95,7 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
95/* pcc mapped address + header size + offset within PCC subspace */ 95/* pcc mapped address + header size + offset within PCC subspace */
96#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) 96#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
97 97
98/* Check if a CPC regsiter is in PCC */ 98/* Check if a CPC register is in PCC */
99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ 99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
100 (cpc)->cpc_entry.reg.space_id == \ 100 (cpc)->cpc_entry.reg.space_id == \
101 ACPI_ADR_SPACE_PLATFORM_COMM) 101 ACPI_ADR_SPACE_PLATFORM_COMM)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index dece26f119d4..a804a4107fbc 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -409,7 +409,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
409 new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN); 409 new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
410 if (!new_pages) { 410 if (!new_pages) {
411 new_pages = __vmalloc(bytes, 411 new_pages = __vmalloc(bytes,
412 GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO, 412 GFP_NOIO | __GFP_ZERO,
413 PAGE_KERNEL); 413 PAGE_KERNEL);
414 if (!new_pages) 414 if (!new_pages)
415 return NULL; 415 return NULL;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e9e2a9e95a66..9a7bb2c29447 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/sched/mm.h>
21#include <linux/fs.h> 22#include <linux/fs.h>
22#include <linux/bio.h> 23#include <linux/bio.h>
23#include <linux/stat.h> 24#include <linux/stat.h>
@@ -347,7 +348,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
347 struct socket *sock = config->socks[index]->sock; 348 struct socket *sock = config->socks[index]->sock;
348 int result; 349 int result;
349 struct msghdr msg; 350 struct msghdr msg;
350 unsigned long pflags = current->flags; 351 unsigned int noreclaim_flag;
351 352
352 if (unlikely(!sock)) { 353 if (unlikely(!sock)) {
353 dev_err_ratelimited(disk_to_dev(nbd->disk), 354 dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -358,7 +359,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
358 359
359 msg.msg_iter = *iter; 360 msg.msg_iter = *iter;
360 361
361 current->flags |= PF_MEMALLOC; 362 noreclaim_flag = memalloc_noreclaim_save();
362 do { 363 do {
363 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; 364 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
364 msg.msg_name = NULL; 365 msg.msg_name = NULL;
@@ -381,7 +382,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
381 *sent += result; 382 *sent += result;
382 } while (msg_data_left(&msg)); 383 } while (msg_data_left(&msg));
383 384
384 current_restore_flags(pflags, PF_MEMALLOC); 385 memalloc_noreclaim_restore(noreclaim_flag);
385 386
386 return result; 387 return result;
387} 388}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3670e8dd03fe..26812c1ed0cf 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1922,7 +1922,7 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1922{ 1922{
1923 struct ceph_osd_request *osd_req = obj_request->osd_req; 1923 struct ceph_osd_request *osd_req = obj_request->osd_req;
1924 1924
1925 osd_req->r_mtime = CURRENT_TIME; 1925 ktime_get_real_ts(&osd_req->r_mtime);
1926 osd_req->r_data_offset = obj_request->offset; 1926 osd_req->r_data_offset = obj_request->offset;
1927} 1927}
1928 1928
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 3661a51e93e2..5fbd333e4c6d 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -9,6 +9,7 @@
9#include <linux/page-flags.h> 9#include <linux/page-flags.h>
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <asm/set_memory.h>
12#include "agp.h" 13#include "agp.h"
13 14
14#define AMD_MMBASE_BAR 1 15#define AMD_MMBASE_BAR 1
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 75a9786a77e6..0b5ec7af2414 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -10,6 +10,7 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/agp_backend.h> 11#include <linux/agp_backend.h>
12#include <asm/agp.h> 12#include <asm/agp.h>
13#include <asm/set_memory.h>
13#include "agp.h" 14#include "agp.h"
14 15
15#define ATI_GART_MMBASE_BAR 1 16#define ATI_GART_MMBASE_BAR 1
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index f002fa5d1887..658664a5a5aa 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -39,7 +39,9 @@
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/cacheflush.h> 42#ifdef CONFIG_X86
43#include <asm/set_memory.h>
44#endif
43#include <asm/pgtable.h> 45#include <asm/pgtable.h>
44#include "agp.h" 46#include "agp.h"
45 47
@@ -88,13 +90,7 @@ static int agp_get_key(void)
88 90
89void agp_alloc_page_array(size_t size, struct agp_memory *mem) 91void agp_alloc_page_array(size_t size, struct agp_memory *mem)
90{ 92{
91 mem->pages = NULL; 93 mem->pages = kvmalloc(size, GFP_KERNEL);
92
93 if (size <= 2*PAGE_SIZE)
94 mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
95 if (mem->pages == NULL) {
96 mem->pages = vmalloc(size);
97 }
98} 94}
99EXPORT_SYMBOL(agp_alloc_page_array); 95EXPORT_SYMBOL(agp_alloc_page_array);
100 96
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7fcc2a9d1d5a..9b6b6023193b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -25,6 +25,7 @@
25#include "agp.h" 25#include "agp.h"
26#include "intel-agp.h" 26#include "intel-agp.h"
27#include <drm/intel-gtt.h> 27#include <drm/intel-gtt.h>
28#include <asm/set_memory.h>
28 29
29/* 30/*
30 * If we have Intel graphics, we're not going to have anything other than 31 * If we have Intel graphics, we're not going to have anything other than
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 9b163b49d976..03be4ac79b0d 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -9,6 +9,7 @@
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/jiffies.h> 10#include <linux/jiffies.h>
11#include <linux/agp_backend.h> 11#include <linux/agp_backend.h>
12#include <asm/set_memory.h>
12#include "agp.h" 13#include "agp.h"
13 14
14#define SVWRKS_COMMAND 0x04 15#define SVWRKS_COMMAND 0x04
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 50aa9ba91f25..0d7b577e0ff0 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -489,7 +489,7 @@ static const struct file_operations dsp56k_fops = {
489 489
490/****** Init and module functions ******/ 490/****** Init and module functions ******/
491 491
492static char banner[] __initdata = KERN_INFO "DSP56k driver installed\n"; 492static const char banner[] __initconst = KERN_INFO "DSP56k driver installed\n";
493 493
494static int __init dsp56k_init_driver(void) 494static int __init dsp56k_init_driver(void)
495{ 495{
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 03f9d316f969..d523991c945f 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -128,7 +128,7 @@ static void qcom_cc_gdsc_unregister(void *data)
128 128
129/* 129/*
130 * Backwards compatibility with old DTs. Register a pass-through factor 1/1 130 * Backwards compatibility with old DTs. Register a pass-through factor 1/1
131 * clock to translate 'path' clk into 'name' clk and regsiter the 'path' 131 * clock to translate 'path' clk into 'name' clk and register the 'path'
132 * clk as a fixed rate clock if it isn't present. 132 * clk as a fixed rate clock if it isn't present.
133 */ 133 */
134static int _qcom_cc_register_board_clk(struct device *dev, const char *path, 134static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0b5bf135b090..062d71434e47 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1171,7 +1171,8 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
1171 1171
1172static void __request_acpi_cpufreq(void) 1172static void __request_acpi_cpufreq(void)
1173{ 1173{
1174 const char *cur_drv, *drv = "acpi-cpufreq"; 1174 const char drv[] = "acpi-cpufreq";
1175 const char *cur_drv;
1175 1176
1176 cur_drv = cpufreq_get_current_driver(); 1177 cur_drv = cpufreq_get_current_driver();
1177 if (!cur_drv) 1178 if (!cur_drv)
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a7db9011d5fe..d2d0430d09d4 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -236,7 +236,7 @@ use_defaults:
236 return 0; 236 return 0;
237} 237}
238 238
239static int sti_cpufreq_fetch_syscon_regsiters(void) 239static int sti_cpufreq_fetch_syscon_registers(void)
240{ 240{
241 struct device *dev = ddata.cpu; 241 struct device *dev = ddata.cpu;
242 struct device_node *np = dev->of_node; 242 struct device_node *np = dev->of_node;
@@ -275,7 +275,7 @@ static int sti_cpufreq_init(void)
275 goto skip_voltage_scaling; 275 goto skip_voltage_scaling;
276 } 276 }
277 277
278 ret = sti_cpufreq_fetch_syscon_regsiters(); 278 ret = sti_cpufreq_fetch_syscon_registers();
279 if (ret) 279 if (ret)
280 goto skip_voltage_scaling; 280 goto skip_voltage_scaling;
281 281
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 6d691abe889c..2ee327d69775 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -27,6 +27,9 @@
27 */ 27 */
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/amdgpu_drm.h> 29#include <drm/amdgpu_drm.h>
30#ifdef CONFIG_X86
31#include <asm/set_memory.h>
32#endif
30#include "amdgpu.h" 33#include "amdgpu.h"
31 34
32/* 35/*
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0ea3241c651..1f178b878e42 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2446,7 +2446,7 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
2446int __init drm_fb_helper_modinit(void) 2446int __init drm_fb_helper_modinit(void)
2447{ 2447{
2448#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT) 2448#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
2449 const char *name = "fbcon"; 2449 const char name[] = "fbcon";
2450 struct module *fbcon; 2450 struct module *fbcon;
2451 2451
2452 mutex_lock(&module_mutex); 2452 mutex_lock(&module_mutex);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index d019b5e311cc..2d955d7d7b6d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -161,8 +161,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
161 file_size += sizeof(*iter.hdr) * n_obj; 161 file_size += sizeof(*iter.hdr) * n_obj;
162 162
163 /* Allocate the file in vmalloc memory, it's likely to be big */ 163 /* Allocate the file in vmalloc memory, it's likely to be big */
164 iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_HIGHMEM | 164 iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
165 __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL); 165 PAGE_KERNEL);
166 if (!iter.start) { 166 if (!iter.start) {
167 dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); 167 dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
168 return; 168 return;
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 3f4f424196b2..3949b0990916 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -21,6 +21,7 @@
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <linux/shmem_fs.h> 23#include <linux/shmem_fs.h>
24#include <asm/set_memory.h>
24#include "psb_drv.h" 25#include "psb_drv.h"
25#include "blitter.h" 26#include "blitter.h"
26 27
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 5ee93ff55608..1f9b35afefee 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -35,6 +35,7 @@
35#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36#include <acpi/video.h> 36#include <acpi/video.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <asm/set_memory.h>
38 39
39static struct drm_driver driver; 40static struct drm_driver driver;
40static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 41static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8bab4aea63e6..2aa6b97fd22f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,8 @@
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/stop_machine.h> 32#include <linux/stop_machine.h>
33 33
34#include <asm/set_memory.h>
35
34#include <drm/drmP.h> 36#include <drm/drmP.h>
35#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
36 38
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ca5397beb357..2170534101ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -568,9 +568,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
568 568
569 size *= nmemb; 569 size *= nmemb;
570 570
571 mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 571 mem = kvmalloc(size, GFP_KERNEL);
572 if (!mem)
573 mem = vmalloc(size);
574 if (!mem) 572 if (!mem)
575 return ERR_PTR(-ENOMEM); 573 return ERR_PTR(-ENOMEM);
576 574
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c4777c8d0312..0b3ec35515f3 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -27,6 +27,9 @@
27 */ 27 */
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#ifdef CONFIG_X86
31#include <asm/set_memory.h>
32#endif
30#include "radeon.h" 33#include "radeon.h"
31 34
32/* 35/*
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index a37de5db5731..eeddc1e48409 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -51,6 +51,9 @@
51#if IS_ENABLED(CONFIG_AGP) 51#if IS_ENABLED(CONFIG_AGP)
52#include <asm/agp.h> 52#include <asm/agp.h>
53#endif 53#endif
54#ifdef CONFIG_X86
55#include <asm/set_memory.h>
56#endif
54 57
55#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) 58#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
56#define SMALL_ALLOCATION 16 59#define SMALL_ALLOCATION 16
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index cec4b4baa179..90ddbdca93bd 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -53,6 +53,9 @@
53#if IS_ENABLED(CONFIG_AGP) 53#if IS_ENABLED(CONFIG_AGP)
54#include <asm/agp.h> 54#include <asm/agp.h>
55#endif 55#endif
56#ifdef CONFIG_X86
57#include <asm/set_memory.h>
58#endif
56 59
57#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) 60#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
58#define SMALL_ALLOCATION 4 61#define SMALL_ALLOCATION 4
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index aee3c00f836e..5260179d788a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -44,6 +44,9 @@
44#include <drm/ttm/ttm_bo_driver.h> 44#include <drm/ttm/ttm_bo_driver.h>
45#include <drm/ttm/ttm_placement.h> 45#include <drm/ttm/ttm_placement.h>
46#include <drm/ttm/ttm_page_alloc.h> 46#include <drm/ttm/ttm_page_alloc.h>
47#ifdef CONFIG_X86
48#include <asm/set_memory.h>
49#endif
47 50
48/** 51/**
49 * Allocates storage for pointers to the pages that back the ttm. 52 * Allocates storage for pointers to the pages that back the ttm.
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index e88afe1a435c..dbbe31df74df 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -27,7 +27,9 @@
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29 29
30#include <asm/cacheflush.h> 30#ifdef CONFIG_X86
31#include <asm/set_memory.h>
32#endif
31 33
32#include "intel_th.h" 34#include "intel_th.h"
33#include "msu.h" 35#include "msu.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 014c8262bfff..37d5d29597a4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1721,7 +1721,7 @@ int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1721 roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, 1721 roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1722 MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); 1722 MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1723 1723
1724 /* DMA momery regsiter */ 1724 /* DMA memory register */
1725 if (mr->type == MR_TYPE_DMA) 1725 if (mr->type == MR_TYPE_DMA)
1726 return 0; 1726 return 0;
1727 1727
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index dc5c97c8f070..80fc01ffd8bd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -205,7 +205,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
205 return 0; 205 return 0;
206 } 206 }
207 207
208 /* Note: if page_shift is zero, FAST memory regsiter */ 208 /* Note: if page_shift is zero, FAST memory register */
209 mtt->page_shift = page_shift; 209 mtt->page_shift = page_shift;
210 210
211 /* Compute MTT entry necessary */ 211 /* Compute MTT entry necessary */
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 549b315ca8fe..f53c8cda1bde 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -281,7 +281,7 @@ static void lp5521_firmware_loaded(struct lp55xx_chip *chip)
281 } 281 }
282 282
283 /* 283 /*
284 * Program momery sequence 284 * Program memory sequence
285 * 1) set engine mode to "LOAD" 285 * 1) set engine mode to "LOAD"
286 * 2) write firmware data into program memory 286 * 2) write firmware data into program memory
287 */ 287 */
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index c5b30f06218a..e9ba8cd32d66 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -387,7 +387,7 @@ static void lp5523_firmware_loaded(struct lp55xx_chip *chip)
387 } 387 }
388 388
389 /* 389 /*
390 * Program momery sequence 390 * Program memory sequence
391 * 1) set engine mode to "LOAD" 391 * 1) set engine mode to "LOAD"
392 * 2) write firmware data into program memory 392 * 2) write firmware data into program memory
393 */ 393 */
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index b75333803a63..90892585bcb5 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -270,7 +270,7 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
270 } 270 }
271 271
272 /* 272 /*
273 * Program momery sequence 273 * Program memory sequence
274 * 1) set engine mode to "LOAD" 274 * 1) set engine mode to "LOAD"
275 * 2) write firmware data into program memory 275 * 2) write firmware data into program memory
276 */ 276 */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 85e3f21c2514..e57353e39168 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -767,16 +767,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
767 } 767 }
768 768
769 n = d->nr_stripes * sizeof(atomic_t); 769 n = d->nr_stripes * sizeof(atomic_t);
770 d->stripe_sectors_dirty = n < PAGE_SIZE << 6 770 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
771 ? kzalloc(n, GFP_KERNEL)
772 : vzalloc(n);
773 if (!d->stripe_sectors_dirty) 771 if (!d->stripe_sectors_dirty)
774 return -ENOMEM; 772 return -ENOMEM;
775 773
776 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 774 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
777 d->full_dirty_stripes = n < PAGE_SIZE << 6 775 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
778 ? kzalloc(n, GFP_KERNEL)
779 : vzalloc(n);
780 if (!d->full_dirty_stripes) 776 if (!d->full_dirty_stripes)
781 return -ENOMEM; 777 return -ENOMEM;
782 778
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 5d13930f0f22..cb8d2ccbb6c6 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -43,11 +43,7 @@ struct closure;
43 (heap)->used = 0; \ 43 (heap)->used = 0; \
44 (heap)->size = (_size); \ 44 (heap)->size = (_size); \
45 _bytes = (heap)->size * sizeof(*(heap)->data); \ 45 _bytes = (heap)->size * sizeof(*(heap)->data); \
46 (heap)->data = NULL; \ 46 (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
47 if (_bytes < KMALLOC_MAX_SIZE) \
48 (heap)->data = kmalloc(_bytes, (gfp)); \
49 if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
50 (heap)->data = vmalloc(_bytes); \
51 (heap)->data; \ 47 (heap)->data; \
52}) 48})
53 49
@@ -136,12 +132,8 @@ do { \
136 \ 132 \
137 (fifo)->mask = _allocated_size - 1; \ 133 (fifo)->mask = _allocated_size - 1; \
138 (fifo)->front = (fifo)->back = 0; \ 134 (fifo)->front = (fifo)->back = 0; \
139 (fifo)->data = NULL; \
140 \ 135 \
141 if (_bytes < KMALLOC_MAX_SIZE) \ 136 (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
142 (fifo)->data = kmalloc(_bytes, (gfp)); \
143 if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
144 (fifo)->data = vmalloc(_bytes); \
145 (fifo)->data; \ 137 (fifo)->data; \
146}) 138})
147 139
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c92c31b23e54..5db11a405129 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -406,7 +406,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
406 if (gfp_mask & __GFP_NORETRY) 406 if (gfp_mask & __GFP_NORETRY)
407 noio_flag = memalloc_noio_save(); 407 noio_flag = memalloc_noio_save();
408 408
409 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL); 409 ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
410 410
411 if (gfp_mask & __GFP_NORETRY) 411 if (gfp_mask & __GFP_NORETRY)
412 memalloc_noio_restore(noio_flag); 412 memalloc_noio_restore(noio_flag);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2d5d7064acbf..0555b4410e05 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1691,6 +1691,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
1691 struct dm_ioctl *dmi; 1691 struct dm_ioctl *dmi;
1692 int secure_data; 1692 int secure_data;
1693 const size_t minimum_data_size = offsetof(struct dm_ioctl, data); 1693 const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
1694 unsigned noio_flag;
1694 1695
1695 if (copy_from_user(param_kernel, user, minimum_data_size)) 1696 if (copy_from_user(param_kernel, user, minimum_data_size))
1696 return -EFAULT; 1697 return -EFAULT;
@@ -1713,15 +1714,9 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
1713 * Use kmalloc() rather than vmalloc() when we can. 1714 * Use kmalloc() rather than vmalloc() when we can.
1714 */ 1715 */
1715 dmi = NULL; 1716 dmi = NULL;
1716 if (param_kernel->data_size <= KMALLOC_MAX_SIZE) 1717 noio_flag = memalloc_noio_save();
1717 dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 1718 dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
1718 1719 memalloc_noio_restore(noio_flag);
1719 if (!dmi) {
1720 unsigned noio_flag;
1721 noio_flag = memalloc_noio_save();
1722 dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
1723 memalloc_noio_restore(noio_flag);
1724 }
1725 1720
1726 if (!dmi) { 1721 if (!dmi) {
1727 if (secure_data && clear_user(user, param_kernel->data_size)) 1722 if (secure_data && clear_user(user, param_kernel->data_size))
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 0250e7e521ab..6028d8247f58 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -146,12 +146,7 @@ static void *dm_kvzalloc(size_t alloc_size, int node)
146 if (!claim_shared_memory(alloc_size)) 146 if (!claim_shared_memory(alloc_size))
147 return NULL; 147 return NULL;
148 148
149 if (alloc_size <= KMALLOC_MAX_SIZE) { 149 p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
150 p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
151 if (p)
152 return p;
153 }
154 p = vzalloc_node(alloc_size, node);
155 if (p) 150 if (p)
156 return p; 151 return p;
157 152
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
index 354ec07eae87..23ae72468025 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
@@ -70,7 +70,7 @@
70* (3) both long and short but short preferred and long only when necesarry 70* (3) both long and short but short preferred and long only when necesarry
71* 71*
72* These modes must be selected compile time via compile switches. 72* These modes must be selected compile time via compile switches.
73* Compile switch settings for the diffrent modes: 73* Compile switch settings for the different modes:
74* (1) DRXDAPFASI_LONG_ADDR_ALLOWED=0, DRXDAPFASI_SHORT_ADDR_ALLOWED=1 74* (1) DRXDAPFASI_LONG_ADDR_ALLOWED=0, DRXDAPFASI_SHORT_ADDR_ALLOWED=1
75* (2) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=0 75* (2) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=0
76* (3) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=1 76* (3) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=1
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301d57d9..3dc61ea7dc64 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
129 129
130 duramar2150_c2port_dev = c2port_device_register("uc", 130 duramar2150_c2port_dev = c2port_device_register("uc",
131 &duramar2150_c2port_ops, NULL); 131 &duramar2150_c2port_ops, NULL);
132 if (!duramar2150_c2port_dev) { 132 if (IS_ERR(duramar2150_c2port_dev)) {
133 ret = -ENODEV; 133 ret = PTR_ERR(duramar2150_c2port_dev);
134 goto free_region; 134 goto free_region;
135 } 135 }
136 136
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index ac522417c462..3d528a13b8fc 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -16,9 +16,10 @@
16 16
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/genalloc.h> 18#include <linux/genalloc.h>
19#include <linux/mm.h>
19#include <linux/sram.h> 20#include <linux/sram.h>
20 21
21#include <asm/cacheflush.h> 22#include <asm/set_memory.h>
22 23
23#include "sram.h" 24#include "sram.h"
24 25
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 498c0854305f..06c4974ee8dd 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
298 size_t pas_size; 298 size_t pas_size;
299 size_t vas_size; 299 size_t vas_size;
300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); 300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
301 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 301 u64 num_pages;
302 302
303 if (size > SIZE_MAX - PAGE_SIZE)
304 return NULL;
305 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
303 if (num_pages > 306 if (num_pages >
304 (SIZE_MAX - queue_size) / 307 (SIZE_MAX - queue_size) /
305 (sizeof(*queue->kernel_if->u.g.pas) + 308 (sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
624{ 627{
625 struct vmci_queue *queue; 628 struct vmci_queue *queue;
626 size_t queue_page_size; 629 size_t queue_page_size;
627 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 630 u64 num_pages;
628 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 631 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
629 632
633 if (size > SIZE_MAX - PAGE_SIZE)
634 return NULL;
635 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
630 if (num_pages > (SIZE_MAX - queue_size) / 636 if (num_pages > (SIZE_MAX - queue_size) /
631 sizeof(*queue->kernel_if->u.h.page)) 637 sizeof(*queue->kernel_if->u.h.page))
632 return NULL; 638 return NULL;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index c84742671a5f..092c9bd225be 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -40,6 +40,7 @@
40#include <linux/list.h> 40#include <linux/list.h>
41#include <linux/random.h> 41#include <linux/random.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/sched/mm.h>
43#include <linux/fs.h> 44#include <linux/fs.h>
44#include <linux/pagemap.h> 45#include <linux/pagemap.h>
45#include <linux/seq_file.h> 46#include <linux/seq_file.h>
@@ -1368,31 +1369,18 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
1368 return 0; 1369 return 0;
1369} 1370}
1370 1371
1371static int set_memalloc(void)
1372{
1373 if (current->flags & PF_MEMALLOC)
1374 return 0;
1375 current->flags |= PF_MEMALLOC;
1376 return 1;
1377}
1378
1379static void clear_memalloc(int memalloc)
1380{
1381 if (memalloc)
1382 current->flags &= ~PF_MEMALLOC;
1383}
1384
1385static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) 1372static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
1386{ 1373{
1387 ssize_t tx; 1374 ssize_t tx;
1388 int err, memalloc; 1375 int err;
1376 unsigned int noreclaim_flag;
1389 1377
1390 err = get_pages(ns, file, count, pos); 1378 err = get_pages(ns, file, count, pos);
1391 if (err) 1379 if (err)
1392 return err; 1380 return err;
1393 memalloc = set_memalloc(); 1381 noreclaim_flag = memalloc_noreclaim_save();
1394 tx = kernel_read(file, pos, buf, count); 1382 tx = kernel_read(file, pos, buf, count);
1395 clear_memalloc(memalloc); 1383 memalloc_noreclaim_restore(noreclaim_flag);
1396 put_pages(ns); 1384 put_pages(ns);
1397 return tx; 1385 return tx;
1398} 1386}
@@ -1400,14 +1388,15 @@ static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_
1400static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) 1388static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
1401{ 1389{
1402 ssize_t tx; 1390 ssize_t tx;
1403 int err, memalloc; 1391 int err;
1392 unsigned int noreclaim_flag;
1404 1393
1405 err = get_pages(ns, file, count, pos); 1394 err = get_pages(ns, file, count, pos);
1406 if (err) 1395 if (err)
1407 return err; 1396 return err;
1408 memalloc = set_memalloc(); 1397 noreclaim_flag = memalloc_noreclaim_save();
1409 tx = kernel_write(file, buf, count, pos); 1398 tx = kernel_write(file, buf, count, pos);
1410 clear_memalloc(memalloc); 1399 memalloc_noreclaim_restore(noreclaim_flag);
1411 put_pages(ns); 1400 put_pages(ns);
1412 return tx; 1401 return tx;
1413} 1402}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 4ef07d97156d..602c19e23f05 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -413,7 +413,7 @@
413/* RSCFDnRPGACCr */ 413/* RSCFDnRPGACCr */
414#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r))) 414#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
415 415
416/* CAN FD mode specific regsiter map */ 416/* CAN FD mode specific register map */
417 417
418/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ 418/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
419#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m))) 419#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 7cdb18512407..2a57b46fd6a6 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -48,7 +48,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm
48/* 32 bit registers */ 48/* 32 bit registers */
49 49
50#define ASF_STAT 0x00 /* ASF status register */ 50#define ASF_STAT 0x00 /* ASF status register */
51#define CHIPID 0x04 /* Chip ID regsiter */ 51#define CHIPID 0x04 /* Chip ID register */
52#define MIB_DATA 0x10 /* MIB data register */ 52#define MIB_DATA 0x10 /* MIB data register */
53#define MIB_ADDR 0x14 /* MIB address register */ 53#define MIB_ADDR 0x14 /* MIB address register */
54#define STAT0 0x30 /* Status0 register */ 54#define STAT0 0x30 /* Status0 register */
@@ -648,7 +648,7 @@ typedef enum {
648/* driver ioctl parameters */ 648/* driver ioctl parameters */
649#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32) 649#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32)
650 650
651/* amd8111e desriptor format */ 651/* amd8111e descriptor format */
652 652
653struct amd8111e_tx_dr{ 653struct amd8111e_tx_dr{
654 654
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 796c37a5bbde..c5b81268c284 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -42,8 +42,8 @@
42 42
43*/ 43*/
44 44
45static char version[] = "atarilance.c: v1.3 04/04/96 " 45static const char version[] = "atarilance.c: v1.3 04/04/96 "
46 "Roman.Hodek@informatik.uni-erlangen.de\n"; 46 "Roman.Hodek@informatik.uni-erlangen.de\n";
47 47
48#include <linux/netdevice.h> 48#include <linux/netdevice.h>
49#include <linux/etherdevice.h> 49#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 6c98901f1b89..82cc81385033 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -72,7 +72,7 @@
72#include <asm/dec/machtype.h> 72#include <asm/dec/machtype.h>
73#include <asm/dec/system.h> 73#include <asm/dec/system.h>
74 74
75static char version[] = 75static const char version[] =
76"declance.c: v0.011 by Linux MIPS DECstation task force\n"; 76"declance.c: v0.011 by Linux MIPS DECstation task force\n";
77 77
78MODULE_AUTHOR("Linux MIPS DECstation task force"); 78MODULE_AUTHOR("Linux MIPS DECstation task force");
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 12bb4f1489fc..77b1db267730 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -21,7 +21,8 @@
21 21
22*/ 22*/
23 23
24static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n"; 24static const char version[] =
25"sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
25 26
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/stddef.h> 28#include <linux/stddef.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index a8b80c56ac25..73efdb05a490 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
307 307
308/* 308/*
309 * atl1c_read_phy_core 309 * atl1c_read_phy_core
310 * core function to read register in PHY via MDIO control regsiter. 310 * core function to read register in PHY via MDIO control register.
311 * ext: extension register (see IEEE 802.3) 311 * ext: extension register (see IEEE 802.3)
312 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) 312 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
313 * reg: reg to read 313 * reg: reg to read
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index cea6bdcde33f..8baf9d3eb4b1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1591,7 +1591,7 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1591 if (rc != 0) { 1591 if (rc != 0) {
1592 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); 1592 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1593 1593
1594 /* Calling function should not diffrentiate between this case 1594 /* Calling function should not differentiate between this case
1595 * and the case in which there is already a pending ramrod 1595 * and the case in which there is already a pending ramrod
1596 */ 1596 */
1597 rc = 1; 1597 rc = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index 920d918ed193..f04e81f33795 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -41,9 +41,6 @@
41 41
42#define VALIDATE_TID 1 42#define VALIDATE_TID 1
43 43
44void *cxgb_alloc_mem(unsigned long size);
45void cxgb_free_mem(void *addr);
46
47/* 44/*
48 * Map an ATID or STID to their entries in the corresponding TID tables. 45 * Map an ATID or STID to their entries in the corresponding TID tables.
49 */ 46 */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76684dcb874c..fa81445e334c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1152,27 +1152,6 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
1152} 1152}
1153 1153
1154/* 1154/*
1155 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1156 * The allocated memory is cleared.
1157 */
1158void *cxgb_alloc_mem(unsigned long size)
1159{
1160 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1161
1162 if (!p)
1163 p = vzalloc(size);
1164 return p;
1165}
1166
1167/*
1168 * Free memory allocated through t3_alloc_mem().
1169 */
1170void cxgb_free_mem(void *addr)
1171{
1172 kvfree(addr);
1173}
1174
1175/*
1176 * Allocate and initialize the TID tables. Returns 0 on success. 1155 * Allocate and initialize the TID tables. Returns 0 on success.
1177 */ 1156 */
1178static int init_tid_tabs(struct tid_info *t, unsigned int ntids, 1157static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
@@ -1182,7 +1161,7 @@ static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1182 unsigned long size = ntids * sizeof(*t->tid_tab) + 1161 unsigned long size = ntids * sizeof(*t->tid_tab) +
1183 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); 1162 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1184 1163
1185 t->tid_tab = cxgb_alloc_mem(size); 1164 t->tid_tab = kvzalloc(size, GFP_KERNEL);
1186 if (!t->tid_tab) 1165 if (!t->tid_tab)
1187 return -ENOMEM; 1166 return -ENOMEM;
1188 1167
@@ -1218,7 +1197,7 @@ static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1218 1197
1219static void free_tid_maps(struct tid_info *t) 1198static void free_tid_maps(struct tid_info *t)
1220{ 1199{
1221 cxgb_free_mem(t->tid_tab); 1200 kvfree(t->tid_tab);
1222} 1201}
1223 1202
1224static inline void add_adapter(struct adapter *adap) 1203static inline void add_adapter(struct adapter *adap)
@@ -1293,7 +1272,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1293 return 0; 1272 return 0;
1294 1273
1295out_free_l2t: 1274out_free_l2t:
1296 t3_free_l2t(l2td); 1275 kvfree(l2td);
1297out_free: 1276out_free:
1298 kfree(t); 1277 kfree(t);
1299 return err; 1278 return err;
@@ -1302,7 +1281,7 @@ out_free:
1302static void clean_l2_data(struct rcu_head *head) 1281static void clean_l2_data(struct rcu_head *head)
1303{ 1282{
1304 struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); 1283 struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
1305 t3_free_l2t(d); 1284 kvfree(d);
1306} 1285}
1307 1286
1308 1287
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 52063587e1e9..26264125865f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -444,7 +444,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
444 struct l2t_data *d; 444 struct l2t_data *d;
445 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); 445 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
446 446
447 d = cxgb_alloc_mem(size); 447 d = kvzalloc(size, GFP_KERNEL);
448 if (!d) 448 if (!d)
449 return NULL; 449 return NULL;
450 450
@@ -462,9 +462,3 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
462 } 462 }
463 return d; 463 return d;
464} 464}
465
466void t3_free_l2t(struct l2t_data *d)
467{
468 cxgb_free_mem(d);
469}
470
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 8cffcdfd5678..c2fd323c4078 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -115,7 +115,6 @@ int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
115 struct l2t_entry *e); 115 struct l2t_entry *e);
116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); 116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
117struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); 117struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
118void t3_free_l2t(struct l2t_data *d);
119 118
120int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); 119int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
121 120
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 7ad43af6bde1..3103ef9b561d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -290,8 +290,8 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
290 if (clipt_size < CLIPT_MIN_HASH_BUCKETS) 290 if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
291 return NULL; 291 return NULL;
292 292
293 ctbl = t4_alloc_mem(sizeof(*ctbl) + 293 ctbl = kvzalloc(sizeof(*ctbl) +
294 clipt_size*sizeof(struct list_head)); 294 clipt_size*sizeof(struct list_head), GFP_KERNEL);
295 if (!ctbl) 295 if (!ctbl)
296 return NULL; 296 return NULL;
297 297
@@ -305,9 +305,9 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
305 for (i = 0; i < ctbl->clipt_size; ++i) 305 for (i = 0; i < ctbl->clipt_size; ++i)
306 INIT_LIST_HEAD(&ctbl->hash_list[i]); 306 INIT_LIST_HEAD(&ctbl->hash_list[i]);
307 307
308 cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry)); 308 cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL);
309 if (!cl_list) { 309 if (!cl_list) {
310 t4_free_mem(ctbl); 310 kvfree(ctbl);
311 return NULL; 311 return NULL;
312 } 312 }
313 ctbl->cl_list = (void *)cl_list; 313 ctbl->cl_list = (void *)cl_list;
@@ -326,8 +326,8 @@ void t4_cleanup_clip_tbl(struct adapter *adap)
326 326
327 if (ctbl) { 327 if (ctbl) {
328 if (ctbl->cl_list) 328 if (ctbl->cl_list)
329 t4_free_mem(ctbl->cl_list); 329 kvfree(ctbl->cl_list);
330 t4_free_mem(ctbl); 330 kvfree(ctbl);
331 } 331 }
332} 332}
333EXPORT_SYMBOL(t4_cleanup_clip_tbl); 333EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 163543b1ea0b..1d2be2dd19dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1184,8 +1184,6 @@ extern const char cxgb4_driver_version[];
1184void t4_os_portmod_changed(const struct adapter *adap, int port_id); 1184void t4_os_portmod_changed(const struct adapter *adap, int port_id);
1185void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); 1185void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
1186 1186
1187void *t4_alloc_mem(size_t size);
1188
1189void t4_free_sge_resources(struct adapter *adap); 1187void t4_free_sge_resources(struct adapter *adap);
1190void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); 1188void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
1191irq_handler_t t4_intr_handler(struct adapter *adap); 1189irq_handler_t t4_intr_handler(struct adapter *adap);
@@ -1557,7 +1555,6 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
1557 int rateunit, int ratemode, int channel, int class, 1555 int rateunit, int ratemode, int channel, int class,
1558 int minrate, int maxrate, int weight, int pktsize); 1556 int minrate, int maxrate, int weight, int pktsize);
1559void t4_sge_decode_idma_state(struct adapter *adapter, int state); 1557void t4_sge_decode_idma_state(struct adapter *adapter, int state);
1560void t4_free_mem(void *addr);
1561void t4_idma_monitor_init(struct adapter *adapter, 1558void t4_idma_monitor_init(struct adapter *adapter,
1562 struct sge_idma_monitor_state *idma); 1559 struct sge_idma_monitor_state *idma);
1563void t4_idma_monitor(struct adapter *adapter, 1560void t4_idma_monitor(struct adapter *adapter,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index f6e739da7bb7..1fa34b009891 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2634,7 +2634,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2634 if (count > avail - pos) 2634 if (count > avail - pos)
2635 count = avail - pos; 2635 count = avail - pos;
2636 2636
2637 data = t4_alloc_mem(count); 2637 data = kvzalloc(count, GFP_KERNEL);
2638 if (!data) 2638 if (!data)
2639 return -ENOMEM; 2639 return -ENOMEM;
2640 2640
@@ -2642,12 +2642,12 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2642 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); 2642 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
2643 spin_unlock(&adap->win0_lock); 2643 spin_unlock(&adap->win0_lock);
2644 if (ret) { 2644 if (ret) {
2645 t4_free_mem(data); 2645 kvfree(data);
2646 return ret; 2646 return ret;
2647 } 2647 }
2648 ret = copy_to_user(buf, data, count); 2648 ret = copy_to_user(buf, data, count);
2649 2649
2650 t4_free_mem(data); 2650 kvfree(data);
2651 if (ret) 2651 if (ret)
2652 return -EFAULT; 2652 return -EFAULT;
2653 2653
@@ -2753,7 +2753,7 @@ static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
2753 adap->sge.egr_sz, adap->sge.blocked_fl); 2753 adap->sge.egr_sz, adap->sge.blocked_fl);
2754 len += sprintf(buf + len, "\n"); 2754 len += sprintf(buf + len, "\n");
2755 size = simple_read_from_buffer(ubuf, count, ppos, buf, len); 2755 size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
2756 t4_free_mem(buf); 2756 kvfree(buf);
2757 return size; 2757 return size;
2758} 2758}
2759 2759
@@ -2773,7 +2773,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
2773 return err; 2773 return err;
2774 2774
2775 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); 2775 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
2776 t4_free_mem(t); 2776 kvfree(t);
2777 return count; 2777 return count;
2778} 2778}
2779 2779
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 02f80febeb91..0ba7866c8259 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -969,7 +969,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
969{ 969{
970 int i, err = 0; 970 int i, err = 0;
971 struct adapter *adapter = netdev2adap(dev); 971 struct adapter *adapter = netdev2adap(dev);
972 u8 *buf = t4_alloc_mem(EEPROMSIZE); 972 u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
973 973
974 if (!buf) 974 if (!buf)
975 return -ENOMEM; 975 return -ENOMEM;
@@ -980,7 +980,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
980 980
981 if (!err) 981 if (!err)
982 memcpy(data, buf + e->offset, e->len); 982 memcpy(data, buf + e->offset, e->len);
983 t4_free_mem(buf); 983 kvfree(buf);
984 return err; 984 return err;
985} 985}
986 986
@@ -1009,7 +1009,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1009 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 1009 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1010 /* RMW possibly needed for first or last words. 1010 /* RMW possibly needed for first or last words.
1011 */ 1011 */
1012 buf = t4_alloc_mem(aligned_len); 1012 buf = kvzalloc(aligned_len, GFP_KERNEL);
1013 if (!buf) 1013 if (!buf)
1014 return -ENOMEM; 1014 return -ENOMEM;
1015 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); 1015 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -1037,7 +1037,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1037 err = t4_seeprom_wp(adapter, true); 1037 err = t4_seeprom_wp(adapter, true);
1038out: 1038out:
1039 if (buf != data) 1039 if (buf != data)
1040 t4_free_mem(buf); 1040 kvfree(buf);
1041 return err; 1041 return err;
1042} 1042}
1043 1043
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c12c4a3b82b5..38a5c6764bb5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -880,27 +880,6 @@ freeout:
880 return err; 880 return err;
881} 881}
882 882
883/*
884 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
885 * The allocated memory is cleared.
886 */
887void *t4_alloc_mem(size_t size)
888{
889 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
890
891 if (!p)
892 p = vzalloc(size);
893 return p;
894}
895
896/*
897 * Free memory allocated through alloc_mem().
898 */
899void t4_free_mem(void *addr)
900{
901 kvfree(addr);
902}
903
904static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, 883static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
905 void *accel_priv, select_queue_fallback_t fallback) 884 void *accel_priv, select_queue_fallback_t fallback)
906{ 885{
@@ -1299,7 +1278,7 @@ static int tid_init(struct tid_info *t)
1299 max_ftids * sizeof(*t->ftid_tab) + 1278 max_ftids * sizeof(*t->ftid_tab) +
1300 ftid_bmap_size * sizeof(long); 1279 ftid_bmap_size * sizeof(long);
1301 1280
1302 t->tid_tab = t4_alloc_mem(size); 1281 t->tid_tab = kvzalloc(size, GFP_KERNEL);
1303 if (!t->tid_tab) 1282 if (!t->tid_tab)
1304 return -ENOMEM; 1283 return -ENOMEM;
1305 1284
@@ -3445,7 +3424,7 @@ static int adap_init0(struct adapter *adap)
3445 /* allocate memory to read the header of the firmware on the 3424 /* allocate memory to read the header of the firmware on the
3446 * card 3425 * card
3447 */ 3426 */
3448 card_fw = t4_alloc_mem(sizeof(*card_fw)); 3427 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
3449 3428
3450 /* Get FW from /lib/firmware/ */ 3429 /* Get FW from /lib/firmware/ */
3451 ret = request_firmware(&fw, fw_info->fw_mod_name, 3430 ret = request_firmware(&fw, fw_info->fw_mod_name,
@@ -3465,7 +3444,7 @@ static int adap_init0(struct adapter *adap)
3465 3444
3466 /* Cleaning up */ 3445 /* Cleaning up */
3467 release_firmware(fw); 3446 release_firmware(fw);
3468 t4_free_mem(card_fw); 3447 kvfree(card_fw);
3469 3448
3470 if (ret < 0) 3449 if (ret < 0)
3471 goto bye; 3450 goto bye;
@@ -4470,9 +4449,9 @@ static void free_some_resources(struct adapter *adapter)
4470{ 4449{
4471 unsigned int i; 4450 unsigned int i;
4472 4451
4473 t4_free_mem(adapter->l2t); 4452 kvfree(adapter->l2t);
4474 t4_cleanup_sched(adapter); 4453 t4_cleanup_sched(adapter);
4475 t4_free_mem(adapter->tids.tid_tab); 4454 kvfree(adapter->tids.tid_tab);
4476 cxgb4_cleanup_tc_u32(adapter); 4455 cxgb4_cleanup_tc_u32(adapter);
4477 kfree(adapter->sge.egr_map); 4456 kfree(adapter->sge.egr_map);
4478 kfree(adapter->sge.ingr_map); 4457 kfree(adapter->sge.ingr_map);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a1b19422b339..ef06ce8247ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -432,9 +432,9 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
432 for (i = 0; i < t->size; i++) { 432 for (i = 0; i < t->size; i++) {
433 struct cxgb4_link *link = &t->table[i]; 433 struct cxgb4_link *link = &t->table[i];
434 434
435 t4_free_mem(link->tid_map); 435 kvfree(link->tid_map);
436 } 436 }
437 t4_free_mem(adap->tc_u32); 437 kvfree(adap->tc_u32);
438} 438}
439 439
440struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) 440struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
@@ -446,8 +446,8 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
446 if (!max_tids) 446 if (!max_tids)
447 return NULL; 447 return NULL;
448 448
449 t = t4_alloc_mem(sizeof(*t) + 449 t = kvzalloc(sizeof(*t) +
450 (max_tids * sizeof(struct cxgb4_link))); 450 (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
451 if (!t) 451 if (!t)
452 return NULL; 452 return NULL;
453 453
@@ -458,7 +458,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
458 unsigned int bmap_size; 458 unsigned int bmap_size;
459 459
460 bmap_size = BITS_TO_LONGS(max_tids); 460 bmap_size = BITS_TO_LONGS(max_tids);
461 link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size); 461 link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL);
462 if (!link->tid_map) 462 if (!link->tid_map)
463 goto out_no_mem; 463 goto out_no_mem;
464 bitmap_zero(link->tid_map, max_tids); 464 bitmap_zero(link->tid_map, max_tids);
@@ -471,11 +471,11 @@ out_no_mem:
471 struct cxgb4_link *link = &t->table[i]; 471 struct cxgb4_link *link = &t->table[i];
472 472
473 if (link->tid_map) 473 if (link->tid_map)
474 t4_free_mem(link->tid_map); 474 kvfree(link->tid_map);
475 } 475 }
476 476
477 if (t) 477 if (t)
478 t4_free_mem(t); 478 kvfree(t);
479 479
480 return NULL; 480 return NULL;
481} 481}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 7c8c5b9a3c22..6f3692db29af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
646 if (l2t_size < L2T_MIN_HASH_BUCKETS) 646 if (l2t_size < L2T_MIN_HASH_BUCKETS)
647 return NULL; 647 return NULL;
648 648
649 d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry)); 649 d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
650 if (!d) 650 if (!d)
651 return NULL; 651 return NULL;
652 652
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index c9026352a842..02acff741f11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -177,7 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
177 } 177 }
178 178
179 list_del(&qe->list); 179 list_del(&qe->list);
180 t4_free_mem(qe); 180 kvfree(qe);
181 if (atomic_dec_and_test(&e->refcnt)) { 181 if (atomic_dec_and_test(&e->refcnt)) {
182 e->state = SCHED_STATE_UNUSED; 182 e->state = SCHED_STATE_UNUSED;
183 memset(&e->info, 0, sizeof(e->info)); 183 memset(&e->info, 0, sizeof(e->info));
@@ -201,7 +201,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
201 if (p->queue < 0 || p->queue >= pi->nqsets) 201 if (p->queue < 0 || p->queue >= pi->nqsets)
202 return -ERANGE; 202 return -ERANGE;
203 203
204 qe = t4_alloc_mem(sizeof(struct sched_queue_entry)); 204 qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
205 if (!qe) 205 if (!qe)
206 return -ENOMEM; 206 return -ENOMEM;
207 207
@@ -211,7 +211,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
211 /* Unbind queue from any existing class */ 211 /* Unbind queue from any existing class */
212 err = t4_sched_queue_unbind(pi, p); 212 err = t4_sched_queue_unbind(pi, p);
213 if (err) { 213 if (err) {
214 t4_free_mem(qe); 214 kvfree(qe);
215 goto out; 215 goto out;
216 } 216 }
217 217
@@ -224,7 +224,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
224 spin_lock(&e->lock); 224 spin_lock(&e->lock);
225 err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); 225 err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
226 if (err) { 226 if (err) {
227 t4_free_mem(qe); 227 kvfree(qe);
228 spin_unlock(&e->lock); 228 spin_unlock(&e->lock);
229 goto out; 229 goto out;
230 } 230 }
@@ -512,7 +512,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
512 struct sched_table *s; 512 struct sched_table *s;
513 unsigned int i; 513 unsigned int i;
514 514
515 s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class)); 515 s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
516 if (!s) 516 if (!s)
517 return NULL; 517 return NULL;
518 518
@@ -548,6 +548,6 @@ void t4_cleanup_sched(struct adapter *adap)
548 t4_sched_class_free(pi, e); 548 t4_sched_class_free(pi, e);
549 write_unlock(&s->rw_lock); 549 write_unlock(&s->rw_lock);
550 } 550 }
551 t4_free_mem(s); 551 kvfree(s);
552 } 552 }
553} 553}
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index b600fbbbf679..f910f0f386d6 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -56,7 +56,7 @@
56 local_irq_{dis,en}able() 56 local_irq_{dis,en}able()
57*/ 57*/
58 58
59static char *version = 59static const char version[] =
60"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n"; 60"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n";
61 61
62/* ======================= configure the driver here ======================= */ 62/* ======================= configure the driver here ======================= */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 24dfba53a0f2..bbc0a98e7ca3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -405,7 +405,7 @@ struct mac_driver {
405}; 405};
406 406
407struct mac_stats_string { 407struct mac_stats_string {
408 char desc[ETH_GSTRING_LEN]; 408 const char desc[ETH_GSTRING_LEN];
409 unsigned long offset; 409 unsigned long offset;
410}; 410};
411 411
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 68812d783f33..413025bdcb50 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -127,7 +127,7 @@ out:
127 * @offset: register offset to be read 127 * @offset: register offset to be read
128 * @data: pointer to the read data 128 * @data: pointer to the read data
129 * 129 *
130 * Reads the MDI control regsiter in the PHY at offset and stores the 130 * Reads the MDI control register in the PHY at offset and stores the
131 * information read to data. 131 * information read to data.
132 **/ 132 **/
133s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) 133s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ba89bc43d74..6ffd1849a604 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -70,13 +70,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
70 ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; 70 ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
71 71
72 tmp = size * sizeof(struct mlx4_en_tx_info); 72 tmp = size * sizeof(struct mlx4_en_tx_info);
73 ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); 73 ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
74 if (!ring->tx_info) { 74 if (!ring->tx_info) {
75 ring->tx_info = vmalloc(tmp); 75 err = -ENOMEM;
76 if (!ring->tx_info) { 76 goto err_ring;
77 err = -ENOMEM;
78 goto err_ring;
79 }
80 } 77 }
81 78
82 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", 79 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index db65f72879e9..ce852ca22a96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -115,12 +115,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
115 115
116 for (i = 0; i <= buddy->max_order; ++i) { 116 for (i = 0; i <= buddy->max_order; ++i) {
117 s = BITS_TO_LONGS(1 << (buddy->max_order - i)); 117 s = BITS_TO_LONGS(1 << (buddy->max_order - i));
118 buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); 118 buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
119 if (!buddy->bits[i]) { 119 if (!buddy->bits[i])
120 buddy->bits[i] = vzalloc(s * sizeof(long)); 120 goto err_out_free;
121 if (!buddy->bits[i])
122 goto err_out_free;
123 }
124 } 121 }
125 122
126 set_bit(0, buddy->bits[buddy->max_order]); 123 set_bit(0, buddy->bits[buddy->max_order]);
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 07091dd27e5d..7b0a8db57af9 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -444,7 +444,7 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
444 (entry * SIZEOF_SONIC_RR) + offset); 444 (entry * SIZEOF_SONIC_RR) + offset);
445} 445}
446 446
447static const char *version = 447static const char version[] =
448 "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; 448 "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
449 449
450#endif /* SONIC_H */ 450#endif /* SONIC_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 0ed24d6e6c65..40f057edeafc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -3058,7 +3058,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3058 3058
3059 /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect 3059 /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
3060 * the number of VF SBs [especially for first VF on engine, as we can't 3060 * the number of VF SBs [especially for first VF on engine, as we can't
3061 * diffrentiate between empty entries and its entries]. 3061 * differentiate between empty entries and its entries].
3062 * Since we don't really support more SBs than VFs today, prevent any 3062 * Since we don't really support more SBs than VFs today, prevent any
3063 * such configuration by sanitizing the number of SBs to equal the 3063 * such configuration by sanitizing the number of SBs to equal the
3064 * number of VFs. 3064 * number of VFs.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b7ad36b91e12..c67ff1411799 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -978,7 +978,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
978 if (rc) 978 if (rc)
979 goto err2; 979 goto err2;
980 980
981 /* First Dword used to diffrentiate between various sources */ 981 /* First Dword used to differentiate between various sources */
982 data = cdev->firmware->data + sizeof(u32); 982 data = cdev->firmware->data + sizeof(u32);
983 983
984 qed_dbg_pf_init(cdev); 984 qed_dbg_pf_init(cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index d5df29f787c5..f5ed54d611ec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -625,7 +625,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
625 * - If !ARI, VFs would start on next device. 625 * - If !ARI, VFs would start on next device.
626 * so offset - (256 - pf_id) would provide the number. 626 * so offset - (256 - pf_id) would provide the number.
627 * Utilize the fact that (256 - pf_id) is achieved only by later 627 * Utilize the fact that (256 - pf_id) is achieved only by later
628 * to diffrentiate between the two. 628 * to differentiate between the two.
629 */ 629 */
630 630
631 if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { 631 if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index d54490d3f7ad..1e594351a60f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -387,7 +387,7 @@ static void sxgbe_free_rx_buffers(struct net_device *dev,
387/** 387/**
388 * init_tx_ring - init the TX descriptor ring 388 * init_tx_ring - init the TX descriptor ring
389 * @dev: net device structure 389 * @dev: net device structure
390 * @tx_ring: ring to be intialised 390 * @tx_ring: ring to be initialised
391 * @tx_rsize: ring size 391 * @tx_rsize: ring size
392 * Description: this function initializes the DMA TX descriptor 392 * Description: this function initializes the DMA TX descriptor
393 */ 393 */
@@ -437,7 +437,7 @@ dmamem_err:
437/** 437/**
438 * free_rx_ring - free the RX descriptor ring 438 * free_rx_ring - free the RX descriptor ring
439 * @dev: net device structure 439 * @dev: net device structure
440 * @rx_ring: ring to be intialised 440 * @rx_ring: ring to be initialised
441 * @rx_rsize: ring size 441 * @rx_rsize: ring size
442 * Description: this function initializes the DMA RX descriptor 442 * Description: this function initializes the DMA RX descriptor
443 */ 443 */
@@ -453,7 +453,7 @@ static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
453/** 453/**
454 * init_rx_ring - init the RX descriptor ring 454 * init_rx_ring - init the RX descriptor ring
455 * @dev: net device structure 455 * @dev: net device structure
456 * @rx_ring: ring to be intialised 456 * @rx_ring: ring to be initialised
457 * @rx_rsize: ring size 457 * @rx_rsize: ring size
458 * Description: this function initializes the DMA RX descriptor 458 * Description: this function initializes the DMA RX descriptor
459 */ 459 */
@@ -539,7 +539,7 @@ err_free_dma_rx:
539/** 539/**
540 * free_tx_ring - free the TX descriptor ring 540 * free_tx_ring - free the TX descriptor ring
541 * @dev: net device structure 541 * @dev: net device structure
542 * @tx_ring: ring to be intialised 542 * @tx_ring: ring to be initialised
543 * @tx_rsize: ring size 543 * @tx_rsize: ring size
544 * Description: this function initializes the DMA TX descriptor 544 * Description: this function initializes the DMA TX descriptor
545 */ 545 */
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 3dadee1080b9..d9db8a06afd2 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25#define DRV_VERSION "1.39" 25#define DRV_VERSION "1.39"
26static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 26static const char version[] = "tc35815.c:v" DRV_VERSION "\n";
27#define MODNAME "tc35815" 27#define MODNAME "tc35815"
28 28
29#include <linux/module.h> 29#include <linux/module.h>
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index b0de8ecd7fe8..f4a816cf012a 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -228,7 +228,7 @@
228#define DRV_VERSION "v1.11" 228#define DRV_VERSION "v1.11"
229#define DRV_RELDATE "2014/07/01" 229#define DRV_RELDATE "2014/07/01"
230 230
231static char version[] = 231static const char version[] =
232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE 232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 " Lawrence V. Stefani and others\n"; 233 " Lawrence V. Stefani and others\n";
234 234
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index dd7fc6659ad4..9b0d6148e994 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -60,7 +60,8 @@ MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
60MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver"); 60MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
61MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62 62
63static char version[] = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n"; 63static const char version[] =
64"rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
64 65
65 66
66static const struct net_device_ops rr_netdev_ops = { 67static const struct net_device_ops rr_netdev_ops = {
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index fac1e9fbd11d..9852a3355509 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -106,10 +106,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
106 return -ENXIO; 106 return -ENXIO;
107 } 107 }
108 108
109 ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL); 109 ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
110 if (!ndd->data)
111 ndd->data = vmalloc(ndd->nsarea.config_size);
112
113 if (!ndd->data) 110 if (!ndd->data)
114 return -ENOMEM; 111 return -ENOMEM;
115 112
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 97f3ceb8d724..63468cfe3e4a 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -652,7 +652,7 @@ struct scu_iit_entry {
652 652
653 653
654/* 654/*
655 * TODO: Where is the SAS_LNKTOV regsiter? 655 * TODO: Where is the SAS_LNKTOV register?
656 * TODO: Where is the SAS_PHYTOV register? */ 656 * TODO: Where is the SAS_PHYTOV register? */
657 657
658#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1) 658#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
@@ -1827,7 +1827,7 @@ struct scu_peg_registers {
1827}; 1827};
1828 1828
1829/** 1829/**
1830 * struct scu_registers - SCU regsiters including both PEG registers if we turn 1830 * struct scu_registers - SCU registers including both PEG registers if we turn
1831 * on that compile option. All of these registers are in the memory mapped 1831 * on that compile option. All of these registers are in the memory mapped
1832 * space returned from BAR1. 1832 * space returned from BAR1.
1833 * 1833 *
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bbea8eac9abb..4842fc0e809d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -30,6 +30,7 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/inet.h> 31#include <linux/inet.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/sched/mm.h>
33#include <linux/file.h> 34#include <linux/file.h>
34#include <linux/blkdev.h> 35#include <linux/blkdev.h>
35#include <linux/delay.h> 36#include <linux/delay.h>
@@ -371,10 +372,10 @@ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
371static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) 372static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
372{ 373{
373 struct iscsi_conn *conn = task->conn; 374 struct iscsi_conn *conn = task->conn;
374 unsigned long pflags = current->flags; 375 unsigned int noreclaim_flag;
375 int rc = 0; 376 int rc = 0;
376 377
377 current->flags |= PF_MEMALLOC; 378 noreclaim_flag = memalloc_noreclaim_save();
378 379
379 while (iscsi_sw_tcp_xmit_qlen(conn)) { 380 while (iscsi_sw_tcp_xmit_qlen(conn)) {
380 rc = iscsi_sw_tcp_xmit(conn); 381 rc = iscsi_sw_tcp_xmit(conn);
@@ -387,7 +388,7 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
387 rc = 0; 388 rc = 0;
388 } 389 }
389 390
390 current_restore_flags(pflags, PF_MEMALLOC); 391 memalloc_noreclaim_restore(noreclaim_flag);
391 return rc; 392 return rc;
392} 393}
393 394
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0016f12cc563..316c3df0c3fd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -244,7 +244,7 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
244 * @instance: Adapter soft state 244 * @instance: Adapter soft state
245 * @cmd: Command packet to be returned to free command pool 245 * @cmd: Command packet to be returned to free command pool
246 */ 246 */
247inline void 247void
248megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 248megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
249{ 249{
250 unsigned long flags; 250 unsigned long flags;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8981806fb13f..099ab4ca7edf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1421,7 +1421,7 @@ void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
1421 Mpi2EventNotificationReply_t *mpi_reply); 1421 Mpi2EventNotificationReply_t *mpi_reply);
1422 1422
1423void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, 1423void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
1424 u8 bits_to_regsiter); 1424 u8 bits_to_register);
1425int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, 1425int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1426 u8 *issue_reset); 1426 u8 *issue_reset);
1427 1427
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 8ff5d4ec9e5c..f99d4219b01e 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1987,22 +1987,24 @@ static struct ssi_hash_template driver_hash[] = {
1987 .hmac_driver_name = "hmac-sha1-dx", 1987 .hmac_driver_name = "hmac-sha1-dx",
1988 .blocksize = SHA1_BLOCK_SIZE, 1988 .blocksize = SHA1_BLOCK_SIZE,
1989 .synchronize = false, 1989 .synchronize = false,
1990 .template_ahash = { 1990 {
1991 .init = ssi_ahash_init, 1991 .template_ahash = {
1992 .update = ssi_ahash_update, 1992 .init = ssi_ahash_init,
1993 .final = ssi_ahash_final, 1993 .update = ssi_ahash_update,
1994 .finup = ssi_ahash_finup, 1994 .final = ssi_ahash_final,
1995 .digest = ssi_ahash_digest, 1995 .finup = ssi_ahash_finup,
1996 .digest = ssi_ahash_digest,
1996#ifdef EXPORT_FIXED 1997#ifdef EXPORT_FIXED
1997 .export = ssi_ahash_export, 1998 .export = ssi_ahash_export,
1998 .import = ssi_ahash_import, 1999 .import = ssi_ahash_import,
1999#endif 2000#endif
2000 .setkey = ssi_ahash_setkey, 2001 .setkey = ssi_ahash_setkey,
2001 .halg = { 2002 .halg = {
2002 .digestsize = SHA1_DIGEST_SIZE, 2003 .digestsize = SHA1_DIGEST_SIZE,
2003 .statesize = sizeof(struct sha1_state), 2004 .statesize = sizeof(struct sha1_state),
2005 },
2004 }, 2006 },
2005 }, 2007 },
2006 .hash_mode = DRV_HASH_SHA1, 2008 .hash_mode = DRV_HASH_SHA1,
2007 .hw_mode = DRV_HASH_HW_SHA1, 2009 .hw_mode = DRV_HASH_HW_SHA1,
2008 .inter_digestsize = SHA1_DIGEST_SIZE, 2010 .inter_digestsize = SHA1_DIGEST_SIZE,
@@ -2014,22 +2016,24 @@ static struct ssi_hash_template driver_hash[] = {
2014 .hmac_driver_name = "hmac-sha256-dx", 2016 .hmac_driver_name = "hmac-sha256-dx",
2015 .blocksize = SHA256_BLOCK_SIZE, 2017 .blocksize = SHA256_BLOCK_SIZE,
2016 .synchronize = false, 2018 .synchronize = false,
2017 .template_ahash = { 2019 {
2018 .init = ssi_ahash_init, 2020 .template_ahash = {
2019 .update = ssi_ahash_update, 2021 .init = ssi_ahash_init,
2020 .final = ssi_ahash_final, 2022 .update = ssi_ahash_update,
2021 .finup = ssi_ahash_finup, 2023 .final = ssi_ahash_final,
2022 .digest = ssi_ahash_digest, 2024 .finup = ssi_ahash_finup,
2025 .digest = ssi_ahash_digest,
2023#ifdef EXPORT_FIXED 2026#ifdef EXPORT_FIXED
2024 .export = ssi_ahash_export, 2027 .export = ssi_ahash_export,
2025 .import = ssi_ahash_import, 2028 .import = ssi_ahash_import,
2026#endif 2029#endif
2027 .setkey = ssi_ahash_setkey, 2030 .setkey = ssi_ahash_setkey,
2028 .halg = { 2031 .halg = {
2029 .digestsize = SHA256_DIGEST_SIZE, 2032 .digestsize = SHA256_DIGEST_SIZE,
2030 .statesize = sizeof(struct sha256_state), 2033 .statesize = sizeof(struct sha256_state),
2034 },
2031 }, 2035 },
2032 }, 2036 },
2033 .hash_mode = DRV_HASH_SHA256, 2037 .hash_mode = DRV_HASH_SHA256,
2034 .hw_mode = DRV_HASH_HW_SHA256, 2038 .hw_mode = DRV_HASH_HW_SHA256,
2035 .inter_digestsize = SHA256_DIGEST_SIZE, 2039 .inter_digestsize = SHA256_DIGEST_SIZE,
@@ -2041,22 +2045,24 @@ static struct ssi_hash_template driver_hash[] = {
2041 .hmac_driver_name = "hmac-sha224-dx", 2045 .hmac_driver_name = "hmac-sha224-dx",
2042 .blocksize = SHA224_BLOCK_SIZE, 2046 .blocksize = SHA224_BLOCK_SIZE,
2043 .synchronize = false, 2047 .synchronize = false,
2044 .template_ahash = { 2048 {
2045 .init = ssi_ahash_init, 2049 .template_ahash = {
2046 .update = ssi_ahash_update, 2050 .init = ssi_ahash_init,
2047 .final = ssi_ahash_final, 2051 .update = ssi_ahash_update,
2048 .finup = ssi_ahash_finup, 2052 .final = ssi_ahash_final,
2049 .digest = ssi_ahash_digest, 2053 .finup = ssi_ahash_finup,
2054 .digest = ssi_ahash_digest,
2050#ifdef EXPORT_FIXED 2055#ifdef EXPORT_FIXED
2051 .export = ssi_ahash_export, 2056 .export = ssi_ahash_export,
2052 .import = ssi_ahash_import, 2057 .import = ssi_ahash_import,
2053#endif 2058#endif
2054 .setkey = ssi_ahash_setkey, 2059 .setkey = ssi_ahash_setkey,
2055 .halg = { 2060 .halg = {
2056 .digestsize = SHA224_DIGEST_SIZE, 2061 .digestsize = SHA224_DIGEST_SIZE,
2057 .statesize = sizeof(struct sha256_state), 2062 .statesize = sizeof(struct sha256_state),
2063 },
2058 }, 2064 },
2059 }, 2065 },
2060 .hash_mode = DRV_HASH_SHA224, 2066 .hash_mode = DRV_HASH_SHA224,
2061 .hw_mode = DRV_HASH_HW_SHA256, 2067 .hw_mode = DRV_HASH_HW_SHA256,
2062 .inter_digestsize = SHA256_DIGEST_SIZE, 2068 .inter_digestsize = SHA256_DIGEST_SIZE,
@@ -2069,22 +2075,24 @@ static struct ssi_hash_template driver_hash[] = {
2069 .hmac_driver_name = "hmac-sha384-dx", 2075 .hmac_driver_name = "hmac-sha384-dx",
2070 .blocksize = SHA384_BLOCK_SIZE, 2076 .blocksize = SHA384_BLOCK_SIZE,
2071 .synchronize = false, 2077 .synchronize = false,
2072 .template_ahash = { 2078 {
2073 .init = ssi_ahash_init, 2079 .template_ahash = {
2074 .update = ssi_ahash_update, 2080 .init = ssi_ahash_init,
2075 .final = ssi_ahash_final, 2081 .update = ssi_ahash_update,
2076 .finup = ssi_ahash_finup, 2082 .final = ssi_ahash_final,
2077 .digest = ssi_ahash_digest, 2083 .finup = ssi_ahash_finup,
2084 .digest = ssi_ahash_digest,
2078#ifdef EXPORT_FIXED 2085#ifdef EXPORT_FIXED
2079 .export = ssi_ahash_export, 2086 .export = ssi_ahash_export,
2080 .import = ssi_ahash_import, 2087 .import = ssi_ahash_import,
2081#endif 2088#endif
2082 .setkey = ssi_ahash_setkey, 2089 .setkey = ssi_ahash_setkey,
2083 .halg = { 2090 .halg = {
2084 .digestsize = SHA384_DIGEST_SIZE, 2091 .digestsize = SHA384_DIGEST_SIZE,
2085 .statesize = sizeof(struct sha512_state), 2092 .statesize = sizeof(struct sha512_state),
2093 },
2086 }, 2094 },
2087 }, 2095 },
2088 .hash_mode = DRV_HASH_SHA384, 2096 .hash_mode = DRV_HASH_SHA384,
2089 .hw_mode = DRV_HASH_HW_SHA512, 2097 .hw_mode = DRV_HASH_HW_SHA512,
2090 .inter_digestsize = SHA512_DIGEST_SIZE, 2098 .inter_digestsize = SHA512_DIGEST_SIZE,
@@ -2096,22 +2104,24 @@ static struct ssi_hash_template driver_hash[] = {
2096 .hmac_driver_name = "hmac-sha512-dx", 2104 .hmac_driver_name = "hmac-sha512-dx",
2097 .blocksize = SHA512_BLOCK_SIZE, 2105 .blocksize = SHA512_BLOCK_SIZE,
2098 .synchronize = false, 2106 .synchronize = false,
2099 .template_ahash = { 2107 {
2100 .init = ssi_ahash_init, 2108 .template_ahash = {
2101 .update = ssi_ahash_update, 2109 .init = ssi_ahash_init,
2102 .final = ssi_ahash_final, 2110 .update = ssi_ahash_update,
2103 .finup = ssi_ahash_finup, 2111 .final = ssi_ahash_final,
2104 .digest = ssi_ahash_digest, 2112 .finup = ssi_ahash_finup,
2113 .digest = ssi_ahash_digest,
2105#ifdef EXPORT_FIXED 2114#ifdef EXPORT_FIXED
2106 .export = ssi_ahash_export, 2115 .export = ssi_ahash_export,
2107 .import = ssi_ahash_import, 2116 .import = ssi_ahash_import,
2108#endif 2117#endif
2109 .setkey = ssi_ahash_setkey, 2118 .setkey = ssi_ahash_setkey,
2110 .halg = { 2119 .halg = {
2111 .digestsize = SHA512_DIGEST_SIZE, 2120 .digestsize = SHA512_DIGEST_SIZE,
2112 .statesize = sizeof(struct sha512_state), 2121 .statesize = sizeof(struct sha512_state),
2122 },
2113 }, 2123 },
2114 }, 2124 },
2115 .hash_mode = DRV_HASH_SHA512, 2125 .hash_mode = DRV_HASH_SHA512,
2116 .hw_mode = DRV_HASH_HW_SHA512, 2126 .hw_mode = DRV_HASH_HW_SHA512,
2117 .inter_digestsize = SHA512_DIGEST_SIZE, 2127 .inter_digestsize = SHA512_DIGEST_SIZE,
@@ -2124,22 +2134,24 @@ static struct ssi_hash_template driver_hash[] = {
2124 .hmac_driver_name = "hmac-md5-dx", 2134 .hmac_driver_name = "hmac-md5-dx",
2125 .blocksize = MD5_HMAC_BLOCK_SIZE, 2135 .blocksize = MD5_HMAC_BLOCK_SIZE,
2126 .synchronize = false, 2136 .synchronize = false,
2127 .template_ahash = { 2137 {
2128 .init = ssi_ahash_init, 2138 .template_ahash = {
2129 .update = ssi_ahash_update, 2139 .init = ssi_ahash_init,
2130 .final = ssi_ahash_final, 2140 .update = ssi_ahash_update,
2131 .finup = ssi_ahash_finup, 2141 .final = ssi_ahash_final,
2132 .digest = ssi_ahash_digest, 2142 .finup = ssi_ahash_finup,
2143 .digest = ssi_ahash_digest,
2133#ifdef EXPORT_FIXED 2144#ifdef EXPORT_FIXED
2134 .export = ssi_ahash_export, 2145 .export = ssi_ahash_export,
2135 .import = ssi_ahash_import, 2146 .import = ssi_ahash_import,
2136#endif 2147#endif
2137 .setkey = ssi_ahash_setkey, 2148 .setkey = ssi_ahash_setkey,
2138 .halg = { 2149 .halg = {
2139 .digestsize = MD5_DIGEST_SIZE, 2150 .digestsize = MD5_DIGEST_SIZE,
2140 .statesize = sizeof(struct md5_state), 2151 .statesize = sizeof(struct md5_state),
2152 },
2141 }, 2153 },
2142 }, 2154 },
2143 .hash_mode = DRV_HASH_MD5, 2155 .hash_mode = DRV_HASH_MD5,
2144 .hw_mode = DRV_HASH_HW_MD5, 2156 .hw_mode = DRV_HASH_HW_MD5,
2145 .inter_digestsize = MD5_DIGEST_SIZE, 2157 .inter_digestsize = MD5_DIGEST_SIZE,
@@ -2149,52 +2161,56 @@ static struct ssi_hash_template driver_hash[] = {
2149 .driver_name = "xcbc-aes-dx", 2161 .driver_name = "xcbc-aes-dx",
2150 .blocksize = AES_BLOCK_SIZE, 2162 .blocksize = AES_BLOCK_SIZE,
2151 .synchronize = false, 2163 .synchronize = false,
2152 .template_ahash = { 2164 {
2153 .init = ssi_ahash_init, 2165 .template_ahash = {
2154 .update = ssi_mac_update, 2166 .init = ssi_ahash_init,
2155 .final = ssi_mac_final, 2167 .update = ssi_mac_update,
2156 .finup = ssi_mac_finup, 2168 .final = ssi_mac_final,
2157 .digest = ssi_mac_digest, 2169 .finup = ssi_mac_finup,
2158 .setkey = ssi_xcbc_setkey, 2170 .digest = ssi_mac_digest,
2171 .setkey = ssi_xcbc_setkey,
2159#ifdef EXPORT_FIXED 2172#ifdef EXPORT_FIXED
2160 .export = ssi_ahash_export, 2173 .export = ssi_ahash_export,
2161 .import = ssi_ahash_import, 2174 .import = ssi_ahash_import,
2162#endif 2175#endif
2163 .halg = { 2176 .halg = {
2164 .digestsize = AES_BLOCK_SIZE, 2177 .digestsize = AES_BLOCK_SIZE,
2165 .statesize = sizeof(struct aeshash_state), 2178 .statesize = sizeof(struct aeshash_state),
2179 },
2166 }, 2180 },
2167 },
2168 .hash_mode = DRV_HASH_NULL,
2169 .hw_mode = DRV_CIPHER_XCBC_MAC,
2170 .inter_digestsize = AES_BLOCK_SIZE,
2171 }, 2181 },
2182 .hash_mode = DRV_HASH_NULL,
2183 .hw_mode = DRV_CIPHER_XCBC_MAC,
2184 .inter_digestsize = AES_BLOCK_SIZE,
2185 },
2172#if SSI_CC_HAS_CMAC 2186#if SSI_CC_HAS_CMAC
2173 { 2187 {
2174 .name = "cmac(aes)", 2188 .name = "cmac(aes)",
2175 .driver_name = "cmac-aes-dx", 2189 .driver_name = "cmac-aes-dx",
2176 .blocksize = AES_BLOCK_SIZE, 2190 .blocksize = AES_BLOCK_SIZE,
2177 .synchronize = false, 2191 .synchronize = false,
2178 .template_ahash = { 2192 {
2179 .init = ssi_ahash_init, 2193 .template_ahash = {
2180 .update = ssi_mac_update, 2194 .init = ssi_ahash_init,
2181 .final = ssi_mac_final, 2195 .update = ssi_mac_update,
2182 .finup = ssi_mac_finup, 2196 .final = ssi_mac_final,
2183 .digest = ssi_mac_digest, 2197 .finup = ssi_mac_finup,
2184 .setkey = ssi_cmac_setkey, 2198 .digest = ssi_mac_digest,
2199 .setkey = ssi_cmac_setkey,
2185#ifdef EXPORT_FIXED 2200#ifdef EXPORT_FIXED
2186 .export = ssi_ahash_export, 2201 .export = ssi_ahash_export,
2187 .import = ssi_ahash_import, 2202 .import = ssi_ahash_import,
2188#endif 2203#endif
2189 .halg = { 2204 .halg = {
2190 .digestsize = AES_BLOCK_SIZE, 2205 .digestsize = AES_BLOCK_SIZE,
2191 .statesize = sizeof(struct aeshash_state), 2206 .statesize = sizeof(struct aeshash_state),
2207 },
2192 }, 2208 },
2193 },
2194 .hash_mode = DRV_HASH_NULL,
2195 .hw_mode = DRV_CIPHER_CMAC,
2196 .inter_digestsize = AES_BLOCK_SIZE,
2197 }, 2209 },
2210 .hash_mode = DRV_HASH_NULL,
2211 .hw_mode = DRV_CIPHER_CMAC,
2212 .inter_digestsize = AES_BLOCK_SIZE,
2213 },
2198#endif 2214#endif
2199 2215
2200}; 2216};
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index a6a76a681ea9..8f638267e704 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -45,15 +45,6 @@ EXPORT_SYMBOL(libcfs_kvzalloc);
45void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size, 45void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
46 gfp_t flags) 46 gfp_t flags)
47{ 47{
48 void *ret; 48 return kvzalloc_node(size, flags, cfs_cpt_spread_node(cptab, cpt));
49
50 ret = kzalloc_node(size, flags | __GFP_NOWARN,
51 cfs_cpt_spread_node(cptab, cpt));
52 if (!ret) {
53 WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH)));
54 ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
55 }
56
57 return ret;
58} 49}
59EXPORT_SYMBOL(libcfs_kvzalloc_cpt); 50EXPORT_SYMBOL(libcfs_kvzalloc_cpt);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 11b5a8d36415..ca5040c69217 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1454,17 +1454,17 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1454 1454
1455 /* We mark all of the fields "set" so MDS/OST does not re-set them */ 1455 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1456 if (attr->ia_valid & ATTR_CTIME) { 1456 if (attr->ia_valid & ATTR_CTIME) {
1457 attr->ia_ctime = CURRENT_TIME; 1457 attr->ia_ctime = current_time(inode);
1458 attr->ia_valid |= ATTR_CTIME_SET; 1458 attr->ia_valid |= ATTR_CTIME_SET;
1459 } 1459 }
1460 if (!(attr->ia_valid & ATTR_ATIME_SET) && 1460 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1461 (attr->ia_valid & ATTR_ATIME)) { 1461 (attr->ia_valid & ATTR_ATIME)) {
1462 attr->ia_atime = CURRENT_TIME; 1462 attr->ia_atime = current_time(inode);
1463 attr->ia_valid |= ATTR_ATIME_SET; 1463 attr->ia_valid |= ATTR_ATIME_SET;
1464 } 1464 }
1465 if (!(attr->ia_valid & ATTR_MTIME_SET) && 1465 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1466 (attr->ia_valid & ATTR_MTIME)) { 1466 (attr->ia_valid & ATTR_MTIME)) {
1467 attr->ia_mtime = CURRENT_TIME; 1467 attr->ia_mtime = current_time(inode);
1468 attr->ia_valid |= ATTR_MTIME_SET; 1468 attr->ia_valid |= ATTR_MTIME_SET;
1469 } 1469 }
1470 1470
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index f991bee81b37..cbab80092442 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -216,7 +216,7 @@ static int osc_io_submit(const struct lu_env *env,
216 struct cl_object *obj = ios->cis_obj; 216 struct cl_object *obj = ios->cis_obj;
217 217
218 cl_object_attr_lock(obj); 218 cl_object_attr_lock(obj);
219 attr->cat_mtime = LTIME_S(CURRENT_TIME); 219 attr->cat_mtime = ktime_get_real_seconds();
220 attr->cat_ctime = attr->cat_mtime; 220 attr->cat_ctime = attr->cat_mtime;
221 cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME); 221 cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
222 cl_object_attr_unlock(obj); 222 cl_object_attr_unlock(obj);
@@ -256,7 +256,7 @@ static void osc_page_touch_at(const struct lu_env *env,
256 kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms, 256 kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
257 loi->loi_lvb.lvb_size); 257 loi->loi_lvb.lvb_size);
258 258
259 attr->cat_ctime = LTIME_S(CURRENT_TIME); 259 attr->cat_ctime = ktime_get_real_seconds();
260 attr->cat_mtime = attr->cat_ctime; 260 attr->cat_mtime = attr->cat_ctime;
261 valid = CAT_MTIME | CAT_CTIME; 261 valid = CAT_MTIME | CAT_CTIME;
262 if (kms > loi->loi_kms) { 262 if (kms > loi->loi_kms) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
index 40ac3582fb7a..11162f595fc7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
@@ -36,12 +36,13 @@
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/list.h> 37#include <linux/list.h>
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <asm/cacheflush.h>
40#include <linux/io.h> 39#include <linux/io.h>
41#include <asm/current.h> 40#include <asm/current.h>
42#include <linux/sched/signal.h> 41#include <linux/sched/signal.h>
43#include <linux/file.h> 42#include <linux/file.h>
44 43
44#include <asm/set_memory.h>
45
45#include "atomisp_internal.h" 46#include "atomisp_internal.h"
46#include "hmm/hmm_common.h" 47#include "hmm/hmm_common.h"
47#include "hmm/hmm_pool.h" 48#include "hmm/hmm_pool.h"
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
index 639b8cdf7a5e..19e0e9ee37de 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
@@ -27,7 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29 29
30#include "asm/cacheflush.h" 30#include <asm/set_memory.h>
31 31
32#include "atomisp_internal.h" 32#include "atomisp_internal.h"
33 33
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
index 4000c05652e1..bf6586805f7f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
@@ -27,7 +27,8 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29 29
30#include "asm/cacheflush.h" 30#include <asm/set_memory.h>
31
31#include "atomisp_internal.h" 32#include "atomisp_internal.h"
32#include "hmm/hmm_pool.h" 33#include "hmm/hmm_pool.h"
33 34
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
index 2009e3a11b86..706bd43e8b1b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
@@ -30,13 +30,16 @@
30#include <linux/slab.h> /* for kmalloc */ 30#include <linux/slab.h> /* for kmalloc */
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <asm/cacheflush.h>
34#include <linux/module.h> 33#include <linux/module.h>
35#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
36#include <linux/string.h> 35#include <linux/string.h>
37#include <linux/errno.h> 36#include <linux/errno.h>
38#include <linux/sizes.h> 37#include <linux/sizes.h>
39 38
39#ifdef CONFIG_X86
40#include <asm/set_memory.h>
41#endif
42
40#include "atomisp_internal.h" 43#include "atomisp_internal.h"
41#include "mmu/isp_mmu.h" 44#include "mmu/isp_mmu.h"
42 45
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 675b2a9e66c1..069269db394c 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -82,7 +82,7 @@ struct most_inst_obj {
82 82
83static const struct { 83static const struct {
84 int most_ch_data_type; 84 int most_ch_data_type;
85 char *name; 85 const char *name;
86} ch_data_type[] = { 86} ch_data_type[] = {
87 { MOST_CH_CONTROL, "control\n" }, 87 { MOST_CH_CONTROL, "control\n" },
88 { MOST_CH_ASYNC, "async\n" }, 88 { MOST_CH_ASYNC, "async\n" },
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index e94aea8c0d05..7b2a466616d6 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -939,11 +939,11 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
939 return buf; 939 return buf;
940} /* end of n_hdlc_buf_get() */ 940} /* end of n_hdlc_buf_get() */
941 941
942static char hdlc_banner[] __initdata = 942static const char hdlc_banner[] __initconst =
943 KERN_INFO "HDLC line discipline maxframe=%u\n"; 943 KERN_INFO "HDLC line discipline maxframe=%u\n";
944static char hdlc_register_ok[] __initdata = 944static const char hdlc_register_ok[] __initconst =
945 KERN_INFO "N_HDLC line discipline registered.\n"; 945 KERN_INFO "N_HDLC line discipline registered.\n";
946static char hdlc_register_fail[] __initdata = 946static const char hdlc_register_fail[] __initconst =
947 KERN_ERR "error registering line discipline: %d\n"; 947 KERN_ERR "error registering line discipline: %d\n";
948 948
949static int __init n_hdlc_init(void) 949static int __init n_hdlc_init(void)
@@ -968,9 +968,9 @@ static int __init n_hdlc_init(void)
968 968
969} /* end of init_module() */ 969} /* end of init_module() */
970 970
971static char hdlc_unregister_ok[] __exitdata = 971static const char hdlc_unregister_ok[] __exitdata =
972 KERN_INFO "N_HDLC: line discipline unregistered\n"; 972 KERN_INFO "N_HDLC: line discipline unregistered\n";
973static char hdlc_unregister_fail[] __exitdata = 973static const char hdlc_unregister_fail[] __exitdata =
974 KERN_ERR "N_HDLC: can't unregister line discipline (err = %d)\n"; 974 KERN_ERR "N_HDLC: can't unregister line discipline (err = %d)\n";
975 975
976static void __exit n_hdlc_exit(void) 976static void __exit n_hdlc_exit(void)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index c334bcc59c64..a93a3167a9c6 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -986,7 +986,7 @@ static struct platform_driver asc_serial_driver = {
986static int __init asc_init(void) 986static int __init asc_init(void)
987{ 987{
988 int ret; 988 int ret;
989 static char banner[] __initdata = 989 static const char banner[] __initconst =
990 KERN_INFO "STMicroelectronics ASC driver initialized\n"; 990 KERN_INFO "STMicroelectronics ASC driver initialized\n";
991 991
992 printk(banner); 992 printk(banner);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b519897cc17..f61f852d6cfd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -817,12 +817,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
817 struct vhost_virtqueue **vqs; 817 struct vhost_virtqueue **vqs;
818 int i; 818 int i;
819 819
820 n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 820 n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT);
821 if (!n) { 821 if (!n)
822 n = vmalloc(sizeof *n); 822 return -ENOMEM;
823 if (!n)
824 return -ENOMEM;
825 }
826 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); 823 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
827 if (!vqs) { 824 if (!vqs) {
828 kvfree(n); 825 kvfree(n);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f0ba362d4c10..042030e5a035 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -534,18 +534,9 @@ err_mm:
534} 534}
535EXPORT_SYMBOL_GPL(vhost_dev_set_owner); 535EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
536 536
537static void *vhost_kvzalloc(unsigned long size)
538{
539 void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
540
541 if (!n)
542 n = vzalloc(size);
543 return n;
544}
545
546struct vhost_umem *vhost_dev_reset_owner_prepare(void) 537struct vhost_umem *vhost_dev_reset_owner_prepare(void)
547{ 538{
548 return vhost_kvzalloc(sizeof(struct vhost_umem)); 539 return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
549} 540}
550EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); 541EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
551 542
@@ -1276,7 +1267,7 @@ EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1276 1267
1277static struct vhost_umem *vhost_umem_alloc(void) 1268static struct vhost_umem *vhost_umem_alloc(void)
1278{ 1269{
1279 struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem)); 1270 struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
1280 1271
1281 if (!umem) 1272 if (!umem)
1282 return NULL; 1273 return NULL;
@@ -1302,7 +1293,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1302 return -EOPNOTSUPP; 1293 return -EOPNOTSUPP;
1303 if (mem.nregions > max_mem_regions) 1294 if (mem.nregions > max_mem_regions)
1304 return -E2BIG; 1295 return -E2BIG;
1305 newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); 1296 newmem = kvzalloc(size + mem.nregions * sizeof(*m->regions), GFP_KERNEL);
1306 if (!newmem) 1297 if (!newmem)
1307 return -ENOMEM; 1298 return -ENOMEM;
1308 1299
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d939ac1a4997..3acef3c5d8ed 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -508,12 +508,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
508 /* This struct is large and allocation could fail, fall back to vmalloc 508 /* This struct is large and allocation could fail, fall back to vmalloc
509 * if there is no other way. 509 * if there is no other way.
510 */ 510 */
511 vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 511 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT);
512 if (!vsock) { 512 if (!vsock)
513 vsock = vmalloc(sizeof(*vsock)); 513 return -ENOMEM;
514 if (!vsock)
515 return -ENOMEM;
516 }
517 514
518 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); 515 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
519 if (!vqs) { 516 if (!vqs) {
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index ff2a5d2023e1..6b444400a86c 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -934,7 +934,7 @@ static __inline__ int var_to_refresh(const struct fb_var_screeninfo *var)
934} 934}
935 935
936/*************************************************************** 936/***************************************************************
937 * Various intialisation functions * 937 * Various initialisation functions *
938 ***************************************************************/ 938 ***************************************************************/
939 939
940static void get_initial_mode(struct intelfb_info *dinfo) 940static void get_initial_mode(struct intelfb_info *dinfo)
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 1c1e95a0b8fa..ce4c4729a5e8 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -37,7 +37,7 @@
37#include <linux/mm.h> 37#include <linux/mm.h>
38#include <linux/fb.h> 38#include <linux/fb.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <asm/cacheflush.h> 40#include <asm/set_memory.h>
41#include <asm/tlbflush.h> 41#include <asm/tlbflush.h>
42#include <linux/mmzone.h> 42#include <linux/mmzone.h>
43 43
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 150ce2abf6c8..d3eca879a0a8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -243,11 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
243 sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); 243 sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
244 244
245 /* Get the physical addresses of the source buffer */ 245 /* Get the physical addresses of the source buffer */
246 down_read(&current->mm->mmap_sem); 246 num_pinned = get_user_pages_unlocked(param.local_vaddr - lb_offset,
247 num_pinned = get_user_pages(param.local_vaddr - lb_offset, 247 num_pages, pages, (param.source == -1) ? 0 : FOLL_WRITE);
248 num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
249 pages, NULL);
250 up_read(&current->mm->mmap_sem);
251 248
252 if (num_pinned != num_pages) { 249 if (num_pinned != num_pages) {
253 /* get_user_pages() failed */ 250 /* get_user_pages() failed */
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 70c7194e2810..67fbe35ce7cf 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -34,7 +34,7 @@
34#include <linux/nmi.h> 34#include <linux/nmi.h>
35#include <linux/kdebug.h> 35#include <linux/kdebug.h>
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <asm/cacheflush.h> 37#include <asm/set_memory.h>
38#endif /* CONFIG_HPWDT_NMI_DECODING */ 38#endif /* CONFIG_HPWDT_NMI_DECODING */
39#include <asm/nmi.h> 39#include <asm/nmi.h>
40#include <asm/frame.h> 40#include <asm/frame.h>
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6890897a6f30..10f1ef582659 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -87,18 +87,6 @@ struct user_evtchn {
87 bool enabled; 87 bool enabled;
88}; 88};
89 89
90static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
91{
92 evtchn_port_t *ring;
93 size_t s = size * sizeof(*ring);
94
95 ring = kmalloc(s, GFP_KERNEL);
96 if (!ring)
97 ring = vmalloc(s);
98
99 return ring;
100}
101
102static void evtchn_free_ring(evtchn_port_t *ring) 90static void evtchn_free_ring(evtchn_port_t *ring)
103{ 91{
104 kvfree(ring); 92 kvfree(ring);
@@ -334,7 +322,7 @@ static int evtchn_resize_ring(struct per_user_data *u)
334 else 322 else
335 new_size = 2 * u->ring_size; 323 new_size = 2 * u->ring_size;
336 324
337 new_ring = evtchn_alloc_ring(new_size); 325 new_ring = kvmalloc(new_size * sizeof(*new_ring), GFP_KERNEL);
338 if (!new_ring) 326 if (!new_ring)
339 return -ENOMEM; 327 return -ENOMEM;
340 328
diff --git a/firmware/Makefile b/firmware/Makefile
index e297e1b52636..fa3e81c2a97b 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -176,7 +176,8 @@ quiet_cmd_fwbin = MK_FW $@
176wordsize_deps := $(wildcard include/config/64bit.h include/config/32bit.h \ 176wordsize_deps := $(wildcard include/config/64bit.h include/config/32bit.h \
177 include/config/ppc32.h include/config/ppc64.h \ 177 include/config/ppc32.h include/config/ppc64.h \
178 include/config/superh32.h include/config/superh64.h \ 178 include/config/superh32.h include/config/superh64.h \
179 include/config/x86_32.h include/config/x86_64.h) 179 include/config/x86_32.h include/config/x86_64.h \
180 firmware/Makefile)
180 181
181$(patsubst %,$(obj)/%.gen.S, $(fw-shipped-y)): %: $(wordsize_deps) 182$(patsubst %,$(obj)/%.gen.S, $(fw-shipped-y)): %: $(wordsize_deps)
182 $(call cmd,fwbin,$(patsubst %.gen.S,%,$@)) 183 $(call cmd,fwbin,$(patsubst %.gen.S,%,$@))
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 7dc8844037e0..1c3b6c54d5ee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -5392,13 +5392,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5392 goto out; 5392 goto out;
5393 } 5393 }
5394 5394
5395 tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN); 5395 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5396 if (!tmp_buf) { 5396 if (!tmp_buf) {
5397 tmp_buf = vmalloc(fs_info->nodesize); 5397 ret = -ENOMEM;
5398 if (!tmp_buf) { 5398 goto out;
5399 ret = -ENOMEM;
5400 goto out;
5401 }
5402 } 5399 }
5403 5400
5404 left_path->search_commit_root = 1; 5401 left_path->search_commit_root = 1;
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index dd7fb22a955a..fc0bd8406758 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -167,8 +167,7 @@ static u8 *alloc_bitmap(u32 bitmap_size)
167 if (mem) 167 if (mem)
168 return mem; 168 return mem;
169 169
170 return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO, 170 return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
171 PAGE_KERNEL);
172} 171}
173 172
174int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, 173int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dabfc7ac48a6..922a66fce401 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3539,12 +3539,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
3539 u64 last_dest_end = destoff; 3539 u64 last_dest_end = destoff;
3540 3540
3541 ret = -ENOMEM; 3541 ret = -ENOMEM;
3542 buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN); 3542 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3543 if (!buf) { 3543 if (!buf)
3544 buf = vmalloc(fs_info->nodesize); 3544 return ret;
3545 if (!buf)
3546 return ret;
3547 }
3548 3545
3549 path = btrfs_alloc_path(); 3546 path = btrfs_alloc_path();
3550 if (!path) { 3547 if (!path) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index a60d5bfb8a49..3f645cd67b54 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6360,22 +6360,16 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
6360 sctx->clone_roots_cnt = arg->clone_sources_count; 6360 sctx->clone_roots_cnt = arg->clone_sources_count;
6361 6361
6362 sctx->send_max_size = BTRFS_SEND_BUF_SIZE; 6362 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
6363 sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN); 6363 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
6364 if (!sctx->send_buf) { 6364 if (!sctx->send_buf) {
6365 sctx->send_buf = vmalloc(sctx->send_max_size); 6365 ret = -ENOMEM;
6366 if (!sctx->send_buf) { 6366 goto out;
6367 ret = -ENOMEM;
6368 goto out;
6369 }
6370 } 6367 }
6371 6368
6372 sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN); 6369 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
6373 if (!sctx->read_buf) { 6370 if (!sctx->read_buf) {
6374 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); 6371 ret = -ENOMEM;
6375 if (!sctx->read_buf) { 6372 goto out;
6376 ret = -ENOMEM;
6377 goto out;
6378 }
6379 } 6373 }
6380 6374
6381 sctx->pending_dir_moves = RB_ROOT; 6375 sctx->pending_dir_moves = RB_ROOT;
@@ -6396,13 +6390,10 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
6396 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources); 6390 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
6397 6391
6398 if (arg->clone_sources_count) { 6392 if (arg->clone_sources_count) {
6399 clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN); 6393 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
6400 if (!clone_sources_tmp) { 6394 if (!clone_sources_tmp) {
6401 clone_sources_tmp = vmalloc(alloc_size); 6395 ret = -ENOMEM;
6402 if (!clone_sources_tmp) { 6396 goto out;
6403 ret = -ENOMEM;
6404 goto out;
6405 }
6406 } 6397 }
6407 6398
6408 ret = copy_from_user(clone_sources_tmp, arg->clone_sources, 6399 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
diff --git a/fs/buffer.c b/fs/buffer.c
index 9196f2a270da..c3c7455efa3f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2379,8 +2379,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
2379 goto out; 2379 goto out;
2380 2380
2381 err = pagecache_write_begin(NULL, mapping, size, 0, 2381 err = pagecache_write_begin(NULL, mapping, size, 0,
2382 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND, 2382 AOP_FLAG_CONT_EXPAND, &page, &fsdata);
2383 &page, &fsdata);
2384 if (err) 2383 if (err)
2385 goto out; 2384 goto out;
2386 2385
@@ -2415,9 +2414,8 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
2415 } 2414 }
2416 len = PAGE_SIZE - zerofrom; 2415 len = PAGE_SIZE - zerofrom;
2417 2416
2418 err = pagecache_write_begin(file, mapping, curpos, len, 2417 err = pagecache_write_begin(file, mapping, curpos, len, 0,
2419 AOP_FLAG_UNINTERRUPTIBLE, 2418 &page, &fsdata);
2420 &page, &fsdata);
2421 if (err) 2419 if (err)
2422 goto out; 2420 goto out;
2423 zero_user(page, zerofrom, len); 2421 zero_user(page, zerofrom, len);
@@ -2449,9 +2447,8 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
2449 } 2447 }
2450 len = offset - zerofrom; 2448 len = offset - zerofrom;
2451 2449
2452 err = pagecache_write_begin(file, mapping, curpos, len, 2450 err = pagecache_write_begin(file, mapping, curpos, len, 0,
2453 AOP_FLAG_UNINTERRUPTIBLE, 2451 &page, &fsdata);
2454 &page, &fsdata);
2455 if (err) 2452 if (err)
2456 goto out; 2453 goto out;
2457 zero_user(page, zerofrom, len); 2454 zero_user(page, zerofrom, len);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 26cc95421cca..18c045e2ead6 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -74,12 +74,9 @@ dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
75 (PAGE_SIZE - 1); 75 (PAGE_SIZE - 1);
76 npages = calc_pages_for(align, nbytes); 76 npages = calc_pages_for(align, nbytes);
77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); 77 pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
78 if (!pages) { 78 if (!pages)
79 pages = vmalloc(sizeof(*pages) * npages); 79 return ERR_PTR(-ENOMEM);
80 if (!pages)
81 return ERR_PTR(-ENOMEM);
82 }
83 80
84 for (idx = 0; idx < npages; ) { 81 for (idx = 0; idx < npages; ) {
85 size_t start; 82 size_t start;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index c681762d76e6..1d3fa90d40b9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1666,6 +1666,7 @@ struct ceph_mds_request *
1666ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1666ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1667{ 1667{
1668 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1668 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1669 struct timespec ts;
1669 1670
1670 if (!req) 1671 if (!req)
1671 return ERR_PTR(-ENOMEM); 1672 return ERR_PTR(-ENOMEM);
@@ -1684,7 +1685,8 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1684 init_completion(&req->r_safe_completion); 1685 init_completion(&req->r_safe_completion);
1685 INIT_LIST_HEAD(&req->r_unsafe_item); 1686 INIT_LIST_HEAD(&req->r_unsafe_item);
1686 1687
1687 req->r_stamp = current_fs_time(mdsc->fsc->sb); 1688 ktime_get_real_ts(&ts);
1689 req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
1688 1690
1689 req->r_op = op; 1691 req->r_op = op;
1690 req->r_direct_mode = mode; 1692 req->r_direct_mode = mode;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 058ac9b36f04..68abbb0db608 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -478,6 +478,7 @@ find_timestamp(struct cifs_ses *ses)
478 unsigned char *blobptr; 478 unsigned char *blobptr;
479 unsigned char *blobend; 479 unsigned char *blobend;
480 struct ntlmssp2_name *attrptr; 480 struct ntlmssp2_name *attrptr;
481 struct timespec ts;
481 482
482 if (!ses->auth_key.len || !ses->auth_key.response) 483 if (!ses->auth_key.len || !ses->auth_key.response)
483 return 0; 484 return 0;
@@ -502,7 +503,8 @@ find_timestamp(struct cifs_ses *ses)
502 blobptr += attrsize; /* advance attr value */ 503 blobptr += attrsize; /* advance attr value */
503 } 504 }
504 505
505 return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 506 ktime_get_real_ts(&ts);
507 return cpu_to_le64(cifs_UnixTimeToNT(ts));
506} 508}
507 509
508static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, 510static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 205fd94f52fd..4c01b3f9abf0 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -478,14 +478,14 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
478 * this requirement. 478 * this requirement.
479 */ 479 */
480 int val, seconds, remain, result; 480 int val, seconds, remain, result;
481 struct timespec ts, utc; 481 struct timespec ts;
482 utc = CURRENT_TIME; 482 unsigned long utc = ktime_get_real_seconds();
483 ts = cnvrtDosUnixTm(rsp->SrvTime.Date, 483 ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
484 rsp->SrvTime.Time, 0); 484 rsp->SrvTime.Time, 0);
485 cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n", 485 cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n",
486 (int)ts.tv_sec, (int)utc.tv_sec, 486 (int)ts.tv_sec, (int)utc,
487 (int)(utc.tv_sec - ts.tv_sec)); 487 (int)(utc - ts.tv_sec));
488 val = (int)(utc.tv_sec - ts.tv_sec); 488 val = (int)(utc - ts.tv_sec);
489 seconds = abs(val); 489 seconds = abs(val);
490 result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ; 490 result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
491 remain = seconds % MIN_TZ_ADJ; 491 remain = seconds % MIN_TZ_ADJ;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b261db34103c..c3b2fa0b2ec8 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -322,9 +322,9 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
322 fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU; 322 fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
323 fattr->cf_uid = cifs_sb->mnt_uid; 323 fattr->cf_uid = cifs_sb->mnt_uid;
324 fattr->cf_gid = cifs_sb->mnt_gid; 324 fattr->cf_gid = cifs_sb->mnt_gid;
325 fattr->cf_atime = CURRENT_TIME; 325 ktime_get_real_ts(&fattr->cf_mtime);
326 fattr->cf_ctime = CURRENT_TIME; 326 fattr->cf_mtime = timespec_trunc(fattr->cf_mtime, sb->s_time_gran);
327 fattr->cf_mtime = CURRENT_TIME; 327 fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
328 fattr->cf_nlink = 2; 328 fattr->cf_nlink = 2;
329 fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL; 329 fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
330} 330}
@@ -586,9 +586,10 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
586/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */ 586/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
587static void 587static void
588cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, 588cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
589 struct cifs_sb_info *cifs_sb, bool adjust_tz, 589 struct super_block *sb, bool adjust_tz,
590 bool symlink) 590 bool symlink)
591{ 591{
592 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
592 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 593 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
593 594
594 memset(fattr, 0, sizeof(*fattr)); 595 memset(fattr, 0, sizeof(*fattr));
@@ -598,8 +599,10 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
598 599
599 if (info->LastAccessTime) 600 if (info->LastAccessTime)
600 fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); 601 fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
601 else 602 else {
602 fattr->cf_atime = CURRENT_TIME; 603 ktime_get_real_ts(&fattr->cf_atime);
604 fattr->cf_atime = timespec_trunc(fattr->cf_atime, sb->s_time_gran);
605 }
603 606
604 fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); 607 fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
605 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); 608 fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
@@ -659,7 +662,6 @@ cifs_get_file_info(struct file *filp)
659 FILE_ALL_INFO find_data; 662 FILE_ALL_INFO find_data;
660 struct cifs_fattr fattr; 663 struct cifs_fattr fattr;
661 struct inode *inode = file_inode(filp); 664 struct inode *inode = file_inode(filp);
662 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
663 struct cifsFileInfo *cfile = filp->private_data; 665 struct cifsFileInfo *cfile = filp->private_data;
664 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 666 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
665 struct TCP_Server_Info *server = tcon->ses->server; 667 struct TCP_Server_Info *server = tcon->ses->server;
@@ -671,7 +673,7 @@ cifs_get_file_info(struct file *filp)
671 rc = server->ops->query_file_info(xid, tcon, &cfile->fid, &find_data); 673 rc = server->ops->query_file_info(xid, tcon, &cfile->fid, &find_data);
672 switch (rc) { 674 switch (rc) {
673 case 0: 675 case 0:
674 cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false, 676 cifs_all_info_to_fattr(&fattr, &find_data, inode->i_sb, false,
675 false); 677 false);
676 break; 678 break;
677 case -EREMOTE: 679 case -EREMOTE:
@@ -753,7 +755,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
753 } 755 }
754 756
755 if (!rc) { 757 if (!rc) {
756 cifs_all_info_to_fattr(&fattr, data, cifs_sb, adjust_tz, 758 cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz,
757 symlink); 759 symlink);
758 } else if (rc == -EREMOTE) { 760 } else if (rc == -EREMOTE) {
759 cifs_create_dfs_fattr(&fattr, sb); 761 cifs_create_dfs_fattr(&fattr, sb);
@@ -1363,9 +1365,9 @@ out_reval:
1363 cifs_inode = CIFS_I(inode); 1365 cifs_inode = CIFS_I(inode);
1364 cifs_inode->time = 0; /* will force revalidate to get info 1366 cifs_inode->time = 0; /* will force revalidate to get info
1365 when needed */ 1367 when needed */
1366 inode->i_ctime = current_fs_time(sb); 1368 inode->i_ctime = current_time(inode);
1367 } 1369 }
1368 dir->i_ctime = dir->i_mtime = current_fs_time(sb); 1370 dir->i_ctime = dir->i_mtime = current_time(dir);
1369 cifs_inode = CIFS_I(dir); 1371 cifs_inode = CIFS_I(dir);
1370 CIFS_I(dir)->time = 0; /* force revalidate of dir as well */ 1372 CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
1371unlink_out: 1373unlink_out:
@@ -1633,7 +1635,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
1633 cifsInode->time = 0; 1635 cifsInode->time = 0;
1634 1636
1635 d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime = 1637 d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
1636 current_fs_time(inode->i_sb); 1638 current_time(inode);
1637 1639
1638rmdir_exit: 1640rmdir_exit:
1639 kfree(full_path); 1641 kfree(full_path);
@@ -1806,7 +1808,7 @@ unlink_target:
1806 CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; 1808 CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
1807 1809
1808 source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime = 1810 source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
1809 target_dir->i_mtime = current_fs_time(source_dir->i_sb); 1811 target_dir->i_mtime = current_time(source_dir);
1810 1812
1811cifs_rename_exit: 1813cifs_rename_exit:
1812 kfree(info_buf_source); 1814 kfree(info_buf_source);
diff --git a/fs/dax.c b/fs/dax.c
index 43bbd6d1037d..66d79067eedf 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -509,21 +509,25 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
509static int dax_load_hole(struct address_space *mapping, void **entry, 509static int dax_load_hole(struct address_space *mapping, void **entry,
510 struct vm_fault *vmf) 510 struct vm_fault *vmf)
511{ 511{
512 struct inode *inode = mapping->host;
512 struct page *page; 513 struct page *page;
513 int ret; 514 int ret;
514 515
515 /* Hole page already exists? Return it... */ 516 /* Hole page already exists? Return it... */
516 if (!radix_tree_exceptional_entry(*entry)) { 517 if (!radix_tree_exceptional_entry(*entry)) {
517 page = *entry; 518 page = *entry;
518 goto out; 519 goto finish_fault;
519 } 520 }
520 521
521 /* This will replace locked radix tree entry with a hole page */ 522 /* This will replace locked radix tree entry with a hole page */
522 page = find_or_create_page(mapping, vmf->pgoff, 523 page = find_or_create_page(mapping, vmf->pgoff,
523 vmf->gfp_mask | __GFP_ZERO); 524 vmf->gfp_mask | __GFP_ZERO);
524 if (!page) 525 if (!page) {
525 return VM_FAULT_OOM; 526 ret = VM_FAULT_OOM;
526 out: 527 goto out;
528 }
529
530finish_fault:
527 vmf->page = page; 531 vmf->page = page;
528 ret = finish_fault(vmf); 532 ret = finish_fault(vmf);
529 vmf->page = NULL; 533 vmf->page = NULL;
@@ -531,8 +535,10 @@ static int dax_load_hole(struct address_space *mapping, void **entry,
531 if (!ret) { 535 if (!ret) {
532 /* Grab reference for PTE that is now referencing the page */ 536 /* Grab reference for PTE that is now referencing the page */
533 get_page(page); 537 get_page(page);
534 return VM_FAULT_NOPAGE; 538 ret = VM_FAULT_NOPAGE;
535 } 539 }
540out:
541 trace_dax_load_hole(inode, vmf, ret);
536 return ret; 542 return ret;
537} 543}
538 544
@@ -817,6 +823,7 @@ static int dax_writeback_one(struct block_device *bdev,
817 spin_lock_irq(&mapping->tree_lock); 823 spin_lock_irq(&mapping->tree_lock);
818 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY); 824 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
819 spin_unlock_irq(&mapping->tree_lock); 825 spin_unlock_irq(&mapping->tree_lock);
826 trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
820 dax_unlock: 827 dax_unlock:
821 dax_read_unlock(id); 828 dax_read_unlock(id);
822 put_locked_mapping_entry(mapping, index, entry); 829 put_locked_mapping_entry(mapping, index, entry);
@@ -857,6 +864,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
857 start_index = wbc->range_start >> PAGE_SHIFT; 864 start_index = wbc->range_start >> PAGE_SHIFT;
858 end_index = wbc->range_end >> PAGE_SHIFT; 865 end_index = wbc->range_end >> PAGE_SHIFT;
859 866
867 trace_dax_writeback_range(inode, start_index, end_index);
868
860 tag_pages_for_writeback(mapping, start_index, end_index); 869 tag_pages_for_writeback(mapping, start_index, end_index);
861 870
862 pagevec_init(&pvec, 0); 871 pagevec_init(&pvec, 0);
@@ -876,14 +885,14 @@ int dax_writeback_mapping_range(struct address_space *mapping,
876 885
877 ret = dax_writeback_one(bdev, dax_dev, mapping, 886 ret = dax_writeback_one(bdev, dax_dev, mapping,
878 indices[i], pvec.pages[i]); 887 indices[i], pvec.pages[i]);
879 if (ret < 0) { 888 if (ret < 0)
880 put_dax(dax_dev); 889 goto out;
881 return ret;
882 }
883 } 890 }
884 } 891 }
892out:
885 put_dax(dax_dev); 893 put_dax(dax_dev);
886 return 0; 894 trace_dax_writeback_range_done(inode, start_index, end_index);
895 return (ret < 0 ? ret : 0);
887} 896}
888EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 897EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
889 898
@@ -916,6 +925,7 @@ static int dax_insert_mapping(struct address_space *mapping,
916 return PTR_ERR(ret); 925 return PTR_ERR(ret);
917 *entryp = ret; 926 *entryp = ret;
918 927
928 trace_dax_insert_mapping(mapping->host, vmf, ret);
919 return vm_insert_mixed(vma, vaddr, pfn); 929 return vm_insert_mixed(vma, vaddr, pfn);
920} 930}
921 931
@@ -927,6 +937,7 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
927{ 937{
928 struct file *file = vmf->vma->vm_file; 938 struct file *file = vmf->vma->vm_file;
929 struct address_space *mapping = file->f_mapping; 939 struct address_space *mapping = file->f_mapping;
940 struct inode *inode = mapping->host;
930 void *entry, **slot; 941 void *entry, **slot;
931 pgoff_t index = vmf->pgoff; 942 pgoff_t index = vmf->pgoff;
932 943
@@ -936,6 +947,7 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
936 if (entry) 947 if (entry)
937 put_unlocked_mapping_entry(mapping, index, entry); 948 put_unlocked_mapping_entry(mapping, index, entry);
938 spin_unlock_irq(&mapping->tree_lock); 949 spin_unlock_irq(&mapping->tree_lock);
950 trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
939 return VM_FAULT_NOPAGE; 951 return VM_FAULT_NOPAGE;
940 } 952 }
941 radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY); 953 radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
@@ -948,6 +960,7 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
948 */ 960 */
949 finish_mkwrite_fault(vmf); 961 finish_mkwrite_fault(vmf);
950 put_locked_mapping_entry(mapping, index, entry); 962 put_locked_mapping_entry(mapping, index, entry);
963 trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
951 return VM_FAULT_NOPAGE; 964 return VM_FAULT_NOPAGE;
952} 965}
953EXPORT_SYMBOL_GPL(dax_pfn_mkwrite); 966EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
@@ -1150,13 +1163,16 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
1150 int vmf_ret = 0; 1163 int vmf_ret = 0;
1151 void *entry; 1164 void *entry;
1152 1165
1166 trace_dax_pte_fault(inode, vmf, vmf_ret);
1153 /* 1167 /*
1154 * Check whether offset isn't beyond end of file now. Caller is supposed 1168 * Check whether offset isn't beyond end of file now. Caller is supposed
1155 * to hold locks serializing us with truncate / punch hole so this is 1169 * to hold locks serializing us with truncate / punch hole so this is
1156 * a reliable test. 1170 * a reliable test.
1157 */ 1171 */
1158 if (pos >= i_size_read(inode)) 1172 if (pos >= i_size_read(inode)) {
1159 return VM_FAULT_SIGBUS; 1173 vmf_ret = VM_FAULT_SIGBUS;
1174 goto out;
1175 }
1160 1176
1161 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 1177 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1162 flags |= IOMAP_WRITE; 1178 flags |= IOMAP_WRITE;
@@ -1167,8 +1183,10 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
1167 * that we never have to deal with more than a single extent here. 1183 * that we never have to deal with more than a single extent here.
1168 */ 1184 */
1169 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); 1185 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1170 if (error) 1186 if (error) {
1171 return dax_fault_return(error); 1187 vmf_ret = dax_fault_return(error);
1188 goto out;
1189 }
1172 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { 1190 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1173 vmf_ret = dax_fault_return(-EIO); /* fs corruption? */ 1191 vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
1174 goto finish_iomap; 1192 goto finish_iomap;
@@ -1252,6 +1270,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
1252 */ 1270 */
1253 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); 1271 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1254 } 1272 }
1273out:
1274 trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1255 return vmf_ret; 1275 return vmf_ret;
1256} 1276}
1257 1277
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 42f9a0a0c4ca..8eeb694332fe 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -405,8 +405,7 @@ int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de,
405 int err; 405 int err;
406 406
407 lock_page(page); 407 lock_page(page);
408 err = exofs_write_begin(NULL, page->mapping, pos, len, 408 err = exofs_write_begin(NULL, page->mapping, pos, len, 0, &page, NULL);
409 AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
410 if (err) 409 if (err)
411 EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n", 410 EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n",
412 err); 411 err);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 36de58a37653..5083bce20ac4 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2393,7 +2393,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2393 return 0; 2393 return 0;
2394 2394
2395 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2395 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2396 new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); 2396 new_groupinfo = kvzalloc(size, GFP_KERNEL);
2397 if (!new_groupinfo) { 2397 if (!new_groupinfo) {
2398 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2398 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2399 return -ENOMEM; 2399 return -ENOMEM;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 96973ee74147..c90edf09b0c3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2155,7 +2155,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
2155 return 0; 2155 return 0;
2156 2156
2157 size = roundup_pow_of_two(size * sizeof(struct flex_groups)); 2157 size = roundup_pow_of_two(size * sizeof(struct flex_groups));
2158 new_groups = ext4_kvzalloc(size, GFP_KERNEL); 2158 new_groups = kvzalloc(size, GFP_KERNEL);
2159 if (!new_groups) { 2159 if (!new_groups) {
2160 ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", 2160 ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
2161 size / (int) sizeof(struct flex_groups)); 2161 size / (int) sizeof(struct flex_groups));
@@ -3889,7 +3889,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3889 goto failed_mount; 3889 goto failed_mount;
3890 } 3890 }
3891 } 3891 }
3892 sbi->s_group_desc = ext4_kvmalloc(db_count * 3892 sbi->s_group_desc = kvmalloc(db_count *
3893 sizeof(struct buffer_head *), 3893 sizeof(struct buffer_head *),
3894 GFP_KERNEL); 3894 GFP_KERNEL);
3895 if (sbi->s_group_desc == NULL) { 3895 if (sbi->s_group_desc == NULL) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e26999a74522..2185c7a040a1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2071,26 +2071,6 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
2071 return kmalloc(size, flags); 2071 return kmalloc(size, flags);
2072} 2072}
2073 2073
2074static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
2075{
2076 void *ret;
2077
2078 ret = kmalloc(size, flags | __GFP_NOWARN);
2079 if (!ret)
2080 ret = __vmalloc(size, flags, PAGE_KERNEL);
2081 return ret;
2082}
2083
2084static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
2085{
2086 void *ret;
2087
2088 ret = kzalloc(size, flags | __GFP_NOWARN);
2089 if (!ret)
2090 ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
2091 return ret;
2092}
2093
2094#define get_inode_mode(i) \ 2074#define get_inode_mode(i) \
2095 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ 2075 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
2096 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 2076 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index abb0403d3414..61af721329fa 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1011,11 +1011,11 @@ static int __exchange_data_block(struct inode *src_inode,
1011 while (len) { 1011 while (len) {
1012 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len); 1012 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
1013 1013
1014 src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL); 1014 src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
1015 if (!src_blkaddr) 1015 if (!src_blkaddr)
1016 return -ENOMEM; 1016 return -ENOMEM;
1017 1017
1018 do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL); 1018 do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
1019 if (!do_replace) { 1019 if (!do_replace) {
1020 kvfree(src_blkaddr); 1020 kvfree(src_blkaddr);
1021 return -ENOMEM; 1021 return -ENOMEM;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 98351a4a4da3..4547c5c5cd98 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2652,17 +2652,17 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
2652{ 2652{
2653 struct f2fs_nm_info *nm_i = NM_I(sbi); 2653 struct f2fs_nm_info *nm_i = NM_I(sbi);
2654 2654
2655 nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks * 2655 nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks *
2656 NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); 2656 NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
2657 if (!nm_i->free_nid_bitmap) 2657 if (!nm_i->free_nid_bitmap)
2658 return -ENOMEM; 2658 return -ENOMEM;
2659 2659
2660 nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8, 2660 nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8,
2661 GFP_KERNEL); 2661 GFP_KERNEL);
2662 if (!nm_i->nat_block_bitmap) 2662 if (!nm_i->nat_block_bitmap)
2663 return -ENOMEM; 2663 return -ENOMEM;
2664 2664
2665 nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * 2665 nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks *
2666 sizeof(unsigned short), GFP_KERNEL); 2666 sizeof(unsigned short), GFP_KERNEL);
2667 if (!nm_i->free_nid_count) 2667 if (!nm_i->free_nid_count)
2668 return -ENOMEM; 2668 return -ENOMEM;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index de31030b5041..96845854e7ee 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2834,13 +2834,13 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
2834 2834
2835 SM_I(sbi)->sit_info = sit_i; 2835 SM_I(sbi)->sit_info = sit_i;
2836 2836
2837 sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) * 2837 sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
2838 sizeof(struct seg_entry), GFP_KERNEL); 2838 sizeof(struct seg_entry), GFP_KERNEL);
2839 if (!sit_i->sentries) 2839 if (!sit_i->sentries)
2840 return -ENOMEM; 2840 return -ENOMEM;
2841 2841
2842 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 2842 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2843 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 2843 sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
2844 if (!sit_i->dirty_sentries_bitmap) 2844 if (!sit_i->dirty_sentries_bitmap)
2845 return -ENOMEM; 2845 return -ENOMEM;
2846 2846
@@ -2873,7 +2873,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
2873 return -ENOMEM; 2873 return -ENOMEM;
2874 2874
2875 if (sbi->segs_per_sec > 1) { 2875 if (sbi->segs_per_sec > 1) {
2876 sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) * 2876 sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
2877 sizeof(struct sec_entry), GFP_KERNEL); 2877 sizeof(struct sec_entry), GFP_KERNEL);
2878 if (!sit_i->sec_entries) 2878 if (!sit_i->sec_entries)
2879 return -ENOMEM; 2879 return -ENOMEM;
@@ -2906,7 +2906,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
2906 sit_i->dirty_sentries = 0; 2906 sit_i->dirty_sentries = 0;
2907 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 2907 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
2908 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 2908 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
2909 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; 2909 sit_i->mounted_time = ktime_get_real_seconds();
2910 mutex_init(&sit_i->sentry_lock); 2910 mutex_init(&sit_i->sentry_lock);
2911 return 0; 2911 return 0;
2912} 2912}
@@ -2924,12 +2924,12 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
2924 SM_I(sbi)->free_info = free_i; 2924 SM_I(sbi)->free_info = free_i;
2925 2925
2926 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 2926 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2927 free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL); 2927 free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
2928 if (!free_i->free_segmap) 2928 if (!free_i->free_segmap)
2929 return -ENOMEM; 2929 return -ENOMEM;
2930 2930
2931 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 2931 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2932 free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL); 2932 free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
2933 if (!free_i->free_secmap) 2933 if (!free_i->free_secmap)
2934 return -ENOMEM; 2934 return -ENOMEM;
2935 2935
@@ -3109,7 +3109,7 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
3109 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 3109 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3110 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 3110 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3111 3111
3112 dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 3112 dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
3113 if (!dirty_i->victim_secmap) 3113 if (!dirty_i->victim_secmap)
3114 return -ENOMEM; 3114 return -ENOMEM;
3115 return 0; 3115 return 0;
@@ -3131,7 +3131,7 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
3131 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 3131 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3132 3132
3133 for (i = 0; i < NR_DIRTY_TYPE; i++) { 3133 for (i = 0; i < NR_DIRTY_TYPE; i++) {
3134 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 3134 dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
3135 if (!dirty_i->dirty_segmap[i]) 3135 if (!dirty_i->dirty_segmap[i])
3136 return -ENOMEM; 3136 return -ENOMEM;
3137 } 3137 }
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 10bf05d4cff4..010f336a7573 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -712,8 +712,9 @@ static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
712static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi) 712static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
713{ 713{
714 struct sit_info *sit_i = SIT_I(sbi); 714 struct sit_info *sit_i = SIT_I(sbi);
715 return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec - 715 time64_t now = ktime_get_real_seconds();
716 sit_i->mounted_time; 716
717 return sit_i->elapsed_time + now - sit_i->mounted_time;
717} 718}
718 719
719static inline void set_summary(struct f2fs_summary *sum, nid_t nid, 720static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
diff --git a/fs/file.c b/fs/file.c
index ad6f094f2eff..1c2972e3a405 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -42,7 +42,7 @@ static void *alloc_fdmem(size_t size)
42 if (data != NULL) 42 if (data != NULL)
43 return data; 43 return data;
44 } 44 }
45 return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL); 45 return __vmalloc(size, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
46} 46}
47 47
48static void __free_fdtable(struct fdtable *fdt) 48static void __free_fdtable(struct fdtable *fdt)
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 3814a60e0aea..4d810be532dd 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1072,7 +1072,7 @@ out_unlock:
1072 /* Every transaction boundary, we rewrite the dinode 1072 /* Every transaction boundary, we rewrite the dinode
1073 to keep its di_blocks current in case of failure. */ 1073 to keep its di_blocks current in case of failure. */
1074 ip->i_inode.i_mtime = ip->i_inode.i_ctime = 1074 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1075 CURRENT_TIME; 1075 current_time(&ip->i_inode);
1076 gfs2_trans_add_meta(ip->i_gl, dibh); 1076 gfs2_trans_add_meta(ip->i_gl, dibh);
1077 gfs2_dinode_out(ip, dibh->b_data); 1077 gfs2_dinode_out(ip, dibh->b_data);
1078 up_write(&ip->i_rw_mutex); 1078 up_write(&ip->i_rw_mutex);
@@ -1293,7 +1293,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
1293 gfs2_statfs_change(sdp, 0, +btotal, 0); 1293 gfs2_statfs_change(sdp, 0, +btotal, 0);
1294 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid, 1294 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1295 ip->i_inode.i_gid); 1295 ip->i_inode.i_gid);
1296 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 1296 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1297 gfs2_trans_add_meta(ip->i_gl, dibh); 1297 gfs2_trans_add_meta(ip->i_gl, dibh);
1298 gfs2_dinode_out(ip, dibh->b_data); 1298 gfs2_dinode_out(ip, dibh->b_data);
1299 up_write(&ip->i_rw_mutex); 1299 up_write(&ip->i_rw_mutex);
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index e33a0d36a93e..5d0182654580 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -485,8 +485,8 @@ void hfs_file_truncate(struct inode *inode)
485 485
486 /* XXX: Can use generic_cont_expand? */ 486 /* XXX: Can use generic_cont_expand? */
487 size = inode->i_size - 1; 487 size = inode->i_size - 1;
488 res = pagecache_write_begin(NULL, mapping, size+1, 0, 488 res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
489 AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); 489 &page, &fsdata);
490 if (!res) { 490 if (!res) {
491 res = pagecache_write_end(NULL, mapping, size+1, 0, 0, 491 res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
492 page, fsdata); 492 page, fsdata);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index feca524ce2a5..a3eb640b4f8f 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -545,9 +545,8 @@ void hfsplus_file_truncate(struct inode *inode)
545 void *fsdata; 545 void *fsdata;
546 loff_t size = inode->i_size; 546 loff_t size = inode->i_size;
547 547
548 res = pagecache_write_begin(NULL, mapping, size, 0, 548 res = pagecache_write_begin(NULL, mapping, size, 0, 0,
549 AOP_FLAG_UNINTERRUPTIBLE, 549 &page, &fsdata);
550 &page, &fsdata);
551 if (res) 550 if (res)
552 return; 551 return;
553 res = pagecache_write_end(NULL, mapping, size, 552 res = pagecache_write_end(NULL, mapping, size,
diff --git a/fs/inode.c b/fs/inode.c
index 131b2bcebc48..6ad1edb52045 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -119,7 +119,7 @@ static int no_open(struct inode *inode, struct file *file)
119} 119}
120 120
121/** 121/**
122 * inode_init_always - perform inode structure intialisation 122 * inode_init_always - perform inode structure initialisation
123 * @sb: superblock inode belongs to 123 * @sb: superblock inode belongs to
124 * @inode: inode to initialise 124 * @inode: inode to initialise
125 * 125 *
diff --git a/fs/iomap.c b/fs/iomap.c
index 1faabe09b8fd..4b10892967a5 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -158,12 +158,6 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
158 ssize_t written = 0; 158 ssize_t written = 0;
159 unsigned int flags = AOP_FLAG_NOFS; 159 unsigned int flags = AOP_FLAG_NOFS;
160 160
161 /*
162 * Copies from kernel address space cannot fail (NFSD is a big user).
163 */
164 if (!iter_is_iovec(i))
165 flags |= AOP_FLAG_UNINTERRUPTIBLE;
166
167 do { 161 do {
168 struct page *page; 162 struct page *page;
169 unsigned long offset; /* Offset into pagecache page */ 163 unsigned long offset; /* Offset into pagecache page */
@@ -291,8 +285,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
291 return PTR_ERR(rpage); 285 return PTR_ERR(rpage);
292 286
293 status = iomap_write_begin(inode, pos, bytes, 287 status = iomap_write_begin(inode, pos, bytes,
294 AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE, 288 AOP_FLAG_NOFS, &page, iomap);
295 &page, iomap);
296 put_page(rpage); 289 put_page(rpage);
297 if (unlikely(status)) 290 if (unlikely(status))
298 return status; 291 return status;
@@ -343,8 +336,8 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
343 struct page *page; 336 struct page *page;
344 int status; 337 int status;
345 338
346 status = iomap_write_begin(inode, pos, bytes, 339 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
347 AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap); 340 iomap);
348 if (status) 341 if (status)
349 return status; 342 return status;
350 343
diff --git a/fs/namei.c b/fs/namei.c
index 9a7f8bd748d8..7286f87ce863 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4766,7 +4766,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
4766 struct page *page; 4766 struct page *page;
4767 void *fsdata; 4767 void *fsdata;
4768 int err; 4768 int err;
4769 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; 4769 unsigned int flags = 0;
4770 if (nofs) 4770 if (nofs)
4771 flags |= AOP_FLAG_NOFS; 4771 flags |= AOP_FLAG_NOFS;
4772 4772
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 323f492e0822..f3db56e83dd2 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -196,9 +196,11 @@ int ns_get_name(char *buf, size_t size, struct task_struct *task,
196{ 196{
197 struct ns_common *ns; 197 struct ns_common *ns;
198 int res = -ENOENT; 198 int res = -ENOENT;
199 const char *name;
199 ns = ns_ops->get(task); 200 ns = ns_ops->get(task);
200 if (ns) { 201 if (ns) {
201 res = snprintf(buf, size, "%s:[%u]", ns_ops->name, ns->inum); 202 name = ns_ops->real_ns_name ? : ns_ops->name;
203 res = snprintf(buf, size, "%s:[%u]", name, ns->inum);
202 ns_ops->put(ns); 204 ns_ops->put(ns);
203 } 205 }
204 return res; 206 return res;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 2cc7a8030275..e250910cffc8 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -58,7 +58,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
58 struct proc_inode *ei; 58 struct proc_inode *ei;
59 struct inode *inode; 59 struct inode *inode;
60 60
61 ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); 61 ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
62 if (!ei) 62 if (!ei)
63 return NULL; 63 return NULL;
64 ei->pid = NULL; 64 ei->pid = NULL;
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 766f0c637ad1..3803b24ca220 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -23,6 +23,7 @@ static const struct proc_ns_operations *ns_entries[] = {
23#endif 23#endif
24#ifdef CONFIG_PID_NS 24#ifdef CONFIG_PID_NS
25 &pidns_operations, 25 &pidns_operations,
26 &pidns_for_children_operations,
26#endif 27#endif
27#ifdef CONFIG_USER_NS 28#ifdef CONFIG_USER_NS
28 &userns_operations, 29 &userns_operations,
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index aca73dd73906..e3c558d1b78c 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
724} 724}
725 725
726static struct item_operations errcatch_ops = { 726static struct item_operations errcatch_ops = {
727 errcatch_bytes_number, 727 .bytes_number = errcatch_bytes_number,
728 errcatch_decrement_key, 728 .decrement_key = errcatch_decrement_key,
729 errcatch_is_left_mergeable, 729 .is_left_mergeable = errcatch_is_left_mergeable,
730 errcatch_print_item, 730 .print_item = errcatch_print_item,
731 errcatch_check_item, 731 .check_item = errcatch_check_item,
732 732
733 errcatch_create_vi, 733 .create_vi = errcatch_create_vi,
734 errcatch_check_left, 734 .check_left = errcatch_check_left,
735 errcatch_check_right, 735 .check_right = errcatch_check_right,
736 errcatch_part_size, 736 .part_size = errcatch_part_size,
737 errcatch_unit_num, 737 .unit_num = errcatch_unit_num,
738 errcatch_print_vi 738 .print_vi = errcatch_print_vi
739}; 739};
740 740
741#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3) 741#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
diff --git a/fs/select.c b/fs/select.c
index bd4b2ccfd346..d6c652a31e99 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -633,10 +633,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
633 goto out_nofds; 633 goto out_nofds;
634 634
635 alloc_size = 6 * size; 635 alloc_size = 6 * size;
636 bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN); 636 bits = kvmalloc(alloc_size, GFP_KERNEL);
637 if (!bits && alloc_size > PAGE_SIZE)
638 bits = vmalloc(alloc_size);
639
640 if (!bits) 637 if (!bits)
641 goto out_nofds; 638 goto out_nofds;
642 } 639 }
diff --git a/fs/seq_file.c b/fs/seq_file.c
index ca69fb99e41a..dc7c2be963ed 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -25,21 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
25 25
26static void *seq_buf_alloc(unsigned long size) 26static void *seq_buf_alloc(unsigned long size)
27{ 27{
28 void *buf; 28 return kvmalloc(size, GFP_KERNEL);
29 gfp_t gfp = GFP_KERNEL;
30
31 /*
32 * For high order allocations, use __GFP_NORETRY to avoid oom-killing -
33 * it's better to fall back to vmalloc() than to kill things. For small
34 * allocations, just use GFP_KERNEL which will oom kill, thus no need
35 * for vmalloc fallback.
36 */
37 if (size > PAGE_SIZE)
38 gfp |= __GFP_NORETRY | __GFP_NOWARN;
39 buf = kmalloc(size, gfp);
40 if (!buf && size > PAGE_SIZE)
41 buf = vmalloc(size);
42 return buf;
43} 29}
44 30
45/** 31/**
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 8049851cac42..566079d9b402 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -121,7 +121,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
121 121
122 inode_init_owner(inode, dir, mode); 122 inode_init_owner(inode, dir, mode);
123 inode->i_mtime = inode->i_atime = inode->i_ctime = 123 inode->i_mtime = inode->i_atime = inode->i_ctime =
124 ubifs_current_time(inode); 124 current_time(inode);
125 inode->i_mapping->nrpages = 0; 125 inode->i_mapping->nrpages = 0;
126 126
127 switch (mode & S_IFMT) { 127 switch (mode & S_IFMT) {
@@ -766,7 +766,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
766 766
767 inc_nlink(inode); 767 inc_nlink(inode);
768 ihold(inode); 768 ihold(inode);
769 inode->i_ctime = ubifs_current_time(inode); 769 inode->i_ctime = current_time(inode);
770 dir->i_size += sz_change; 770 dir->i_size += sz_change;
771 dir_ui->ui_size = dir->i_size; 771 dir_ui->ui_size = dir->i_size;
772 dir->i_mtime = dir->i_ctime = inode->i_ctime; 772 dir->i_mtime = dir->i_ctime = inode->i_ctime;
@@ -841,7 +841,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
841 } 841 }
842 842
843 lock_2_inodes(dir, inode); 843 lock_2_inodes(dir, inode);
844 inode->i_ctime = ubifs_current_time(dir); 844 inode->i_ctime = current_time(dir);
845 drop_nlink(inode); 845 drop_nlink(inode);
846 dir->i_size -= sz_change; 846 dir->i_size -= sz_change;
847 dir_ui->ui_size = dir->i_size; 847 dir_ui->ui_size = dir->i_size;
@@ -945,7 +945,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
945 } 945 }
946 946
947 lock_2_inodes(dir, inode); 947 lock_2_inodes(dir, inode);
948 inode->i_ctime = ubifs_current_time(dir); 948 inode->i_ctime = current_time(dir);
949 clear_nlink(inode); 949 clear_nlink(inode);
950 drop_nlink(dir); 950 drop_nlink(dir);
951 dir->i_size -= sz_change; 951 dir->i_size -= sz_change;
@@ -1422,7 +1422,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
1422 * Like most other Unix systems, set the @i_ctime for inodes on a 1422 * Like most other Unix systems, set the @i_ctime for inodes on a
1423 * rename. 1423 * rename.
1424 */ 1424 */
1425 time = ubifs_current_time(old_dir); 1425 time = current_time(old_dir);
1426 old_inode->i_ctime = time; 1426 old_inode->i_ctime = time;
1427 1427
1428 /* We must adjust parent link count when renaming directories */ 1428 /* We must adjust parent link count when renaming directories */
@@ -1595,7 +1595,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
1595 1595
1596 lock_4_inodes(old_dir, new_dir, NULL, NULL); 1596 lock_4_inodes(old_dir, new_dir, NULL, NULL);
1597 1597
1598 time = ubifs_current_time(old_dir); 1598 time = current_time(old_dir);
1599 fst_inode->i_ctime = time; 1599 fst_inode->i_ctime = time;
1600 snd_inode->i_ctime = time; 1600 snd_inode->i_ctime = time;
1601 old_dir->i_mtime = old_dir->i_ctime = time; 1601 old_dir->i_mtime = old_dir->i_ctime = time;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index d9ae86f96df7..2cda3d67e2d0 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1196,7 +1196,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
1196 mutex_lock(&ui->ui_mutex); 1196 mutex_lock(&ui->ui_mutex);
1197 ui->ui_size = inode->i_size; 1197 ui->ui_size = inode->i_size;
1198 /* Truncation changes inode [mc]time */ 1198 /* Truncation changes inode [mc]time */
1199 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); 1199 inode->i_mtime = inode->i_ctime = current_time(inode);
1200 /* Other attributes may be changed at the same time as well */ 1200 /* Other attributes may be changed at the same time as well */
1201 do_attr_changes(inode, attr); 1201 do_attr_changes(inode, attr);
1202 err = ubifs_jnl_truncate(c, inode, old_size, new_size); 1202 err = ubifs_jnl_truncate(c, inode, old_size, new_size);
@@ -1243,7 +1243,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
1243 mutex_lock(&ui->ui_mutex); 1243 mutex_lock(&ui->ui_mutex);
1244 if (attr->ia_valid & ATTR_SIZE) { 1244 if (attr->ia_valid & ATTR_SIZE) {
1245 /* Truncation changes inode [mc]time */ 1245 /* Truncation changes inode [mc]time */
1246 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); 1246 inode->i_mtime = inode->i_ctime = current_time(inode);
1247 /* 'truncate_setsize()' changed @i_size, update @ui_size */ 1247 /* 'truncate_setsize()' changed @i_size, update @ui_size */
1248 ui->ui_size = inode->i_size; 1248 ui->ui_size = inode->i_size;
1249 } 1249 }
@@ -1420,7 +1420,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
1420 */ 1420 */
1421static int update_mctime(struct inode *inode) 1421static int update_mctime(struct inode *inode)
1422{ 1422{
1423 struct timespec now = ubifs_current_time(inode); 1423 struct timespec now = current_time(inode);
1424 struct ubifs_inode *ui = ubifs_inode(inode); 1424 struct ubifs_inode *ui = ubifs_inode(inode);
1425 struct ubifs_info *c = inode->i_sb->s_fs_info; 1425 struct ubifs_info *c = inode->i_sb->s_fs_info;
1426 1426
@@ -1434,7 +1434,7 @@ static int update_mctime(struct inode *inode)
1434 return err; 1434 return err;
1435 1435
1436 mutex_lock(&ui->ui_mutex); 1436 mutex_lock(&ui->ui_mutex);
1437 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); 1437 inode->i_mtime = inode->i_ctime = current_time(inode);
1438 release = ui->dirty; 1438 release = ui->dirty;
1439 mark_inode_dirty_sync(inode); 1439 mark_inode_dirty_sync(inode);
1440 mutex_unlock(&ui->ui_mutex); 1440 mutex_unlock(&ui->ui_mutex);
@@ -1511,7 +1511,7 @@ static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
1511 struct page *page = vmf->page; 1511 struct page *page = vmf->page;
1512 struct inode *inode = file_inode(vmf->vma->vm_file); 1512 struct inode *inode = file_inode(vmf->vma->vm_file);
1513 struct ubifs_info *c = inode->i_sb->s_fs_info; 1513 struct ubifs_info *c = inode->i_sb->s_fs_info;
1514 struct timespec now = ubifs_current_time(inode); 1514 struct timespec now = current_time(inode);
1515 struct ubifs_budget_req req = { .new_page = 1 }; 1515 struct ubifs_budget_req req = { .new_page = 1 };
1516 int err, update_time; 1516 int err, update_time;
1517 1517
@@ -1579,7 +1579,7 @@ static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
1579 struct ubifs_inode *ui = ubifs_inode(inode); 1579 struct ubifs_inode *ui = ubifs_inode(inode);
1580 1580
1581 mutex_lock(&ui->ui_mutex); 1581 mutex_lock(&ui->ui_mutex);
1582 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); 1582 inode->i_mtime = inode->i_ctime = current_time(inode);
1583 release = ui->dirty; 1583 release = ui->dirty;
1584 mark_inode_dirty_sync(inode); 1584 mark_inode_dirty_sync(inode);
1585 mutex_unlock(&ui->ui_mutex); 1585 mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index da519ba205f6..12b9eb5005ff 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -126,7 +126,7 @@ static int setflags(struct inode *inode, int flags)
126 126
127 ui->flags = ioctl2ubifs(flags); 127 ui->flags = ioctl2ubifs(flags);
128 ubifs_set_inode_flags(inode); 128 ubifs_set_inode_flags(inode);
129 inode->i_ctime = ubifs_current_time(inode); 129 inode->i_ctime = current_time(inode);
130 release = ui->dirty; 130 release = ui->dirty;
131 mark_inode_dirty_sync(inode); 131 mark_inode_dirty_sync(inode);
132 mutex_unlock(&ui->ui_mutex); 132 mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 8ece6ca58c0b..caf83d68fb38 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -225,16 +225,6 @@ static inline void *ubifs_idx_key(const struct ubifs_info *c,
225} 225}
226 226
227/** 227/**
228 * ubifs_current_time - round current time to time granularity.
229 * @inode: inode
230 */
231static inline struct timespec ubifs_current_time(struct inode *inode)
232{
233 return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
234 current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
235}
236
237/**
238 * ubifs_tnc_lookup - look up a file-system node. 228 * ubifs_tnc_lookup - look up a file-system node.
239 * @c: UBIFS file-system description object 229 * @c: UBIFS file-system description object
240 * @key: node key to lookup 230 * @key: node key to lookup
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 7f1ead29e727..8c25081a5109 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -84,6 +84,8 @@ static int create_default_filesystem(struct ubifs_info *c)
84 int min_leb_cnt = UBIFS_MIN_LEB_CNT; 84 int min_leb_cnt = UBIFS_MIN_LEB_CNT;
85 long long tmp64, main_bytes; 85 long long tmp64, main_bytes;
86 __le64 tmp_le64; 86 __le64 tmp_le64;
87 __le32 tmp_le32;
88 struct timespec ts;
87 89
88 /* Some functions called from here depend on the @c->key_len filed */ 90 /* Some functions called from here depend on the @c->key_len filed */
89 c->key_len = UBIFS_SK_LEN; 91 c->key_len = UBIFS_SK_LEN;
@@ -298,13 +300,17 @@ static int create_default_filesystem(struct ubifs_info *c)
298 ino->ch.node_type = UBIFS_INO_NODE; 300 ino->ch.node_type = UBIFS_INO_NODE;
299 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); 301 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
300 ino->nlink = cpu_to_le32(2); 302 ino->nlink = cpu_to_le32(2);
301 tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); 303
304 ktime_get_real_ts(&ts);
305 ts = timespec_trunc(ts, DEFAULT_TIME_GRAN);
306 tmp_le64 = cpu_to_le64(ts.tv_sec);
302 ino->atime_sec = tmp_le64; 307 ino->atime_sec = tmp_le64;
303 ino->ctime_sec = tmp_le64; 308 ino->ctime_sec = tmp_le64;
304 ino->mtime_sec = tmp_le64; 309 ino->mtime_sec = tmp_le64;
305 ino->atime_nsec = 0; 310 tmp_le32 = cpu_to_le32(ts.tv_nsec);
306 ino->ctime_nsec = 0; 311 ino->atime_nsec = tmp_le32;
307 ino->mtime_nsec = 0; 312 ino->ctime_nsec = tmp_le32;
313 ino->mtime_nsec = tmp_le32;
308 ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO); 314 ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
309 ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ); 315 ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);
310 316
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index efe00fcb8b75..3e53fdbf7997 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,7 +152,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
152 ui->data_len = size; 152 ui->data_len = size;
153 153
154 mutex_lock(&host_ui->ui_mutex); 154 mutex_lock(&host_ui->ui_mutex);
155 host->i_ctime = ubifs_current_time(host); 155 host->i_ctime = current_time(host);
156 host_ui->xattr_cnt += 1; 156 host_ui->xattr_cnt += 1;
157 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); 157 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
158 host_ui->xattr_size += CALC_XATTR_BYTES(size); 158 host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -234,7 +234,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
234 mutex_unlock(&ui->ui_mutex); 234 mutex_unlock(&ui->ui_mutex);
235 235
236 mutex_lock(&host_ui->ui_mutex); 236 mutex_lock(&host_ui->ui_mutex);
237 host->i_ctime = ubifs_current_time(host); 237 host->i_ctime = current_time(host);
238 host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); 238 host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
239 host_ui->xattr_size += CALC_XATTR_BYTES(size); 239 host_ui->xattr_size += CALC_XATTR_BYTES(size);
240 240
@@ -488,7 +488,7 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
488 return err; 488 return err;
489 489
490 mutex_lock(&host_ui->ui_mutex); 490 mutex_lock(&host_ui->ui_mutex);
491 host->i_ctime = ubifs_current_time(host); 491 host->i_ctime = current_time(host);
492 host_ui->xattr_cnt -= 1; 492 host_ui->xattr_cnt -= 1;
493 host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); 493 host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
494 host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len); 494 host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 9774555b3721..d1dd8cc33179 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -176,6 +176,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
176 struct ufs_cg_private_info * ucpi; 176 struct ufs_cg_private_info * ucpi;
177 struct ufs_cylinder_group * ucg; 177 struct ufs_cylinder_group * ucg;
178 struct inode * inode; 178 struct inode * inode;
179 struct timespec64 ts;
179 unsigned cg, bit, i, j, start; 180 unsigned cg, bit, i, j, start;
180 struct ufs_inode_info *ufsi; 181 struct ufs_inode_info *ufsi;
181 int err = -ENOSPC; 182 int err = -ENOSPC;
@@ -323,8 +324,9 @@ cg_found:
323 lock_buffer(bh); 324 lock_buffer(bh);
324 ufs2_inode = (struct ufs2_inode *)bh->b_data; 325 ufs2_inode = (struct ufs2_inode *)bh->b_data;
325 ufs2_inode += ufs_inotofsbo(inode->i_ino); 326 ufs2_inode += ufs_inotofsbo(inode->i_ino);
326 ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec); 327 ktime_get_real_ts64(&ts);
327 ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec); 328 ufs2_inode->ui_birthtime = cpu_to_fs64(sb, ts.tv_sec);
329 ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
328 mark_buffer_dirty(bh); 330 mark_buffer_dirty(bh);
329 unlock_buffer(bh); 331 unlock_buffer(bh);
330 if (sb->s_flags & MS_SYNCHRONOUS) 332 if (sb->s_flags & MS_SYNCHRONOUS)
diff --git a/fs/xattr.c b/fs/xattr.c
index 7e3317cf4045..464c94bf65f9 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -431,12 +431,9 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
431 if (size) { 431 if (size) {
432 if (size > XATTR_SIZE_MAX) 432 if (size > XATTR_SIZE_MAX)
433 return -E2BIG; 433 return -E2BIG;
434 kvalue = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 434 kvalue = kvmalloc(size, GFP_KERNEL);
435 if (!kvalue) { 435 if (!kvalue)
436 kvalue = vmalloc(size); 436 return -ENOMEM;
437 if (!kvalue)
438 return -ENOMEM;
439 }
440 if (copy_from_user(kvalue, value, size)) { 437 if (copy_from_user(kvalue, value, size)) {
441 error = -EFAULT; 438 error = -EFAULT;
442 goto out; 439 goto out;
@@ -528,12 +525,9 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
528 if (size) { 525 if (size) {
529 if (size > XATTR_SIZE_MAX) 526 if (size > XATTR_SIZE_MAX)
530 size = XATTR_SIZE_MAX; 527 size = XATTR_SIZE_MAX;
531 kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 528 kvalue = kvzalloc(size, GFP_KERNEL);
532 if (!kvalue) { 529 if (!kvalue)
533 kvalue = vmalloc(size); 530 return -ENOMEM;
534 if (!kvalue)
535 return -ENOMEM;
536 }
537 } 531 }
538 532
539 error = vfs_getxattr(d, kname, kvalue, size); 533 error = vfs_getxattr(d, kname, kvalue, size);
@@ -611,12 +605,9 @@ listxattr(struct dentry *d, char __user *list, size_t size)
611 if (size) { 605 if (size) {
612 if (size > XATTR_LIST_MAX) 606 if (size > XATTR_LIST_MAX)
613 size = XATTR_LIST_MAX; 607 size = XATTR_LIST_MAX;
614 klist = kmalloc(size, __GFP_NOWARN | GFP_KERNEL); 608 klist = kvmalloc(size, GFP_KERNEL);
615 if (!klist) { 609 if (!klist)
616 klist = vmalloc(size); 610 return -ENOMEM;
617 if (!klist)
618 return -ENOMEM;
619 }
620 } 611 }
621 612
622 error = vfs_listxattr(d, klist, size); 613 error = vfs_listxattr(d, klist, size);
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 780fc8986dab..393b6849aeb3 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -67,7 +67,7 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
67 nofs_flag = memalloc_nofs_save(); 67 nofs_flag = memalloc_nofs_save();
68 68
69 lflags = kmem_flags_convert(flags); 69 lflags = kmem_flags_convert(flags);
70 ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); 70 ptr = __vmalloc(size, lflags | __GFP_ZERO, PAGE_KERNEL);
71 71
72 if (flags & KM_NOFS) 72 if (flags & KM_NOFS)
73 memalloc_nofs_restore(nofs_flag); 73 memalloc_nofs_restore(nofs_flag);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 4a98762ec8b4..cd0b077deb35 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3796,7 +3796,7 @@ xlog_recover_bud_pass2(
3796 * This routine is called when an inode create format structure is found in a 3796 * This routine is called when an inode create format structure is found in a
3797 * committed transaction in the log. It's purpose is to initialise the inodes 3797 * committed transaction in the log. It's purpose is to initialise the inodes
3798 * being allocated on disk. This requires us to get inode cluster buffers that 3798 * being allocated on disk. This requires us to get inode cluster buffers that
3799 * match the range to be intialised, stamped with inode templates and written 3799 * match the range to be initialised, stamped with inode templates and written
3800 * by delayed write so that subsequent modifications will hit the cached buffer 3800 * by delayed write so that subsequent modifications will hit the cached buffer
3801 * and only need writing out at the end of recovery. 3801 * and only need writing out at the end of recovery.
3802 */ 3802 */
diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h
new file mode 100644
index 000000000000..83e81f8996b2
--- /dev/null
+++ b/include/asm-generic/set_memory.h
@@ -0,0 +1,12 @@
1#ifndef __ASM_SET_MEMORY_H
2#define __ASM_SET_MEMORY_H
3
4/*
5 * Functions to change memory attributes.
6 */
7int set_memory_ro(unsigned long addr, int numpages);
8int set_memory_rw(unsigned long addr, int numpages);
9int set_memory_x(unsigned long addr, int numpages);
10int set_memory_nx(unsigned long addr, int numpages);
11
12#endif
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 70d4e221a3ad..d0f6cf2e5324 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -37,8 +37,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
37 if (size * nmemb <= PAGE_SIZE) 37 if (size * nmemb <= PAGE_SIZE)
38 return kcalloc(nmemb, size, GFP_KERNEL); 38 return kcalloc(nmemb, size, GFP_KERNEL);
39 39
40 return __vmalloc(size * nmemb, 40 return vzalloc(size * nmemb);
41 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
42} 41}
43 42
44/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ 43/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
@@ -50,8 +49,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
50 if (size * nmemb <= PAGE_SIZE) 49 if (size * nmemb <= PAGE_SIZE)
51 return kmalloc(nmemb * size, GFP_KERNEL); 50 return kmalloc(nmemb * size, GFP_KERNEL);
52 51
53 return __vmalloc(size * nmemb, 52 return vmalloc(size * nmemb);
54 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
55} 53}
56 54
57static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp) 55static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
@@ -69,8 +67,7 @@ static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
69 return ptr; 67 return ptr;
70 } 68 }
71 69
72 return __vmalloc(size * nmemb, 70 return __vmalloc(size * nmemb, gfp, PAGE_KERNEL);
73 gfp | __GFP_HIGHMEM, PAGE_KERNEL);
74} 71}
75 72
76static __inline void drm_free_large(void *ptr) 73static __inline void drm_free_large(void *ptr)
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 9657f11d48a7..bca6a5e4ca3d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -80,7 +80,7 @@ struct pci_dev;
80#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ 80#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
81#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ 81#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
82#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */ 82#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
83#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */ 83#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
84#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */ 84#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
85#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ 85#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
86#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ 86#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1a675604b17d..2404ad238c0b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -40,9 +40,9 @@ extern int nr_cpu_ids;
40#ifdef CONFIG_CPUMASK_OFFSTACK 40#ifdef CONFIG_CPUMASK_OFFSTACK
41/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, 41/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
42 * not all bits may be allocated. */ 42 * not all bits may be allocated. */
43#define nr_cpumask_bits nr_cpu_ids 43#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
44#else 44#else
45#define nr_cpumask_bits NR_CPUS 45#define nr_cpumask_bits ((unsigned int)NR_CPUS)
46#endif 46#endif
47 47
48/* 48/*
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
new file mode 100644
index 000000000000..541a197ba4a2
--- /dev/null
+++ b/include/linux/crash_core.h
@@ -0,0 +1,69 @@
1#ifndef LINUX_CRASH_CORE_H
2#define LINUX_CRASH_CORE_H
3
4#include <linux/linkage.h>
5#include <linux/elfcore.h>
6#include <linux/elf.h>
7
8#define CRASH_CORE_NOTE_NAME "CORE"
9#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
10#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
11#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
12
13#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
14 CRASH_CORE_NOTE_NAME_BYTES + \
15 CRASH_CORE_NOTE_DESC_BYTES)
16
17#define VMCOREINFO_BYTES (4096)
18#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
19#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
20#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
21 VMCOREINFO_NOTE_NAME_BYTES + \
22 VMCOREINFO_BYTES)
23
24typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
25
26void crash_save_vmcoreinfo(void);
27void arch_crash_save_vmcoreinfo(void);
28__printf(1, 2)
29void vmcoreinfo_append_str(const char *fmt, ...);
30phys_addr_t paddr_vmcoreinfo_note(void);
31
32#define VMCOREINFO_OSRELEASE(value) \
33 vmcoreinfo_append_str("OSRELEASE=%s\n", value)
34#define VMCOREINFO_PAGESIZE(value) \
35 vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
36#define VMCOREINFO_SYMBOL(name) \
37 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
38#define VMCOREINFO_SIZE(name) \
39 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
40 (unsigned long)sizeof(name))
41#define VMCOREINFO_STRUCT_SIZE(name) \
42 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
43 (unsigned long)sizeof(struct name))
44#define VMCOREINFO_OFFSET(name, field) \
45 vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
46 (unsigned long)offsetof(struct name, field))
47#define VMCOREINFO_LENGTH(name, value) \
48 vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
49#define VMCOREINFO_NUMBER(name) \
50 vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
51#define VMCOREINFO_CONFIG(name) \
52 vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
53
54extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
55extern size_t vmcoreinfo_size;
56extern size_t vmcoreinfo_max_size;
57
58Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
59 void *data, size_t data_len);
60void final_note(Elf_Word *buf);
61
62int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
63 unsigned long long *crash_size, unsigned long long *crash_base);
64int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
65 unsigned long long *crash_size, unsigned long long *crash_base);
66int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
67 unsigned long long *crash_size, unsigned long long *crash_base);
68
69#endif /* LINUX_CRASH_CORE_H */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 20fa8d8ae313..ba069e8f4f78 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
29#define elf_note elf32_note 29#define elf_note elf32_note
30#define elf_addr_t Elf32_Off 30#define elf_addr_t Elf32_Off
31#define Elf_Half Elf32_Half 31#define Elf_Half Elf32_Half
32#define Elf_Word Elf32_Word
32 33
33#else 34#else
34 35
@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
39#define elf_note elf64_note 40#define elf_note elf64_note
40#define elf_addr_t Elf64_Off 41#define elf_addr_t Elf64_Off
41#define Elf_Half Elf64_Half 42#define Elf_Half Elf64_Half
43#define Elf_Word Elf64_Word
42 44
43#endif 45#endif
44 46
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9a7786db14fa..56197f82af45 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,7 +19,9 @@
19 19
20#include <net/sch_generic.h> 20#include <net/sch_generic.h>
21 21
22#include <asm/cacheflush.h> 22#ifdef CONFIG_ARCH_HAS_SET_MEMORY
23#include <asm/set_memory.h>
24#endif
23 25
24#include <uapi/linux/filter.h> 26#include <uapi/linux/filter.h>
25#include <uapi/linux/bpf.h> 27#include <uapi/linux/bpf.h>
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5d62d2c47939..249dad4e8d26 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -250,9 +250,8 @@ enum positive_aop_returns {
250 AOP_TRUNCATED_PAGE = 0x80001, 250 AOP_TRUNCATED_PAGE = 0x80001,
251}; 251};
252 252
253#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ 253#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
254#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ 254#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
255#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
256 * helper code (eg buffer layer) 255 * helper code (eg buffer layer)
257 * to clear GFP_FS from alloc */ 256 * to clear GFP_FS from alloc */
258 257
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6d2a63e4ea52..473f088aabea 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -72,7 +72,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
72 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and 72 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
73 * IPMODIFY are a kind of attribute flags which can be set only before 73 * IPMODIFY are a kind of attribute flags which can be set only before
74 * registering the ftrace_ops, and can not be modified while registered. 74 * registering the ftrace_ops, and can not be modified while registered.
75 * Changing those attribute flags after regsitering ftrace_ops will 75 * Changing those attribute flags after registering ftrace_ops will
76 * cause unexpected results. 76 * cause unexpected results.
77 * 77 *
78 * ENABLED - set/unset when ftrace_ops is registered/unregistered 78 * ENABLED - set/unset when ftrace_ops is registered/unregistered
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 9d84942ae2e5..71fd92d81b26 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -8,8 +8,7 @@
8#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ 8#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
9 9
10/* used by in-kernel data structures */ 10/* used by in-kernel data structures */
11struct kern_ipc_perm 11struct kern_ipc_perm {
12{
13 spinlock_t lock; 12 spinlock_t lock;
14 bool deleted; 13 bool deleted;
15 int id; 14 int id;
@@ -18,9 +17,9 @@ struct kern_ipc_perm
18 kgid_t gid; 17 kgid_t gid;
19 kuid_t cuid; 18 kuid_t cuid;
20 kgid_t cgid; 19 kgid_t cgid;
21 umode_t mode; 20 umode_t mode;
22 unsigned long seq; 21 unsigned long seq;
23 void *security; 22 void *security;
24}; 23} ____cacheline_aligned_in_smp;
25 24
26#endif /* _LINUX_IPC_H */ 25#endif /* _LINUX_IPC_H */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 624215cebee5..36872fbb815d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
1#ifndef _LINUX_JIFFIES_H 1#ifndef _LINUX_JIFFIES_H
2#define _LINUX_JIFFIES_H 2#define _LINUX_JIFFIES_H
3 3
4#include <linux/cache.h>
4#include <linux/math64.h> 5#include <linux/math64.h>
5#include <linux/kernel.h> 6#include <linux/kernel.h>
6#include <linux/types.h> 7#include <linux/types.h>
@@ -63,19 +64,13 @@ extern int register_refined_jiffies(long clock_tick_rate);
63/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 64/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
64#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 65#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
65 66
66/* some arch's have a small-data section that can be accessed register-relative
67 * but that can only take up to, say, 4-byte variables. jiffies being part of
68 * an 8-byte variable may not be correctly accessed unless we force the issue
69 */
70#define __jiffy_data __attribute__((section(".data")))
71
72/* 67/*
73 * The 64-bit value is not atomic - you MUST NOT read it 68 * The 64-bit value is not atomic - you MUST NOT read it
74 * without sampling the sequence number in jiffies_lock. 69 * without sampling the sequence number in jiffies_lock.
75 * get_jiffies_64() will do this for you as appropriate. 70 * get_jiffies_64() will do this for you as appropriate.
76 */ 71 */
77extern u64 __jiffy_data jiffies_64; 72extern u64 __cacheline_aligned_in_smp jiffies_64;
78extern unsigned long volatile __jiffy_data jiffies; 73extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
79 74
80#if (BITS_PER_LONG < 64) 75#if (BITS_PER_LONG < 64)
81u64 get_jiffies_64(void); 76u64 get_jiffies_64(void);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d419d0e51fe5..c9481ebcbc0c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,17 +14,15 @@
14 14
15#if !defined(__ASSEMBLY__) 15#if !defined(__ASSEMBLY__)
16 16
17#include <linux/crash_core.h>
17#include <asm/io.h> 18#include <asm/io.h>
18 19
19#include <uapi/linux/kexec.h> 20#include <uapi/linux/kexec.h>
20 21
21#ifdef CONFIG_KEXEC_CORE 22#ifdef CONFIG_KEXEC_CORE
22#include <linux/list.h> 23#include <linux/list.h>
23#include <linux/linkage.h>
24#include <linux/compat.h> 24#include <linux/compat.h>
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/elfcore.h>
27#include <linux/elf.h>
28#include <linux/module.h> 26#include <linux/module.h>
29#include <asm/kexec.h> 27#include <asm/kexec.h>
30 28
@@ -62,19 +60,15 @@
62#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE 60#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
63#endif 61#endif
64 62
65#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) 63#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
66#define KEXEC_CORE_NOTE_NAME "CORE" 64
67#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
68#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
69/* 65/*
70 * The per-cpu notes area is a list of notes terminated by a "NULL" 66 * The per-cpu notes area is a list of notes terminated by a "NULL"
71 * note header. For kdump, the code in vmcore.c runs in the context 67 * note header. For kdump, the code in vmcore.c runs in the context
72 * of the second kernel to combine them into one note. 68 * of the second kernel to combine them into one note.
73 */ 69 */
74#ifndef KEXEC_NOTE_BYTES 70#ifndef KEXEC_NOTE_BYTES
75#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \ 71#define KEXEC_NOTE_BYTES CRASH_CORE_NOTE_BYTES
76 KEXEC_CORE_NOTE_NAME_BYTES + \
77 KEXEC_CORE_NOTE_DESC_BYTES )
78#endif 72#endif
79 73
80/* 74/*
@@ -256,33 +250,6 @@ extern void crash_kexec(struct pt_regs *);
256int kexec_should_crash(struct task_struct *); 250int kexec_should_crash(struct task_struct *);
257int kexec_crash_loaded(void); 251int kexec_crash_loaded(void);
258void crash_save_cpu(struct pt_regs *regs, int cpu); 252void crash_save_cpu(struct pt_regs *regs, int cpu);
259void crash_save_vmcoreinfo(void);
260void arch_crash_save_vmcoreinfo(void);
261__printf(1, 2)
262void vmcoreinfo_append_str(const char *fmt, ...);
263phys_addr_t paddr_vmcoreinfo_note(void);
264
265#define VMCOREINFO_OSRELEASE(value) \
266 vmcoreinfo_append_str("OSRELEASE=%s\n", value)
267#define VMCOREINFO_PAGESIZE(value) \
268 vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
269#define VMCOREINFO_SYMBOL(name) \
270 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
271#define VMCOREINFO_SIZE(name) \
272 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
273 (unsigned long)sizeof(name))
274#define VMCOREINFO_STRUCT_SIZE(name) \
275 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
276 (unsigned long)sizeof(struct name))
277#define VMCOREINFO_OFFSET(name, field) \
278 vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
279 (unsigned long)offsetof(struct name, field))
280#define VMCOREINFO_LENGTH(name, value) \
281 vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
282#define VMCOREINFO_NUMBER(name) \
283 vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
284#define VMCOREINFO_CONFIG(name) \
285 vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
286 253
287extern struct kimage *kexec_image; 254extern struct kimage *kexec_image;
288extern struct kimage *kexec_crash_image; 255extern struct kimage *kexec_crash_image;
@@ -303,31 +270,15 @@ extern int kexec_load_disabled;
303#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ 270#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
304 KEXEC_FILE_NO_INITRAMFS) 271 KEXEC_FILE_NO_INITRAMFS)
305 272
306#define VMCOREINFO_BYTES (4096)
307#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
308#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
309#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \
310 + VMCOREINFO_NOTE_NAME_BYTES)
311
312/* Location of a reserved region to hold the crash kernel. 273/* Location of a reserved region to hold the crash kernel.
313 */ 274 */
314extern struct resource crashk_res; 275extern struct resource crashk_res;
315extern struct resource crashk_low_res; 276extern struct resource crashk_low_res;
316typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
317extern note_buf_t __percpu *crash_notes; 277extern note_buf_t __percpu *crash_notes;
318extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
319extern size_t vmcoreinfo_size;
320extern size_t vmcoreinfo_max_size;
321 278
322/* flag to track if kexec reboot is in progress */ 279/* flag to track if kexec reboot is in progress */
323extern bool kexec_in_progress; 280extern bool kexec_in_progress;
324 281
325int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
326 unsigned long long *crash_size, unsigned long long *crash_base);
327int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
328 unsigned long long *crash_size, unsigned long long *crash_base);
329int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
330 unsigned long long *crash_size, unsigned long long *crash_base);
331int crash_shrink_memory(unsigned long new_size); 282int crash_shrink_memory(unsigned long new_size);
332size_t crash_get_memory_size(void); 283size_t crash_get_memory_size(void);
333void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); 284void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index f4156f88f557..29220724bf1c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -66,8 +66,6 @@ static inline void kref_get(struct kref *kref)
66 */ 66 */
67static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) 67static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
68{ 68{
69 WARN_ON(release == NULL);
70
71 if (refcount_dec_and_test(&kref->refcount)) { 69 if (refcount_dec_and_test(&kref->refcount)) {
72 release(kref); 70 release(kref);
73 return 1; 71 return 1;
@@ -79,8 +77,6 @@ static inline int kref_put_mutex(struct kref *kref,
79 void (*release)(struct kref *kref), 77 void (*release)(struct kref *kref),
80 struct mutex *lock) 78 struct mutex *lock)
81{ 79{
82 WARN_ON(release == NULL);
83
84 if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) { 80 if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
85 release(kref); 81 release(kref);
86 return 1; 82 return 1;
@@ -92,8 +88,6 @@ static inline int kref_put_lock(struct kref *kref,
92 void (*release)(struct kref *kref), 88 void (*release)(struct kref *kref),
93 spinlock_t *lock) 89 spinlock_t *lock)
94{ 90{
95 WARN_ON(release == NULL);
96
97 if (refcount_dec_and_lock(&kref->refcount, lock)) { 91 if (refcount_dec_and_lock(&kref->refcount, lock)) {
98 release(kref); 92 release(kref);
99 return 1; 93 return 1;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3727afdf614d..4d629471869b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -776,8 +776,6 @@ void kvm_arch_check_processor_compat(void *rtn);
776int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); 776int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
777int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); 777int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
778 778
779void *kvm_kvzalloc(unsigned long size);
780
781#ifndef __KVM_HAVE_ARCH_VM_ALLOC 779#ifndef __KVM_HAVE_ARCH_VM_ALLOC
782static inline struct kvm *kvm_arch_alloc_vm(void) 780static inline struct kvm *kvm_arch_alloc_vm(void)
783{ 781{
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 74b765ce48ab..d5bed0875d30 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -108,7 +108,7 @@ enum {
108 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) 108 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
109}; 109};
110 110
111/* Driver supports 3 diffrent device methods to manage traffic steering: 111/* Driver supports 3 different device methods to manage traffic steering:
112 * -device managed - High level API for ib and eth flow steering. FW is 112 * -device managed - High level API for ib and eth flow steering. FW is
113 * managing flow steering tables. 113 * managing flow steering tables.
114 * - B0 steering mode - Common low level API for ib and (if supported) eth. 114 * - B0 steering mode - Common low level API for ib and (if supported) eth.
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3fece51dcf13..18fc65b84b79 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -892,12 +892,7 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
892 892
893static inline void *mlx5_vzalloc(unsigned long size) 893static inline void *mlx5_vzalloc(unsigned long size)
894{ 894{
895 void *rtn; 895 return kvzalloc(size, GFP_KERNEL);
896
897 rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
898 if (!rtn)
899 rtn = vzalloc(size);
900 return rtn;
901} 896}
902 897
903static inline u32 mlx5_base_mkey(const u32 key) 898static inline u32 mlx5_base_mkey(const u32 key)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5d22e69f51ea..7cb17c6b97de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -518,6 +518,28 @@ static inline int is_vmalloc_or_module_addr(const void *x)
518} 518}
519#endif 519#endif
520 520
521extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
522static inline void *kvmalloc(size_t size, gfp_t flags)
523{
524 return kvmalloc_node(size, flags, NUMA_NO_NODE);
525}
526static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
527{
528 return kvmalloc_node(size, flags | __GFP_ZERO, node);
529}
530static inline void *kvzalloc(size_t size, gfp_t flags)
531{
532 return kvmalloc(size, flags | __GFP_ZERO);
533}
534
535static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
536{
537 if (size != 0 && n > SIZE_MAX / size)
538 return NULL;
539
540 return kvmalloc(n * size, flags);
541}
542
521extern void kvfree(const void *addr); 543extern void kvfree(const void *addr);
522 544
523static inline atomic_t *compound_mapcount_ptr(struct page *page) 545static inline atomic_t *compound_mapcount_ptr(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e0c3c5e3d8a0..ebaccd4e7d8c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -74,6 +74,11 @@ extern char * const migratetype_names[MIGRATE_TYPES];
74# define is_migrate_cma_page(_page) false 74# define is_migrate_cma_page(_page) false
75#endif 75#endif
76 76
77static inline bool is_migrate_movable(int mt)
78{
79 return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
80}
81
77#define for_each_migratetype_order(order, type) \ 82#define for_each_migratetype_order(order, type) \
78 for (order = 0; order < MAX_ORDER; order++) \ 83 for (order = 0; order < MAX_ORDER; order++) \
79 for (type = 0; type < MIGRATE_TYPES; type++) 84 for (type = 0; type < MIGRATE_TYPES; type++)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 047d64706f2a..d4cd2014fa6f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,10 +33,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
33 bool skip_hwpoisoned_pages); 33 bool skip_hwpoisoned_pages);
34void set_pageblock_migratetype(struct page *page, int migratetype); 34void set_pageblock_migratetype(struct page *page, int migratetype);
35int move_freepages_block(struct zone *zone, struct page *page, 35int move_freepages_block(struct zone *zone, struct page *page,
36 int migratetype); 36 int migratetype, int *num_movable);
37int move_freepages(struct zone *zone,
38 struct page *start_page, struct page *end_page,
39 int migratetype);
40 37
41/* 38/*
42 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. 39 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 571257e0f53d..e10f27468322 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -198,7 +198,7 @@ extern void wake_up_klogd(void);
198 198
199char *log_buf_addr_get(void); 199char *log_buf_addr_get(void);
200u32 log_buf_len_get(void); 200u32 log_buf_len_get(void);
201void log_buf_kexec_setup(void); 201void log_buf_vmcoreinfo_setup(void);
202void __init setup_log_buf(int early); 202void __init setup_log_buf(int early);
203__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); 203__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
204void dump_stack_print_info(const char *log_lvl); 204void dump_stack_print_info(const char *log_lvl);
@@ -246,7 +246,7 @@ static inline u32 log_buf_len_get(void)
246 return 0; 246 return 0;
247} 247}
248 248
249static inline void log_buf_kexec_setup(void) 249static inline void log_buf_vmcoreinfo_setup(void)
250{ 250{
251} 251}
252 252
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 12cb8bd81d2d..58ab28d81fc2 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -14,6 +14,7 @@ struct inode;
14 14
15struct proc_ns_operations { 15struct proc_ns_operations {
16 const char *name; 16 const char *name;
17 const char *real_ns_name;
17 int type; 18 int type;
18 struct ns_common *(*get)(struct task_struct *task); 19 struct ns_common *(*get)(struct task_struct *task);
19 void (*put)(struct ns_common *ns); 20 void (*put)(struct ns_common *ns);
@@ -26,6 +27,7 @@ extern const struct proc_ns_operations netns_operations;
26extern const struct proc_ns_operations utsns_operations; 27extern const struct proc_ns_operations utsns_operations;
27extern const struct proc_ns_operations ipcns_operations; 28extern const struct proc_ns_operations ipcns_operations;
28extern const struct proc_ns_operations pidns_operations; 29extern const struct proc_ns_operations pidns_operations;
30extern const struct proc_ns_operations pidns_for_children_operations;
29extern const struct proc_ns_operations userns_operations; 31extern const struct proc_ns_operations userns_operations;
30extern const struct proc_ns_operations mntns_operations; 32extern const struct proc_ns_operations mntns_operations;
31extern const struct proc_ns_operations cgroupns_operations; 33extern const struct proc_ns_operations cgroupns_operations;
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 9daabe138c99..2b24a6974847 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -191,4 +191,16 @@ static inline void memalloc_nofs_restore(unsigned int flags)
191 current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags; 191 current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
192} 192}
193 193
194static inline unsigned int memalloc_noreclaim_save(void)
195{
196 unsigned int flags = current->flags & PF_MEMALLOC;
197 current->flags |= PF_MEMALLOC;
198 return flags;
199}
200
201static inline void memalloc_noreclaim_restore(unsigned int flags)
202{
203 current->flags = (current->flags & ~PF_MEMALLOC) | flags;
204}
205
194#endif /* _LINUX_SCHED_MM_H */ 206#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 4fc222f8755d..9edec926e9d9 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -10,8 +10,7 @@ struct task_struct;
10 10
11/* One sem_array data structure for each set of semaphores in the system. */ 11/* One sem_array data structure for each set of semaphores in the system. */
12struct sem_array { 12struct sem_array {
13 struct kern_ipc_perm ____cacheline_aligned_in_smp 13 struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
14 sem_perm; /* permissions .. see ipc.h */
15 time_t sem_ctime; /* last change time */ 14 time_t sem_ctime; /* last change time */
16 struct sem *sem_base; /* ptr to first semaphore in array */ 15 struct sem *sem_base; /* ptr to first semaphore in array */
17 struct list_head pending_alter; /* pending operations */ 16 struct list_head pending_alter; /* pending operations */
diff --git a/include/linux/time.h b/include/linux/time.h
index 23f0f5ce3090..c0543f5f25de 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -151,9 +151,6 @@ static inline bool timespec_inject_offset_valid(const struct timespec *ts)
151 return true; 151 return true;
152} 152}
153 153
154#define CURRENT_TIME (current_kernel_time())
155#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
156
157/* Some architectures do not supply their own clocksource. 154/* Some architectures do not supply their own clocksource.
158 * This is mainly the case in architectures that get their 155 * This is mainly the case in architectures that get their
159 * inter-tick times by reading the counter on their interval 156 * inter-tick times by reading the counter on their interval
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index e0cbfb09e60f..201418d5e15c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -203,7 +203,6 @@ static __always_inline void pagefault_disabled_inc(void)
203static __always_inline void pagefault_disabled_dec(void) 203static __always_inline void pagefault_disabled_dec(void)
204{ 204{
205 current->pagefault_disabled--; 205 current->pagefault_disabled--;
206 WARN_ON(current->pagefault_disabled < 0);
207} 206}
208 207
209/* 208/*
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d68edffbf142..0328ce003992 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -6,6 +6,7 @@
6#include <linux/list.h> 6#include <linux/list.h>
7#include <linux/llist.h> 7#include <linux/llist.h>
8#include <asm/page.h> /* pgprot_t */ 8#include <asm/page.h> /* pgprot_t */
9#include <asm/pgtable.h> /* PAGE_KERNEL */
9#include <linux/rbtree.h> 10#include <linux/rbtree.h>
10 11
11struct vm_area_struct; /* vma defining user mapping in mm_types.h */ 12struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -80,6 +81,25 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
80 unsigned long start, unsigned long end, gfp_t gfp_mask, 81 unsigned long start, unsigned long end, gfp_t gfp_mask,
81 pgprot_t prot, unsigned long vm_flags, int node, 82 pgprot_t prot, unsigned long vm_flags, int node,
82 const void *caller); 83 const void *caller);
84#ifndef CONFIG_MMU
85extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
86#else
87extern void *__vmalloc_node(unsigned long size, unsigned long align,
88 gfp_t gfp_mask, pgprot_t prot,
89 int node, const void *caller);
90
91/*
92 * We really want to have this inlined due to caller tracking. This
93 * function is used by the highlevel vmalloc apis and so we want to track
94 * their callers and inlining will achieve that.
95 */
96static inline void *__vmalloc_node_flags(unsigned long size,
97 int node, gfp_t flags)
98{
99 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
100 node, __builtin_return_address(0));
101}
102#endif
83 103
84extern void vfree(const void *addr); 104extern void vfree(const void *addr);
85extern void vfree_atomic(const void *addr); 105extern void vfree_atomic(const void *addr);
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index c566ddc87f73..08bb3ed18dcc 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -150,6 +150,136 @@ DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
150DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping); 150DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
151DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback); 151DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
152 152
153DECLARE_EVENT_CLASS(dax_pte_fault_class,
154 TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
155 TP_ARGS(inode, vmf, result),
156 TP_STRUCT__entry(
157 __field(unsigned long, ino)
158 __field(unsigned long, vm_flags)
159 __field(unsigned long, address)
160 __field(pgoff_t, pgoff)
161 __field(dev_t, dev)
162 __field(unsigned int, flags)
163 __field(int, result)
164 ),
165 TP_fast_assign(
166 __entry->dev = inode->i_sb->s_dev;
167 __entry->ino = inode->i_ino;
168 __entry->vm_flags = vmf->vma->vm_flags;
169 __entry->address = vmf->address;
170 __entry->flags = vmf->flags;
171 __entry->pgoff = vmf->pgoff;
172 __entry->result = result;
173 ),
174 TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
175 MAJOR(__entry->dev),
176 MINOR(__entry->dev),
177 __entry->ino,
178 __entry->vm_flags & VM_SHARED ? "shared" : "private",
179 __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
180 __entry->address,
181 __entry->pgoff,
182 __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
183 )
184)
185
186#define DEFINE_PTE_FAULT_EVENT(name) \
187DEFINE_EVENT(dax_pte_fault_class, name, \
188 TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
189 TP_ARGS(inode, vmf, result))
190
191DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
192DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
193DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite_no_entry);
194DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite);
195DEFINE_PTE_FAULT_EVENT(dax_load_hole);
196
197TRACE_EVENT(dax_insert_mapping,
198 TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
199 TP_ARGS(inode, vmf, radix_entry),
200 TP_STRUCT__entry(
201 __field(unsigned long, ino)
202 __field(unsigned long, vm_flags)
203 __field(unsigned long, address)
204 __field(void *, radix_entry)
205 __field(dev_t, dev)
206 __field(int, write)
207 ),
208 TP_fast_assign(
209 __entry->dev = inode->i_sb->s_dev;
210 __entry->ino = inode->i_ino;
211 __entry->vm_flags = vmf->vma->vm_flags;
212 __entry->address = vmf->address;
213 __entry->write = vmf->flags & FAULT_FLAG_WRITE;
214 __entry->radix_entry = radix_entry;
215 ),
216 TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
217 MAJOR(__entry->dev),
218 MINOR(__entry->dev),
219 __entry->ino,
220 __entry->vm_flags & VM_SHARED ? "shared" : "private",
221 __entry->write ? "write" : "read",
222 __entry->address,
223 (unsigned long)__entry->radix_entry
224 )
225)
226
227DECLARE_EVENT_CLASS(dax_writeback_range_class,
228 TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
229 TP_ARGS(inode, start_index, end_index),
230 TP_STRUCT__entry(
231 __field(unsigned long, ino)
232 __field(pgoff_t, start_index)
233 __field(pgoff_t, end_index)
234 __field(dev_t, dev)
235 ),
236 TP_fast_assign(
237 __entry->dev = inode->i_sb->s_dev;
238 __entry->ino = inode->i_ino;
239 __entry->start_index = start_index;
240 __entry->end_index = end_index;
241 ),
242 TP_printk("dev %d:%d ino %#lx pgoff %#lx-%#lx",
243 MAJOR(__entry->dev),
244 MINOR(__entry->dev),
245 __entry->ino,
246 __entry->start_index,
247 __entry->end_index
248 )
249)
250
251#define DEFINE_WRITEBACK_RANGE_EVENT(name) \
252DEFINE_EVENT(dax_writeback_range_class, name, \
253 TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),\
254 TP_ARGS(inode, start_index, end_index))
255
256DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range);
257DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range_done);
258
259TRACE_EVENT(dax_writeback_one,
260 TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
261 TP_ARGS(inode, pgoff, pglen),
262 TP_STRUCT__entry(
263 __field(unsigned long, ino)
264 __field(pgoff_t, pgoff)
265 __field(pgoff_t, pglen)
266 __field(dev_t, dev)
267 ),
268 TP_fast_assign(
269 __entry->dev = inode->i_sb->s_dev;
270 __entry->ino = inode->i_ino;
271 __entry->pgoff = pgoff;
272 __entry->pglen = pglen;
273 ),
274 TP_printk("dev %d:%d ino %#lx pgoff %#lx pglen %#lx",
275 MAJOR(__entry->dev),
276 MINOR(__entry->dev),
277 __entry->ino,
278 __entry->pgoff,
279 __entry->pglen
280 )
281)
282
153#endif /* _TRACE_FS_DAX_H */ 283#endif /* _TRACE_FS_DAX_H */
154 284
155/* This part must be outside protection */ 285/* This part must be outside protection */
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 7b26a62e5707..b9095a27a08a 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -355,7 +355,7 @@ struct ipmi_cmdspec {
355#define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \ 355#define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \
356 struct ipmi_cmdspec) 356 struct ipmi_cmdspec)
357/* 357/*
358 * Unregister a regsitered command. error values: 358 * Unregister a registered command. error values:
359 * - EFAULT - an address supplied was invalid. 359 * - EFAULT - an address supplied was invalid.
360 * - ENOENT - The netfn/cmd was not found registered for this user. 360 * - ENOENT - The netfn/cmd was not found registered for this user.
361 */ 361 */
diff --git a/init/do_mounts.h b/init/do_mounts.h
index 067af1d9e8b6..282d65bfd674 100644
--- a/init/do_mounts.h
+++ b/init/do_mounts.h
@@ -19,29 +19,15 @@ static inline int create_dev(char *name, dev_t dev)
19 return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); 19 return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
20} 20}
21 21
22#if BITS_PER_LONG == 32
23static inline u32 bstat(char *name) 22static inline u32 bstat(char *name)
24{ 23{
25 struct stat64 stat; 24 struct kstat stat;
26 if (sys_stat64(name, &stat) != 0) 25 if (vfs_stat(name, &stat) != 0)
27 return 0; 26 return 0;
28 if (!S_ISBLK(stat.st_mode)) 27 if (!S_ISBLK(stat.mode))
29 return 0; 28 return 0;
30 if (stat.st_rdev != (u32)stat.st_rdev) 29 return stat.rdev;
31 return 0;
32 return stat.st_rdev;
33}
34#else
35static inline u32 bstat(char *name)
36{
37 struct stat stat;
38 if (sys_newstat(name, &stat) != 0)
39 return 0;
40 if (!S_ISBLK(stat.st_mode))
41 return 0;
42 return stat.st_rdev;
43} 30}
44#endif
45 31
46#ifdef CONFIG_BLK_DEV_RAM 32#ifdef CONFIG_BLK_DEV_RAM
47 33
diff --git a/init/initramfs.c b/init/initramfs.c
index 8daf7ac6c7e2..8a532050043f 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -312,10 +312,10 @@ static int __init maybe_link(void)
312 312
313static void __init clean_path(char *path, umode_t fmode) 313static void __init clean_path(char *path, umode_t fmode)
314{ 314{
315 struct stat st; 315 struct kstat st;
316 316
317 if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) { 317 if (!vfs_lstat(path, &st) && (st.mode ^ fmode) & S_IFMT) {
318 if (S_ISDIR(st.st_mode)) 318 if (S_ISDIR(st.mode))
319 sys_rmdir(path); 319 sys_rmdir(path);
320 else 320 else
321 sys_unlink(path); 321 sys_unlink(path);
@@ -581,13 +581,13 @@ static void __init clean_rootfs(void)
581 num = sys_getdents64(fd, dirp, BUF_SIZE); 581 num = sys_getdents64(fd, dirp, BUF_SIZE);
582 while (num > 0) { 582 while (num > 0) {
583 while (num > 0) { 583 while (num > 0) {
584 struct stat st; 584 struct kstat st;
585 int ret; 585 int ret;
586 586
587 ret = sys_newlstat(dirp->d_name, &st); 587 ret = vfs_lstat(dirp->d_name, &st);
588 WARN_ON_ONCE(ret); 588 WARN_ON_ONCE(ret);
589 if (!ret) { 589 if (!ret) {
590 if (S_ISDIR(st.st_mode)) 590 if (S_ISDIR(st.mode))
591 sys_rmdir(dirp->d_name); 591 sys_rmdir(dirp->d_name);
592 else 592 else
593 sys_unlink(dirp->d_name); 593 sys_unlink(dirp->d_name);
@@ -613,7 +613,7 @@ static int __init populate_rootfs(void)
613 if (err) 613 if (err)
614 panic("%s", err); /* Failed to decompress INTERNAL initramfs */ 614 panic("%s", err); /* Failed to decompress INTERNAL initramfs */
615 /* If available load the bootloader supplied initrd */ 615 /* If available load the bootloader supplied initrd */
616 if (initrd_start) { 616 if (initrd_start && !IS_ENABLED(CONFIG_INITRAMFS_FORCE)) {
617#ifdef CONFIG_BLK_DEV_RAM 617#ifdef CONFIG_BLK_DEV_RAM
618 int fd; 618 int fd;
619 printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n"); 619 printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
diff --git a/ipc/shm.c b/ipc/shm.c
index 481d2a9c298a..34c4344e8d4b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1095,11 +1095,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1095 ulong *raddr, unsigned long shmlba) 1095 ulong *raddr, unsigned long shmlba)
1096{ 1096{
1097 struct shmid_kernel *shp; 1097 struct shmid_kernel *shp;
1098 unsigned long addr; 1098 unsigned long addr = (unsigned long)shmaddr;
1099 unsigned long size; 1099 unsigned long size;
1100 struct file *file; 1100 struct file *file;
1101 int err; 1101 int err;
1102 unsigned long flags; 1102 unsigned long flags = MAP_SHARED;
1103 unsigned long prot; 1103 unsigned long prot;
1104 int acc_mode; 1104 int acc_mode;
1105 struct ipc_namespace *ns; 1105 struct ipc_namespace *ns;
@@ -1111,7 +1111,8 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1111 err = -EINVAL; 1111 err = -EINVAL;
1112 if (shmid < 0) 1112 if (shmid < 0)
1113 goto out; 1113 goto out;
1114 else if ((addr = (ulong)shmaddr)) { 1114
1115 if (addr) {
1115 if (addr & (shmlba - 1)) { 1116 if (addr & (shmlba - 1)) {
1116 /* 1117 /*
1117 * Round down to the nearest multiple of shmlba. 1118 * Round down to the nearest multiple of shmlba.
@@ -1126,13 +1127,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1126#endif 1127#endif
1127 goto out; 1128 goto out;
1128 } 1129 }
1129 flags = MAP_SHARED | MAP_FIXED;
1130 } else {
1131 if ((shmflg & SHM_REMAP))
1132 goto out;
1133 1130
1134 flags = MAP_SHARED; 1131 flags |= MAP_FIXED;
1135 } 1132 } else if ((shmflg & SHM_REMAP))
1133 goto out;
1136 1134
1137 if (shmflg & SHM_RDONLY) { 1135 if (shmflg & SHM_RDONLY) {
1138 prot = PROT_READ; 1136 prot = PROT_READ;
diff --git a/ipc/util.c b/ipc/util.c
index 3459a16a9df9..caec7b1bfaa3 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -403,12 +403,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
403 */ 403 */
404void *ipc_alloc(int size) 404void *ipc_alloc(int size)
405{ 405{
406 void *out; 406 return kvmalloc(size, GFP_KERNEL);
407 if (size > PAGE_SIZE)
408 out = vmalloc(size);
409 else
410 out = kmalloc(size, GFP_KERNEL);
411 return out;
412} 407}
413 408
414/** 409/**
diff --git a/kernel/Makefile b/kernel/Makefile
index b302b4731d16..72aa080f91f0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MODULES) += module.o
59obj-$(CONFIG_MODULE_SIG) += module_signing.o 59obj-$(CONFIG_MODULE_SIG) += module_signing.o
60obj-$(CONFIG_KALLSYMS) += kallsyms.o 60obj-$(CONFIG_KALLSYMS) += kallsyms.o
61obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o 61obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
62obj-$(CONFIG_CRASH_CORE) += crash_core.o
62obj-$(CONFIG_KEXEC_CORE) += kexec_core.o 63obj-$(CONFIG_KEXEC_CORE) += kexec_core.o
63obj-$(CONFIG_KEXEC) += kexec.o 64obj-$(CONFIG_KEXEC) += kexec.o
64obj-$(CONFIG_KEXEC_FILE) += kexec_file.o 65obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6f81e0f5a0fa..dedf367f59bb 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -76,8 +76,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
76 76
77struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 77struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
78{ 78{
79 gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | 79 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
80 gfp_extra_flags;
81 struct bpf_prog_aux *aux; 80 struct bpf_prog_aux *aux;
82 struct bpf_prog *fp; 81 struct bpf_prog *fp;
83 82
@@ -107,8 +106,7 @@ EXPORT_SYMBOL_GPL(bpf_prog_alloc);
107struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 106struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
108 gfp_t gfp_extra_flags) 107 gfp_t gfp_extra_flags)
109{ 108{
110 gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | 109 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
111 gfp_extra_flags;
112 struct bpf_prog *fp; 110 struct bpf_prog *fp;
113 u32 pages, delta; 111 u32 pages, delta;
114 int ret; 112 int ret;
@@ -655,8 +653,7 @@ out:
655static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 653static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
656 gfp_t gfp_extra_flags) 654 gfp_t gfp_extra_flags)
657{ 655{
658 gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | 656 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
659 gfp_extra_flags;
660 struct bpf_prog *fp; 657 struct bpf_prog *fp;
661 658
662 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL); 659 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 13642c73dca0..fd2411fd6914 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -67,8 +67,7 @@ void *bpf_map_area_alloc(size_t size)
67 return area; 67 return area;
68 } 68 }
69 69
70 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags, 70 return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
71 PAGE_KERNEL);
72} 71}
73 72
74void bpf_map_area_free(void *area) 73void bpf_map_area_free(void *area)
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
new file mode 100644
index 000000000000..fcbd568f1e95
--- /dev/null
+++ b/kernel/crash_core.c
@@ -0,0 +1,439 @@
1/*
2 * crash.c - kernel crash support code.
3 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <linux/crash_core.h>
10#include <linux/utsname.h>
11#include <linux/vmalloc.h>
12
13#include <asm/page.h>
14#include <asm/sections.h>
15
16/* vmcoreinfo stuff */
17static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
18u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
19size_t vmcoreinfo_size;
20size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
21
22/*
23 * parsing the "crashkernel" commandline
24 *
25 * this code is intended to be called from architecture specific code
26 */
27
28
29/*
30 * This function parses command lines in the format
31 *
32 * crashkernel=ramsize-range:size[,...][@offset]
33 *
34 * The function returns 0 on success and -EINVAL on failure.
35 */
36static int __init parse_crashkernel_mem(char *cmdline,
37 unsigned long long system_ram,
38 unsigned long long *crash_size,
39 unsigned long long *crash_base)
40{
41 char *cur = cmdline, *tmp;
42
43 /* for each entry of the comma-separated list */
44 do {
45 unsigned long long start, end = ULLONG_MAX, size;
46
47 /* get the start of the range */
48 start = memparse(cur, &tmp);
49 if (cur == tmp) {
50 pr_warn("crashkernel: Memory value expected\n");
51 return -EINVAL;
52 }
53 cur = tmp;
54 if (*cur != '-') {
55 pr_warn("crashkernel: '-' expected\n");
56 return -EINVAL;
57 }
58 cur++;
59
60 /* if no ':' is here, than we read the end */
61 if (*cur != ':') {
62 end = memparse(cur, &tmp);
63 if (cur == tmp) {
64 pr_warn("crashkernel: Memory value expected\n");
65 return -EINVAL;
66 }
67 cur = tmp;
68 if (end <= start) {
69 pr_warn("crashkernel: end <= start\n");
70 return -EINVAL;
71 }
72 }
73
74 if (*cur != ':') {
75 pr_warn("crashkernel: ':' expected\n");
76 return -EINVAL;
77 }
78 cur++;
79
80 size = memparse(cur, &tmp);
81 if (cur == tmp) {
82 pr_warn("Memory value expected\n");
83 return -EINVAL;
84 }
85 cur = tmp;
86 if (size >= system_ram) {
87 pr_warn("crashkernel: invalid size\n");
88 return -EINVAL;
89 }
90
91 /* match ? */
92 if (system_ram >= start && system_ram < end) {
93 *crash_size = size;
94 break;
95 }
96 } while (*cur++ == ',');
97
98 if (*crash_size > 0) {
99 while (*cur && *cur != ' ' && *cur != '@')
100 cur++;
101 if (*cur == '@') {
102 cur++;
103 *crash_base = memparse(cur, &tmp);
104 if (cur == tmp) {
105 pr_warn("Memory value expected after '@'\n");
106 return -EINVAL;
107 }
108 }
109 }
110
111 return 0;
112}
113
114/*
115 * That function parses "simple" (old) crashkernel command lines like
116 *
117 * crashkernel=size[@offset]
118 *
119 * It returns 0 on success and -EINVAL on failure.
120 */
121static int __init parse_crashkernel_simple(char *cmdline,
122 unsigned long long *crash_size,
123 unsigned long long *crash_base)
124{
125 char *cur = cmdline;
126
127 *crash_size = memparse(cmdline, &cur);
128 if (cmdline == cur) {
129 pr_warn("crashkernel: memory value expected\n");
130 return -EINVAL;
131 }
132
133 if (*cur == '@')
134 *crash_base = memparse(cur+1, &cur);
135 else if (*cur != ' ' && *cur != '\0') {
136 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
137 return -EINVAL;
138 }
139
140 return 0;
141}
142
143#define SUFFIX_HIGH 0
144#define SUFFIX_LOW 1
145#define SUFFIX_NULL 2
146static __initdata char *suffix_tbl[] = {
147 [SUFFIX_HIGH] = ",high",
148 [SUFFIX_LOW] = ",low",
149 [SUFFIX_NULL] = NULL,
150};
151
152/*
153 * That function parses "suffix" crashkernel command lines like
154 *
155 * crashkernel=size,[high|low]
156 *
157 * It returns 0 on success and -EINVAL on failure.
158 */
159static int __init parse_crashkernel_suffix(char *cmdline,
160 unsigned long long *crash_size,
161 const char *suffix)
162{
163 char *cur = cmdline;
164
165 *crash_size = memparse(cmdline, &cur);
166 if (cmdline == cur) {
167 pr_warn("crashkernel: memory value expected\n");
168 return -EINVAL;
169 }
170
171 /* check with suffix */
172 if (strncmp(cur, suffix, strlen(suffix))) {
173 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
174 return -EINVAL;
175 }
176 cur += strlen(suffix);
177 if (*cur != ' ' && *cur != '\0') {
178 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
179 return -EINVAL;
180 }
181
182 return 0;
183}
184
185static __init char *get_last_crashkernel(char *cmdline,
186 const char *name,
187 const char *suffix)
188{
189 char *p = cmdline, *ck_cmdline = NULL;
190
191 /* find crashkernel and use the last one if there are more */
192 p = strstr(p, name);
193 while (p) {
194 char *end_p = strchr(p, ' ');
195 char *q;
196
197 if (!end_p)
198 end_p = p + strlen(p);
199
200 if (!suffix) {
201 int i;
202
203 /* skip the one with any known suffix */
204 for (i = 0; suffix_tbl[i]; i++) {
205 q = end_p - strlen(suffix_tbl[i]);
206 if (!strncmp(q, suffix_tbl[i],
207 strlen(suffix_tbl[i])))
208 goto next;
209 }
210 ck_cmdline = p;
211 } else {
212 q = end_p - strlen(suffix);
213 if (!strncmp(q, suffix, strlen(suffix)))
214 ck_cmdline = p;
215 }
216next:
217 p = strstr(p+1, name);
218 }
219
220 if (!ck_cmdline)
221 return NULL;
222
223 return ck_cmdline;
224}
225
226static int __init __parse_crashkernel(char *cmdline,
227 unsigned long long system_ram,
228 unsigned long long *crash_size,
229 unsigned long long *crash_base,
230 const char *name,
231 const char *suffix)
232{
233 char *first_colon, *first_space;
234 char *ck_cmdline;
235
236 BUG_ON(!crash_size || !crash_base);
237 *crash_size = 0;
238 *crash_base = 0;
239
240 ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
241
242 if (!ck_cmdline)
243 return -EINVAL;
244
245 ck_cmdline += strlen(name);
246
247 if (suffix)
248 return parse_crashkernel_suffix(ck_cmdline, crash_size,
249 suffix);
250 /*
251 * if the commandline contains a ':', then that's the extended
252 * syntax -- if not, it must be the classic syntax
253 */
254 first_colon = strchr(ck_cmdline, ':');
255 first_space = strchr(ck_cmdline, ' ');
256 if (first_colon && (!first_space || first_colon < first_space))
257 return parse_crashkernel_mem(ck_cmdline, system_ram,
258 crash_size, crash_base);
259
260 return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
261}
262
263/*
264 * That function is the entry point for command line parsing and should be
265 * called from the arch-specific code.
266 */
267int __init parse_crashkernel(char *cmdline,
268 unsigned long long system_ram,
269 unsigned long long *crash_size,
270 unsigned long long *crash_base)
271{
272 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
273 "crashkernel=", NULL);
274}
275
276int __init parse_crashkernel_high(char *cmdline,
277 unsigned long long system_ram,
278 unsigned long long *crash_size,
279 unsigned long long *crash_base)
280{
281 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
282 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
283}
284
285int __init parse_crashkernel_low(char *cmdline,
286 unsigned long long system_ram,
287 unsigned long long *crash_size,
288 unsigned long long *crash_base)
289{
290 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
291 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
292}
293
294Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
295 void *data, size_t data_len)
296{
297 struct elf_note *note = (struct elf_note *)buf;
298
299 note->n_namesz = strlen(name) + 1;
300 note->n_descsz = data_len;
301 note->n_type = type;
302 buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf_Word));
303 memcpy(buf, name, note->n_namesz);
304 buf += DIV_ROUND_UP(note->n_namesz, sizeof(Elf_Word));
305 memcpy(buf, data, data_len);
306 buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
307
308 return buf;
309}
310
311void final_note(Elf_Word *buf)
312{
313 memset(buf, 0, sizeof(struct elf_note));
314}
315
316static void update_vmcoreinfo_note(void)
317{
318 u32 *buf = vmcoreinfo_note;
319
320 if (!vmcoreinfo_size)
321 return;
322 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
323 vmcoreinfo_size);
324 final_note(buf);
325}
326
327void crash_save_vmcoreinfo(void)
328{
329 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
330 update_vmcoreinfo_note();
331}
332
333void vmcoreinfo_append_str(const char *fmt, ...)
334{
335 va_list args;
336 char buf[0x50];
337 size_t r;
338
339 va_start(args, fmt);
340 r = vscnprintf(buf, sizeof(buf), fmt, args);
341 va_end(args);
342
343 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
344
345 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
346
347 vmcoreinfo_size += r;
348}
349
350/*
351 * provide an empty default implementation here -- architecture
352 * code may override this
353 */
354void __weak arch_crash_save_vmcoreinfo(void)
355{}
356
357phys_addr_t __weak paddr_vmcoreinfo_note(void)
358{
359 return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
360}
361
362static int __init crash_save_vmcoreinfo_init(void)
363{
364 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
365 VMCOREINFO_PAGESIZE(PAGE_SIZE);
366
367 VMCOREINFO_SYMBOL(init_uts_ns);
368 VMCOREINFO_SYMBOL(node_online_map);
369#ifdef CONFIG_MMU
370 VMCOREINFO_SYMBOL(swapper_pg_dir);
371#endif
372 VMCOREINFO_SYMBOL(_stext);
373 VMCOREINFO_SYMBOL(vmap_area_list);
374
375#ifndef CONFIG_NEED_MULTIPLE_NODES
376 VMCOREINFO_SYMBOL(mem_map);
377 VMCOREINFO_SYMBOL(contig_page_data);
378#endif
379#ifdef CONFIG_SPARSEMEM
380 VMCOREINFO_SYMBOL(mem_section);
381 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
382 VMCOREINFO_STRUCT_SIZE(mem_section);
383 VMCOREINFO_OFFSET(mem_section, section_mem_map);
384#endif
385 VMCOREINFO_STRUCT_SIZE(page);
386 VMCOREINFO_STRUCT_SIZE(pglist_data);
387 VMCOREINFO_STRUCT_SIZE(zone);
388 VMCOREINFO_STRUCT_SIZE(free_area);
389 VMCOREINFO_STRUCT_SIZE(list_head);
390 VMCOREINFO_SIZE(nodemask_t);
391 VMCOREINFO_OFFSET(page, flags);
392 VMCOREINFO_OFFSET(page, _refcount);
393 VMCOREINFO_OFFSET(page, mapping);
394 VMCOREINFO_OFFSET(page, lru);
395 VMCOREINFO_OFFSET(page, _mapcount);
396 VMCOREINFO_OFFSET(page, private);
397 VMCOREINFO_OFFSET(page, compound_dtor);
398 VMCOREINFO_OFFSET(page, compound_order);
399 VMCOREINFO_OFFSET(page, compound_head);
400 VMCOREINFO_OFFSET(pglist_data, node_zones);
401 VMCOREINFO_OFFSET(pglist_data, nr_zones);
402#ifdef CONFIG_FLAT_NODE_MEM_MAP
403 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
404#endif
405 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
406 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
407 VMCOREINFO_OFFSET(pglist_data, node_id);
408 VMCOREINFO_OFFSET(zone, free_area);
409 VMCOREINFO_OFFSET(zone, vm_stat);
410 VMCOREINFO_OFFSET(zone, spanned_pages);
411 VMCOREINFO_OFFSET(free_area, free_list);
412 VMCOREINFO_OFFSET(list_head, next);
413 VMCOREINFO_OFFSET(list_head, prev);
414 VMCOREINFO_OFFSET(vmap_area, va_start);
415 VMCOREINFO_OFFSET(vmap_area, list);
416 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
417 log_buf_vmcoreinfo_setup();
418 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
419 VMCOREINFO_NUMBER(NR_FREE_PAGES);
420 VMCOREINFO_NUMBER(PG_lru);
421 VMCOREINFO_NUMBER(PG_private);
422 VMCOREINFO_NUMBER(PG_swapcache);
423 VMCOREINFO_NUMBER(PG_slab);
424#ifdef CONFIG_MEMORY_FAILURE
425 VMCOREINFO_NUMBER(PG_hwpoison);
426#endif
427 VMCOREINFO_NUMBER(PG_head_mask);
428 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
429#ifdef CONFIG_HUGETLB_PAGE
430 VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
431#endif
432
433 arch_crash_save_vmcoreinfo();
434 update_vmcoreinfo_note();
435
436 return 0;
437}
438
439subsys_initcall(crash_save_vmcoreinfo_init);
diff --git a/kernel/fork.c b/kernel/fork.c
index dd5a371c392a..08ba696aa561 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -179,6 +179,24 @@ void __weak arch_release_thread_stack(unsigned long *stack)
179 */ 179 */
180#define NR_CACHED_STACKS 2 180#define NR_CACHED_STACKS 2
181static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); 181static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
182
183static int free_vm_stack_cache(unsigned int cpu)
184{
185 struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
186 int i;
187
188 for (i = 0; i < NR_CACHED_STACKS; i++) {
189 struct vm_struct *vm_stack = cached_vm_stacks[i];
190
191 if (!vm_stack)
192 continue;
193
194 vfree(vm_stack->addr);
195 cached_vm_stacks[i] = NULL;
196 }
197
198 return 0;
199}
182#endif 200#endif
183 201
184static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) 202static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
@@ -203,7 +221,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
203 221
204 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE, 222 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
205 VMALLOC_START, VMALLOC_END, 223 VMALLOC_START, VMALLOC_END,
206 THREADINFO_GFP | __GFP_HIGHMEM, 224 THREADINFO_GFP,
207 PAGE_KERNEL, 225 PAGE_KERNEL,
208 0, node, __builtin_return_address(0)); 226 0, node, __builtin_return_address(0));
209 227
@@ -467,6 +485,11 @@ void __init fork_init(void)
467 for (i = 0; i < UCOUNT_COUNTS; i++) { 485 for (i = 0; i < UCOUNT_COUNTS; i++) {
468 init_user_ns.ucount_max[i] = max_threads/2; 486 init_user_ns.ucount_max[i] = max_threads/2;
469 } 487 }
488
489#ifdef CONFIG_VMAP_STACK
490 cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
491 NULL, free_vm_stack_cache);
492#endif
470} 493}
471 494
472int __weak arch_dup_task_struct(struct task_struct *dst, 495int __weak arch_dup_task_struct(struct task_struct *dst,
diff --git a/kernel/groups.c b/kernel/groups.c
index 8dd7a61b7115..d09727692a2a 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -18,7 +18,7 @@ struct group_info *groups_alloc(int gidsetsize)
18 len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize; 18 len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
19 gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY); 19 gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
20 if (!gi) 20 if (!gi)
21 gi = __vmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_HIGHMEM, PAGE_KERNEL); 21 gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
22 if (!gi) 22 if (!gi)
23 return NULL; 23 return NULL;
24 24
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index f0f8e2a8496f..751593ed7c0b 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -43,6 +43,7 @@ unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_
43int __read_mostly sysctl_hung_task_warnings = 10; 43int __read_mostly sysctl_hung_task_warnings = 10;
44 44
45static int __read_mostly did_panic; 45static int __read_mostly did_panic;
46static bool hung_task_show_lock;
46 47
47static struct task_struct *watchdog_task; 48static struct task_struct *watchdog_task;
48 49
@@ -120,12 +121,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
120 pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" 121 pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
121 " disables this message.\n"); 122 " disables this message.\n");
122 sched_show_task(t); 123 sched_show_task(t);
123 debug_show_all_locks(); 124 hung_task_show_lock = true;
124 } 125 }
125 126
126 touch_nmi_watchdog(); 127 touch_nmi_watchdog();
127 128
128 if (sysctl_hung_task_panic) { 129 if (sysctl_hung_task_panic) {
130 if (hung_task_show_lock)
131 debug_show_all_locks();
129 trigger_all_cpu_backtrace(); 132 trigger_all_cpu_backtrace();
130 panic("hung_task: blocked tasks"); 133 panic("hung_task: blocked tasks");
131 } 134 }
@@ -172,6 +175,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
172 if (test_taint(TAINT_DIE) || did_panic) 175 if (test_taint(TAINT_DIE) || did_panic)
173 return; 176 return;
174 177
178 hung_task_show_lock = false;
175 rcu_read_lock(); 179 rcu_read_lock();
176 for_each_process_thread(g, t) { 180 for_each_process_thread(g, t) {
177 if (!max_count--) 181 if (!max_count--)
@@ -187,6 +191,8 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
187 } 191 }
188 unlock: 192 unlock:
189 rcu_read_unlock(); 193 rcu_read_unlock();
194 if (hung_task_show_lock)
195 debug_show_all_locks();
190} 196}
191 197
192static long hung_timeout_jiffies(unsigned long last_checked, 198static long hung_timeout_jiffies(unsigned long last_checked,
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 85e5546cd791..cd771993f96f 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -60,15 +60,8 @@ void notrace __sanitizer_cov_trace_pc(void)
60 /* 60 /*
61 * We are interested in code coverage as a function of a syscall inputs, 61 * We are interested in code coverage as a function of a syscall inputs,
62 * so we ignore code executed in interrupts. 62 * so we ignore code executed in interrupts.
63 * The checks for whether we are in an interrupt are open-coded, because
64 * 1. We can't use in_interrupt() here, since it also returns true
65 * when we are inside local_bh_disable() section.
66 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
67 * since that leads to slower generated code (three separate tests,
68 * one for each of the flags).
69 */ 63 */
70 if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET 64 if (!t || !in_task())
71 | NMI_MASK)))
72 return; 65 return;
73 mode = READ_ONCE(t->kcov_mode); 66 mode = READ_ONCE(t->kcov_mode);
74 if (mode == KCOV_MODE_TRACE) { 67 if (mode == KCOV_MODE_TRACE) {
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index bfe62d5b3872..ae1a3ba24df5 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -51,12 +51,6 @@ DEFINE_MUTEX(kexec_mutex);
51/* Per cpu memory for storing cpu states in case of system crash. */ 51/* Per cpu memory for storing cpu states in case of system crash. */
52note_buf_t __percpu *crash_notes; 52note_buf_t __percpu *crash_notes;
53 53
54/* vmcoreinfo stuff */
55static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
56u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
57size_t vmcoreinfo_size;
58size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
59
60/* Flag to indicate we are going to kexec a new kernel */ 54/* Flag to indicate we are going to kexec a new kernel */
61bool kexec_in_progress = false; 55bool kexec_in_progress = false;
62 56
@@ -996,34 +990,6 @@ unlock:
996 return ret; 990 return ret;
997} 991}
998 992
999static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1000 size_t data_len)
1001{
1002 struct elf_note note;
1003
1004 note.n_namesz = strlen(name) + 1;
1005 note.n_descsz = data_len;
1006 note.n_type = type;
1007 memcpy(buf, &note, sizeof(note));
1008 buf += (sizeof(note) + 3)/4;
1009 memcpy(buf, name, note.n_namesz);
1010 buf += (note.n_namesz + 3)/4;
1011 memcpy(buf, data, note.n_descsz);
1012 buf += (note.n_descsz + 3)/4;
1013
1014 return buf;
1015}
1016
1017static void final_note(u32 *buf)
1018{
1019 struct elf_note note;
1020
1021 note.n_namesz = 0;
1022 note.n_descsz = 0;
1023 note.n_type = 0;
1024 memcpy(buf, &note, sizeof(note));
1025}
1026
1027void crash_save_cpu(struct pt_regs *regs, int cpu) 993void crash_save_cpu(struct pt_regs *regs, int cpu)
1028{ 994{
1029 struct elf_prstatus prstatus; 995 struct elf_prstatus prstatus;
@@ -1085,403 +1051,6 @@ subsys_initcall(crash_notes_memory_init);
1085 1051
1086 1052
1087/* 1053/*
1088 * parsing the "crashkernel" commandline
1089 *
1090 * this code is intended to be called from architecture specific code
1091 */
1092
1093
1094/*
1095 * This function parses command lines in the format
1096 *
1097 * crashkernel=ramsize-range:size[,...][@offset]
1098 *
1099 * The function returns 0 on success and -EINVAL on failure.
1100 */
1101static int __init parse_crashkernel_mem(char *cmdline,
1102 unsigned long long system_ram,
1103 unsigned long long *crash_size,
1104 unsigned long long *crash_base)
1105{
1106 char *cur = cmdline, *tmp;
1107
1108 /* for each entry of the comma-separated list */
1109 do {
1110 unsigned long long start, end = ULLONG_MAX, size;
1111
1112 /* get the start of the range */
1113 start = memparse(cur, &tmp);
1114 if (cur == tmp) {
1115 pr_warn("crashkernel: Memory value expected\n");
1116 return -EINVAL;
1117 }
1118 cur = tmp;
1119 if (*cur != '-') {
1120 pr_warn("crashkernel: '-' expected\n");
1121 return -EINVAL;
1122 }
1123 cur++;
1124
1125 /* if no ':' is here, than we read the end */
1126 if (*cur != ':') {
1127 end = memparse(cur, &tmp);
1128 if (cur == tmp) {
1129 pr_warn("crashkernel: Memory value expected\n");
1130 return -EINVAL;
1131 }
1132 cur = tmp;
1133 if (end <= start) {
1134 pr_warn("crashkernel: end <= start\n");
1135 return -EINVAL;
1136 }
1137 }
1138
1139 if (*cur != ':') {
1140 pr_warn("crashkernel: ':' expected\n");
1141 return -EINVAL;
1142 }
1143 cur++;
1144
1145 size = memparse(cur, &tmp);
1146 if (cur == tmp) {
1147 pr_warn("Memory value expected\n");
1148 return -EINVAL;
1149 }
1150 cur = tmp;
1151 if (size >= system_ram) {
1152 pr_warn("crashkernel: invalid size\n");
1153 return -EINVAL;
1154 }
1155
1156 /* match ? */
1157 if (system_ram >= start && system_ram < end) {
1158 *crash_size = size;
1159 break;
1160 }
1161 } while (*cur++ == ',');
1162
1163 if (*crash_size > 0) {
1164 while (*cur && *cur != ' ' && *cur != '@')
1165 cur++;
1166 if (*cur == '@') {
1167 cur++;
1168 *crash_base = memparse(cur, &tmp);
1169 if (cur == tmp) {
1170 pr_warn("Memory value expected after '@'\n");
1171 return -EINVAL;
1172 }
1173 }
1174 }
1175
1176 return 0;
1177}
1178
1179/*
1180 * That function parses "simple" (old) crashkernel command lines like
1181 *
1182 * crashkernel=size[@offset]
1183 *
1184 * It returns 0 on success and -EINVAL on failure.
1185 */
1186static int __init parse_crashkernel_simple(char *cmdline,
1187 unsigned long long *crash_size,
1188 unsigned long long *crash_base)
1189{
1190 char *cur = cmdline;
1191
1192 *crash_size = memparse(cmdline, &cur);
1193 if (cmdline == cur) {
1194 pr_warn("crashkernel: memory value expected\n");
1195 return -EINVAL;
1196 }
1197
1198 if (*cur == '@')
1199 *crash_base = memparse(cur+1, &cur);
1200 else if (*cur != ' ' && *cur != '\0') {
1201 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1202 return -EINVAL;
1203 }
1204
1205 return 0;
1206}
1207
1208#define SUFFIX_HIGH 0
1209#define SUFFIX_LOW 1
1210#define SUFFIX_NULL 2
1211static __initdata char *suffix_tbl[] = {
1212 [SUFFIX_HIGH] = ",high",
1213 [SUFFIX_LOW] = ",low",
1214 [SUFFIX_NULL] = NULL,
1215};
1216
1217/*
1218 * That function parses "suffix" crashkernel command lines like
1219 *
1220 * crashkernel=size,[high|low]
1221 *
1222 * It returns 0 on success and -EINVAL on failure.
1223 */
1224static int __init parse_crashkernel_suffix(char *cmdline,
1225 unsigned long long *crash_size,
1226 const char *suffix)
1227{
1228 char *cur = cmdline;
1229
1230 *crash_size = memparse(cmdline, &cur);
1231 if (cmdline == cur) {
1232 pr_warn("crashkernel: memory value expected\n");
1233 return -EINVAL;
1234 }
1235
1236 /* check with suffix */
1237 if (strncmp(cur, suffix, strlen(suffix))) {
1238 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1239 return -EINVAL;
1240 }
1241 cur += strlen(suffix);
1242 if (*cur != ' ' && *cur != '\0') {
1243 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1244 return -EINVAL;
1245 }
1246
1247 return 0;
1248}
1249
1250static __init char *get_last_crashkernel(char *cmdline,
1251 const char *name,
1252 const char *suffix)
1253{
1254 char *p = cmdline, *ck_cmdline = NULL;
1255
1256 /* find crashkernel and use the last one if there are more */
1257 p = strstr(p, name);
1258 while (p) {
1259 char *end_p = strchr(p, ' ');
1260 char *q;
1261
1262 if (!end_p)
1263 end_p = p + strlen(p);
1264
1265 if (!suffix) {
1266 int i;
1267
1268 /* skip the one with any known suffix */
1269 for (i = 0; suffix_tbl[i]; i++) {
1270 q = end_p - strlen(suffix_tbl[i]);
1271 if (!strncmp(q, suffix_tbl[i],
1272 strlen(suffix_tbl[i])))
1273 goto next;
1274 }
1275 ck_cmdline = p;
1276 } else {
1277 q = end_p - strlen(suffix);
1278 if (!strncmp(q, suffix, strlen(suffix)))
1279 ck_cmdline = p;
1280 }
1281next:
1282 p = strstr(p+1, name);
1283 }
1284
1285 if (!ck_cmdline)
1286 return NULL;
1287
1288 return ck_cmdline;
1289}
1290
1291static int __init __parse_crashkernel(char *cmdline,
1292 unsigned long long system_ram,
1293 unsigned long long *crash_size,
1294 unsigned long long *crash_base,
1295 const char *name,
1296 const char *suffix)
1297{
1298 char *first_colon, *first_space;
1299 char *ck_cmdline;
1300
1301 BUG_ON(!crash_size || !crash_base);
1302 *crash_size = 0;
1303 *crash_base = 0;
1304
1305 ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1306
1307 if (!ck_cmdline)
1308 return -EINVAL;
1309
1310 ck_cmdline += strlen(name);
1311
1312 if (suffix)
1313 return parse_crashkernel_suffix(ck_cmdline, crash_size,
1314 suffix);
1315 /*
1316 * if the commandline contains a ':', then that's the extended
1317 * syntax -- if not, it must be the classic syntax
1318 */
1319 first_colon = strchr(ck_cmdline, ':');
1320 first_space = strchr(ck_cmdline, ' ');
1321 if (first_colon && (!first_space || first_colon < first_space))
1322 return parse_crashkernel_mem(ck_cmdline, system_ram,
1323 crash_size, crash_base);
1324
1325 return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
1326}
1327
1328/*
1329 * That function is the entry point for command line parsing and should be
1330 * called from the arch-specific code.
1331 */
1332int __init parse_crashkernel(char *cmdline,
1333 unsigned long long system_ram,
1334 unsigned long long *crash_size,
1335 unsigned long long *crash_base)
1336{
1337 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1338 "crashkernel=", NULL);
1339}
1340
1341int __init parse_crashkernel_high(char *cmdline,
1342 unsigned long long system_ram,
1343 unsigned long long *crash_size,
1344 unsigned long long *crash_base)
1345{
1346 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1347 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1348}
1349
1350int __init parse_crashkernel_low(char *cmdline,
1351 unsigned long long system_ram,
1352 unsigned long long *crash_size,
1353 unsigned long long *crash_base)
1354{
1355 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1356 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
1357}
1358
1359static void update_vmcoreinfo_note(void)
1360{
1361 u32 *buf = vmcoreinfo_note;
1362
1363 if (!vmcoreinfo_size)
1364 return;
1365 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1366 vmcoreinfo_size);
1367 final_note(buf);
1368}
1369
1370void crash_save_vmcoreinfo(void)
1371{
1372 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1373 update_vmcoreinfo_note();
1374}
1375
1376void vmcoreinfo_append_str(const char *fmt, ...)
1377{
1378 va_list args;
1379 char buf[0x50];
1380 size_t r;
1381
1382 va_start(args, fmt);
1383 r = vscnprintf(buf, sizeof(buf), fmt, args);
1384 va_end(args);
1385
1386 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1387
1388 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1389
1390 vmcoreinfo_size += r;
1391}
1392
1393/*
1394 * provide an empty default implementation here -- architecture
1395 * code may override this
1396 */
1397void __weak arch_crash_save_vmcoreinfo(void)
1398{}
1399
1400phys_addr_t __weak paddr_vmcoreinfo_note(void)
1401{
1402 return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
1403}
1404
1405static int __init crash_save_vmcoreinfo_init(void)
1406{
1407 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1408 VMCOREINFO_PAGESIZE(PAGE_SIZE);
1409
1410 VMCOREINFO_SYMBOL(init_uts_ns);
1411 VMCOREINFO_SYMBOL(node_online_map);
1412#ifdef CONFIG_MMU
1413 VMCOREINFO_SYMBOL(swapper_pg_dir);
1414#endif
1415 VMCOREINFO_SYMBOL(_stext);
1416 VMCOREINFO_SYMBOL(vmap_area_list);
1417
1418#ifndef CONFIG_NEED_MULTIPLE_NODES
1419 VMCOREINFO_SYMBOL(mem_map);
1420 VMCOREINFO_SYMBOL(contig_page_data);
1421#endif
1422#ifdef CONFIG_SPARSEMEM
1423 VMCOREINFO_SYMBOL(mem_section);
1424 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1425 VMCOREINFO_STRUCT_SIZE(mem_section);
1426 VMCOREINFO_OFFSET(mem_section, section_mem_map);
1427#endif
1428 VMCOREINFO_STRUCT_SIZE(page);
1429 VMCOREINFO_STRUCT_SIZE(pglist_data);
1430 VMCOREINFO_STRUCT_SIZE(zone);
1431 VMCOREINFO_STRUCT_SIZE(free_area);
1432 VMCOREINFO_STRUCT_SIZE(list_head);
1433 VMCOREINFO_SIZE(nodemask_t);
1434 VMCOREINFO_OFFSET(page, flags);
1435 VMCOREINFO_OFFSET(page, _refcount);
1436 VMCOREINFO_OFFSET(page, mapping);
1437 VMCOREINFO_OFFSET(page, lru);
1438 VMCOREINFO_OFFSET(page, _mapcount);
1439 VMCOREINFO_OFFSET(page, private);
1440 VMCOREINFO_OFFSET(page, compound_dtor);
1441 VMCOREINFO_OFFSET(page, compound_order);
1442 VMCOREINFO_OFFSET(page, compound_head);
1443 VMCOREINFO_OFFSET(pglist_data, node_zones);
1444 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1445#ifdef CONFIG_FLAT_NODE_MEM_MAP
1446 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1447#endif
1448 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1449 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1450 VMCOREINFO_OFFSET(pglist_data, node_id);
1451 VMCOREINFO_OFFSET(zone, free_area);
1452 VMCOREINFO_OFFSET(zone, vm_stat);
1453 VMCOREINFO_OFFSET(zone, spanned_pages);
1454 VMCOREINFO_OFFSET(free_area, free_list);
1455 VMCOREINFO_OFFSET(list_head, next);
1456 VMCOREINFO_OFFSET(list_head, prev);
1457 VMCOREINFO_OFFSET(vmap_area, va_start);
1458 VMCOREINFO_OFFSET(vmap_area, list);
1459 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1460 log_buf_kexec_setup();
1461 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1462 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1463 VMCOREINFO_NUMBER(PG_lru);
1464 VMCOREINFO_NUMBER(PG_private);
1465 VMCOREINFO_NUMBER(PG_swapcache);
1466 VMCOREINFO_NUMBER(PG_slab);
1467#ifdef CONFIG_MEMORY_FAILURE
1468 VMCOREINFO_NUMBER(PG_hwpoison);
1469#endif
1470 VMCOREINFO_NUMBER(PG_head_mask);
1471 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1472#ifdef CONFIG_HUGETLB_PAGE
1473 VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
1474#endif
1475
1476 arch_crash_save_vmcoreinfo();
1477 update_vmcoreinfo_note();
1478
1479 return 0;
1480}
1481
1482subsys_initcall(crash_save_vmcoreinfo_init);
1483
1484/*
1485 * Move into place and start executing a preloaded standalone 1054 * Move into place and start executing a preloaded standalone
1486 * executable. If nothing was preloaded return an error. 1055 * executable. If nothing was preloaded return an error.
1487 */ 1056 */
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 0999679d6f26..23cd70651238 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -125,6 +125,10 @@ static ssize_t kexec_crash_size_store(struct kobject *kobj,
125} 125}
126KERNEL_ATTR_RW(kexec_crash_size); 126KERNEL_ATTR_RW(kexec_crash_size);
127 127
128#endif /* CONFIG_KEXEC_CORE */
129
130#ifdef CONFIG_CRASH_CORE
131
128static ssize_t vmcoreinfo_show(struct kobject *kobj, 132static ssize_t vmcoreinfo_show(struct kobject *kobj,
129 struct kobj_attribute *attr, char *buf) 133 struct kobj_attribute *attr, char *buf)
130{ 134{
@@ -134,7 +138,7 @@ static ssize_t vmcoreinfo_show(struct kobject *kobj,
134} 138}
135KERNEL_ATTR_RO(vmcoreinfo); 139KERNEL_ATTR_RO(vmcoreinfo);
136 140
137#endif /* CONFIG_KEXEC_CORE */ 141#endif /* CONFIG_CRASH_CORE */
138 142
139/* whether file capabilities are enabled */ 143/* whether file capabilities are enabled */
140static ssize_t fscaps_show(struct kobject *kobj, 144static ssize_t fscaps_show(struct kobject *kobj,
@@ -219,6 +223,8 @@ static struct attribute * kernel_attrs[] = {
219 &kexec_loaded_attr.attr, 223 &kexec_loaded_attr.attr,
220 &kexec_crash_loaded_attr.attr, 224 &kexec_crash_loaded_attr.attr,
221 &kexec_crash_size_attr.attr, 225 &kexec_crash_size_attr.attr,
226#endif
227#ifdef CONFIG_CRASH_CORE
222 &vmcoreinfo_attr.attr, 228 &vmcoreinfo_attr.attr,
223#endif 229#endif
224#ifndef CONFIG_TINY_RCU 230#ifndef CONFIG_TINY_RCU
diff --git a/kernel/module.c b/kernel/module.c
index f37308b733d8..4a3665f8f837 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -49,6 +49,9 @@
49#include <linux/rculist.h> 49#include <linux/rculist.h>
50#include <linux/uaccess.h> 50#include <linux/uaccess.h>
51#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
52#ifdef CONFIG_STRICT_MODULE_RWX
53#include <asm/set_memory.h>
54#endif
52#include <asm/mmu_context.h> 55#include <asm/mmu_context.h>
53#include <linux/license.h> 56#include <linux/license.h>
54#include <asm/sections.h> 57#include <asm/sections.h>
@@ -2864,7 +2867,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
2864 2867
2865 /* Suck in entire file: we'll want most of it. */ 2868 /* Suck in entire file: we'll want most of it. */
2866 info->hdr = __vmalloc(info->len, 2869 info->hdr = __vmalloc(info->len,
2867 GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL); 2870 GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
2868 if (!info->hdr) 2871 if (!info->hdr)
2869 return -ENOMEM; 2872 return -ENOMEM;
2870 2873
diff --git a/kernel/pid.c b/kernel/pid.c
index 0143ac0ddceb..fd1cde1e4576 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -321,8 +321,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
321 } 321 }
322 322
323 if (unlikely(is_child_reaper(pid))) { 323 if (unlikely(is_child_reaper(pid))) {
324 if (pid_ns_prepare_proc(ns)) 324 if (pid_ns_prepare_proc(ns)) {
325 disable_pid_allocation(ns);
325 goto out_free; 326 goto out_free;
327 }
326 } 328 }
327 329
328 get_pid_ns(ns); 330 get_pid_ns(ns);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index de461aa0bf9a..d1f3e9f558b8 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -374,6 +374,29 @@ static struct ns_common *pidns_get(struct task_struct *task)
374 return ns ? &ns->ns : NULL; 374 return ns ? &ns->ns : NULL;
375} 375}
376 376
377static struct ns_common *pidns_for_children_get(struct task_struct *task)
378{
379 struct pid_namespace *ns = NULL;
380
381 task_lock(task);
382 if (task->nsproxy) {
383 ns = task->nsproxy->pid_ns_for_children;
384 get_pid_ns(ns);
385 }
386 task_unlock(task);
387
388 if (ns) {
389 read_lock(&tasklist_lock);
390 if (!ns->child_reaper) {
391 put_pid_ns(ns);
392 ns = NULL;
393 }
394 read_unlock(&tasklist_lock);
395 }
396
397 return ns ? &ns->ns : NULL;
398}
399
377static void pidns_put(struct ns_common *ns) 400static void pidns_put(struct ns_common *ns)
378{ 401{
379 put_pid_ns(to_pid_ns(ns)); 402 put_pid_ns(to_pid_ns(ns));
@@ -443,6 +466,17 @@ const struct proc_ns_operations pidns_operations = {
443 .get_parent = pidns_get_parent, 466 .get_parent = pidns_get_parent,
444}; 467};
445 468
469const struct proc_ns_operations pidns_for_children_operations = {
470 .name = "pid_for_children",
471 .real_ns_name = "pid",
472 .type = CLONE_NEWPID,
473 .get = pidns_for_children_get,
474 .put = pidns_put,
475 .install = pidns_install,
476 .owner = pidns_owner,
477 .get_parent = pidns_get_parent,
478};
479
446static __init int pid_namespaces_init(void) 480static __init int pid_namespaces_init(void)
447{ 481{
448 pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); 482 pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index d79a38de425a..3b1e0f3ad07f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -36,6 +36,9 @@
36#include <asm/pgtable.h> 36#include <asm/pgtable.h>
37#include <asm/tlbflush.h> 37#include <asm/tlbflush.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#ifdef CONFIG_STRICT_KERNEL_RWX
40#include <asm/set_memory.h>
41#endif
39 42
40#include "power.h" 43#include "power.h"
41 44
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 779479ac9f57..fb2d1591f671 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -32,7 +32,7 @@
32#include <linux/bootmem.h> 32#include <linux/bootmem.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/kexec.h> 35#include <linux/crash_core.h>
36#include <linux/kdb.h> 36#include <linux/kdb.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38#include <linux/kmsg_dump.h> 38#include <linux/kmsg_dump.h>
@@ -1002,7 +1002,7 @@ const struct file_operations kmsg_fops = {
1002 .release = devkmsg_release, 1002 .release = devkmsg_release,
1003}; 1003};
1004 1004
1005#ifdef CONFIG_KEXEC_CORE 1005#ifdef CONFIG_CRASH_CORE
1006/* 1006/*
1007 * This appends the listed symbols to /proc/vmcore 1007 * This appends the listed symbols to /proc/vmcore
1008 * 1008 *
@@ -1011,7 +1011,7 @@ const struct file_operations kmsg_fops = {
1011 * symbols are specifically used so that utilities can access and extract the 1011 * symbols are specifically used so that utilities can access and extract the
1012 * dmesg log from a vmcore file after a crash. 1012 * dmesg log from a vmcore file after a crash.
1013 */ 1013 */
1014void log_buf_kexec_setup(void) 1014void log_buf_vmcoreinfo_setup(void)
1015{ 1015{
1016 VMCOREINFO_SYMBOL(log_buf); 1016 VMCOREINFO_SYMBOL(log_buf);
1017 VMCOREINFO_SYMBOL(log_buf_len); 1017 VMCOREINFO_SYMBOL(log_buf_len);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 21343d110296..4dfba1a76cc3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2576,7 +2576,7 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp,
2576 int write, void *data) 2576 int write, void *data)
2577{ 2577{
2578 if (write) { 2578 if (write) {
2579 if (*lvalp > LONG_MAX / HZ) 2579 if (*lvalp > INT_MAX / HZ)
2580 return 1; 2580 return 1;
2581 *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); 2581 *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ);
2582 } else { 2582 } else {
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 8a5e44236f78..4559e914452b 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -30,6 +30,7 @@
30#include <linux/pid_namespace.h> 30#include <linux/pid_namespace.h>
31#include <net/genetlink.h> 31#include <net/genetlink.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <linux/sched/cputime.h>
33 34
34/* 35/*
35 * Maximum length of a cpumask that can be specified in 36 * Maximum length of a cpumask that can be specified in
@@ -210,6 +211,8 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
210 struct task_struct *tsk, *first; 211 struct task_struct *tsk, *first;
211 unsigned long flags; 212 unsigned long flags;
212 int rc = -ESRCH; 213 int rc = -ESRCH;
214 u64 delta, utime, stime;
215 u64 start_time;
213 216
214 /* 217 /*
215 * Add additional stats from live tasks except zombie thread group 218 * Add additional stats from live tasks except zombie thread group
@@ -227,6 +230,7 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
227 memset(stats, 0, sizeof(*stats)); 230 memset(stats, 0, sizeof(*stats));
228 231
229 tsk = first; 232 tsk = first;
233 start_time = ktime_get_ns();
230 do { 234 do {
231 if (tsk->exit_state) 235 if (tsk->exit_state)
232 continue; 236 continue;
@@ -238,6 +242,16 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
238 */ 242 */
239 delayacct_add_tsk(stats, tsk); 243 delayacct_add_tsk(stats, tsk);
240 244
245 /* calculate task elapsed time in nsec */
246 delta = start_time - tsk->start_time;
247 /* Convert to micro seconds */
248 do_div(delta, NSEC_PER_USEC);
249 stats->ac_etime += delta;
250
251 task_cputime(tsk, &utime, &stime);
252 stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
253 stats->ac_stime += div_u64(stime, NSEC_PER_USEC);
254
241 stats->nvcsw += tsk->nvcsw; 255 stats->nvcsw += tsk->nvcsw;
242 stats->nivcsw += tsk->nivcsw; 256 stats->nivcsw += tsk->nivcsw;
243 } while_each_thread(first, tsk); 257 } while_each_thread(first, tsk);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c203ac4df791..adcdbbeae010 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -348,14 +348,14 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
348 __field( u64, duration ) 348 __field( u64, duration )
349 __field( u64, outer_duration ) 349 __field( u64, outer_duration )
350 __field( u64, nmi_total_ts ) 350 __field( u64, nmi_total_ts )
351 __field_struct( struct timespec, timestamp ) 351 __field_struct( struct timespec64, timestamp )
352 __field_desc( long, timestamp, tv_sec ) 352 __field_desc( s64, timestamp, tv_sec )
353 __field_desc( long, timestamp, tv_nsec ) 353 __field_desc( long, timestamp, tv_nsec )
354 __field( unsigned int, nmi_count ) 354 __field( unsigned int, nmi_count )
355 __field( unsigned int, seqnum ) 355 __field( unsigned int, seqnum )
356 ), 356 ),
357 357
358 F_printk("cnt:%u\tts:%010lu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n", 358 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
359 __entry->seqnum, 359 __entry->seqnum,
360 __entry->tv_sec, 360 __entry->tv_sec,
361 __entry->tv_nsec, 361 __entry->tv_nsec,
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 21ea6ae77d93..d7c8e4ec3d9d 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -79,12 +79,12 @@ static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;
79 79
80/* Individual latency samples are stored here when detected. */ 80/* Individual latency samples are stored here when detected. */
81struct hwlat_sample { 81struct hwlat_sample {
82 u64 seqnum; /* unique sequence */ 82 u64 seqnum; /* unique sequence */
83 u64 duration; /* delta */ 83 u64 duration; /* delta */
84 u64 outer_duration; /* delta (outer loop) */ 84 u64 outer_duration; /* delta (outer loop) */
85 u64 nmi_total_ts; /* Total time spent in NMIs */ 85 u64 nmi_total_ts; /* Total time spent in NMIs */
86 struct timespec timestamp; /* wall time */ 86 struct timespec64 timestamp; /* wall time */
87 int nmi_count; /* # NMIs during this sample */ 87 int nmi_count; /* # NMIs during this sample */
88}; 88};
89 89
90/* keep the global state somewhere. */ 90/* keep the global state somewhere. */
@@ -250,7 +250,7 @@ static int get_sample(void)
250 s.seqnum = hwlat_data.count; 250 s.seqnum = hwlat_data.count;
251 s.duration = sample; 251 s.duration = sample;
252 s.outer_duration = outer_sample; 252 s.outer_duration = outer_sample;
253 s.timestamp = CURRENT_TIME; 253 ktime_get_real_ts64(&s.timestamp);
254 s.nmi_total_ts = nmi_total_ts; 254 s.nmi_total_ts = nmi_total_ts;
255 s.nmi_count = nmi_count; 255 s.nmi_count = nmi_count;
256 trace_hwlat_sample(&s); 256 trace_hwlat_sample(&s);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 02a4aeb22c47..08f9bab8089e 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -4,7 +4,6 @@
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> 4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 * 5 *
6 */ 6 */
7
8#include <linux/module.h> 7#include <linux/module.h>
9#include <linux/mutex.h> 8#include <linux/mutex.h>
10#include <linux/ftrace.h> 9#include <linux/ftrace.h>
@@ -1161,11 +1160,11 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
1161 1160
1162 trace_assign_type(field, entry); 1161 trace_assign_type(field, entry);
1163 1162
1164 trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ld.%09ld", 1163 trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld",
1165 field->seqnum, 1164 field->seqnum,
1166 field->duration, 1165 field->duration,
1167 field->outer_duration, 1166 field->outer_duration,
1168 field->timestamp.tv_sec, 1167 (long long)field->timestamp.tv_sec,
1169 field->timestamp.tv_nsec); 1168 field->timestamp.tv_nsec);
1170 1169
1171 if (field->nmi_count) { 1170 if (field->nmi_count) {
@@ -1195,10 +1194,10 @@ trace_hwlat_raw(struct trace_iterator *iter, int flags,
1195 1194
1196 trace_assign_type(field, iter->ent); 1195 trace_assign_type(field, iter->ent);
1197 1196
1198 trace_seq_printf(s, "%llu %lld %ld %09ld %u\n", 1197 trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
1199 field->duration, 1198 field->duration,
1200 field->outer_duration, 1199 field->outer_duration,
1201 field->timestamp.tv_sec, 1200 (long long)field->timestamp.tv_sec,
1202 field->timestamp.tv_nsec, 1201 field->timestamp.tv_nsec,
1203 field->seqnum); 1202 field->seqnum);
1204 1203
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2a617e09ab7..e4587ebe52c7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1719,19 +1719,21 @@ config LKDTM
1719 Documentation/fault-injection/provoke-crashes.txt 1719 Documentation/fault-injection/provoke-crashes.txt
1720 1720
1721config TEST_LIST_SORT 1721config TEST_LIST_SORT
1722 bool "Linked list sorting test" 1722 tristate "Linked list sorting test"
1723 depends on DEBUG_KERNEL 1723 depends on DEBUG_KERNEL || m
1724 help 1724 help
1725 Enable this to turn on 'list_sort()' function test. This test is 1725 Enable this to turn on 'list_sort()' function test. This test is
1726 executed only once during system boot, so affects only boot time. 1726 executed only once during system boot (so affects only boot time),
1727 or at module load time.
1727 1728
1728 If unsure, say N. 1729 If unsure, say N.
1729 1730
1730config TEST_SORT 1731config TEST_SORT
1731 bool "Array-based sort test" 1732 tristate "Array-based sort test"
1732 depends on DEBUG_KERNEL 1733 depends on DEBUG_KERNEL || m
1733 help 1734 help
1734 This option enables the self-test function of 'sort()' at boot. 1735 This option enables the self-test function of 'sort()' at boot,
1736 or at module load time.
1735 1737
1736 If unsure, say N. 1738 If unsure, say N.
1737 1739
diff --git a/lib/Makefile b/lib/Makefile
index a155c73e3437..0166fbc0fa81 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
52obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o 52obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
53obj-$(CONFIG_TEST_KASAN) += test_kasan.o 53obj-$(CONFIG_TEST_KASAN) += test_kasan.o
54obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 54obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
55obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
55obj-$(CONFIG_TEST_LKM) += test_module.o 56obj-$(CONFIG_TEST_LKM) += test_module.o
56obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o 57obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
57obj-$(CONFIG_TEST_SORT) += test_sort.o 58obj-$(CONFIG_TEST_SORT) += test_sort.o
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 6a823a53e357..4ff157159a0d 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -56,7 +56,7 @@ static void fail_dump(struct fault_attr *attr)
56 56
57static bool fail_task(struct fault_attr *attr, struct task_struct *task) 57static bool fail_task(struct fault_attr *attr, struct task_struct *task)
58{ 58{
59 return !in_interrupt() && task->make_it_fail; 59 return in_task() && task->make_it_fail;
60} 60}
61 61
62#define MAX_STACK_TRACE_DEPTH 32 62#define MAX_STACK_TRACE_DEPTH 32
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 4952311422c1..ae82d9cea553 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1028,10 +1028,7 @@ EXPORT_SYMBOL(iov_iter_get_pages);
1028 1028
1029static struct page **get_pages_array(size_t n) 1029static struct page **get_pages_array(size_t n)
1030{ 1030{
1031 struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL); 1031 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1032 if (!p)
1033 p = vmalloc(n * sizeof(struct page *));
1034 return p;
1035} 1032}
1036 1033
1037static ssize_t pipe_get_pages_alloc(struct iov_iter *i, 1034static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 3fe401067e20..9e9acc37652f 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -1,6 +1,3 @@
1
2#define pr_fmt(fmt) "list_sort_test: " fmt
3
4#include <linux/kernel.h> 1#include <linux/kernel.h>
5#include <linux/bug.h> 2#include <linux/bug.h>
6#include <linux/compiler.h> 3#include <linux/compiler.h>
@@ -145,149 +142,3 @@ void list_sort(void *priv, struct list_head *head,
145 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); 142 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
146} 143}
147EXPORT_SYMBOL(list_sort); 144EXPORT_SYMBOL(list_sort);
148
149#ifdef CONFIG_TEST_LIST_SORT
150
151#include <linux/slab.h>
152#include <linux/random.h>
153
154/*
155 * The pattern of set bits in the list length determines which cases
156 * are hit in list_sort().
157 */
158#define TEST_LIST_LEN (512+128+2) /* not including head */
159
160#define TEST_POISON1 0xDEADBEEF
161#define TEST_POISON2 0xA324354C
162
163struct debug_el {
164 unsigned int poison1;
165 struct list_head list;
166 unsigned int poison2;
167 int value;
168 unsigned serial;
169};
170
171/* Array, containing pointers to all elements in the test list */
172static struct debug_el **elts __initdata;
173
174static int __init check(struct debug_el *ela, struct debug_el *elb)
175{
176 if (ela->serial >= TEST_LIST_LEN) {
177 pr_err("error: incorrect serial %d\n", ela->serial);
178 return -EINVAL;
179 }
180 if (elb->serial >= TEST_LIST_LEN) {
181 pr_err("error: incorrect serial %d\n", elb->serial);
182 return -EINVAL;
183 }
184 if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
185 pr_err("error: phantom element\n");
186 return -EINVAL;
187 }
188 if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
189 pr_err("error: bad poison: %#x/%#x\n",
190 ela->poison1, ela->poison2);
191 return -EINVAL;
192 }
193 if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
194 pr_err("error: bad poison: %#x/%#x\n",
195 elb->poison1, elb->poison2);
196 return -EINVAL;
197 }
198 return 0;
199}
200
201static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
202{
203 struct debug_el *ela, *elb;
204
205 ela = container_of(a, struct debug_el, list);
206 elb = container_of(b, struct debug_el, list);
207
208 check(ela, elb);
209 return ela->value - elb->value;
210}
211
212static int __init list_sort_test(void)
213{
214 int i, count = 1, err = -ENOMEM;
215 struct debug_el *el;
216 struct list_head *cur;
217 LIST_HEAD(head);
218
219 pr_debug("start testing list_sort()\n");
220
221 elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
222 if (!elts) {
223 pr_err("error: cannot allocate memory\n");
224 return err;
225 }
226
227 for (i = 0; i < TEST_LIST_LEN; i++) {
228 el = kmalloc(sizeof(*el), GFP_KERNEL);
229 if (!el) {
230 pr_err("error: cannot allocate memory\n");
231 goto exit;
232 }
233 /* force some equivalencies */
234 el->value = prandom_u32() % (TEST_LIST_LEN / 3);
235 el->serial = i;
236 el->poison1 = TEST_POISON1;
237 el->poison2 = TEST_POISON2;
238 elts[i] = el;
239 list_add_tail(&el->list, &head);
240 }
241
242 list_sort(NULL, &head, cmp);
243
244 err = -EINVAL;
245 for (cur = head.next; cur->next != &head; cur = cur->next) {
246 struct debug_el *el1;
247 int cmp_result;
248
249 if (cur->next->prev != cur) {
250 pr_err("error: list is corrupted\n");
251 goto exit;
252 }
253
254 cmp_result = cmp(NULL, cur, cur->next);
255 if (cmp_result > 0) {
256 pr_err("error: list is not sorted\n");
257 goto exit;
258 }
259
260 el = container_of(cur, struct debug_el, list);
261 el1 = container_of(cur->next, struct debug_el, list);
262 if (cmp_result == 0 && el->serial >= el1->serial) {
263 pr_err("error: order of equivalent elements not "
264 "preserved\n");
265 goto exit;
266 }
267
268 if (check(el, el1)) {
269 pr_err("error: element check failed\n");
270 goto exit;
271 }
272 count++;
273 }
274 if (head.prev != cur) {
275 pr_err("error: list is corrupted\n");
276 goto exit;
277 }
278
279
280 if (count != TEST_LIST_LEN) {
281 pr_err("error: bad list length %d", count);
282 goto exit;
283 }
284
285 err = 0;
286exit:
287 for (i = 0; i < TEST_LIST_LEN; i++)
288 kfree(elts[i]);
289 kfree(elts);
290 return err;
291}
292late_initcall(list_sort_test);
293#endif /* CONFIG_TEST_LIST_SORT */
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a930e436db5d..d9e7274a04cd 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -86,16 +86,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
86 size = min(size, 1U << tbl->nest); 86 size = min(size, 1U << tbl->nest);
87 87
88 if (sizeof(spinlock_t) != 0) { 88 if (sizeof(spinlock_t) != 0) {
89 tbl->locks = NULL; 89 if (gfpflags_allow_blocking(gfp))
90#ifdef CONFIG_NUMA 90 tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
91 if (size * sizeof(spinlock_t) > PAGE_SIZE && 91 else
92 gfp == GFP_KERNEL)
93 tbl->locks = vmalloc(size * sizeof(spinlock_t));
94#endif
95 if (gfp != GFP_KERNEL)
96 gfp |= __GFP_NOWARN | __GFP_NORETRY;
97
98 if (!tbl->locks)
99 tbl->locks = kmalloc_array(size, sizeof(spinlock_t), 92 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
100 gfp); 93 gfp);
101 if (!tbl->locks) 94 if (!tbl->locks)
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
new file mode 100644
index 000000000000..28e817387b04
--- /dev/null
+++ b/lib/test_list_sort.c
@@ -0,0 +1,150 @@
1#define pr_fmt(fmt) "list_sort_test: " fmt
2
3#include <linux/kernel.h>
4#include <linux/list_sort.h>
5#include <linux/list.h>
6#include <linux/module.h>
7#include <linux/printk.h>
8#include <linux/slab.h>
9#include <linux/random.h>
10
11/*
12 * The pattern of set bits in the list length determines which cases
13 * are hit in list_sort().
14 */
15#define TEST_LIST_LEN (512+128+2) /* not including head */
16
17#define TEST_POISON1 0xDEADBEEF
18#define TEST_POISON2 0xA324354C
19
20struct debug_el {
21 unsigned int poison1;
22 struct list_head list;
23 unsigned int poison2;
24 int value;
25 unsigned serial;
26};
27
28/* Array, containing pointers to all elements in the test list */
29static struct debug_el **elts __initdata;
30
31static int __init check(struct debug_el *ela, struct debug_el *elb)
32{
33 if (ela->serial >= TEST_LIST_LEN) {
34 pr_err("error: incorrect serial %d\n", ela->serial);
35 return -EINVAL;
36 }
37 if (elb->serial >= TEST_LIST_LEN) {
38 pr_err("error: incorrect serial %d\n", elb->serial);
39 return -EINVAL;
40 }
41 if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
42 pr_err("error: phantom element\n");
43 return -EINVAL;
44 }
45 if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
46 pr_err("error: bad poison: %#x/%#x\n",
47 ela->poison1, ela->poison2);
48 return -EINVAL;
49 }
50 if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
51 pr_err("error: bad poison: %#x/%#x\n",
52 elb->poison1, elb->poison2);
53 return -EINVAL;
54 }
55 return 0;
56}
57
58static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
59{
60 struct debug_el *ela, *elb;
61
62 ela = container_of(a, struct debug_el, list);
63 elb = container_of(b, struct debug_el, list);
64
65 check(ela, elb);
66 return ela->value - elb->value;
67}
68
69static int __init list_sort_test(void)
70{
71 int i, count = 1, err = -ENOMEM;
72 struct debug_el *el;
73 struct list_head *cur;
74 LIST_HEAD(head);
75
76 pr_debug("start testing list_sort()\n");
77
78 elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
79 if (!elts) {
80 pr_err("error: cannot allocate memory\n");
81 return err;
82 }
83
84 for (i = 0; i < TEST_LIST_LEN; i++) {
85 el = kmalloc(sizeof(*el), GFP_KERNEL);
86 if (!el) {
87 pr_err("error: cannot allocate memory\n");
88 goto exit;
89 }
90 /* force some equivalencies */
91 el->value = prandom_u32() % (TEST_LIST_LEN / 3);
92 el->serial = i;
93 el->poison1 = TEST_POISON1;
94 el->poison2 = TEST_POISON2;
95 elts[i] = el;
96 list_add_tail(&el->list, &head);
97 }
98
99 list_sort(NULL, &head, cmp);
100
101 err = -EINVAL;
102 for (cur = head.next; cur->next != &head; cur = cur->next) {
103 struct debug_el *el1;
104 int cmp_result;
105
106 if (cur->next->prev != cur) {
107 pr_err("error: list is corrupted\n");
108 goto exit;
109 }
110
111 cmp_result = cmp(NULL, cur, cur->next);
112 if (cmp_result > 0) {
113 pr_err("error: list is not sorted\n");
114 goto exit;
115 }
116
117 el = container_of(cur, struct debug_el, list);
118 el1 = container_of(cur->next, struct debug_el, list);
119 if (cmp_result == 0 && el->serial >= el1->serial) {
120 pr_err("error: order of equivalent elements not "
121 "preserved\n");
122 goto exit;
123 }
124
125 if (check(el, el1)) {
126 pr_err("error: element check failed\n");
127 goto exit;
128 }
129 count++;
130 }
131 if (head.prev != cur) {
132 pr_err("error: list is corrupted\n");
133 goto exit;
134 }
135
136
137 if (count != TEST_LIST_LEN) {
138 pr_err("error: bad list length %d", count);
139 goto exit;
140 }
141
142 err = 0;
143exit:
144 for (i = 0; i < TEST_LIST_LEN; i++)
145 kfree(elts[i]);
146 kfree(elts);
147 return err;
148}
149module_init(list_sort_test);
150MODULE_LICENSE("GPL");
diff --git a/lib/test_sort.c b/lib/test_sort.c
index 4db3911db50a..d389c1cc2f6c 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -1,11 +1,8 @@
1#include <linux/sort.h> 1#include <linux/sort.h>
2#include <linux/slab.h> 2#include <linux/slab.h>
3#include <linux/init.h> 3#include <linux/module.h>
4 4
5/* 5/* a simple boot-time regression test */
6 * A simple boot-time regression test
7 * License: GPL
8 */
9 6
10#define TEST_LEN 1000 7#define TEST_LEN 1000
11 8
@@ -41,4 +38,6 @@ exit:
41 kfree(a); 38 kfree(a);
42 return err; 39 return err;
43} 40}
44subsys_initcall(test_sort_init); 41
42module_init(test_sort_init);
43MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 176641cc549d..2d41de3f98a1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1477,6 +1477,9 @@ int kptr_restrict __read_mostly;
1477 * by an extra set of alphanumeric characters that are extended format 1477 * by an extra set of alphanumeric characters that are extended format
1478 * specifiers. 1478 * specifiers.
1479 * 1479 *
1480 * Please update scripts/checkpatch.pl when adding/removing conversion
1481 * characters. (Search for "check for vsprintf extension").
1482 *
1480 * Right now we handle: 1483 * Right now we handle:
1481 * 1484 *
1482 * - 'F' For symbolic function descriptor pointers with offset 1485 * - 'F' For symbolic function descriptor pointers with offset
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index 3fe6ce5b53e5..028943052926 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -109,7 +109,7 @@ int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
109 *bits = 1; 109 *bits = 1;
110 return 0; /* no symbols, but wait for decoding to report error */ 110 return 0; /* no symbols, but wait for decoding to report error */
111 } 111 }
112 for (min = 1; min <= MAXBITS; min++) 112 for (min = 1; min < MAXBITS; min++)
113 if (count[min] != 0) break; 113 if (count[min] != 0) break;
114 if (root < min) root = min; 114 if (root < min) root = min;
115 115
diff --git a/mm/compaction.c b/mm/compaction.c
index 09c5282ebdd2..613c59e928cb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,11 +89,6 @@ static void map_pages(struct list_head *list)
89 list_splice(&tmp_list, list); 89 list_splice(&tmp_list, list);
90} 90}
91 91
92static inline bool migrate_async_suitable(int migratetype)
93{
94 return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
95}
96
97#ifdef CONFIG_COMPACTION 92#ifdef CONFIG_COMPACTION
98 93
99int PageMovable(struct page *page) 94int PageMovable(struct page *page)
@@ -988,6 +983,22 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
988#endif /* CONFIG_COMPACTION || CONFIG_CMA */ 983#endif /* CONFIG_COMPACTION || CONFIG_CMA */
989#ifdef CONFIG_COMPACTION 984#ifdef CONFIG_COMPACTION
990 985
986static bool suitable_migration_source(struct compact_control *cc,
987 struct page *page)
988{
989 int block_mt;
990
991 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
992 return true;
993
994 block_mt = get_pageblock_migratetype(page);
995
996 if (cc->migratetype == MIGRATE_MOVABLE)
997 return is_migrate_movable(block_mt);
998 else
999 return block_mt == cc->migratetype;
1000}
1001
991/* Returns true if the page is within a block suitable for migration to */ 1002/* Returns true if the page is within a block suitable for migration to */
992static bool suitable_migration_target(struct compact_control *cc, 1003static bool suitable_migration_target(struct compact_control *cc,
993 struct page *page) 1004 struct page *page)
@@ -1007,7 +1018,7 @@ static bool suitable_migration_target(struct compact_control *cc,
1007 return true; 1018 return true;
1008 1019
1009 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1020 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1010 if (migrate_async_suitable(get_pageblock_migratetype(page))) 1021 if (is_migrate_movable(get_pageblock_migratetype(page)))
1011 return true; 1022 return true;
1012 1023
1013 /* Otherwise skip the block */ 1024 /* Otherwise skip the block */
@@ -1242,8 +1253,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
1242 * Async compaction is optimistic to see if the minimum amount 1253 * Async compaction is optimistic to see if the minimum amount
1243 * of work satisfies the allocation. 1254 * of work satisfies the allocation.
1244 */ 1255 */
1245 if (cc->mode == MIGRATE_ASYNC && 1256 if (!suitable_migration_source(cc, page))
1246 !migrate_async_suitable(get_pageblock_migratetype(page)))
1247 continue; 1257 continue;
1248 1258
1249 /* Perform the isolation */ 1259 /* Perform the isolation */
@@ -1276,11 +1286,11 @@ static inline bool is_via_compact_memory(int order)
1276 return order == -1; 1286 return order == -1;
1277} 1287}
1278 1288
1279static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc, 1289static enum compact_result __compact_finished(struct zone *zone,
1280 const int migratetype) 1290 struct compact_control *cc)
1281{ 1291{
1282 unsigned int order; 1292 unsigned int order;
1283 unsigned long watermark; 1293 const int migratetype = cc->migratetype;
1284 1294
1285 if (cc->contended || fatal_signal_pending(current)) 1295 if (cc->contended || fatal_signal_pending(current))
1286 return COMPACT_CONTENDED; 1296 return COMPACT_CONTENDED;
@@ -1308,12 +1318,16 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
1308 if (is_via_compact_memory(cc->order)) 1318 if (is_via_compact_memory(cc->order))
1309 return COMPACT_CONTINUE; 1319 return COMPACT_CONTINUE;
1310 1320
1311 /* Compaction run is not finished if the watermark is not met */ 1321 if (cc->finishing_block) {
1312 watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK]; 1322 /*
1313 1323 * We have finished the pageblock, but better check again that
1314 if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, 1324 * we really succeeded.
1315 cc->alloc_flags)) 1325 */
1316 return COMPACT_CONTINUE; 1326 if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
1327 cc->finishing_block = false;
1328 else
1329 return COMPACT_CONTINUE;
1330 }
1317 1331
1318 /* Direct compactor: Is a suitable page free? */ 1332 /* Direct compactor: Is a suitable page free? */
1319 for (order = cc->order; order < MAX_ORDER; order++) { 1333 for (order = cc->order; order < MAX_ORDER; order++) {
@@ -1335,20 +1349,40 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
1335 * other migratetype buddy lists. 1349 * other migratetype buddy lists.
1336 */ 1350 */
1337 if (find_suitable_fallback(area, order, migratetype, 1351 if (find_suitable_fallback(area, order, migratetype,
1338 true, &can_steal) != -1) 1352 true, &can_steal) != -1) {
1339 return COMPACT_SUCCESS; 1353
1354 /* movable pages are OK in any pageblock */
1355 if (migratetype == MIGRATE_MOVABLE)
1356 return COMPACT_SUCCESS;
1357
1358 /*
1359 * We are stealing for a non-movable allocation. Make
1360 * sure we finish compacting the current pageblock
1361 * first so it is as free as possible and we won't
1362 * have to steal another one soon. This only applies
1363 * to sync compaction, as async compaction operates
1364 * on pageblocks of the same migratetype.
1365 */
1366 if (cc->mode == MIGRATE_ASYNC ||
1367 IS_ALIGNED(cc->migrate_pfn,
1368 pageblock_nr_pages)) {
1369 return COMPACT_SUCCESS;
1370 }
1371
1372 cc->finishing_block = true;
1373 return COMPACT_CONTINUE;
1374 }
1340 } 1375 }
1341 1376
1342 return COMPACT_NO_SUITABLE_PAGE; 1377 return COMPACT_NO_SUITABLE_PAGE;
1343} 1378}
1344 1379
1345static enum compact_result compact_finished(struct zone *zone, 1380static enum compact_result compact_finished(struct zone *zone,
1346 struct compact_control *cc, 1381 struct compact_control *cc)
1347 const int migratetype)
1348{ 1382{
1349 int ret; 1383 int ret;
1350 1384
1351 ret = __compact_finished(zone, cc, migratetype); 1385 ret = __compact_finished(zone, cc);
1352 trace_mm_compaction_finished(zone, cc->order, ret); 1386 trace_mm_compaction_finished(zone, cc->order, ret);
1353 if (ret == COMPACT_NO_SUITABLE_PAGE) 1387 if (ret == COMPACT_NO_SUITABLE_PAGE)
1354 ret = COMPACT_CONTINUE; 1388 ret = COMPACT_CONTINUE;
@@ -1481,9 +1515,9 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
1481 enum compact_result ret; 1515 enum compact_result ret;
1482 unsigned long start_pfn = zone->zone_start_pfn; 1516 unsigned long start_pfn = zone->zone_start_pfn;
1483 unsigned long end_pfn = zone_end_pfn(zone); 1517 unsigned long end_pfn = zone_end_pfn(zone);
1484 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1485 const bool sync = cc->mode != MIGRATE_ASYNC; 1518 const bool sync = cc->mode != MIGRATE_ASYNC;
1486 1519
1520 cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1487 ret = compaction_suitable(zone, cc->order, cc->alloc_flags, 1521 ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1488 cc->classzone_idx); 1522 cc->classzone_idx);
1489 /* Compaction is likely to fail */ 1523 /* Compaction is likely to fail */
@@ -1533,8 +1567,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
1533 1567
1534 migrate_prep_local(); 1568 migrate_prep_local();
1535 1569
1536 while ((ret = compact_finished(zone, cc, migratetype)) == 1570 while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
1537 COMPACT_CONTINUE) {
1538 int err; 1571 int err;
1539 1572
1540 switch (isolate_migratepages(zone, cc)) { 1573 switch (isolate_migratepages(zone, cc)) {
diff --git a/mm/filemap.c b/mm/filemap.c
index 681da61080bc..b7b973b47d8d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2791,12 +2791,6 @@ ssize_t generic_perform_write(struct file *file,
2791 ssize_t written = 0; 2791 ssize_t written = 0;
2792 unsigned int flags = 0; 2792 unsigned int flags = 0;
2793 2793
2794 /*
2795 * Copies from kernel address space cannot fail (NFSD is a big user).
2796 */
2797 if (!iter_is_iovec(i))
2798 flags |= AOP_FLAG_UNINTERRUPTIBLE;
2799
2800 do { 2794 do {
2801 struct page *page; 2795 struct page *page;
2802 unsigned long offset; /* Offset into pagecache page */ 2796 unsigned long offset; /* Offset into pagecache page */
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index db77dcb38afd..72ebec18629c 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -200,10 +200,7 @@ struct frame_vector *frame_vector_create(unsigned int nr_frames)
200 * Avoid higher order allocations, use vmalloc instead. It should 200 * Avoid higher order allocations, use vmalloc instead. It should
201 * be rare anyway. 201 * be rare anyway.
202 */ 202 */
203 if (size <= PAGE_SIZE) 203 vec = kvmalloc(size, GFP_KERNEL);
204 vec = kmalloc(size, GFP_KERNEL);
205 else
206 vec = vmalloc(size);
207 if (!vec) 204 if (!vec)
208 return NULL; 205 return NULL;
209 vec->nr_allocated = nr_frames; 206 vec->nr_allocated = nr_frames;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b787c4cfda0e..a84909cf20d3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -715,7 +715,8 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
715} 715}
716 716
717static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 717static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
718 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write) 718 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
719 pgtable_t pgtable)
719{ 720{
720 struct mm_struct *mm = vma->vm_mm; 721 struct mm_struct *mm = vma->vm_mm;
721 pmd_t entry; 722 pmd_t entry;
@@ -729,6 +730,12 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
729 entry = pmd_mkyoung(pmd_mkdirty(entry)); 730 entry = pmd_mkyoung(pmd_mkdirty(entry));
730 entry = maybe_pmd_mkwrite(entry, vma); 731 entry = maybe_pmd_mkwrite(entry, vma);
731 } 732 }
733
734 if (pgtable) {
735 pgtable_trans_huge_deposit(mm, pmd, pgtable);
736 atomic_long_inc(&mm->nr_ptes);
737 }
738
732 set_pmd_at(mm, addr, pmd, entry); 739 set_pmd_at(mm, addr, pmd, entry);
733 update_mmu_cache_pmd(vma, addr, pmd); 740 update_mmu_cache_pmd(vma, addr, pmd);
734 spin_unlock(ptl); 741 spin_unlock(ptl);
@@ -738,6 +745,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
738 pmd_t *pmd, pfn_t pfn, bool write) 745 pmd_t *pmd, pfn_t pfn, bool write)
739{ 746{
740 pgprot_t pgprot = vma->vm_page_prot; 747 pgprot_t pgprot = vma->vm_page_prot;
748 pgtable_t pgtable = NULL;
741 /* 749 /*
742 * If we had pmd_special, we could avoid all these restrictions, 750 * If we had pmd_special, we could avoid all these restrictions,
743 * but we need to be consistent with PTEs and architectures that 751 * but we need to be consistent with PTEs and architectures that
@@ -752,9 +760,15 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
752 if (addr < vma->vm_start || addr >= vma->vm_end) 760 if (addr < vma->vm_start || addr >= vma->vm_end)
753 return VM_FAULT_SIGBUS; 761 return VM_FAULT_SIGBUS;
754 762
763 if (arch_needs_pgtable_deposit()) {
764 pgtable = pte_alloc_one(vma->vm_mm, addr);
765 if (!pgtable)
766 return VM_FAULT_OOM;
767 }
768
755 track_pfn_insert(vma, &pgprot, pfn); 769 track_pfn_insert(vma, &pgprot, pfn);
756 770
757 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); 771 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
758 return VM_FAULT_NOPAGE; 772 return VM_FAULT_NOPAGE;
759} 773}
760EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 774EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -1611,12 +1625,13 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1611 tlb->fullmm); 1625 tlb->fullmm);
1612 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1626 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1613 if (vma_is_dax(vma)) { 1627 if (vma_is_dax(vma)) {
1628 if (arch_needs_pgtable_deposit())
1629 zap_deposited_table(tlb->mm, pmd);
1614 spin_unlock(ptl); 1630 spin_unlock(ptl);
1615 if (is_huge_zero_pmd(orig_pmd)) 1631 if (is_huge_zero_pmd(orig_pmd))
1616 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1632 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1617 } else if (is_huge_zero_pmd(orig_pmd)) { 1633 } else if (is_huge_zero_pmd(orig_pmd)) {
1618 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1634 zap_deposited_table(tlb->mm, pmd);
1619 atomic_long_dec(&tlb->mm->nr_ptes);
1620 spin_unlock(ptl); 1635 spin_unlock(ptl);
1621 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); 1636 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1622 } else { 1637 } else {
@@ -1625,10 +1640,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1625 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1640 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1626 VM_BUG_ON_PAGE(!PageHead(page), page); 1641 VM_BUG_ON_PAGE(!PageHead(page), page);
1627 if (PageAnon(page)) { 1642 if (PageAnon(page)) {
1628 pgtable_t pgtable; 1643 zap_deposited_table(tlb->mm, pmd);
1629 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1630 pte_free(tlb->mm, pgtable);
1631 atomic_long_dec(&tlb->mm->nr_ptes);
1632 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1644 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1633 } else { 1645 } else {
1634 if (arch_needs_pgtable_deposit()) 1646 if (arch_needs_pgtable_deposit())
diff --git a/mm/internal.h b/mm/internal.h
index 04d08ef91224..0e4f558412fb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -183,6 +183,7 @@ extern int user_min_free_kbytes;
183struct compact_control { 183struct compact_control {
184 struct list_head freepages; /* List of free pages to migrate to */ 184 struct list_head freepages; /* List of free pages to migrate to */
185 struct list_head migratepages; /* List of pages being migrated */ 185 struct list_head migratepages; /* List of pages being migrated */
186 struct zone *zone;
186 unsigned long nr_freepages; /* Number of isolated free pages */ 187 unsigned long nr_freepages; /* Number of isolated free pages */
187 unsigned long nr_migratepages; /* Number of pages to migrate */ 188 unsigned long nr_migratepages; /* Number of pages to migrate */
188 unsigned long total_migrate_scanned; 189 unsigned long total_migrate_scanned;
@@ -190,17 +191,18 @@ struct compact_control {
190 unsigned long free_pfn; /* isolate_freepages search base */ 191 unsigned long free_pfn; /* isolate_freepages search base */
191 unsigned long migrate_pfn; /* isolate_migratepages search base */ 192 unsigned long migrate_pfn; /* isolate_migratepages search base */
192 unsigned long last_migrated_pfn;/* Not yet flushed page being freed */ 193 unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
194 const gfp_t gfp_mask; /* gfp mask of a direct compactor */
195 int order; /* order a direct compactor needs */
196 int migratetype; /* migratetype of direct compactor */
197 const unsigned int alloc_flags; /* alloc flags of a direct compactor */
198 const int classzone_idx; /* zone index of a direct compactor */
193 enum migrate_mode mode; /* Async or sync migration mode */ 199 enum migrate_mode mode; /* Async or sync migration mode */
194 bool ignore_skip_hint; /* Scan blocks even if marked skip */ 200 bool ignore_skip_hint; /* Scan blocks even if marked skip */
195 bool ignore_block_suitable; /* Scan blocks considered unsuitable */ 201 bool ignore_block_suitable; /* Scan blocks considered unsuitable */
196 bool direct_compaction; /* False from kcompactd or /proc/... */ 202 bool direct_compaction; /* False from kcompactd or /proc/... */
197 bool whole_zone; /* Whole zone should/has been scanned */ 203 bool whole_zone; /* Whole zone should/has been scanned */
198 int order; /* order a direct compactor needs */
199 const gfp_t gfp_mask; /* gfp mask of a direct compactor */
200 const unsigned int alloc_flags; /* alloc flags of a direct compactor */
201 const int classzone_idx; /* zone index of a direct compactor */
202 struct zone *zone;
203 bool contended; /* Signal lock or sched contention */ 204 bool contended; /* Signal lock or sched contention */
205 bool finishing_block; /* Finishing current pageblock */
204}; 206};
205 207
206unsigned long 208unsigned long
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 9348d27088c1..b10da59cf765 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -691,7 +691,7 @@ int kasan_module_alloc(void *addr, size_t size)
691 691
692 ret = __vmalloc_node_range(shadow_size, 1, shadow_start, 692 ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
693 shadow_start + shadow_size, 693 shadow_start + shadow_size,
694 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 694 GFP_KERNEL | __GFP_ZERO,
695 PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, 695 PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
696 __builtin_return_address(0)); 696 __builtin_return_address(0));
697 697
diff --git a/mm/nommu.c b/mm/nommu.c
index 2d131b97a851..fc184f597d59 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -237,12 +237,16 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
237} 237}
238EXPORT_SYMBOL(__vmalloc); 238EXPORT_SYMBOL(__vmalloc);
239 239
240void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
241{
242 return __vmalloc(size, flags, PAGE_KERNEL);
243}
244
240void *vmalloc_user(unsigned long size) 245void *vmalloc_user(unsigned long size)
241{ 246{
242 void *ret; 247 void *ret;
243 248
244 ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 249 ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
245 PAGE_KERNEL);
246 if (ret) { 250 if (ret) {
247 struct vm_area_struct *vma; 251 struct vm_area_struct *vma;
248 252
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c25de46c58f..f9e450c6b6e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1832,9 +1832,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1832 * Note that start_page and end_pages are not aligned on a pageblock 1832 * Note that start_page and end_pages are not aligned on a pageblock
1833 * boundary. If alignment is required, use move_freepages_block() 1833 * boundary. If alignment is required, use move_freepages_block()
1834 */ 1834 */
1835int move_freepages(struct zone *zone, 1835static int move_freepages(struct zone *zone,
1836 struct page *start_page, struct page *end_page, 1836 struct page *start_page, struct page *end_page,
1837 int migratetype) 1837 int migratetype, int *num_movable)
1838{ 1838{
1839 struct page *page; 1839 struct page *page;
1840 unsigned int order; 1840 unsigned int order;
@@ -1851,6 +1851,9 @@ int move_freepages(struct zone *zone,
1851 VM_BUG_ON(page_zone(start_page) != page_zone(end_page)); 1851 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1852#endif 1852#endif
1853 1853
1854 if (num_movable)
1855 *num_movable = 0;
1856
1854 for (page = start_page; page <= end_page;) { 1857 for (page = start_page; page <= end_page;) {
1855 if (!pfn_valid_within(page_to_pfn(page))) { 1858 if (!pfn_valid_within(page_to_pfn(page))) {
1856 page++; 1859 page++;
@@ -1861,6 +1864,15 @@ int move_freepages(struct zone *zone,
1861 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1864 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1862 1865
1863 if (!PageBuddy(page)) { 1866 if (!PageBuddy(page)) {
1867 /*
1868 * We assume that pages that could be isolated for
1869 * migration are movable. But we don't actually try
1870 * isolating, as that would be expensive.
1871 */
1872 if (num_movable &&
1873 (PageLRU(page) || __PageMovable(page)))
1874 (*num_movable)++;
1875
1864 page++; 1876 page++;
1865 continue; 1877 continue;
1866 } 1878 }
@@ -1876,7 +1888,7 @@ int move_freepages(struct zone *zone,
1876} 1888}
1877 1889
1878int move_freepages_block(struct zone *zone, struct page *page, 1890int move_freepages_block(struct zone *zone, struct page *page,
1879 int migratetype) 1891 int migratetype, int *num_movable)
1880{ 1892{
1881 unsigned long start_pfn, end_pfn; 1893 unsigned long start_pfn, end_pfn;
1882 struct page *start_page, *end_page; 1894 struct page *start_page, *end_page;
@@ -1893,7 +1905,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
1893 if (!zone_spans_pfn(zone, end_pfn)) 1905 if (!zone_spans_pfn(zone, end_pfn))
1894 return 0; 1906 return 0;
1895 1907
1896 return move_freepages(zone, start_page, end_page, migratetype); 1908 return move_freepages(zone, start_page, end_page, migratetype,
1909 num_movable);
1897} 1910}
1898 1911
1899static void change_pageblock_range(struct page *pageblock_page, 1912static void change_pageblock_range(struct page *pageblock_page,
@@ -1943,28 +1956,79 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
1943/* 1956/*
1944 * This function implements actual steal behaviour. If order is large enough, 1957 * This function implements actual steal behaviour. If order is large enough,
1945 * we can steal whole pageblock. If not, we first move freepages in this 1958 * we can steal whole pageblock. If not, we first move freepages in this
1946 * pageblock and check whether half of pages are moved or not. If half of 1959 * pageblock to our migratetype and determine how many already-allocated pages
1947 * pages are moved, we can change migratetype of pageblock and permanently 1960 * are there in the pageblock with a compatible migratetype. If at least half
1948 * use it's pages as requested migratetype in the future. 1961 * of pages are free or compatible, we can change migratetype of the pageblock
1962 * itself, so pages freed in the future will be put on the correct free list.
1949 */ 1963 */
1950static void steal_suitable_fallback(struct zone *zone, struct page *page, 1964static void steal_suitable_fallback(struct zone *zone, struct page *page,
1951 int start_type) 1965 int start_type, bool whole_block)
1952{ 1966{
1953 unsigned int current_order = page_order(page); 1967 unsigned int current_order = page_order(page);
1954 int pages; 1968 struct free_area *area;
1969 int free_pages, movable_pages, alike_pages;
1970 int old_block_type;
1971
1972 old_block_type = get_pageblock_migratetype(page);
1973
1974 /*
1975 * This can happen due to races and we want to prevent broken
1976 * highatomic accounting.
1977 */
1978 if (is_migrate_highatomic(old_block_type))
1979 goto single_page;
1955 1980
1956 /* Take ownership for orders >= pageblock_order */ 1981 /* Take ownership for orders >= pageblock_order */
1957 if (current_order >= pageblock_order) { 1982 if (current_order >= pageblock_order) {
1958 change_pageblock_range(page, current_order, start_type); 1983 change_pageblock_range(page, current_order, start_type);
1959 return; 1984 goto single_page;
1985 }
1986
1987 /* We are not allowed to try stealing from the whole block */
1988 if (!whole_block)
1989 goto single_page;
1990
1991 free_pages = move_freepages_block(zone, page, start_type,
1992 &movable_pages);
1993 /*
1994 * Determine how many pages are compatible with our allocation.
1995 * For movable allocation, it's the number of movable pages which
1996 * we just obtained. For other types it's a bit more tricky.
1997 */
1998 if (start_type == MIGRATE_MOVABLE) {
1999 alike_pages = movable_pages;
2000 } else {
2001 /*
2002 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2003 * to MOVABLE pageblock, consider all non-movable pages as
2004 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2005 * vice versa, be conservative since we can't distinguish the
2006 * exact migratetype of non-movable pages.
2007 */
2008 if (old_block_type == MIGRATE_MOVABLE)
2009 alike_pages = pageblock_nr_pages
2010 - (free_pages + movable_pages);
2011 else
2012 alike_pages = 0;
1960 } 2013 }
1961 2014
1962 pages = move_freepages_block(zone, page, start_type); 2015 /* moving whole block can fail due to zone boundary conditions */
2016 if (!free_pages)
2017 goto single_page;
1963 2018
1964 /* Claim the whole block if over half of it is free */ 2019 /*
1965 if (pages >= (1 << (pageblock_order-1)) || 2020 * If a sufficient number of pages in the block are either free or of
2021 * comparable migratability as our allocation, claim the whole block.
2022 */
2023 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
1966 page_group_by_mobility_disabled) 2024 page_group_by_mobility_disabled)
1967 set_pageblock_migratetype(page, start_type); 2025 set_pageblock_migratetype(page, start_type);
2026
2027 return;
2028
2029single_page:
2030 area = &zone->free_area[current_order];
2031 list_move(&page->lru, &area->free_list[start_type]);
1968} 2032}
1969 2033
1970/* 2034/*
@@ -2034,7 +2098,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2034 && !is_migrate_cma(mt)) { 2098 && !is_migrate_cma(mt)) {
2035 zone->nr_reserved_highatomic += pageblock_nr_pages; 2099 zone->nr_reserved_highatomic += pageblock_nr_pages;
2036 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2100 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2037 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); 2101 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2038 } 2102 }
2039 2103
2040out_unlock: 2104out_unlock:
@@ -2111,7 +2175,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2111 * may increase. 2175 * may increase.
2112 */ 2176 */
2113 set_pageblock_migratetype(page, ac->migratetype); 2177 set_pageblock_migratetype(page, ac->migratetype);
2114 ret = move_freepages_block(zone, page, ac->migratetype); 2178 ret = move_freepages_block(zone, page, ac->migratetype,
2179 NULL);
2115 if (ret) { 2180 if (ret) {
2116 spin_unlock_irqrestore(&zone->lock, flags); 2181 spin_unlock_irqrestore(&zone->lock, flags);
2117 return ret; 2182 return ret;
@@ -2123,8 +2188,13 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2123 return false; 2188 return false;
2124} 2189}
2125 2190
2126/* Remove an element from the buddy allocator from the fallback list */ 2191/*
2127static inline struct page * 2192 * Try finding a free buddy page on the fallback list and put it on the free
2193 * list of requested migratetype, possibly along with other pages from the same
2194 * block, depending on fragmentation avoidance heuristics. Returns true if
2195 * fallback was found so that __rmqueue_smallest() can grab it.
2196 */
2197static inline bool
2128__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) 2198__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
2129{ 2199{
2130 struct free_area *area; 2200 struct free_area *area;
@@ -2145,32 +2215,17 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
2145 2215
2146 page = list_first_entry(&area->free_list[fallback_mt], 2216 page = list_first_entry(&area->free_list[fallback_mt],
2147 struct page, lru); 2217 struct page, lru);
2148 if (can_steal && !is_migrate_highatomic_page(page))
2149 steal_suitable_fallback(zone, page, start_migratetype);
2150
2151 /* Remove the page from the freelists */
2152 area->nr_free--;
2153 list_del(&page->lru);
2154 rmv_page_order(page);
2155 2218
2156 expand(zone, page, order, current_order, area, 2219 steal_suitable_fallback(zone, page, start_migratetype,
2157 start_migratetype); 2220 can_steal);
2158 /*
2159 * The pcppage_migratetype may differ from pageblock's
2160 * migratetype depending on the decisions in
2161 * find_suitable_fallback(). This is OK as long as it does not
2162 * differ for MIGRATE_CMA pageblocks. Those can be used as
2163 * fallback only via special __rmqueue_cma_fallback() function
2164 */
2165 set_pcppage_migratetype(page, start_migratetype);
2166 2221
2167 trace_mm_page_alloc_extfrag(page, order, current_order, 2222 trace_mm_page_alloc_extfrag(page, order, current_order,
2168 start_migratetype, fallback_mt); 2223 start_migratetype, fallback_mt);
2169 2224
2170 return page; 2225 return true;
2171 } 2226 }
2172 2227
2173 return NULL; 2228 return false;
2174} 2229}
2175 2230
2176/* 2231/*
@@ -2182,13 +2237,14 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
2182{ 2237{
2183 struct page *page; 2238 struct page *page;
2184 2239
2240retry:
2185 page = __rmqueue_smallest(zone, order, migratetype); 2241 page = __rmqueue_smallest(zone, order, migratetype);
2186 if (unlikely(!page)) { 2242 if (unlikely(!page)) {
2187 if (migratetype == MIGRATE_MOVABLE) 2243 if (migratetype == MIGRATE_MOVABLE)
2188 page = __rmqueue_cma_fallback(zone, order); 2244 page = __rmqueue_cma_fallback(zone, order);
2189 2245
2190 if (!page) 2246 if (!page && __rmqueue_fallback(zone, order, migratetype))
2191 page = __rmqueue_fallback(zone, order, migratetype); 2247 goto retry;
2192 } 2248 }
2193 2249
2194 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2250 trace_mm_page_alloc_zone_locked(page, order, migratetype);
@@ -3227,14 +3283,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3227 enum compact_priority prio, enum compact_result *compact_result) 3283 enum compact_priority prio, enum compact_result *compact_result)
3228{ 3284{
3229 struct page *page; 3285 struct page *page;
3286 unsigned int noreclaim_flag;
3230 3287
3231 if (!order) 3288 if (!order)
3232 return NULL; 3289 return NULL;
3233 3290
3234 current->flags |= PF_MEMALLOC; 3291 noreclaim_flag = memalloc_noreclaim_save();
3235 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3292 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3236 prio); 3293 prio);
3237 current->flags &= ~PF_MEMALLOC; 3294 memalloc_noreclaim_restore(noreclaim_flag);
3238 3295
3239 if (*compact_result <= COMPACT_INACTIVE) 3296 if (*compact_result <= COMPACT_INACTIVE)
3240 return NULL; 3297 return NULL;
@@ -3381,12 +3438,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3381{ 3438{
3382 struct reclaim_state reclaim_state; 3439 struct reclaim_state reclaim_state;
3383 int progress; 3440 int progress;
3441 unsigned int noreclaim_flag;
3384 3442
3385 cond_resched(); 3443 cond_resched();
3386 3444
3387 /* We now go into synchronous reclaim */ 3445 /* We now go into synchronous reclaim */
3388 cpuset_memory_pressure_bump(); 3446 cpuset_memory_pressure_bump();
3389 current->flags |= PF_MEMALLOC; 3447 noreclaim_flag = memalloc_noreclaim_save();
3390 lockdep_set_current_reclaim_state(gfp_mask); 3448 lockdep_set_current_reclaim_state(gfp_mask);
3391 reclaim_state.reclaimed_slab = 0; 3449 reclaim_state.reclaimed_slab = 0;
3392 current->reclaim_state = &reclaim_state; 3450 current->reclaim_state = &reclaim_state;
@@ -3396,7 +3454,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3396 3454
3397 current->reclaim_state = NULL; 3455 current->reclaim_state = NULL;
3398 lockdep_clear_current_reclaim_state(); 3456 lockdep_clear_current_reclaim_state();
3399 current->flags &= ~PF_MEMALLOC; 3457 memalloc_noreclaim_restore(noreclaim_flag);
3400 3458
3401 cond_resched(); 3459 cond_resched();
3402 3460
@@ -3609,6 +3667,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3609 struct alloc_context *ac) 3667 struct alloc_context *ac)
3610{ 3668{
3611 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3669 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3670 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3612 struct page *page = NULL; 3671 struct page *page = NULL;
3613 unsigned int alloc_flags; 3672 unsigned int alloc_flags;
3614 unsigned long did_some_progress; 3673 unsigned long did_some_progress;
@@ -3676,12 +3735,17 @@ retry_cpuset:
3676 3735
3677 /* 3736 /*
3678 * For costly allocations, try direct compaction first, as it's likely 3737 * For costly allocations, try direct compaction first, as it's likely
3679 * that we have enough base pages and don't need to reclaim. Don't try 3738 * that we have enough base pages and don't need to reclaim. For non-
3680 * that for allocations that are allowed to ignore watermarks, as the 3739 * movable high-order allocations, do that as well, as compaction will
3681 * ALLOC_NO_WATERMARKS attempt didn't yet happen. 3740 * try prevent permanent fragmentation by migrating from blocks of the
3741 * same migratetype.
3742 * Don't try this for allocations that are allowed to ignore
3743 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3682 */ 3744 */
3683 if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER && 3745 if (can_direct_reclaim &&
3684 !gfp_pfmemalloc_allowed(gfp_mask)) { 3746 (costly_order ||
3747 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3748 && !gfp_pfmemalloc_allowed(gfp_mask)) {
3685 page = __alloc_pages_direct_compact(gfp_mask, order, 3749 page = __alloc_pages_direct_compact(gfp_mask, order,
3686 alloc_flags, ac, 3750 alloc_flags, ac,
3687 INIT_COMPACT_PRIORITY, 3751 INIT_COMPACT_PRIORITY,
@@ -3693,7 +3757,7 @@ retry_cpuset:
3693 * Checks for costly allocations with __GFP_NORETRY, which 3757 * Checks for costly allocations with __GFP_NORETRY, which
3694 * includes THP page fault allocations 3758 * includes THP page fault allocations
3695 */ 3759 */
3696 if (gfp_mask & __GFP_NORETRY) { 3760 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3697 /* 3761 /*
3698 * If compaction is deferred for high-order allocations, 3762 * If compaction is deferred for high-order allocations,
3699 * it is because sync compaction recently failed. If 3763 * it is because sync compaction recently failed. If
@@ -3774,7 +3838,7 @@ retry:
3774 * Do not retry costly high order allocations unless they are 3838 * Do not retry costly high order allocations unless they are
3775 * __GFP_REPEAT 3839 * __GFP_REPEAT
3776 */ 3840 */
3777 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT)) 3841 if (costly_order && !(gfp_mask & __GFP_REPEAT))
3778 goto nopage; 3842 goto nopage;
3779 3843
3780 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 3844 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7927bbb54a4e..5092e4ef00c8 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -66,7 +66,8 @@ out:
66 66
67 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 67 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
68 zone->nr_isolate_pageblock++; 68 zone->nr_isolate_pageblock++;
69 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); 69 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
70 NULL);
70 71
71 __mod_zone_freepage_state(zone, -nr_pages, migratetype); 72 __mod_zone_freepage_state(zone, -nr_pages, migratetype);
72 } 73 }
@@ -120,7 +121,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
120 * pageblock scanning for freepage moving. 121 * pageblock scanning for freepage moving.
121 */ 122 */
122 if (!isolated_page) { 123 if (!isolated_page) {
123 nr_pages = move_freepages_block(zone, page, migratetype); 124 nr_pages = move_freepages_block(zone, page, migratetype, NULL);
124 __mod_zone_freepage_state(zone, nr_pages, migratetype); 125 __mod_zone_freepage_state(zone, nr_pages, migratetype);
125 } 126 }
126 set_pageblock_migratetype(page, migratetype); 127 set_pageblock_migratetype(page, migratetype);
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index aa1c415f4abd..58f6c78f1dad 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -31,6 +31,7 @@
31#include <linux/cpumask.h> 31#include <linux/cpumask.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/mm.h>
34 35
35#ifdef CONFIG_SWAP 36#ifdef CONFIG_SWAP
36 37
@@ -119,16 +120,18 @@ static int alloc_swap_slot_cache(unsigned int cpu)
119 120
120 /* 121 /*
121 * Do allocation outside swap_slots_cache_mutex 122 * Do allocation outside swap_slots_cache_mutex
122 * as vzalloc could trigger reclaim and get_swap_page, 123 * as kvzalloc could trigger reclaim and get_swap_page,
123 * which can lock swap_slots_cache_mutex. 124 * which can lock swap_slots_cache_mutex.
124 */ 125 */
125 slots = vzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE); 126 slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
127 GFP_KERNEL);
126 if (!slots) 128 if (!slots)
127 return -ENOMEM; 129 return -ENOMEM;
128 130
129 slots_ret = vzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE); 131 slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
132 GFP_KERNEL);
130 if (!slots_ret) { 133 if (!slots_ret) {
131 vfree(slots); 134 kvfree(slots);
132 return -ENOMEM; 135 return -ENOMEM;
133 } 136 }
134 137
@@ -152,9 +155,9 @@ static int alloc_swap_slot_cache(unsigned int cpu)
152out: 155out:
153 mutex_unlock(&swap_slots_cache_mutex); 156 mutex_unlock(&swap_slots_cache_mutex);
154 if (slots) 157 if (slots)
155 vfree(slots); 158 kvfree(slots);
156 if (slots_ret) 159 if (slots_ret)
157 vfree(slots_ret); 160 kvfree(slots_ret);
158 return 0; 161 return 0;
159} 162}
160 163
@@ -171,7 +174,7 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
171 cache->cur = 0; 174 cache->cur = 0;
172 cache->nr = 0; 175 cache->nr = 0;
173 if (free_slots && cache->slots) { 176 if (free_slots && cache->slots) {
174 vfree(cache->slots); 177 kvfree(cache->slots);
175 cache->slots = NULL; 178 cache->slots = NULL;
176 } 179 }
177 mutex_unlock(&cache->alloc_lock); 180 mutex_unlock(&cache->alloc_lock);
@@ -186,7 +189,7 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
186 } 189 }
187 spin_unlock_irq(&cache->free_lock); 190 spin_unlock_irq(&cache->free_lock);
188 if (slots) 191 if (slots)
189 vfree(slots); 192 kvfree(slots);
190 } 193 }
191} 194}
192 195
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7bfb9bd1ca21..539b8885e3d1 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -523,7 +523,7 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
523 unsigned int i, nr; 523 unsigned int i, nr;
524 524
525 nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES); 525 nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
526 spaces = vzalloc(sizeof(struct address_space) * nr); 526 spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
527 if (!spaces) 527 if (!spaces)
528 return -ENOMEM; 528 return -ENOMEM;
529 for (i = 0; i < nr; i++) { 529 for (i = 0; i < nr; i++) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b86b2aca3fb9..4f6cba1b6632 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2270,8 +2270,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2270 free_percpu(p->percpu_cluster); 2270 free_percpu(p->percpu_cluster);
2271 p->percpu_cluster = NULL; 2271 p->percpu_cluster = NULL;
2272 vfree(swap_map); 2272 vfree(swap_map);
2273 vfree(cluster_info); 2273 kvfree(cluster_info);
2274 vfree(frontswap_map); 2274 kvfree(frontswap_map);
2275 /* Destroy swap account information */ 2275 /* Destroy swap account information */
2276 swap_cgroup_swapoff(p->type); 2276 swap_cgroup_swapoff(p->type);
2277 exit_swap_address_space(p->type); 2277 exit_swap_address_space(p->type);
@@ -2794,7 +2794,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2794 p->cluster_next = 1 + (prandom_u32() % p->highest_bit); 2794 p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
2795 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 2795 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2796 2796
2797 cluster_info = vzalloc(nr_cluster * sizeof(*cluster_info)); 2797 cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
2798 GFP_KERNEL);
2798 if (!cluster_info) { 2799 if (!cluster_info) {
2799 error = -ENOMEM; 2800 error = -ENOMEM;
2800 goto bad_swap; 2801 goto bad_swap;
@@ -2827,7 +2828,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2827 } 2828 }
2828 /* frontswap enabled? set up bit-per-page map for frontswap */ 2829 /* frontswap enabled? set up bit-per-page map for frontswap */
2829 if (IS_ENABLED(CONFIG_FRONTSWAP)) 2830 if (IS_ENABLED(CONFIG_FRONTSWAP))
2830 frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long)); 2831 frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
2832 GFP_KERNEL);
2831 2833
2832 if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 2834 if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
2833 /* 2835 /*
diff --git a/mm/util.c b/mm/util.c
index 656dc5e37a87..718154debc87 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -329,6 +329,63 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
329} 329}
330EXPORT_SYMBOL(vm_mmap); 330EXPORT_SYMBOL(vm_mmap);
331 331
332/**
333 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
334 * failure, fall back to non-contiguous (vmalloc) allocation.
335 * @size: size of the request.
336 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
337 * @node: numa node to allocate from
338 *
339 * Uses kmalloc to get the memory but if the allocation fails then falls back
340 * to the vmalloc allocator. Use kvfree for freeing the memory.
341 *
342 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_REPEAT
343 * is supported only for large (>32kB) allocations, and it should be used only if
344 * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks.
345 *
346 * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
347 */
348void *kvmalloc_node(size_t size, gfp_t flags, int node)
349{
350 gfp_t kmalloc_flags = flags;
351 void *ret;
352
353 /*
354 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
355 * so the given set of flags has to be compatible.
356 */
357 WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
358
359 /*
360 * Make sure that larger requests are not too disruptive - no OOM
361 * killer and no allocation failure warnings as we have a fallback
362 */
363 if (size > PAGE_SIZE) {
364 kmalloc_flags |= __GFP_NOWARN;
365
366 /*
367 * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly
368 * requests because there is no other way to tell the allocator
369 * that we want to fail rather than retry endlessly.
370 */
371 if (!(kmalloc_flags & __GFP_REPEAT) ||
372 (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
373 kmalloc_flags |= __GFP_NORETRY;
374 }
375
376 ret = kmalloc_node(size, kmalloc_flags, node);
377
378 /*
379 * It doesn't really make sense to fallback to vmalloc for sub page
380 * requests
381 */
382 if (ret || size <= PAGE_SIZE)
383 return ret;
384
385 return __vmalloc_node_flags(size, node, flags);
386}
387EXPORT_SYMBOL(kvmalloc_node);
388
332void kvfree(const void *addr) 389void kvfree(const void *addr)
333{ 390{
334 if (is_vmalloc_addr(addr)) 391 if (is_vmalloc_addr(addr))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b52aeed3f58e..1dda6d8a200a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1649,16 +1649,13 @@ void *vmap(struct page **pages, unsigned int count,
1649} 1649}
1650EXPORT_SYMBOL(vmap); 1650EXPORT_SYMBOL(vmap);
1651 1651
1652static void *__vmalloc_node(unsigned long size, unsigned long align,
1653 gfp_t gfp_mask, pgprot_t prot,
1654 int node, const void *caller);
1655static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 1652static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1656 pgprot_t prot, int node) 1653 pgprot_t prot, int node)
1657{ 1654{
1658 struct page **pages; 1655 struct page **pages;
1659 unsigned int nr_pages, array_size, i; 1656 unsigned int nr_pages, array_size, i;
1660 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 1657 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1661 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 1658 const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
1662 1659
1663 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 1660 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1664 array_size = (nr_pages * sizeof(struct page *)); 1661 array_size = (nr_pages * sizeof(struct page *));
@@ -1786,8 +1783,15 @@ fail:
1786 * Allocate enough pages to cover @size from the page level 1783 * Allocate enough pages to cover @size from the page level
1787 * allocator with @gfp_mask flags. Map them into contiguous 1784 * allocator with @gfp_mask flags. Map them into contiguous
1788 * kernel virtual space, using a pagetable protection of @prot. 1785 * kernel virtual space, using a pagetable protection of @prot.
1786 *
1787 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_REPEAT
1788 * and __GFP_NOFAIL are not supported
1789 *
1790 * Any use of gfp flags outside of GFP_KERNEL should be consulted
1791 * with mm people.
1792 *
1789 */ 1793 */
1790static void *__vmalloc_node(unsigned long size, unsigned long align, 1794void *__vmalloc_node(unsigned long size, unsigned long align,
1791 gfp_t gfp_mask, pgprot_t prot, 1795 gfp_t gfp_mask, pgprot_t prot,
1792 int node, const void *caller) 1796 int node, const void *caller)
1793{ 1797{
@@ -1802,13 +1806,6 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1802} 1806}
1803EXPORT_SYMBOL(__vmalloc); 1807EXPORT_SYMBOL(__vmalloc);
1804 1808
1805static inline void *__vmalloc_node_flags(unsigned long size,
1806 int node, gfp_t flags)
1807{
1808 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1809 node, __builtin_return_address(0));
1810}
1811
1812/** 1809/**
1813 * vmalloc - allocate virtually contiguous memory 1810 * vmalloc - allocate virtually contiguous memory
1814 * @size: allocation size 1811 * @size: allocation size
@@ -1821,7 +1818,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
1821void *vmalloc(unsigned long size) 1818void *vmalloc(unsigned long size)
1822{ 1819{
1823 return __vmalloc_node_flags(size, NUMA_NO_NODE, 1820 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1824 GFP_KERNEL | __GFP_HIGHMEM); 1821 GFP_KERNEL);
1825} 1822}
1826EXPORT_SYMBOL(vmalloc); 1823EXPORT_SYMBOL(vmalloc);
1827 1824
@@ -1838,7 +1835,7 @@ EXPORT_SYMBOL(vmalloc);
1838void *vzalloc(unsigned long size) 1835void *vzalloc(unsigned long size)
1839{ 1836{
1840 return __vmalloc_node_flags(size, NUMA_NO_NODE, 1837 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1841 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1838 GFP_KERNEL | __GFP_ZERO);
1842} 1839}
1843EXPORT_SYMBOL(vzalloc); 1840EXPORT_SYMBOL(vzalloc);
1844 1841
@@ -1855,7 +1852,7 @@ void *vmalloc_user(unsigned long size)
1855 void *ret; 1852 void *ret;
1856 1853
1857 ret = __vmalloc_node(size, SHMLBA, 1854 ret = __vmalloc_node(size, SHMLBA,
1858 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 1855 GFP_KERNEL | __GFP_ZERO,
1859 PAGE_KERNEL, NUMA_NO_NODE, 1856 PAGE_KERNEL, NUMA_NO_NODE,
1860 __builtin_return_address(0)); 1857 __builtin_return_address(0));
1861 if (ret) { 1858 if (ret) {
@@ -1879,7 +1876,7 @@ EXPORT_SYMBOL(vmalloc_user);
1879 */ 1876 */
1880void *vmalloc_node(unsigned long size, int node) 1877void *vmalloc_node(unsigned long size, int node)
1881{ 1878{
1882 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 1879 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
1883 node, __builtin_return_address(0)); 1880 node, __builtin_return_address(0));
1884} 1881}
1885EXPORT_SYMBOL(vmalloc_node); 1882EXPORT_SYMBOL(vmalloc_node);
@@ -1899,7 +1896,7 @@ EXPORT_SYMBOL(vmalloc_node);
1899void *vzalloc_node(unsigned long size, int node) 1896void *vzalloc_node(unsigned long size, int node)
1900{ 1897{
1901 return __vmalloc_node_flags(size, node, 1898 return __vmalloc_node_flags(size, node,
1902 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1899 GFP_KERNEL | __GFP_ZERO);
1903} 1900}
1904EXPORT_SYMBOL(vzalloc_node); 1901EXPORT_SYMBOL(vzalloc_node);
1905 1902
@@ -1921,7 +1918,7 @@ EXPORT_SYMBOL(vzalloc_node);
1921 1918
1922void *vmalloc_exec(unsigned long size) 1919void *vmalloc_exec(unsigned long size)
1923{ 1920{
1924 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 1921 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
1925 NUMA_NO_NODE, __builtin_return_address(0)); 1922 NUMA_NO_NODE, __builtin_return_address(0));
1926} 1923}
1927 1924
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4e7ed65842af..2f45c0520f43 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3036,6 +3036,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3036 struct zonelist *zonelist; 3036 struct zonelist *zonelist;
3037 unsigned long nr_reclaimed; 3037 unsigned long nr_reclaimed;
3038 int nid; 3038 int nid;
3039 unsigned int noreclaim_flag;
3039 struct scan_control sc = { 3040 struct scan_control sc = {
3040 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 3041 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3041 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 3042 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
@@ -3062,9 +3063,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3062 sc.gfp_mask, 3063 sc.gfp_mask,
3063 sc.reclaim_idx); 3064 sc.reclaim_idx);
3064 3065
3065 current->flags |= PF_MEMALLOC; 3066 noreclaim_flag = memalloc_noreclaim_save();
3066 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 3067 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3067 current->flags &= ~PF_MEMALLOC; 3068 memalloc_noreclaim_restore(noreclaim_flag);
3068 3069
3069 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 3070 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3070 3071
@@ -3589,8 +3590,9 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3589 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3590 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3590 struct task_struct *p = current; 3591 struct task_struct *p = current;
3591 unsigned long nr_reclaimed; 3592 unsigned long nr_reclaimed;
3593 unsigned int noreclaim_flag;
3592 3594
3593 p->flags |= PF_MEMALLOC; 3595 noreclaim_flag = memalloc_noreclaim_save();
3594 lockdep_set_current_reclaim_state(sc.gfp_mask); 3596 lockdep_set_current_reclaim_state(sc.gfp_mask);
3595 reclaim_state.reclaimed_slab = 0; 3597 reclaim_state.reclaimed_slab = 0;
3596 p->reclaim_state = &reclaim_state; 3598 p->reclaim_state = &reclaim_state;
@@ -3599,7 +3601,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3599 3601
3600 p->reclaim_state = NULL; 3602 p->reclaim_state = NULL;
3601 lockdep_clear_current_reclaim_state(); 3603 lockdep_clear_current_reclaim_state();
3602 p->flags &= ~PF_MEMALLOC; 3604 memalloc_noreclaim_restore(noreclaim_flag);
3603 3605
3604 return nr_reclaimed; 3606 return nr_reclaimed;
3605} 3607}
@@ -3764,6 +3766,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
3764 struct task_struct *p = current; 3766 struct task_struct *p = current;
3765 struct reclaim_state reclaim_state; 3767 struct reclaim_state reclaim_state;
3766 int classzone_idx = gfp_zone(gfp_mask); 3768 int classzone_idx = gfp_zone(gfp_mask);
3769 unsigned int noreclaim_flag;
3767 struct scan_control sc = { 3770 struct scan_control sc = {
3768 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 3771 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3769 .gfp_mask = (gfp_mask = current_gfp_context(gfp_mask)), 3772 .gfp_mask = (gfp_mask = current_gfp_context(gfp_mask)),
@@ -3781,7 +3784,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
3781 * and we also need to be able to write out pages for RECLAIM_WRITE 3784 * and we also need to be able to write out pages for RECLAIM_WRITE
3782 * and RECLAIM_UNMAP. 3785 * and RECLAIM_UNMAP.
3783 */ 3786 */
3784 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 3787 noreclaim_flag = memalloc_noreclaim_save();
3788 p->flags |= PF_SWAPWRITE;
3785 lockdep_set_current_reclaim_state(gfp_mask); 3789 lockdep_set_current_reclaim_state(gfp_mask);
3786 reclaim_state.reclaimed_slab = 0; 3790 reclaim_state.reclaimed_slab = 0;
3787 p->reclaim_state = &reclaim_state; 3791 p->reclaim_state = &reclaim_state;
@@ -3797,7 +3801,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
3797 } 3801 }
3798 3802
3799 p->reclaim_state = NULL; 3803 p->reclaim_state = NULL;
3800 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3804 current->flags &= ~PF_SWAPWRITE;
3805 memalloc_noreclaim_restore(noreclaim_flag);
3801 lockdep_clear_current_reclaim_state(); 3806 lockdep_clear_current_reclaim_state();
3802 return sc.nr_reclaimed >= nr_pages; 3807 return sc.nr_reclaimed >= nr_pages;
3803} 3808}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 108533859a53..4eb773ccce11 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -187,7 +187,7 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
187 return ptr; 187 return ptr;
188 } 188 }
189 189
190 return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL); 190 return __vmalloc(size, flags, PAGE_KERNEL);
191} 191}
192 192
193 193
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f76bb3332613..5766a6c896c4 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1386,8 +1386,9 @@ static void prepare_write_keepalive(struct ceph_connection *con)
1386 dout("prepare_write_keepalive %p\n", con); 1386 dout("prepare_write_keepalive %p\n", con);
1387 con_out_kvec_reset(con); 1387 con_out_kvec_reset(con);
1388 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) { 1388 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
1389 struct timespec now = CURRENT_TIME; 1389 struct timespec now;
1390 1390
1391 ktime_get_real_ts(&now);
1391 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2); 1392 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
1392 ceph_encode_timespec(&con->out_temp_keepalive2, &now); 1393 ceph_encode_timespec(&con->out_temp_keepalive2, &now);
1393 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2), 1394 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
@@ -3176,8 +3177,9 @@ bool ceph_con_keepalive_expired(struct ceph_connection *con,
3176{ 3177{
3177 if (interval > 0 && 3178 if (interval > 0 &&
3178 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) { 3179 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
3179 struct timespec now = CURRENT_TIME; 3180 struct timespec now;
3180 struct timespec ts; 3181 struct timespec ts;
3182 ktime_get_real_ts(&now);
3181 jiffies_to_timespec(interval, &ts); 3183 jiffies_to_timespec(interval, &ts);
3182 ts = timespec_add(con->last_keepalive_ack, ts); 3184 ts = timespec_add(con->last_keepalive_ack, ts);
3183 return timespec_compare(&now, &ts) >= 0; 3185 return timespec_compare(&now, &ts) >= 0;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e15ea9e4c495..242d7c0d92f8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -3574,7 +3574,7 @@ ceph_osdc_watch(struct ceph_osd_client *osdc,
3574 ceph_oid_copy(&lreq->t.base_oid, oid); 3574 ceph_oid_copy(&lreq->t.base_oid, oid);
3575 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 3575 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3576 lreq->t.flags = CEPH_OSD_FLAG_WRITE; 3576 lreq->t.flags = CEPH_OSD_FLAG_WRITE;
3577 lreq->mtime = CURRENT_TIME; 3577 ktime_get_real_ts(&lreq->mtime);
3578 3578
3579 lreq->reg_req = alloc_linger_request(lreq); 3579 lreq->reg_req = alloc_linger_request(lreq);
3580 if (!lreq->reg_req) { 3580 if (!lreq->reg_req) {
@@ -3632,7 +3632,7 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3632 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 3632 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3633 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 3633 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3634 req->r_flags = CEPH_OSD_FLAG_WRITE; 3634 req->r_flags = CEPH_OSD_FLAG_WRITE;
3635 req->r_mtime = CURRENT_TIME; 3635 ktime_get_real_ts(&req->r_mtime);
3636 osd_req_op_watch_init(req, 0, lreq->linger_id, 3636 osd_req_op_watch_init(req, 0, lreq->linger_id,
3637 CEPH_OSD_WATCH_OP_UNWATCH); 3637 CEPH_OSD_WATCH_OP_UNWATCH);
3638 3638
diff --git a/net/core/dev.c b/net/core/dev.c
index d07aa5ffb511..96cf83da0d66 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -81,6 +81,7 @@
81#include <linux/hash.h> 81#include <linux/hash.h>
82#include <linux/slab.h> 82#include <linux/slab.h>
83#include <linux/sched.h> 83#include <linux/sched.h>
84#include <linux/sched/mm.h>
84#include <linux/mutex.h> 85#include <linux/mutex.h>
85#include <linux/string.h> 86#include <linux/string.h>
86#include <linux/mm.h> 87#include <linux/mm.h>
@@ -4235,7 +4236,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
4235 int ret; 4236 int ret;
4236 4237
4237 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 4238 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4238 unsigned long pflags = current->flags; 4239 unsigned int noreclaim_flag;
4239 4240
4240 /* 4241 /*
4241 * PFMEMALLOC skbs are special, they should 4242 * PFMEMALLOC skbs are special, they should
@@ -4246,9 +4247,9 @@ static int __netif_receive_skb(struct sk_buff *skb)
4246 * Use PF_MEMALLOC as this saves us from propagating the allocation 4247 * Use PF_MEMALLOC as this saves us from propagating the allocation
4247 * context down to all allocation sites. 4248 * context down to all allocation sites.
4248 */ 4249 */
4249 current->flags |= PF_MEMALLOC; 4250 noreclaim_flag = memalloc_noreclaim_save();
4250 ret = __netif_receive_skb_core(skb, true); 4251 ret = __netif_receive_skb_core(skb, true);
4251 current_restore_flags(pflags, PF_MEMALLOC); 4252 memalloc_noreclaim_restore(noreclaim_flag);
4252 } else 4253 } else
4253 ret = __netif_receive_skb_core(skb, false); 4254 ret = __netif_receive_skb_core(skb, false);
4254 4255
@@ -7264,12 +7265,10 @@ static int netif_alloc_rx_queues(struct net_device *dev)
7264 7265
7265 BUG_ON(count < 1); 7266 BUG_ON(count < 1);
7266 7267
7267 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 7268 rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7268 if (!rx) { 7269 if (!rx)
7269 rx = vzalloc(sz); 7270 return -ENOMEM;
7270 if (!rx) 7271
7271 return -ENOMEM;
7272 }
7273 dev->_rx = rx; 7272 dev->_rx = rx;
7274 7273
7275 for (i = 0; i < count; i++) 7274 for (i = 0; i < count; i++)
@@ -7306,12 +7305,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
7306 if (count < 1 || count > 0xffff) 7305 if (count < 1 || count > 0xffff)
7307 return -EINVAL; 7306 return -EINVAL;
7308 7307
7309 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 7308 tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7310 if (!tx) { 7309 if (!tx)
7311 tx = vzalloc(sz); 7310 return -ENOMEM;
7312 if (!tx) 7311
7313 return -ENOMEM;
7314 }
7315 dev->_tx = tx; 7312 dev->_tx = tx;
7316 7313
7317 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 7314 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
@@ -7845,9 +7842,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7845 /* ensure 32-byte alignment of whole construct */ 7842 /* ensure 32-byte alignment of whole construct */
7846 alloc_size += NETDEV_ALIGN - 1; 7843 alloc_size += NETDEV_ALIGN - 1;
7847 7844
7848 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 7845 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
7849 if (!p)
7850 p = vzalloc(alloc_size);
7851 if (!p) 7846 if (!p)
7852 return NULL; 7847 return NULL;
7853 7848
diff --git a/net/core/sock.c b/net/core/sock.c
index b5baeb9cb0fb..79c6aee6af9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -102,6 +102,7 @@
102#include <linux/proc_fs.h> 102#include <linux/proc_fs.h>
103#include <linux/seq_file.h> 103#include <linux/seq_file.h>
104#include <linux/sched.h> 104#include <linux/sched.h>
105#include <linux/sched/mm.h>
105#include <linux/timer.h> 106#include <linux/timer.h>
106#include <linux/string.h> 107#include <linux/string.h>
107#include <linux/sockios.h> 108#include <linux/sockios.h>
@@ -372,14 +373,14 @@ EXPORT_SYMBOL_GPL(sk_clear_memalloc);
372int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 373int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
373{ 374{
374 int ret; 375 int ret;
375 unsigned long pflags = current->flags; 376 unsigned int noreclaim_flag;
376 377
377 /* these should have been dropped before queueing */ 378 /* these should have been dropped before queueing */
378 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); 379 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
379 380
380 current->flags |= PF_MEMALLOC; 381 noreclaim_flag = memalloc_noreclaim_save();
381 ret = sk->sk_backlog_rcv(sk, skb); 382 ret = sk->sk_backlog_rcv(sk, skb);
382 current_restore_flags(pflags, PF_MEMALLOC); 383 memalloc_noreclaim_restore(noreclaim_flag);
383 384
384 return ret; 385 return ret;
385} 386}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9afa2a5030b2..405483a07efc 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2361,7 +2361,8 @@ MODULE_AUTHOR("Linux DECnet Project Team");
2361MODULE_LICENSE("GPL"); 2361MODULE_LICENSE("GPL");
2362MODULE_ALIAS_NETPROTO(PF_DECnet); 2362MODULE_ALIAS_NETPROTO(PF_DECnet);
2363 2363
2364static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n"; 2364static const char banner[] __initconst = KERN_INFO
2365"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2365 2366
2366static int __init decnet_init(void) 2367static int __init decnet_init(void)
2367{ 2368{
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8bea74298173..e9a59d2d91d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -678,11 +678,7 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
678 /* no more locks than number of hash buckets */ 678 /* no more locks than number of hash buckets */
679 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 679 nblocks = min(nblocks, hashinfo->ehash_mask + 1);
680 680
681 hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, 681 hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
682 GFP_KERNEL | __GFP_NOWARN);
683 if (!hashinfo->ehash_locks)
684 hashinfo->ehash_locks = vmalloc(nblocks * locksz);
685
686 if (!hashinfo->ehash_locks) 682 if (!hashinfo->ehash_locks)
687 return -ENOMEM; 683 return -ENOMEM;
688 684
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 9d0d4f39e42b..653bbd67e3a3 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1011,10 +1011,7 @@ static int __net_init tcp_net_metrics_init(struct net *net)
1011 tcp_metrics_hash_log = order_base_2(slots); 1011 tcp_metrics_hash_log = order_base_2(slots);
1012 size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log; 1012 size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
1013 1013
1014 tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 1014 tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
1015 if (!tcp_metrics_hash)
1016 tcp_metrics_hash = vzalloc(size);
1017
1018 if (!tcp_metrics_hash) 1015 if (!tcp_metrics_hash)
1019 return -ENOMEM; 1016 return -ENOMEM;
1020 1017
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index af8f52ee7180..2fd5ca151dcf 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -41,13 +41,7 @@ static int alloc_ila_locks(struct ila_net *ilan)
41 size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU); 41 size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU);
42 42
43 if (sizeof(spinlock_t) != 0) { 43 if (sizeof(spinlock_t) != 0) {
44#ifdef CONFIG_NUMA 44 ilan->locks = kvmalloc(size * sizeof(spinlock_t), GFP_KERNEL);
45 if (size * sizeof(spinlock_t) > PAGE_SIZE)
46 ilan->locks = vmalloc(size * sizeof(spinlock_t));
47 else
48#endif
49 ilan->locks = kmalloc_array(size, sizeof(spinlock_t),
50 GFP_KERNEL);
51 if (!ilan->locks) 45 if (!ilan->locks)
52 return -ENOMEM; 46 return -ENOMEM;
53 for (i = 0; i < size; i++) 47 for (i = 0; i < size; i++)
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 088e2b459d0f..257ec66009da 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2005,10 +2005,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
2005 unsigned index; 2005 unsigned index;
2006 2006
2007 if (size) { 2007 if (size) {
2008 labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 2008 labels = kvzalloc(size, GFP_KERNEL);
2009 if (!labels)
2010 labels = vzalloc(size);
2011
2012 if (!labels) 2009 if (!labels)
2013 goto nolabels; 2010 goto nolabels;
2014 } 2011 }
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index f134d384852f..8876b7da6884 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -763,17 +763,8 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
763 */ 763 */
764unsigned int *xt_alloc_entry_offsets(unsigned int size) 764unsigned int *xt_alloc_entry_offsets(unsigned int size)
765{ 765{
766 unsigned int *off; 766 return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
767 767
768 off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
769
770 if (off)
771 return off;
772
773 if (size < (SIZE_MAX / sizeof(unsigned int)))
774 off = vmalloc(size * sizeof(unsigned int));
775
776 return off;
777} 768}
778EXPORT_SYMBOL(xt_alloc_entry_offsets); 769EXPORT_SYMBOL(xt_alloc_entry_offsets);
779 770
@@ -1007,8 +998,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
1007 if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 998 if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
1008 info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 999 info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
1009 if (!info) { 1000 if (!info) {
1010 info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | 1001 info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1011 __GFP_NORETRY | __GFP_HIGHMEM,
1012 PAGE_KERNEL); 1002 PAGE_KERNEL);
1013 if (!info) 1003 if (!info)
1014 return NULL; 1004 return NULL;
@@ -1116,7 +1106,7 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
1116 1106
1117 size = sizeof(void **) * nr_cpu_ids; 1107 size = sizeof(void **) * nr_cpu_ids;
1118 if (size > PAGE_SIZE) 1108 if (size > PAGE_SIZE)
1119 i->jumpstack = vzalloc(size); 1109 i->jumpstack = kvzalloc(size, GFP_KERNEL);
1120 else 1110 else
1121 i->jumpstack = kzalloc(size, GFP_KERNEL); 1111 i->jumpstack = kzalloc(size, GFP_KERNEL);
1122 if (i->jumpstack == NULL) 1112 if (i->jumpstack == NULL)
@@ -1138,12 +1128,8 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
1138 */ 1128 */
1139 size = sizeof(void *) * i->stacksize * 2u; 1129 size = sizeof(void *) * i->stacksize * 2u;
1140 for_each_possible_cpu(cpu) { 1130 for_each_possible_cpu(cpu) {
1141 if (size > PAGE_SIZE) 1131 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1142 i->jumpstack[cpu] = vmalloc_node(size, 1132 cpu_to_node(cpu));
1143 cpu_to_node(cpu));
1144 else
1145 i->jumpstack[cpu] = kmalloc_node(size,
1146 GFP_KERNEL, cpu_to_node(cpu));
1147 if (i->jumpstack[cpu] == NULL) 1133 if (i->jumpstack[cpu] == NULL)
1148 /* 1134 /*
1149 * Freeing will be done later on by the callers. The 1135 * Freeing will be done later on by the callers. The
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 37d581a31cff..3f6c4fa78bdb 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -388,10 +388,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
388 } 388 }
389 389
390 sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size; 390 sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
391 if (sz <= PAGE_SIZE) 391 t = kvzalloc(sz, GFP_KERNEL);
392 t = kzalloc(sz, GFP_KERNEL);
393 else
394 t = vzalloc(sz);
395 if (t == NULL) { 392 if (t == NULL) {
396 ret = -ENOMEM; 393 ret = -ENOMEM;
397 goto out; 394 goto out;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index d00f4c7c2f3a..b30a2c70bd48 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -376,10 +376,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
376 if (mask != q->tab_mask) { 376 if (mask != q->tab_mask) {
377 struct sk_buff **ntab; 377 struct sk_buff **ntab;
378 378
379 ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), 379 ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO);
380 GFP_KERNEL | __GFP_NOWARN);
381 if (!ntab)
382 ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
383 if (!ntab) 380 if (!ntab)
384 return -ENOMEM; 381 return -ENOMEM;
385 382
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index da4f67bda0ee..b488721a0059 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -624,16 +624,6 @@ static void fq_rehash(struct fq_sched_data *q,
624 q->stat_gc_flows += fcnt; 624 q->stat_gc_flows += fcnt;
625} 625}
626 626
627static void *fq_alloc_node(size_t sz, int node)
628{
629 void *ptr;
630
631 ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
632 if (!ptr)
633 ptr = vmalloc_node(sz, node);
634 return ptr;
635}
636
637static void fq_free(void *addr) 627static void fq_free(void *addr)
638{ 628{
639 kvfree(addr); 629 kvfree(addr);
@@ -650,7 +640,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
650 return 0; 640 return 0;
651 641
652 /* If XPS was setup, we can allocate memory on right NUMA node */ 642 /* If XPS was setup, we can allocate memory on right NUMA node */
653 array = fq_alloc_node(sizeof(struct rb_root) << log, 643 array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_REPEAT,
654 netdev_queue_numa_node_read(sch->dev_queue)); 644 netdev_queue_numa_node_read(sch->dev_queue));
655 if (!array) 645 if (!array)
656 return -ENOMEM; 646 return -ENOMEM;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 18bbb5476c83..9201abce928c 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -446,27 +446,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
446 return 0; 446 return 0;
447} 447}
448 448
449static void *fq_codel_zalloc(size_t sz)
450{
451 void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
452
453 if (!ptr)
454 ptr = vzalloc(sz);
455 return ptr;
456}
457
458static void fq_codel_free(void *addr)
459{
460 kvfree(addr);
461}
462
463static void fq_codel_destroy(struct Qdisc *sch) 449static void fq_codel_destroy(struct Qdisc *sch)
464{ 450{
465 struct fq_codel_sched_data *q = qdisc_priv(sch); 451 struct fq_codel_sched_data *q = qdisc_priv(sch);
466 452
467 tcf_destroy_chain(&q->filter_list); 453 tcf_destroy_chain(&q->filter_list);
468 fq_codel_free(q->backlogs); 454 kvfree(q->backlogs);
469 fq_codel_free(q->flows); 455 kvfree(q->flows);
470} 456}
471 457
472static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) 458static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
@@ -493,13 +479,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
493 } 479 }
494 480
495 if (!q->flows) { 481 if (!q->flows) {
496 q->flows = fq_codel_zalloc(q->flows_cnt * 482 q->flows = kvzalloc(q->flows_cnt *
497 sizeof(struct fq_codel_flow)); 483 sizeof(struct fq_codel_flow), GFP_KERNEL);
498 if (!q->flows) 484 if (!q->flows)
499 return -ENOMEM; 485 return -ENOMEM;
500 q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32)); 486 q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
501 if (!q->backlogs) { 487 if (!q->backlogs) {
502 fq_codel_free(q->flows); 488 kvfree(q->flows);
503 return -ENOMEM; 489 return -ENOMEM;
504 } 490 }
505 for (i = 0; i < q->flows_cnt; i++) { 491 for (i = 0; i < q->flows_cnt; i++) {
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c19d346e6c5a..51d3ba682af9 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -467,29 +467,14 @@ static void hhf_reset(struct Qdisc *sch)
467 rtnl_kfree_skbs(skb, skb); 467 rtnl_kfree_skbs(skb, skb);
468} 468}
469 469
470static void *hhf_zalloc(size_t sz)
471{
472 void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
473
474 if (!ptr)
475 ptr = vzalloc(sz);
476
477 return ptr;
478}
479
480static void hhf_free(void *addr)
481{
482 kvfree(addr);
483}
484
485static void hhf_destroy(struct Qdisc *sch) 470static void hhf_destroy(struct Qdisc *sch)
486{ 471{
487 int i; 472 int i;
488 struct hhf_sched_data *q = qdisc_priv(sch); 473 struct hhf_sched_data *q = qdisc_priv(sch);
489 474
490 for (i = 0; i < HHF_ARRAYS_CNT; i++) { 475 for (i = 0; i < HHF_ARRAYS_CNT; i++) {
491 hhf_free(q->hhf_arrays[i]); 476 kvfree(q->hhf_arrays[i]);
492 hhf_free(q->hhf_valid_bits[i]); 477 kvfree(q->hhf_valid_bits[i]);
493 } 478 }
494 479
495 for (i = 0; i < HH_FLOWS_CNT; i++) { 480 for (i = 0; i < HH_FLOWS_CNT; i++) {
@@ -503,7 +488,7 @@ static void hhf_destroy(struct Qdisc *sch)
503 kfree(flow); 488 kfree(flow);
504 } 489 }
505 } 490 }
506 hhf_free(q->hh_flows); 491 kvfree(q->hh_flows);
507} 492}
508 493
509static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = { 494static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
@@ -609,8 +594,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
609 594
610 if (!q->hh_flows) { 595 if (!q->hh_flows) {
611 /* Initialize heavy-hitter flow table. */ 596 /* Initialize heavy-hitter flow table. */
612 q->hh_flows = hhf_zalloc(HH_FLOWS_CNT * 597 q->hh_flows = kvzalloc(HH_FLOWS_CNT *
613 sizeof(struct list_head)); 598 sizeof(struct list_head), GFP_KERNEL);
614 if (!q->hh_flows) 599 if (!q->hh_flows)
615 return -ENOMEM; 600 return -ENOMEM;
616 for (i = 0; i < HH_FLOWS_CNT; i++) 601 for (i = 0; i < HH_FLOWS_CNT; i++)
@@ -624,8 +609,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
624 609
625 /* Initialize heavy-hitter filter arrays. */ 610 /* Initialize heavy-hitter filter arrays. */
626 for (i = 0; i < HHF_ARRAYS_CNT; i++) { 611 for (i = 0; i < HHF_ARRAYS_CNT; i++) {
627 q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * 612 q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN *
628 sizeof(u32)); 613 sizeof(u32), GFP_KERNEL);
629 if (!q->hhf_arrays[i]) { 614 if (!q->hhf_arrays[i]) {
630 /* Note: hhf_destroy() will be called 615 /* Note: hhf_destroy() will be called
631 * by our caller. 616 * by our caller.
@@ -637,8 +622,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
637 622
638 /* Initialize valid bits of heavy-hitter filter arrays. */ 623 /* Initialize valid bits of heavy-hitter filter arrays. */
639 for (i = 0; i < HHF_ARRAYS_CNT; i++) { 624 for (i = 0; i < HHF_ARRAYS_CNT; i++) {
640 q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / 625 q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
641 BITS_PER_BYTE); 626 BITS_PER_BYTE, GFP_KERNEL);
642 if (!q->hhf_valid_bits[i]) { 627 if (!q->hhf_valid_bits[i]) {
643 /* Note: hhf_destroy() will be called 628 /* Note: hhf_destroy() will be called
644 * by our caller. 629 * by our caller.
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f0ce4780f395..1b3dd6190e93 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -702,15 +702,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
702 spinlock_t *root_lock; 702 spinlock_t *root_lock;
703 struct disttable *d; 703 struct disttable *d;
704 int i; 704 int i;
705 size_t s;
706 705
707 if (n > NETEM_DIST_MAX) 706 if (n > NETEM_DIST_MAX)
708 return -EINVAL; 707 return -EINVAL;
709 708
710 s = sizeof(struct disttable) + n * sizeof(s16); 709 d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
711 d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
712 if (!d)
713 d = vmalloc(s);
714 if (!d) 710 if (!d)
715 return -ENOMEM; 711 return -ENOMEM;
716 712
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b00e02c139de..332d94be6e1c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -685,11 +685,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
685 685
686static void *sfq_alloc(size_t sz) 686static void *sfq_alloc(size_t sz)
687{ 687{
688 void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); 688 return kvmalloc(sz, GFP_KERNEL);
689
690 if (!ptr)
691 ptr = vmalloc(sz);
692 return ptr;
693} 689}
694 690
695static void sfq_free(void *addr) 691static void sfq_free(void *addr)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index baa3c7be04ad..4b9569fa931b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -55,6 +55,7 @@ my $spelling_file = "$D/spelling.txt";
55my $codespell = 0; 55my $codespell = 0;
56my $codespellfile = "/usr/share/codespell/dictionary.txt"; 56my $codespellfile = "/usr/share/codespell/dictionary.txt";
57my $conststructsfile = "$D/const_structs.checkpatch"; 57my $conststructsfile = "$D/const_structs.checkpatch";
58my $typedefsfile = "";
58my $color = 1; 59my $color = 1;
59my $allow_c99_comments = 1; 60my $allow_c99_comments = 1;
60 61
@@ -113,6 +114,7 @@ Options:
113 --codespell Use the codespell dictionary for spelling/typos 114 --codespell Use the codespell dictionary for spelling/typos
114 (default:/usr/share/codespell/dictionary.txt) 115 (default:/usr/share/codespell/dictionary.txt)
115 --codespellfile Use this codespell dictionary 116 --codespellfile Use this codespell dictionary
117 --typedefsfile Read additional types from this file
116 --color Use colors when output is STDOUT (default: on) 118 --color Use colors when output is STDOUT (default: on)
117 -h, --help, --version display this help and exit 119 -h, --help, --version display this help and exit
118 120
@@ -208,6 +210,7 @@ GetOptions(
208 'test-only=s' => \$tst_only, 210 'test-only=s' => \$tst_only,
209 'codespell!' => \$codespell, 211 'codespell!' => \$codespell,
210 'codespellfile=s' => \$codespellfile, 212 'codespellfile=s' => \$codespellfile,
213 'typedefsfile=s' => \$typedefsfile,
211 'color!' => \$color, 214 'color!' => \$color,
212 'h|help' => \$help, 215 'h|help' => \$help,
213 'version' => \$help 216 'version' => \$help
@@ -629,29 +632,44 @@ if ($codespell) {
629 632
630$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix; 633$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
631 634
632my $const_structs = ""; 635sub read_words {
633if (open(my $conststructs, '<', $conststructsfile)) { 636 my ($wordsRef, $file) = @_;
634 while (<$conststructs>) {
635 my $line = $_;
636 637
637 $line =~ s/\s*\n?$//g; 638 if (open(my $words, '<', $file)) {
638 $line =~ s/^\s*//g; 639 while (<$words>) {
640 my $line = $_;
639 641
640 next if ($line =~ m/^\s*#/); 642 $line =~ s/\s*\n?$//g;
641 next if ($line =~ m/^\s*$/); 643 $line =~ s/^\s*//g;
642 if ($line =~ /\s/) {
643 print("$conststructsfile: '$line' invalid - ignored\n");
644 next;
645 }
646 644
647 $const_structs .= '|' if ($const_structs ne ""); 645 next if ($line =~ m/^\s*#/);
648 $const_structs .= $line; 646 next if ($line =~ m/^\s*$/);
647 if ($line =~ /\s/) {
648 print("$file: '$line' invalid - ignored\n");
649 next;
650 }
651
652 $$wordsRef .= '|' if ($$wordsRef ne "");
653 $$wordsRef .= $line;
654 }
655 close($file);
656 return 1;
649 } 657 }
650 close($conststructsfile); 658
651} else { 659 return 0;
652 warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
653} 660}
654 661
662my $const_structs = "";
663read_words(\$const_structs, $conststructsfile)
664 or warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
665
666my $typeOtherTypedefs = "";
667if (length($typedefsfile)) {
668 read_words(\$typeOtherTypedefs, $typedefsfile)
669 or warn "No additional types will be considered - file '$typedefsfile': $!\n";
670}
671$typeTypedefs .= '|' . $typeOtherTypedefs if ($typeOtherTypedefs ne "");
672
655sub build_types { 673sub build_types {
656 my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)"; 674 my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)";
657 my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)"; 675 my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)";
@@ -2195,8 +2213,7 @@ sub process {
2195 } 2213 }
2196 #next; 2214 #next;
2197 } 2215 }
2198 if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) { 2216 if ($rawline =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
2199 my $context = $4;
2200 $realline=$1-1; 2217 $realline=$1-1;
2201 if (defined $2) { 2218 if (defined $2) {
2202 $realcnt=$3+1; 2219 $realcnt=$3+1;
@@ -2205,12 +2222,6 @@ sub process {
2205 } 2222 }
2206 $in_comment = 0; 2223 $in_comment = 0;
2207 2224
2208 if ($context =~ /\b(\w+)\s*\(/) {
2209 $context_function = $1;
2210 } else {
2211 undef $context_function;
2212 }
2213
2214 # Guestimate if this is a continuing comment. Run 2225 # Guestimate if this is a continuing comment. Run
2215 # the context looking for a comment "edge". If this 2226 # the context looking for a comment "edge". If this
2216 # edge is a close comment then we must be in a comment 2227 # edge is a close comment then we must be in a comment
@@ -2281,7 +2292,8 @@ sub process {
2281 2292
2282#extract the line range in the file after the patch is applied 2293#extract the line range in the file after the patch is applied
2283 if (!$in_commit_log && 2294 if (!$in_commit_log &&
2284 $line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) { 2295 $line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) {
2296 my $context = $4;
2285 $is_patch = 1; 2297 $is_patch = 1;
2286 $first_line = $linenr + 1; 2298 $first_line = $linenr + 1;
2287 $realline=$1-1; 2299 $realline=$1-1;
@@ -2297,6 +2309,11 @@ sub process {
2297 %suppress_whiletrailers = (); 2309 %suppress_whiletrailers = ();
2298 %suppress_export = (); 2310 %suppress_export = ();
2299 $suppress_statement = 0; 2311 $suppress_statement = 0;
2312 if ($context =~ /\b(\w+)\s*\(/) {
2313 $context_function = $1;
2314 } else {
2315 undef $context_function;
2316 }
2300 next; 2317 next;
2301 2318
2302# track the line number as we move through the hunk, note that 2319# track the line number as we move through the hunk, note that
@@ -2539,6 +2556,7 @@ sub process {
2539# Check for git id commit length and improperly formed commit descriptions 2556# Check for git id commit length and improperly formed commit descriptions
2540 if ($in_commit_log && !$commit_log_possible_stack_dump && 2557 if ($in_commit_log && !$commit_log_possible_stack_dump &&
2541 $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i && 2558 $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i &&
2559 $line !~ /^This reverts commit [0-9a-f]{7,40}/ &&
2542 ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || 2560 ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
2543 ($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i && 2561 ($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
2544 $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && 2562 $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
@@ -2628,8 +2646,8 @@ sub process {
2628# Check if it's the start of a commit log 2646# Check if it's the start of a commit log
2629# (not a header line and we haven't seen the patch filename) 2647# (not a header line and we haven't seen the patch filename)
2630 if ($in_header_lines && $realfile =~ /^$/ && 2648 if ($in_header_lines && $realfile =~ /^$/ &&
2631 !($rawline =~ /^\s+\S/ || 2649 !($rawline =~ /^\s+(?:\S|$)/ ||
2632 $rawline =~ /^(commit\b|from\b|[\w-]+:).*$/i)) { 2650 $rawline =~ /^(?:commit\b|from\b|[\w-]+:)/i)) {
2633 $in_header_lines = 0; 2651 $in_header_lines = 0;
2634 $in_commit_log = 1; 2652 $in_commit_log = 1;
2635 $has_commit_log = 1; 2653 $has_commit_log = 1;
@@ -2757,13 +2775,6 @@ sub process {
2757 #print "is_start<$is_start> is_end<$is_end> length<$length>\n"; 2775 #print "is_start<$is_start> is_end<$is_end> length<$length>\n";
2758 } 2776 }
2759 2777
2760# discourage the addition of CONFIG_EXPERIMENTAL in Kconfig.
2761 if ($realfile =~ /Kconfig/ &&
2762 $line =~ /.\s*depends on\s+.*\bEXPERIMENTAL\b/) {
2763 WARN("CONFIG_EXPERIMENTAL",
2764 "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
2765 }
2766
2767# discourage the use of boolean for type definition attributes of Kconfig options 2778# discourage the use of boolean for type definition attributes of Kconfig options
2768 if ($realfile =~ /Kconfig/ && 2779 if ($realfile =~ /Kconfig/ &&
2769 $line =~ /^\+\s*\bboolean\b/) { 2780 $line =~ /^\+\s*\bboolean\b/) {
@@ -3133,6 +3144,17 @@ sub process {
3133# check we are in a valid C source file if not then ignore this hunk 3144# check we are in a valid C source file if not then ignore this hunk
3134 next if ($realfile !~ /\.(h|c)$/); 3145 next if ($realfile !~ /\.(h|c)$/);
3135 3146
3147# check if this appears to be the start function declaration, save the name
3148 if ($sline =~ /^\+\{\s*$/ &&
3149 $prevline =~ /^\+(?:(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*)?($Ident)\(/) {
3150 $context_function = $1;
3151 }
3152
3153# check if this appears to be the end of function declaration
3154 if ($sline =~ /^\+\}\s*$/) {
3155 undef $context_function;
3156 }
3157
3136# check indentation of any line with a bare else 3158# check indentation of any line with a bare else
3137# (but not if it is a multiple line "if (foo) return bar; else return baz;") 3159# (but not if it is a multiple line "if (foo) return bar; else return baz;")
3138# if the previous line is a break or return and is indented 1 tab more... 3160# if the previous line is a break or return and is indented 1 tab more...
@@ -3157,12 +3179,6 @@ sub process {
3157 } 3179 }
3158 } 3180 }
3159 3181
3160# discourage the addition of CONFIG_EXPERIMENTAL in #if(def).
3161 if ($line =~ /^\+\s*\#\s*if.*\bCONFIG_EXPERIMENTAL\b/) {
3162 WARN("CONFIG_EXPERIMENTAL",
3163 "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
3164 }
3165
3166# check for RCS/CVS revision markers 3182# check for RCS/CVS revision markers
3167 if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) { 3183 if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
3168 WARN("CVS_KEYWORD", 3184 WARN("CVS_KEYWORD",
@@ -3338,7 +3354,7 @@ sub process {
3338 } 3354 }
3339 3355
3340# Check relative indent for conditionals and blocks. 3356# Check relative indent for conditionals and blocks.
3341 if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) { 3357 if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
3342 ($stat, $cond, $line_nr_next, $remain_next, $off_next) = 3358 ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
3343 ctx_statement_block($linenr, $realcnt, 0) 3359 ctx_statement_block($linenr, $realcnt, 0)
3344 if (!defined $stat); 3360 if (!defined $stat);
@@ -3430,6 +3446,8 @@ sub process {
3430 if ($check && $s ne '' && 3446 if ($check && $s ne '' &&
3431 (($sindent % 8) != 0 || 3447 (($sindent % 8) != 0 ||
3432 ($sindent < $indent) || 3448 ($sindent < $indent) ||
3449 ($sindent == $indent &&
3450 ($s !~ /^\s*(?:\}|\{|else\b)/)) ||
3433 ($sindent > $indent + 8))) { 3451 ($sindent > $indent + 8))) {
3434 WARN("SUSPECT_CODE_INDENT", 3452 WARN("SUSPECT_CODE_INDENT",
3435 "suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n"); 3453 "suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
@@ -4851,8 +4869,10 @@ sub process {
4851 $dstat !~ /^\(\{/ && # ({... 4869 $dstat !~ /^\(\{/ && # ({...
4852 $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/) 4870 $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
4853 { 4871 {
4854 4872 if ($dstat =~ /^\s*if\b/) {
4855 if ($dstat =~ /;/) { 4873 ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
4874 "Macros starting with if should be enclosed by a do - while loop to avoid possible if/else logic defects\n" . "$herectx");
4875 } elsif ($dstat =~ /;/) {
4856 ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE", 4876 ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
4857 "Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx"); 4877 "Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
4858 } else { 4878 } else {
@@ -5174,14 +5194,16 @@ sub process {
5174 "break quoted strings at a space character\n" . $hereprev); 5194 "break quoted strings at a space character\n" . $hereprev);
5175 } 5195 }
5176 5196
5177#check for an embedded function name in a string when the function is known 5197# check for an embedded function name in a string when the function is known
5178# as part of a diff. This does not work for -f --file checking as it 5198# This does not work very well for -f --file checking as it depends on patch
5179#depends on patch context providing the function name 5199# context providing the function name or a single line form for in-file
5200# function declarations
5180 if ($line =~ /^\+.*$String/ && 5201 if ($line =~ /^\+.*$String/ &&
5181 defined($context_function) && 5202 defined($context_function) &&
5182 get_quoted_string($line, $rawline) =~ /\b$context_function\b/) { 5203 get_quoted_string($line, $rawline) =~ /\b$context_function\b/ &&
5204 length(get_quoted_string($line, $rawline)) != (length($context_function) + 2)) {
5183 WARN("EMBEDDED_FUNCTION_NAME", 5205 WARN("EMBEDDED_FUNCTION_NAME",
5184 "Prefer using \"%s\", __func__ to embedded function names\n" . $herecurr); 5206 "Prefer using '\"%s...\", __func__' to using '$context_function', this function's name, in a string\n" . $herecurr);
5185 } 5207 }
5186 5208
5187# check for spaces before a quoted newline 5209# check for spaces before a quoted newline
@@ -5676,6 +5698,32 @@ sub process {
5676 } 5698 }
5677 } 5699 }
5678 5700
5701 # check for vsprintf extension %p<foo> misuses
5702 if ($^V && $^V ge 5.10.0 &&
5703 defined $stat &&
5704 $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
5705 $1 !~ /^_*volatile_*$/) {
5706 my $bad_extension = "";
5707 my $lc = $stat =~ tr@\n@@;
5708 $lc = $lc + $linenr;
5709 for (my $count = $linenr; $count <= $lc; $count++) {
5710 my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
5711 $fmt =~ s/%%//g;
5712 if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGN]).)/) {
5713 $bad_extension = $1;
5714 last;
5715 }
5716 }
5717 if ($bad_extension ne "") {
5718 my $stat_real = raw_line($linenr, 0);
5719 for (my $count = $linenr + 1; $count <= $lc; $count++) {
5720 $stat_real = $stat_real . "\n" . raw_line($count, 0);
5721 }
5722 WARN("VSPRINTF_POINTER_EXTENSION",
5723 "Invalid vsprintf pointer extension '$bad_extension'\n" . "$here\n$stat_real\n");
5724 }
5725 }
5726
5679# Check for misused memsets 5727# Check for misused memsets
5680 if ($^V && $^V ge 5.10.0 && 5728 if ($^V && $^V ge 5.10.0 &&
5681 defined $stat && 5729 defined $stat &&
@@ -5893,7 +5941,8 @@ sub process {
5893 5941
5894# check for k[mz]alloc with multiplies that could be kmalloc_array/kcalloc 5942# check for k[mz]alloc with multiplies that could be kmalloc_array/kcalloc
5895 if ($^V && $^V ge 5.10.0 && 5943 if ($^V && $^V ge 5.10.0 &&
5896 $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)\s*,/) { 5944 defined $stat &&
5945 $stat =~ /^\+\s*($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)\s*,/) {
5897 my $oldfunc = $3; 5946 my $oldfunc = $3;
5898 my $a1 = $4; 5947 my $a1 = $4;
5899 my $a2 = $10; 5948 my $a2 = $10;
@@ -5907,11 +5956,17 @@ sub process {
5907 } 5956 }
5908 if ($r1 !~ /^sizeof\b/ && $r2 =~ /^sizeof\s*\S/ && 5957 if ($r1 !~ /^sizeof\b/ && $r2 =~ /^sizeof\s*\S/ &&
5909 !($r1 =~ /^$Constant$/ || $r1 =~ /^[A-Z_][A-Z0-9_]*$/)) { 5958 !($r1 =~ /^$Constant$/ || $r1 =~ /^[A-Z_][A-Z0-9_]*$/)) {
5959 my $ctx = '';
5960 my $herectx = $here . "\n";
5961 my $cnt = statement_rawlines($stat);
5962 for (my $n = 0; $n < $cnt; $n++) {
5963 $herectx .= raw_line($linenr, $n) . "\n";
5964 }
5910 if (WARN("ALLOC_WITH_MULTIPLY", 5965 if (WARN("ALLOC_WITH_MULTIPLY",
5911 "Prefer $newfunc over $oldfunc with multiply\n" . $herecurr) && 5966 "Prefer $newfunc over $oldfunc with multiply\n" . $herectx) &&
5967 $cnt == 1 &&
5912 $fix) { 5968 $fix) {
5913 $fixed[$fixlinenr] =~ s/\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)/$1 . ' = ' . "$newfunc(" . trim($r1) . ', ' . trim($r2)/e; 5969 $fixed[$fixlinenr] =~ s/\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)/$1 . ' = ' . "$newfunc(" . trim($r1) . ', ' . trim($r2)/e;
5914
5915 } 5970 }
5916 } 5971 }
5917 } 5972 }
@@ -6066,11 +6121,11 @@ sub process {
6066 } 6121 }
6067 6122
6068# check for various structs that are normally const (ops, kgdb, device_tree) 6123# check for various structs that are normally const (ops, kgdb, device_tree)
6124# and avoid what seem like struct definitions 'struct foo {'
6069 if ($line !~ /\bconst\b/ && 6125 if ($line !~ /\bconst\b/ &&
6070 $line =~ /\bstruct\s+($const_structs)\b/) { 6126 $line =~ /\bstruct\s+($const_structs)\b(?!\s*\{)/) {
6071 WARN("CONST_STRUCT", 6127 WARN("CONST_STRUCT",
6072 "struct $1 should normally be const\n" . 6128 "struct $1 should normally be const\n" . $herecurr);
6073 $herecurr);
6074 } 6129 }
6075 6130
6076# use of NR_CPUS is usually wrong 6131# use of NR_CPUS is usually wrong
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index b67e74b22826..eb38f49d4b75 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -179,6 +179,7 @@ bakup||backup
179baloon||balloon 179baloon||balloon
180baloons||balloons 180baloons||balloons
181bandwith||bandwidth 181bandwith||bandwidth
182banlance||balance
182batery||battery 183batery||battery
183beacuse||because 184beacuse||because
184becasue||because 185becasue||because
@@ -375,6 +376,8 @@ dictionnary||dictionary
375didnt||didn't 376didnt||didn't
376diferent||different 377diferent||different
377differrence||difference 378differrence||difference
379diffrent||different
380diffrentiate||differentiate
378difinition||definition 381difinition||definition
379diplay||display 382diplay||display
380direectly||directly 383direectly||directly
@@ -605,6 +608,9 @@ interruptted||interrupted
605interupted||interrupted 608interupted||interrupted
606interupt||interrupt 609interupt||interrupt
607intial||initial 610intial||initial
611intialisation||initialisation
612intialised||initialised
613intialise||initialise
608intialization||initialization 614intialization||initialization
609intialized||initialized 615intialized||initialized
610intialize||initialize 616intialize||initialize
@@ -691,6 +697,7 @@ miximum||maximum
691mmnemonic||mnemonic 697mmnemonic||mnemonic
692mnay||many 698mnay||many
693modulues||modules 699modulues||modules
700momery||memory
694monochorome||monochrome 701monochorome||monochrome
695monochromo||monochrome 702monochromo||monochrome
696monocrome||monochrome 703monocrome||monochrome
@@ -890,6 +897,7 @@ registerd||registered
890registeresd||registered 897registeresd||registered
891registes||registers 898registes||registers
892registraration||registration 899registraration||registration
900regsiter||register
893regster||register 901regster||register
894regualar||regular 902regualar||regular
895reguator||regulator 903reguator||regulator
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 41073f70eb41..4f6ac9dbc65d 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -98,7 +98,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
98 return ERR_PTR(-ESPIPE); 98 return ERR_PTR(-ESPIPE);
99 99
100 /* freed by caller to simple_write_to_buffer */ 100 /* freed by caller to simple_write_to_buffer */
101 data = kvmalloc(sizeof(*data) + alloc_size); 101 data = kvmalloc(sizeof(*data) + alloc_size, GFP_KERNEL);
102 if (data == NULL) 102 if (data == NULL)
103 return ERR_PTR(-ENOMEM); 103 return ERR_PTR(-ENOMEM);
104 kref_init(&data->count); 104 kref_init(&data->count);
@@ -1357,7 +1357,7 @@ static int aa_mk_null_file(struct dentry *parent)
1357 1357
1358 inode->i_ino = get_next_ino(); 1358 inode->i_ino = get_next_ino();
1359 inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO; 1359 inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
1360 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 1360 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
1361 init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, 1361 init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
1362 MKDEV(MEM_MAJOR, 3)); 1362 MKDEV(MEM_MAJOR, 3));
1363 d_instantiate(dentry, inode); 1363 d_instantiate(dentry, inode);
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 0291ff3902f9..550a700563b4 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -64,17 +64,6 @@ char *aa_split_fqname(char *args, char **ns_name);
64const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, 64const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
65 size_t *ns_len); 65 size_t *ns_len);
66void aa_info_message(const char *str); 66void aa_info_message(const char *str);
67void *__aa_kvmalloc(size_t size, gfp_t flags);
68
69static inline void *kvmalloc(size_t size)
70{
71 return __aa_kvmalloc(size, 0);
72}
73
74static inline void *kvzalloc(size_t size)
75{
76 return __aa_kvmalloc(size, __GFP_ZERO);
77}
78 67
79/** 68/**
80 * aa_strneq - compare null terminated @str to a non null terminated substring 69 * aa_strneq - compare null terminated @str to a non null terminated substring
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 32cafc12593e..7cd788a9445b 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -129,36 +129,6 @@ void aa_info_message(const char *str)
129} 129}
130 130
131/** 131/**
132 * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc
133 * @size: how many bytes of memory are required
134 * @flags: the type of memory to allocate (see kmalloc).
135 *
136 * Return: allocated buffer or NULL if failed
137 *
138 * It is possible that policy being loaded from the user is larger than
139 * what can be allocated by kmalloc, in those cases fall back to vmalloc.
140 */
141void *__aa_kvmalloc(size_t size, gfp_t flags)
142{
143 void *buffer = NULL;
144
145 if (size == 0)
146 return NULL;
147
148 /* do not attempt kmalloc if we need more than 16 pages at once */
149 if (size <= (16*PAGE_SIZE))
150 buffer = kmalloc(size, flags | GFP_KERNEL | __GFP_NORETRY |
151 __GFP_NOWARN);
152 if (!buffer) {
153 if (flags & __GFP_ZERO)
154 buffer = vzalloc(size);
155 else
156 buffer = vmalloc(size);
157 }
158 return buffer;
159}
160
161/**
162 * aa_policy_init - initialize a policy structure 132 * aa_policy_init - initialize a policy structure
163 * @policy: policy to initialize (NOT NULL) 133 * @policy: policy to initialize (NOT NULL)
164 * @prefix: prefix name if any is required. (MAYBE NULL) 134 * @prefix: prefix name if any is required. (MAYBE NULL)
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index eb0efef746f5..960c913381e2 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -88,7 +88,7 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
88 if (bsize < tsize) 88 if (bsize < tsize)
89 goto out; 89 goto out;
90 90
91 table = kvzalloc(tsize); 91 table = kvzalloc(tsize, GFP_KERNEL);
92 if (table) { 92 if (table) {
93 table->td_id = th.td_id; 93 table->td_id = th.td_id;
94 table->td_flags = th.td_flags; 94 table->td_flags = th.td_flags;
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 2e37c9c26bbd..f3422a91353c 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -487,7 +487,7 @@ fail:
487 487
488static void *kvmemdup(const void *src, size_t len) 488static void *kvmemdup(const void *src, size_t len)
489{ 489{
490 void *p = kvmalloc(len); 490 void *p = kvmalloc(len, GFP_KERNEL);
491 491
492 if (p) 492 if (p)
493 memcpy(p, src, len); 493 memcpy(p, src, len);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 82a9e1851108..447a7d5cee0f 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -101,14 +101,9 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
101 101
102 if (_payload) { 102 if (_payload) {
103 ret = -ENOMEM; 103 ret = -ENOMEM;
104 payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); 104 payload = kvmalloc(plen, GFP_KERNEL);
105 if (!payload) { 105 if (!payload)
106 if (plen <= PAGE_SIZE) 106 goto error2;
107 goto error2;
108 payload = vmalloc(plen);
109 if (!payload)
110 goto error2;
111 }
112 107
113 ret = -EFAULT; 108 ret = -EFAULT;
114 if (copy_from_user(payload, _payload, plen) != 0) 109 if (copy_from_user(payload, _payload, plen) != 0)
@@ -1071,14 +1066,9 @@ long keyctl_instantiate_key_common(key_serial_t id,
1071 1066
1072 if (from) { 1067 if (from) {
1073 ret = -ENOMEM; 1068 ret = -ENOMEM;
1074 payload = kmalloc(plen, GFP_KERNEL); 1069 payload = kvmalloc(plen, GFP_KERNEL);
1075 if (!payload) { 1070 if (!payload)
1076 if (plen <= PAGE_SIZE) 1071 goto error;
1077 goto error;
1078 payload = vmalloc(plen);
1079 if (!payload)
1080 goto error;
1081 }
1082 1072
1083 ret = -EFAULT; 1073 ret = -EFAULT;
1084 if (!copy_from_iter_full(payload, plen, from)) 1074 if (!copy_from_iter_full(payload, plen, from))
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b786fbab029f..1770f085c2a6 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -53,7 +53,7 @@
53#ifdef CONFIG_X86 53#ifdef CONFIG_X86
54/* for snoop control */ 54/* for snoop control */
55#include <asm/pgtable.h> 55#include <asm/pgtable.h>
56#include <asm/cacheflush.h> 56#include <asm/set_memory.h>
57#include <asm/cpufeature.h> 57#include <asm/cpufeature.h>
58#endif 58#endif
59#include <sound/core.h> 59#include <sound/core.h>
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 9720a30dbfff..6d17b171c17b 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -40,7 +40,9 @@
40#include <sound/initval.h> 40#include <sound/initval.h>
41/* for 440MX workaround */ 41/* for 440MX workaround */
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
43#include <asm/cacheflush.h> 43#ifdef CONFIG_X86
44#include <asm/set_memory.h>
45#endif
44 46
45MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 47MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
46MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455"); 48MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 525f2f397b4c..aae099c0e502 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -936,7 +936,7 @@ static struct snd_soc_component *soc_find_component(
936 * 936 *
937 * @dlc: name of the DAI and optional component info to match 937 * @dlc: name of the DAI and optional component info to match
938 * 938 *
939 * This function will search all regsitered components and their DAIs to 939 * This function will search all registered components and their DAIs to
940 * find the DAI of the same name. The component's of_node and name 940 * find the DAI of the same name. The component's of_node and name
941 * should also match if being specified. 941 * should also match if being specified.
942 * 942 *
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index c505b019e09c..664b7fe206d6 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -30,7 +30,7 @@
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <asm/cacheflush.h> 33#include <asm/set_memory.h>
34#include <sound/core.h> 34#include <sound/core.h>
35#include <sound/asoundef.h> 35#include <sound/asoundef.h>
36#include <sound/pcm.h> 36#include <sound/pcm.h>
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index dba889004ea1..cbb29e41ef2b 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -16,6 +16,7 @@ TEST_GEN_FILES += thuge-gen
16TEST_GEN_FILES += transhuge-stress 16TEST_GEN_FILES += transhuge-stress
17TEST_GEN_FILES += userfaultfd 17TEST_GEN_FILES += userfaultfd
18TEST_GEN_FILES += mlock-random-test 18TEST_GEN_FILES += mlock-random-test
19TEST_GEN_FILES += virtual_address_range
19 20
20TEST_PROGS := run_vmtests 21TEST_PROGS := run_vmtests
21 22
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 3214a6456d13..0640923ded7e 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -165,4 +165,15 @@ else
165 echo "[PASS]" 165 echo "[PASS]"
166fi 166fi
167 167
168echo "-----------------------------"
169echo "running virtual_address_range"
170echo "-----------------------------"
171./virtual_address_range
172if [ $? -ne 0 ]; then
173 echo "[FAIL]"
174 exitcode=1
175else
176 echo "[PASS]"
177fi
178
168exit $exitcode 179exit $exitcode
diff --git a/tools/testing/selftests/vm/virtual_address_range.c b/tools/testing/selftests/vm/virtual_address_range.c
new file mode 100644
index 000000000000..3b02aa6eb9da
--- /dev/null
+++ b/tools/testing/selftests/vm/virtual_address_range.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2017, Anshuman Khandual, IBM Corp.
3 * Licensed under GPLv2.
4 *
5 * Works on architectures which support 128TB virtual
6 * address range and beyond.
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <unistd.h>
12#include <errno.h>
13#include <numaif.h>
14#include <sys/mman.h>
15#include <sys/time.h>
16
17/*
18 * Maximum address range mapped with a single mmap()
19 * call is little bit more than 16GB. Hence 16GB is
20 * chosen as the single chunk size for address space
21 * mapping.
22 */
23#define MAP_CHUNK_SIZE 17179869184UL /* 16GB */
24
25/*
26 * Address space till 128TB is mapped without any hint
27 * and is enabled by default. Address space beyond 128TB
28 * till 512TB is obtained by passing hint address as the
29 * first argument into mmap() system call.
30 *
31 * The process heap address space is divided into two
32 * different areas one below 128TB and one above 128TB
33 * till it reaches 512TB. One with size 128TB and the
34 * other being 384TB.
35 */
36#define NR_CHUNKS_128TB 8192UL /* Number of 16GB chunks for 128TB */
37#define NR_CHUNKS_384TB 24576UL /* Number of 16GB chunks for 384TB */
38
39#define ADDR_MARK_128TB (1UL << 47) /* First address beyond 128TB */
40
41static char *hind_addr(void)
42{
43 int bits = 48 + rand() % 15;
44
45 return (char *) (1UL << bits);
46}
47
48static int validate_addr(char *ptr, int high_addr)
49{
50 unsigned long addr = (unsigned long) ptr;
51
52 if (high_addr) {
53 if (addr < ADDR_MARK_128TB) {
54 printf("Bad address %lx\n", addr);
55 return 1;
56 }
57 return 0;
58 }
59
60 if (addr > ADDR_MARK_128TB) {
61 printf("Bad address %lx\n", addr);
62 return 1;
63 }
64 return 0;
65}
66
67static int validate_lower_address_hint(void)
68{
69 char *ptr;
70
71 ptr = mmap((void *) (1UL << 45), MAP_CHUNK_SIZE, PROT_READ |
72 PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
73
74 if (ptr == MAP_FAILED)
75 return 0;
76
77 return 1;
78}
79
80int main(int argc, char *argv[])
81{
82 char *ptr[NR_CHUNKS_128TB];
83 char *hptr[NR_CHUNKS_384TB];
84 char *hint;
85 unsigned long i, lchunks, hchunks;
86
87 for (i = 0; i < NR_CHUNKS_128TB; i++) {
88 ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
89 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
90
91 if (ptr[i] == MAP_FAILED) {
92 if (validate_lower_address_hint())
93 return 1;
94 break;
95 }
96
97 if (validate_addr(ptr[i], 0))
98 return 1;
99 }
100 lchunks = i;
101
102 for (i = 0; i < NR_CHUNKS_384TB; i++) {
103 hint = hind_addr();
104 hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
105 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
106
107 if (hptr[i] == MAP_FAILED)
108 break;
109
110 if (validate_addr(hptr[i], 1))
111 return 1;
112 }
113 hchunks = i;
114
115 for (i = 0; i < lchunks; i++)
116 munmap(ptr[i], MAP_CHUNK_SIZE);
117
118 for (i = 0; i < hchunks; i++)
119 munmap(hptr[i], MAP_CHUNK_SIZE);
120
121 return 0;
122}
diff --git a/usr/Kconfig b/usr/Kconfig
index 6278f135256d..c0c48507e44e 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -21,6 +21,16 @@ config INITRAMFS_SOURCE
21 21
22 If you are not sure, leave it blank. 22 If you are not sure, leave it blank.
23 23
24config INITRAMFS_FORCE
25 bool "Ignore the initramfs passed by the bootloader"
26 depends on CMDLINE_EXTEND || CMDLINE_FORCE
27 help
28 This option causes the kernel to ignore the initramfs image
29 (or initrd image) passed to it by the bootloader. This is
30 analogous to CMDLINE_FORCE, which is found on some architectures,
31 and is useful if you cannot or don't want to change the image
32 your bootloader passes to the kernel.
33
24config INITRAMFS_ROOT_UID 34config INITRAMFS_ROOT_UID
25 int "User ID to map to 0 (user root)" 35 int "User ID to map to 0 (user root)"
26 depends on INITRAMFS_SOURCE!="" 36 depends on INITRAMFS_SOURCE!=""
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6e3b12c1925a..b3d151ee2a67 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -523,7 +523,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
523 int i; 523 int i;
524 struct kvm_memslots *slots; 524 struct kvm_memslots *slots;
525 525
526 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 526 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
527 if (!slots) 527 if (!slots)
528 return NULL; 528 return NULL;
529 529
@@ -708,18 +708,6 @@ out_err_no_disable:
708 return ERR_PTR(r); 708 return ERR_PTR(r);
709} 709}
710 710
711/*
712 * Avoid using vmalloc for a small buffer.
713 * Should not be used when the size is statically known.
714 */
715void *kvm_kvzalloc(unsigned long size)
716{
717 if (size > PAGE_SIZE)
718 return vzalloc(size);
719 else
720 return kzalloc(size, GFP_KERNEL);
721}
722
723static void kvm_destroy_devices(struct kvm *kvm) 711static void kvm_destroy_devices(struct kvm *kvm)
724{ 712{
725 struct kvm_device *dev, *tmp; 713 struct kvm_device *dev, *tmp;
@@ -801,7 +789,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
801{ 789{
802 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 790 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
803 791
804 memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); 792 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
805 if (!memslot->dirty_bitmap) 793 if (!memslot->dirty_bitmap)
806 return -ENOMEM; 794 return -ENOMEM;
807 795
@@ -1027,7 +1015,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
1027 goto out_free; 1015 goto out_free;
1028 } 1016 }
1029 1017
1030 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 1018 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
1031 if (!slots) 1019 if (!slots)
1032 goto out_free; 1020 goto out_free;
1033 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); 1021 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));