author     Linus Torvalds <torvalds@linux-foundation.org>	2019-09-26 13:29:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2019-09-26 13:29:42 -0400
commit     cbafe18c71028d5e0ee1626b4776fea5d5824a78 (patch)
tree       2bb7db7db4ed8df2801f7c16553c69fb27379f7f
parent     f41def397161053eb0d3ed6861ef65985efbf293 (diff)
parent     a22fea94992a2bc5328005e62f368413ede49c14 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 - almost all of the rest of -mm
 - various other subsystems

Subsystems affected by this patch series: memcg, misc, core-kernel,
lib, checkpatch, reiserfs, fat, fork, cpumask, kexec, uaccess, kconfig,
kgdb, bug, ipc, lzo, kasan, madvise, cleanups, pagemap

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (77 commits)
  arch/sparc/include/asm/pgtable_64.h: fix build
  mm: treewide: clarify pgtable_page_{ctor,dtor}() naming
  ntfs: remove (un)?likely() from IS_ERR() conditions
  IB/hfi1: remove unlikely() from IS_ERR*() condition
  xfs: remove unlikely() from WARN_ON() condition
  wimax/i2400m: remove unlikely() from WARN*() condition
  fs: remove unlikely() from WARN_ON() condition
  xen/events: remove unlikely() from WARN() condition
  checkpatch: check for nested (un)?likely() calls
  hexagon: drop empty and unused free_initrd_mem
  mm: factor out common parts between MADV_COLD and MADV_PAGEOUT
  mm: introduce MADV_PAGEOUT
  mm: change PAGEREF_RECLAIM_CLEAN with PAGE_REFRECLAIM
  mm: introduce MADV_COLD
  mm: untag user pointers in mmap/munmap/mremap/brk
  vfio/type1: untag user pointers in vaddr_get_pfn
  tee/shm: untag user pointers in tee_shm_register
  media/v4l2-core: untag user pointers in videobuf_dma_contig_user_get
  drm/radeon: untag user pointers in radeon_gem_userptr_ioctl
  drm/amdgpu: untag user pointers
  ...
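Two of the headline changes here are the new madvise(2) hints added by the
series (see the uapi mman.h hunks and mm/madvise.c in the diffstat below).
As a quick orientation, a minimal userspace sketch -- not part of the patch
itself; the buffer and length are placeholders:

	#include <stdio.h>
	#include <sys/mman.h>

	/*
	 * Hint that [buf, buf+len) is cold.  MADV_COLD deactivates the
	 * pages so reclaim prefers them; MADV_PAGEOUT reclaims them
	 * immediately.  Kernels without these hints fail with EINVAL.
	 */
	static void hint_cold(void *buf, size_t len)
	{
		if (madvise(buf, len, MADV_COLD) < 0)
			perror("madvise(MADV_COLD)");
		if (madvise(buf, len, MADV_PAGEOUT) < 0)
			perror("madvise(MADV_PAGEOUT)");
	}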
-rw-r--r--  Documentation/core-api/kernel-api.rst | 3
-rw-r--r--  Documentation/vm/split_page_table_lock.rst | 10
-rw-r--r--  arch/alpha/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/arc/include/asm/pgalloc.h | 4
-rw-r--r--  arch/arm/include/asm/tlb.h | 2
-rw-r--r--  arch/arm/mm/mmu.c | 2
-rw-r--r--  arch/arm64/include/asm/tlb.h | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 2
-rw-r--r--  arch/hexagon/include/asm/pgalloc.h | 2
-rw-r--r--  arch/hexagon/mm/init.c | 13
-rw-r--r--  arch/m68k/include/asm/mcf_pgalloc.h | 6
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h | 6
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 2
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/nios2/include/asm/pgalloc.h | 2
-rw-r--r--  arch/openrisc/include/asm/pgalloc.h | 6
-rw-r--r--  arch/parisc/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/powerpc/mm/pgtable-frag.c | 6
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 2
-rw-r--r--  arch/s390/mm/pgalloc.c | 6
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 5
-rw-r--r--  arch/sparc/mm/init_64.c | 4
-rw-r--r--  arch/sparc/mm/srmmu.c | 4
-rw-r--r--  arch/um/include/asm/pgalloc.h | 2
-rw-r--r--  arch/unicore32/include/asm/tlb.h | 2
-rw-r--r--  arch/x86/mm/pat_rbtree.c | 19
-rw-r--r--  arch/x86/mm/pgtable.c | 2
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 4
-rw-r--r--  arch/xtensa/include/uapi/asm/mman.h | 3
-rw-r--r--  drivers/block/drbd/drbd_interval.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c | 9
-rw-r--r--  drivers/net/wimax/i2400m/tx.c | 3
-rw-r--r--  drivers/tee/tee_shm.c | 1
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 2
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  fs/fat/dir.c | 4
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/ntfs/mft.c | 12
-rw-r--r--  fs/ntfs/namei.c | 2
-rw-r--r--  fs/ntfs/runlist.c | 2
-rw-r--r--  fs/ntfs/super.c | 2
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/reiserfs/do_balan.c | 15
-rw-r--r--  fs/reiserfs/fix_node.c | 6
-rw-r--r--  fs/reiserfs/journal.c | 22
-rw-r--r--  fs/reiserfs/lbalance.c | 3
-rw-r--r--  fs/reiserfs/objectid.c | 3
-rw-r--r--  fs/reiserfs/prints.c | 3
-rw-r--r--  fs/reiserfs/stree.c | 4
-rw-r--r--  fs/userfaultfd.c | 22
-rw-r--r--  fs/xfs/xfs_buf.c | 4
-rw-r--r--  include/asm-generic/bug.h | 53
-rw-r--r--  include/asm-generic/pgalloc.h | 8
-rw-r--r--  include/linux/cpumask.h | 14
-rw-r--r--  include/linux/interval_tree_generic.h | 22
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/kgdb.h | 2
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/mm_types_task.h | 4
-rw-r--r--  include/linux/printk.h | 22
-rw-r--r--  include/linux/rbtree_augmented.h | 88
-rw-r--r--  include/linux/string.h | 5
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/linux/thread_info.h | 2
-rw-r--r--  include/linux/uaccess.h | 21
-rw-r--r--  include/trace/events/writeback.h | 38
-rw-r--r--  include/uapi/asm-generic/mman-common.h | 3
-rw-r--r--  include/uapi/linux/coff.h | 5
-rw-r--r--  ipc/mqueue.c | 22
-rw-r--r--  ipc/sem.c | 3
-rw-r--r--  kernel/debug/debug_core.c | 31
-rw-r--r--  kernel/elfcore.c | 1
-rw-r--r--  kernel/fork.c | 16
-rw-r--r--  kernel/kexec_core.c | 2
-rw-r--r--  kernel/panic.c | 42
-rw-r--r--  lib/Kconfig.debug | 4
-rw-r--r--  lib/bug.c | 11
-rw-r--r--  lib/extable.c | 1
-rw-r--r--  lib/generic-radix-tree.c | 4
-rw-r--r--  lib/hexdump.c | 21
-rw-r--r--  lib/lzo/lzo1x_compress.c | 14
-rw-r--r--  lib/rbtree_test.c | 37
-rw-r--r--  lib/string.c | 12
-rw-r--r--  lib/strncpy_from_user.c | 3
-rw-r--r--  lib/strnlen_user.c | 3
-rw-r--r--  mm/frame_vector.c | 2
-rw-r--r--  mm/gup.c | 4
-rw-r--r--  mm/internal.h | 2
-rw-r--r--  mm/madvise.c | 268
-rw-r--r--  mm/memcontrol.c | 10
-rw-r--r--  mm/mempolicy.c | 3
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/mincore.c | 2
-rw-r--r--  mm/mlock.c | 4
-rw-r--r--  mm/mmap.c | 34
-rw-r--r--  mm/mprotect.c | 2
-rw-r--r--  mm/mremap.c | 3
-rw-r--r--  mm/msync.c | 2
-rw-r--r--  mm/oom_kill.c | 2
-rw-r--r--  mm/swap.c | 42
-rw-r--r--  mm/vmalloc.c | 5
-rw-r--r--  mm/vmscan.c | 62
-rwxr-xr-x  scripts/checkpatch.pl | 69
-rw-r--r--  scripts/gdb/linux/symbols.py | 4
-rw-r--r--  tools/include/linux/rbtree.h | 71
-rw-r--r--  tools/include/linux/rbtree_augmented.h | 119
-rw-r--r--  tools/lib/rbtree.c | 37
114 files changed, 1005 insertions, 564 deletions
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 08af5caf036d..f77de49b1d51 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -42,6 +42,9 @@ String Manipulation
 .. kernel-doc:: lib/string.c
    :export:
 
+.. kernel-doc:: include/linux/string.h
+   :internal:
+
 .. kernel-doc:: mm/util.c
    :functions: kstrdup kstrdup_const kstrndup kmemdup kmemdup_nul memdup_user
                vmemdup_user strndup_user memdup_user_nul
diff --git a/Documentation/vm/split_page_table_lock.rst b/Documentation/vm/split_page_table_lock.rst
index 889b00be469f..ff51f4a5494d 100644
--- a/Documentation/vm/split_page_table_lock.rst
+++ b/Documentation/vm/split_page_table_lock.rst
@@ -54,9 +54,9 @@ Hugetlb-specific helpers:
 Support of split page table lock by an architecture
 ===================================================
 
-There's no need in special enabling of PTE split page table lock:
-everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
-which must be called on PTE table allocation / freeing.
+There's no need in special enabling of PTE split page table lock: everything
+required is done by pgtable_pte_page_ctor() and pgtable_pte_page_dtor(), which
+must be called on PTE table allocation / freeing.
 
 Make sure the architecture doesn't use slab allocator for page table
 allocation: slab uses page->slab_cache for its pages.
@@ -74,7 +74,7 @@ paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
 
 With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
 
-NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
+NOTE: pgtable_pte_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
 be handled properly.
 
 page->ptl
@@ -94,7 +94,7 @@ trick:
   split lock with enabled DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC, but costs
   one more cache line for indirect access;
 
-The spinlock_t allocated in pgtable_page_ctor() for PTE table and in
+The spinlock_t allocated in pgtable_pte_page_ctor() for PTE table and in
 pgtable_pmd_page_ctor() for PMD table.
 
 Please, never access page->ptl directly -- use appropriate helper.
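The calling convention the document describes is what every architecture
conversion below follows; distilled into a generic sketch (the shape of the
arch hunks that come next, not a new interface):

	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
	{
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;
		/* Sets up the split PTE lock and accounting; may fail. */
		if (!pgtable_pte_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		return page;
	}

	static inline void pte_free(struct mm_struct *mm, pgtable_t page)
	{
		pgtable_pte_page_dtor(page);	/* tear down lock/accounting */
		__free_page(page);
	}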
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index ac23379b7a87..a18ec7f63888 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -68,6 +68,9 @@
 #define MADV_WIPEONFORK 18	/* Zero memory on fork, child only */
 #define MADV_KEEPONFORK 19	/* Undo MADV_WIPEONFORK */
 
+#define MADV_COLD	20	/* deactivate these pages */
+#define MADV_PAGEOUT	21	/* reclaim these pages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 4751f2251cd9..b747f2ec2928 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -108,7 +108,7 @@ pte_alloc_one(struct mm_struct *mm)
 		return 0;
 	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
 	page = virt_to_page(pte_pg);
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return 0;
 	}
@@ -123,7 +123,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 {
-	pgtable_page_dtor(virt_to_page(ptep));
+	pgtable_pte_page_dtor(virt_to_page(ptep));
 	free_pages((unsigned long)ptep, __get_order_pte());
 }
 
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index b75ea15b85c0..669474add486 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -44,7 +44,7 @@ static inline void __tlb_remove_table(void *_table)
 static inline void
 __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25da9b2d9610..48c2888297dd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -731,7 +731,7 @@ static void *__init late_alloc(unsigned long sz)
 {
 	void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
 
-	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+	if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
 		BUG();
 	return ptr;
 }
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a95d1fcb7e21..b76df828e6b7 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -44,7 +44,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 53dc6f24cfb7..60c929f3683b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -384,7 +384,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
 	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
 	 */
 	if (shift == PAGE_SHIFT)
-		BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
+		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
 	else if (shift == PMD_SHIFT)
 		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
 
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index d089113fe41f..c7c1ed27e348 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -71,7 +71,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 #define __pte_free_tlb(tlb, pte, address)	\
 do {						\
-	pgtable_page_dtor(pte);			\
+	pgtable_pte_page_dtor(pte);		\
 	tlb_remove_page(tlb, pte);		\
 } while (0)
 
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 5a6e79e7926d..cc9be514a676 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -94,7 +94,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 
 #define __pte_free_tlb(tlb, pte, addr)		\
 do {						\
-	pgtable_page_dtor((pte));		\
+	pgtable_pte_page_dtor((pte));		\
 	tlb_remove_page((tlb), (pte));		\
 } while (0)
 
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index f1f6ebd537b7..c961773a6fff 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -71,19 +71,6 @@ void __init mem_init(void)
 	init_mm.context.ptbase = __pa(init_mm.pgd);
 }
 
-/*
- * free_initrd_mem - frees... initrd memory.
- * @start - start of init memory
- * @end - end of init memory
- *
- * Apparently has to be passed the address of the initrd memory.
- *
- * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
- */
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-}
-
 void sync_icache_dcache(pte_t pte)
 {
 	unsigned long addr;
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 4399d712f6db..b34d44d666a4 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -41,7 +41,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 				  unsigned long address)
 {
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
@@ -54,7 +54,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
 
 	if (!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -73,7 +73,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
 
 static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index d04d9ba9b976..acab315c851f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -36,7 +36,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
 	if(!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -51,7 +51,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t page)
 {
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	cache_page(kmap(page));
 	kunmap(page);
 	__free_page(page);
@@ -60,7 +60,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 				  unsigned long address)
 {
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	cache_page(kmap(page));
 	kunmap(page);
 	__free_page(page);
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 1a8ddbd0d23c..856121122b91 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -21,7 +21,7 @@ extern const char bad_pmd_string[];
 
 #define __pte_free_tlb(tlb,pte,addr)		\
 do {						\
-	pgtable_page_dtor(pte);			\
+	pgtable_pte_page_dtor(pte);		\
 	tlb_remove_page((tlb), pte);		\
 } while (0)
 
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index aa73cb187a07..166842337eb2 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #define __pte_free_tlb(tlb,pte,address)		\
 do {						\
-	pgtable_page_dtor(pte);			\
+	pgtable_pte_page_dtor(pte);		\
 	tlb_remove_page((tlb), pte);		\
 } while (0)
 
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index c2b40969eb1f..57dc2ac4f8bd 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -95,6 +95,9 @@
 #define MADV_WIPEONFORK 18	/* Zero memory on fork, child only */
 #define MADV_KEEPONFORK 19	/* Undo MADV_WIPEONFORK */
 
+#define MADV_COLD	20	/* deactivate these pages */
+#define MADV_PAGEOUT	21	/* reclaim these pages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 750d18d5980b..0b146d773c85 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -41,7 +41,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #define __pte_free_tlb(tlb, pte, addr)			\
 	do {						\
-		pgtable_page_dtor(pte);			\
+		pgtable_pte_page_dtor(pte);		\
 		tlb_remove_page((tlb), (pte));		\
 	} while (0)
 
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 787c1b9d2f6d..da12a4c38c4b 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -75,7 +75,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
 	if (!pte)
 		return NULL;
 	clear_page(page_address(pte));
-	if (!pgtable_page_ctor(pte)) {
+	if (!pgtable_pte_page_ctor(pte)) {
 		__free_page(pte);
 		return NULL;
 	}
@@ -89,13 +89,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	__free_page(pte);
 }
 
 #define __pte_free_tlb(tlb, pte, addr)	\
 do {					\
-	pgtable_page_dtor(pte);		\
+	pgtable_pte_page_dtor(pte);	\
 	tlb_remove_page((tlb), (pte));	\
 } while (0)
 
101 101
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index c98162f494db..6fd8871e4081 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -48,6 +48,9 @@
 #define MADV_DONTFORK	10	/* don't inherit across fork */
 #define MADV_DOFORK	11	/* do inherit across fork */
 
+#define MADV_COLD	20	/* deactivate these pages */
+#define MADV_PAGEOUT	21	/* reclaim these pages */
+
 #define MADV_MERGEABLE   65	/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66	/* KSM may not merge identical pages */
 
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index a7b05214760c..ee4bd6d38602 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag)
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
 	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
-		pgtable_page_dtor(page);
+		pgtable_pte_page_dtor(page);
 		__free_page(page);
 	}
 }
@@ -61,7 +61,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
 	if (!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -113,7 +113,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
 	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
 	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
 		if (!kernel)
-			pgtable_page_dtor(page);
+			pgtable_pte_page_dtor(page);
 		__free_page(page);
 	}
 }
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index f66a00d8cb19..d59ea92285ec 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -78,7 +78,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 
 #define __pte_free_tlb(tlb, pte, buf)	\
 do {					\
-	pgtable_page_dtor(pte);		\
+	pgtable_pte_page_dtor(pte);	\
 	tlb_remove_page((tlb), pte);	\
 } while (0)
 
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 54fcdf66ae96..3dd253f81a77 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -210,7 +210,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -256,7 +256,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		atomic_xor_bits(&page->_refcount, 3U << 24);
 	}
 
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
@@ -308,7 +308,7 @@ void __tlb_remove_table(void *_table)
 	case 3:		/* 4K page table with pgstes */
 		if (mask & 3)
 			atomic_xor_bits(&page->_refcount, 3 << 24);
-		pgtable_page_dtor(page);
+		pgtable_pte_page_dtor(page);
 		__free_page(page);
 		break;
 	}
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c6341a4d807..22d968bfe9bb 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 #define __pte_free_tlb(tlb,pte,addr)		\
 do {						\
-	pgtable_page_dtor(pte);			\
+	pgtable_pte_page_dtor(pte);		\
 	tlb_remove_page((tlb), (pte));		\
 } while (0)
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b57f9c631eca..6ae8016ef4ec 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1078,7 +1078,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 }
 #define io_remap_pfn_range io_remap_pfn_range
 
-static inline unsigned long untagged_addr(unsigned long start)
+static inline unsigned long __untagged_addr(unsigned long start)
 {
 	if (adi_capable()) {
 		long addr = start;
@@ -1098,7 +1098,8 @@ static inline unsigned long untagged_addr(unsigned long start)
 
 	return start;
 }
-#define untagged_addr untagged_addr
+#define untagged_addr(addr) \
+	((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
 
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
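Many of the hunks that follow simply wrap a user-supplied address in
untagged_addr() before it is used. The macro form introduced above is what
lets those call sites pass pointers as well as unsigned long: __typeof__
casts the result back to the argument's own type. Illustrative fragment
only, mirroring the later hunks:

	void __user *p;
	unsigned long vaddr;

	p = untagged_addr(p);		/* still void __user * */
	vaddr = untagged_addr(vaddr);	/* still unsigned long */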
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4b099dd7a767..e6d91819da92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2903,7 +2903,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
 	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		free_unref_page(page);
 		return NULL;
 	}
@@ -2919,7 +2919,7 @@ static void __pte_free(pgtable_t pte)
 {
 	struct page *page = virt_to_page(pte);
 
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index aaebbc00d262..cc3ad64479ac 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -378,7 +378,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
 	if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
 		return NULL;
 	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -389,7 +389,7 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	unsigned long p;
 
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
 	if (p == 0)
 		BUG();
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 446e0c0f4018..881e76da1938 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 #define __pte_free_tlb(tlb,pte, address)	\
 do {						\
-	pgtable_page_dtor(pte);			\
+	pgtable_pte_page_dtor(pte);		\
 	tlb_remove_page((tlb),(pte));		\
 } while (0)
 
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 10d2356bfddd..4663d8cc80ef 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -15,7 +15,7 @@
 
 #define __pte_free_tlb(tlb, pte, addr)		\
 	do {					\
-		pgtable_page_dtor(pte);		\
+		pgtable_pte_page_dtor(pte);	\
 		tlb_remove_page((tlb), (pte));	\
 	} while (0)
 
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index fa16036fa592..65ebe4b88f7c 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -54,23 +54,10 @@ static u64 get_subtree_max_end(struct rb_node *node)
 	return ret;
 }
 
-static u64 compute_subtree_max_end(struct memtype *data)
-{
-	u64 max_end = data->end, child_max_end;
-
-	child_max_end = get_subtree_max_end(data->rb.rb_right);
-	if (child_max_end > max_end)
-		max_end = child_max_end;
-
-	child_max_end = get_subtree_max_end(data->rb.rb_left);
-	if (child_max_end > max_end)
-		max_end = child_max_end;
-
-	return max_end;
-}
+#define NODE_END(node) ((node)->end)
 
-RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb,
-		     u64, subtree_max_end, compute_subtree_max_end)
+RB_DECLARE_CALLBACKS_MAX(static, memtype_rb_augment_cb,
+			 struct memtype, rb, u64, subtree_max_end, NODE_END)
 
 /* Find the first (lowest start addr) overlapping range from rb tree */
 static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
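Both augmented-rbtree conversions in this series (here and in
drbd_interval.c below) follow the same pattern: the caller now supplies
only a per-node scalar, and RB_DECLARE_CALLBACKS_MAX generates the
subtree-max propagation each user previously hand-rolled. A hypothetical
user, just to show the shape -- struct item and ITEM_LAST are illustrative,
not from the patch:

	struct item {
		struct rb_node rb;
		u64 start, last;
		u64 subtree_last;	/* max of 'last' over this subtree */
	};

	#define ITEM_LAST(node) ((node)->last)

	RB_DECLARE_CALLBACKS_MAX(static, item_augment_cb,
				 struct item, rb, u64, subtree_last, ITEM_LAST)

	/* insertions then pass the generated callbacks:
	 *	rb_insert_augmented(&it->rb, root, &item_augment_cb);
	 */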
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 463940faf52f..3e4b9035bb9a 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -45,7 +45,7 @@ early_param("userpte", setup_userpte);
 
 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	paravirt_release_pte(page_to_pfn(pte));
 	paravirt_tlb_remove_table(tlb, pte);
 }
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index dd744aa450fa..1d38f0e755ba 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -55,7 +55,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -69,7 +69,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	__free_page(pte);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index ebbb48842190..e5e643752947 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -103,6 +103,9 @@
 #define MADV_WIPEONFORK 18	/* Zero memory on fork, child only */
 #define MADV_KEEPONFORK 19	/* Undo MADV_WIPEONFORK */
 
+#define MADV_COLD	20	/* deactivate these pages */
+#define MADV_PAGEOUT	21	/* reclaim these pages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
index c58986556161..651bd0236a99 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -13,33 +13,10 @@ sector_t interval_end(struct rb_node *node)
 	return this->end;
 }
 
-/**
- * compute_subtree_last  -  compute end of @node
- *
- * The end of an interval is the highest (start + (size >> 9)) value of this
- * node and of its children.  Called for @node and its parents whenever the end
- * may have changed.
- */
-static inline sector_t
-compute_subtree_last(struct drbd_interval *node)
-{
-	sector_t max = node->sector + (node->size >> 9);
-
-	if (node->rb.rb_left) {
-		sector_t left = interval_end(node->rb.rb_left);
-		if (left > max)
-			max = left;
-	}
-	if (node->rb.rb_right) {
-		sector_t right = interval_end(node->rb.rb_right);
-		if (right > max)
-			max = right;
-	}
-	return max;
-}
+#define NODE_END(node) ((node)->sector + ((node)->size >> 9))
 
-RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
-		     sector_t, end, compute_subtree_last);
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+			 struct drbd_interval, rb, sector_t, end, NODE_END);
 
 /**
  * drbd_insert_interval  -  insert a new interval into a tree
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 42b936b6bbf1..6d021ecc8d59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1103,7 +1103,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		alloc_flags = 0;
 		if (!offset || !*offset)
 			return -EINVAL;
-		user_addr = *offset;
+		user_addr = untagged_addr(*offset);
 	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
 			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
 		domain = AMDGPU_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b174bd5eb38e..8ceb44925947 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -291,6 +291,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle;
 	int r;
 
+	args->addr = untagged_addr(args->addr);
+
 	if (offset_in_page(args->addr | args->size))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 4cf58dbbe439..b2b076606f54 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -296,6 +296,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle;
 	int r;
 
+	args->addr = untagged_addr(args->addr);
+
 	if (offset_in_page(args->addr | args->size))
 		return -EINVAL;
 
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 9f53f63b1453..7bff0a1e713d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1041,7 +1041,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	if (cb)
 		iowait_pio_inc(&priv->s_iowait);
 	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
-	if (unlikely(IS_ERR_OR_NULL(pbuf))) {
+	if (IS_ERR_OR_NULL(pbuf)) {
 		if (cb)
 			verbs_pio_complete(qp, 0);
 		if (IS_ERR(pbuf)) {
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 76b4ac7b1678..aeb2f497c683 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -157,6 +157,7 @@ static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
 static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
 					struct videobuf_buffer *vb)
 {
+	unsigned long untagged_baddr = untagged_addr(vb->baddr);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long prev_pfn, this_pfn;
@@ -164,22 +165,22 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
 	unsigned int offset;
 	int ret;
 
-	offset = vb->baddr & ~PAGE_MASK;
+	offset = untagged_baddr & ~PAGE_MASK;
 	mem->size = PAGE_ALIGN(vb->size + offset);
 	ret = -EINVAL;
 
 	down_read(&mm->mmap_sem);
 
-	vma = find_vma(mm, vb->baddr);
+	vma = find_vma(mm, untagged_baddr);
 	if (!vma)
 		goto out_up;
 
-	if ((vb->baddr + mem->size) > vma->vm_end)
+	if ((untagged_baddr + mem->size) > vma->vm_end)
 		goto out_up;
 
 	pages_done = 0;
 	prev_pfn = 0;	/* kill warning */
-	user_address = vb->baddr;
+	user_address = untagged_baddr;
 
 	while (pages_done < (mem->size >> PAGE_SHIFT)) {
 		ret = follow_pfn(vma, user_address, &this_pfn);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index ebd64e083726..1255302e251e 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -654,8 +654,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
 	padding = aligned_size - tx_msg_moved->size;
 	if (padding > 0) {
 		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
-		if (unlikely(WARN_ON(pad_buf == NULL
-				     || pad_buf == TAIL_FULL))) {
+		if (WARN_ON(pad_buf == NULL || pad_buf == TAIL_FULL)) {
 			/* This should not happen -- append should verify
 			 * there is always space left at least to append
 			 * tx_block_size */
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2da026fd12c9..09ddcd06c715 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -254,6 +254,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	shm->teedev = teedev;
 	shm->ctx = ctx;
 	shm->id = -1;
+	addr = untagged_addr(addr);
 	start = rounddown(addr, PAGE_SIZE);
 	shm->offset = addr - start;
 	shm->size = length;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9a50b0558fa9..96fddc1dafc3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -375,6 +375,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 
 	down_read(&mm->mmap_sem);
 
+	vaddr = untagged_addr(vaddr);
+
 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
 	if (vma && vma->vm_flags & VM_PFNMAP) {
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2e8570c09789..6c8843968a52 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -247,7 +247,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
  */
 unsigned int evtchn_from_irq(unsigned irq)
 {
-	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+	if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
 		return 0;
 
 	return info_for_irq(irq)->evtchn;
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 814ad2c2ba80..054acd9fd033 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -88,9 +88,7 @@ static int fat__get_entry(struct inode *dir, loff_t *pos,
 	int err, offset;
 
 next:
-	if (*bh)
-		brelse(*bh);
-
+	brelse(*bh);
 	*bh = NULL;
 	iblock = *pos >> sb->s_blocksize_bits;
 	err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false);
diff --git a/fs/namespace.c b/fs/namespace.c
index b75d458f817d..fe0e9e1410fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3026,7 +3026,7 @@ void *copy_mount_options(const void __user * data)
 	 * the remainder of the page.
 	 */
 	/* copy_from_user cannot cross TASK_SIZE ! */
-	size = TASK_SIZE - (unsigned long)data;
+	size = TASK_SIZE - (unsigned long)untagged_addr(data);
 	if (size > PAGE_SIZE)
 		size = PAGE_SIZE;
 
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 20c841a906f2..3aac5c917afe 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -71,7 +71,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
 	}
 	/* Read, map, and pin the page. */
 	page = ntfs_map_page(mft_vi->i_mapping, index);
-	if (likely(!IS_ERR(page))) {
+	if (!IS_ERR(page)) {
 		/* Catch multi sector transfer fixup errors. */
 		if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
 				ofs)))) {
@@ -154,7 +154,7 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni)
 	mutex_lock(&ni->mrec_lock);
 
 	m = map_mft_record_page(ni);
-	if (likely(!IS_ERR(m)))
+	if (!IS_ERR(m))
 		return m;
 
 	mutex_unlock(&ni->mrec_lock);
@@ -271,7 +271,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
 	m = map_mft_record(ni);
 	/* map_mft_record() has incremented this on success. */
 	atomic_dec(&ni->count);
-	if (likely(!IS_ERR(m))) {
+	if (!IS_ERR(m)) {
 		/* Verify the sequence number. */
 		if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
 			ntfs_debug("Done 1.");
@@ -1303,7 +1303,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
 	read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
 	rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
 			(ll - 1) >> vol->cluster_size_bits, NULL);
-	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+	if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
 		up_write(&mftbmp_ni->runlist.lock);
 		ntfs_error(vol->sb, "Failed to determine last allocated "
 				"cluster of mft bitmap attribute.");
@@ -1734,7 +1734,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
 	read_unlock_irqrestore(&mft_ni->size_lock, flags);
 	rl = ntfs_attr_find_vcn_nolock(mft_ni,
 			(ll - 1) >> vol->cluster_size_bits, NULL);
-	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+	if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
 		up_write(&mft_ni->runlist.lock);
 		ntfs_error(vol->sb, "Failed to determine last allocated "
 				"cluster of mft data attribute.");
@@ -1776,7 +1776,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
 	do {
 		rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
 				true);
-		if (likely(!IS_ERR(rl2)))
+		if (!IS_ERR(rl2))
 			break;
 		if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
 			ntfs_error(vol->sb, "Failed to allocate the minimal "
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 2d3cc9e3395d..4e6a44bc654c 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -115,7 +115,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
 	dent_ino = MREF(mref);
 	ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
 	dent_inode = ntfs_iget(vol->sb, dent_ino);
-	if (likely(!IS_ERR(dent_inode))) {
+	if (!IS_ERR(dent_inode)) {
 		/* Consistency check. */
 		if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
 				NTFS_I(dent_inode)->seq_no ||
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 508744a93180..97932fb5179c 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -951,7 +951,7 @@ mpa_err:
 	}
 	/* Now combine the new and old runlists checking for overlaps. */
 	old_rl = ntfs_runlists_merge(old_rl, rl);
-	if (likely(!IS_ERR(old_rl)))
+	if (!IS_ERR(old_rl))
 		return old_rl;
 	ntfs_free(rl);
 	ntfs_error(vol->sb, "Failed to merge runlists.");
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29621d40f448..7dc3bc604f78 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1475,7 +1475,7 @@ not_enabled:
 	kfree(name);
 	/* Get the inode. */
 	tmp_ino = ntfs_iget(vol->sb, MREF(mref));
-	if (unlikely(IS_ERR(tmp_ino) || is_bad_inode(tmp_ino))) {
+	if (IS_ERR(tmp_ino) || unlikely(is_bad_inode(tmp_ino))) {
 		if (!IS_ERR(tmp_ino))
 			iput(tmp_ino);
 		ntfs_error(vol->sb, "Failed to load $UsnJrnl.");
diff --git a/fs/open.c b/fs/open.c
index c60cd22cc052..b62f5c0923a8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -776,7 +776,7 @@ static int do_dentry_open(struct file *f,
 		f->f_mode |= FMODE_ATOMIC_POS;
 
 	f->f_op = fops_get(inode->i_fop);
-	if (unlikely(WARN_ON(!f->f_op))) {
+	if (WARN_ON(!f->f_op)) {
 		error = -ENODEV;
 		goto cleanup_all;
 	}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 9c02d96d3a42..4075e41408b4 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -239,10 +239,8 @@ static int balance_leaf_when_delete_left(struct tree_balance *tb)
 static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
-	int item_pos = PATH_LAST_POSITION(tb->tb_path);
 	struct buffer_info bi;
 	int n;
-	struct item_head *ih;
 
 	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
 	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
@@ -251,7 +249,6 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
 	       "PAP-12010: tree can not be empty");
 
-	ih = item_head(tbS0, item_pos);
 	buffer_info_init_tbS0(tb, &bi);
 
 	/* Delete or truncate the item */
@@ -298,7 +295,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
 		/* part of new item falls into L[0] */
 		int new_item_len, shift;
-		int version;
 
 		ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
 
@@ -317,8 +313,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
 		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
 			     min_t(int, tb->zeroes_num, ih_item_len(ih)));
 
-		version = ih_version(ih);
-
 		/*
 		 * Calculate key component, item length and body to
 		 * insert into S[0]
@@ -632,7 +626,6 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
 	struct buffer_info bi;
-	int ret;
 
 	/* new item or part of it doesn't fall into R[0] */
 	if (n - tb->rnum[0] >= tb->item_pos) {
@@ -646,13 +639,11 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
 	if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
 		loff_t old_key_comp, old_len, r_zeroes_number;
 		const char *r_body;
-		int version, shift;
+		int shift;
 		loff_t offset;
 
 		leaf_shift_right(tb, tb->rnum[0] - 1, -1);
 
-		version = ih_version(ih);
-
 		/* Remember key component and item length */
 		old_key_comp = le_ih_k_offset(ih);
 		old_len = ih_item_len(ih);
@@ -698,7 +689,7 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
 	/* whole new item falls into R[0] */
 
 	/* Shift rnum[0]-1 items to R[0] */
-	ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
+	leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
 
 	/* Insert new item into R[0] */
 	buffer_info_init_right(tb, &bi);
@@ -950,14 +941,12 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
 	if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
 		int old_key_comp, old_len, r_zeroes_number;
 		const char *r_body;
-		int version;
 
 		/* Move snum[i]-1 items from S[0] to S_new[i] */
 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
 				tb->S_new[i]);
 
 		/* Remember key component and item length */
-		version = ih_version(ih);
 		old_key_comp = le_ih_k_offset(ih);
 		old_len = ih_item_len(ih);
 
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 6b0ddb2a9091..117092224111 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -376,7 +376,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
 		       int to, int to_bytes, short *snum012, int flow)
 {
 	int i;
-	int cur_free;
 	int units;
 	struct virtual_node *vn = tb->tb_vn;
 	int total_node_size, max_node_size, current_item_size;
@@ -438,7 +437,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
 	/* leaf level */
 	needed_nodes = 1;
 	total_node_size = 0;
-	cur_free = max_node_size;
 
 	/* start from 'from'-th item */
 	start_item = from;
@@ -1734,14 +1732,12 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
 	 * and Fh is its father.
 	 */
 	struct buffer_head *Sh, *Fh;
-	int maxsize, ret;
+	int ret;
 	int lfree, rfree /* free space in L and R */ ;
 
 	Sh = PATH_H_PBUFFER(tb->tb_path, h);
 	Fh = PATH_H_PPARENT(tb->tb_path, h);
 
-	maxsize = MAX_CHILD_SIZE(Sh);
-
 	/*
 	 * using tb->insert_size[h], which is negative in this case,
 	 * create_virtual_node calculates:
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 4517a1394c6f..4b3e3e73b512 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -891,7 +891,6 @@ static int flush_older_commits(struct super_block *s,
891 struct list_head *entry; 891 struct list_head *entry;
892 unsigned int trans_id = jl->j_trans_id; 892 unsigned int trans_id = jl->j_trans_id;
893 unsigned int other_trans_id; 893 unsigned int other_trans_id;
894 unsigned int first_trans_id;
895 894
896find_first: 895find_first:
897 /* 896 /*
@@ -914,8 +913,6 @@ find_first:
914 return 0; 913 return 0;
915 } 914 }
916 915
917 first_trans_id = first_jl->j_trans_id;
918
919 entry = &first_jl->j_list; 916 entry = &first_jl->j_list;
920 while (1) { 917 while (1) {
921 other_jl = JOURNAL_LIST_ENTRY(entry); 918 other_jl = JOURNAL_LIST_ENTRY(entry);
@@ -1351,7 +1348,7 @@ static int flush_journal_list(struct super_block *s,
1351 struct reiserfs_journal_list *jl, int flushall) 1348 struct reiserfs_journal_list *jl, int flushall)
1352{ 1349{
1353 struct reiserfs_journal_list *pjl; 1350 struct reiserfs_journal_list *pjl;
1354 struct reiserfs_journal_cnode *cn, *last; 1351 struct reiserfs_journal_cnode *cn;
1355 int count; 1352 int count;
1356 int was_jwait = 0; 1353 int was_jwait = 0;
1357 int was_dirty = 0; 1354 int was_dirty = 0;
@@ -1509,7 +1506,6 @@ static int flush_journal_list(struct super_block *s,
1509 b_blocknr, __func__); 1506 b_blocknr, __func__);
1510 } 1507 }
1511free_cnode: 1508free_cnode:
1512 last = cn;
1513 cn = cn->next; 1509 cn = cn->next;
1514 if (saved_bh) { 1510 if (saved_bh) {
1515 /* 1511 /*
@@ -1792,7 +1788,6 @@ static int flush_used_journal_lists(struct super_block *s,
1792{ 1788{
1793 unsigned long len = 0; 1789 unsigned long len = 0;
1794 unsigned long cur_len; 1790 unsigned long cur_len;
1795 int ret;
1796 int i; 1791 int i;
1797 int limit = 256; 1792 int limit = 256;
1798 struct reiserfs_journal_list *tjl; 1793 struct reiserfs_journal_list *tjl;
@@ -1829,9 +1824,9 @@ static int flush_used_journal_lists(struct super_block *s,
1829 * transactions, but only bother if we've actually spanned 1824 * transactions, but only bother if we've actually spanned
1830 * across multiple lists 1825 * across multiple lists
1831 */ 1826 */
1832 if (flush_jl != jl) { 1827 if (flush_jl != jl)
1833 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); 1828 kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1834 } 1829
1835 flush_journal_list(s, flush_jl, 1); 1830 flush_journal_list(s, flush_jl, 1);
1836 put_journal_list(s, flush_jl); 1831 put_journal_list(s, flush_jl);
1837 put_journal_list(s, jl); 1832 put_journal_list(s, jl);
@@ -1911,7 +1906,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1911 struct super_block *sb, int error) 1906 struct super_block *sb, int error)
1912{ 1907{
1913 struct reiserfs_transaction_handle myth; 1908 struct reiserfs_transaction_handle myth;
1914 int flushed = 0;
1915 struct reiserfs_journal *journal = SB_JOURNAL(sb); 1909 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1916 1910
1917 /* 1911 /*
@@ -1933,7 +1927,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1933 1); 1927 1);
1934 journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb)); 1928 journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
1935 do_journal_end(&myth, FLUSH_ALL); 1929 do_journal_end(&myth, FLUSH_ALL);
1936 flushed = 1;
1937 } 1930 }
1938 } 1931 }
1939 1932
@@ -3444,9 +3437,8 @@ static int remove_from_transaction(struct super_block *sb,
3444 if (cn == journal->j_last) { 3437 if (cn == journal->j_last) {
3445 journal->j_last = cn->prev; 3438 journal->j_last = cn->prev;
3446 } 3439 }
3447 if (bh) 3440 remove_journal_hash(sb, journal->j_hash_table, NULL,
3448 remove_journal_hash(sb, journal->j_hash_table, NULL, 3441 bh->b_blocknr, 0);
3449 bh->b_blocknr, 0);
3450 clear_buffer_journaled(bh); /* don't log this one */ 3442 clear_buffer_journaled(bh); /* don't log this one */
3451 3443
3452 if (!already_cleaned) { 3444 if (!already_cleaned) {
@@ -3988,7 +3980,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
3988 struct buffer_head *c_bh; /* commit bh */ 3980 struct buffer_head *c_bh; /* commit bh */
3989 struct buffer_head *d_bh; /* desc bh */ 3981 struct buffer_head *d_bh; /* desc bh */
3990 int cur_write_start = 0; /* start index of current log write */ 3982 int cur_write_start = 0; /* start index of current log write */
3991 int old_start;
3992 int i; 3983 int i;
3993 int flush; 3984 int flush;
3994 int wait_on_commit; 3985 int wait_on_commit;
@@ -4245,7 +4236,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
4245 journal->j_num_work_lists++; 4236 journal->j_num_work_lists++;
4246 4237
4247 /* reset journal values for the next transaction */ 4238 /* reset journal values for the next transaction */
4248 old_start = journal->j_start;
4249 journal->j_start = 4239 journal->j_start =
4250 (journal->j_start + journal->j_len + 4240 (journal->j_start + journal->j_len +
4251 2) % SB_ONDISK_JOURNAL_SIZE(sb); 4241 2) % SB_ONDISK_JOURNAL_SIZE(sb);
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index f5cebd70d903..7f868569d4d0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -1322,7 +1322,7 @@ void leaf_paste_entries(struct buffer_info *bi,
1322 char *item; 1322 char *item;
1323 struct reiserfs_de_head *deh; 1323 struct reiserfs_de_head *deh;
1324 char *insert_point; 1324 char *insert_point;
1325 int i, old_entry_num; 1325 int i;
1326 struct buffer_head *bh = bi->bi_bh; 1326 struct buffer_head *bh = bi->bi_bh;
1327 1327
1328 if (new_entry_count == 0) 1328 if (new_entry_count == 0)
@@ -1362,7 +1362,6 @@ void leaf_paste_entries(struct buffer_info *bi,
1362 put_deh_location(&deh[i], 1362 put_deh_location(&deh[i],
1363 deh_location(&deh[i]) + paste_size); 1363 deh_location(&deh[i]) + paste_size);
1364 1364
1365 old_entry_num = ih_entry_count(ih);
1366 put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count); 1365 put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count);
1367 1366
1368 /* prepare space for pasted records */ 1367 /* prepare space for pasted records */
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index 415d66ca87d1..34baf5c0f265 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -183,13 +183,12 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
183 int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2; 183 int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
184 int old_max = sb_oid_maxsize(disk_sb); 184 int old_max = sb_oid_maxsize(disk_sb);
185 struct reiserfs_super_block_v1 *disk_sb_v1; 185 struct reiserfs_super_block_v1 *disk_sb_v1;
186 __le32 *objectid_map, *new_objectid_map; 186 __le32 *objectid_map;
187 int i; 187 int i;
188 188
189 disk_sb_v1 = 189 disk_sb_v1 =
190 (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data); 190 (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
191 objectid_map = (__le32 *) (disk_sb_v1 + 1); 191 objectid_map = (__le32 *) (disk_sb_v1 + 1);
192 new_objectid_map = (__le32 *) (disk_sb + 1);
193 192
194 if (cur_size > new_size) { 193 if (cur_size > new_size) {
195 /* 194 /*
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 9fed1c05f1f4..500f2000eb41 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -746,9 +746,6 @@ static void check_leaf_block_head(struct buffer_head *bh)
746 746
747static void check_internal_block_head(struct buffer_head *bh) 747static void check_internal_block_head(struct buffer_head *bh)
748{ 748{
749 struct block_head *blkh;
750
751 blkh = B_BLK_HEAD(bh);
752 if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT)) 749 if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
753 reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh); 750 reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
754 751
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 0037aea97d39..da9ebe33882b 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -593,7 +593,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
593 struct buffer_head *bh; 593 struct buffer_head *bh;
594 struct path_element *last_element; 594 struct path_element *last_element;
595 int node_level, retval; 595 int node_level, retval;
596 int right_neighbor_of_leaf_node;
597 int fs_gen; 596 int fs_gen;
598 struct buffer_head *reada_bh[SEARCH_BY_KEY_READA]; 597 struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
599 b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA]; 598 b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
@@ -614,8 +613,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
614 613
615 pathrelse(search_path); 614 pathrelse(search_path);
616 615
617 right_neighbor_of_leaf_node = 0;
618
619 /* 616 /*
620 * With each iteration of this loop we search through the items in the 617 * With each iteration of this loop we search through the items in the
621 * current node, and calculate the next current node(next path element) 618 * current node, and calculate the next current node(next path element)
@@ -701,7 +698,6 @@ io_error:
701 */ 698 */
702 block_number = SB_ROOT_BLOCK(sb); 699 block_number = SB_ROOT_BLOCK(sb);
703 expected_level = -1; 700 expected_level = -1;
704 right_neighbor_of_leaf_node = 0;
705 701
706 /* repeat search from the root */ 702 /* repeat search from the root */
707 continue; 703 continue;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index fe6d804a38dc..f9fd18670e22 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1272,21 +1272,23 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1272} 1272}
1273 1273
1274static __always_inline int validate_range(struct mm_struct *mm, 1274static __always_inline int validate_range(struct mm_struct *mm,
1275 __u64 start, __u64 len) 1275 __u64 *start, __u64 len)
1276{ 1276{
1277 __u64 task_size = mm->task_size; 1277 __u64 task_size = mm->task_size;
1278 1278
1279 if (start & ~PAGE_MASK) 1279 *start = untagged_addr(*start);
1280
1281 if (*start & ~PAGE_MASK)
1280 return -EINVAL; 1282 return -EINVAL;
1281 if (len & ~PAGE_MASK) 1283 if (len & ~PAGE_MASK)
1282 return -EINVAL; 1284 return -EINVAL;
1283 if (!len) 1285 if (!len)
1284 return -EINVAL; 1286 return -EINVAL;
1285 if (start < mmap_min_addr) 1287 if (*start < mmap_min_addr)
1286 return -EINVAL; 1288 return -EINVAL;
1287 if (start >= task_size) 1289 if (*start >= task_size)
1288 return -EINVAL; 1290 return -EINVAL;
1289 if (len > task_size - start) 1291 if (len > task_size - *start)
1290 return -EINVAL; 1292 return -EINVAL;
1291 return 0; 1293 return 0;
1292} 1294}
@@ -1336,7 +1338,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1336 goto out; 1338 goto out;
1337 } 1339 }
1338 1340
1339 ret = validate_range(mm, uffdio_register.range.start, 1341 ret = validate_range(mm, &uffdio_register.range.start,
1340 uffdio_register.range.len); 1342 uffdio_register.range.len);
1341 if (ret) 1343 if (ret)
1342 goto out; 1344 goto out;
@@ -1525,7 +1527,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1525 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) 1527 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
1526 goto out; 1528 goto out;
1527 1529
1528 ret = validate_range(mm, uffdio_unregister.start, 1530 ret = validate_range(mm, &uffdio_unregister.start,
1529 uffdio_unregister.len); 1531 uffdio_unregister.len);
1530 if (ret) 1532 if (ret)
1531 goto out; 1533 goto out;
@@ -1676,7 +1678,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1676 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) 1678 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
1677 goto out; 1679 goto out;
1678 1680
1679 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); 1681 ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
1680 if (ret) 1682 if (ret)
1681 goto out; 1683 goto out;
1682 1684
@@ -1716,7 +1718,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1716 sizeof(uffdio_copy)-sizeof(__s64))) 1718 sizeof(uffdio_copy)-sizeof(__s64)))
1717 goto out; 1719 goto out;
1718 1720
1719 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); 1721 ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
1720 if (ret) 1722 if (ret)
1721 goto out; 1723 goto out;
1722 /* 1724 /*
@@ -1772,7 +1774,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1772 sizeof(uffdio_zeropage)-sizeof(__s64))) 1774 sizeof(uffdio_zeropage)-sizeof(__s64)))
1773 goto out; 1775 goto out;
1774 1776
1775 ret = validate_range(ctx->mm, uffdio_zeropage.range.start, 1777 ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
1776 uffdio_zeropage.range.len); 1778 uffdio_zeropage.range.len);
1777 if (ret) 1779 if (ret)
1778 goto out; 1780 goto out;
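
Note: validate_range() now takes the start address by pointer and strips any arm64
tagged-address bits via untagged_addr() before the alignment and range checks,
matching the other untagging patches in this series. A minimal sketch of the
pattern (untagged_addr() is the real helper; the wrapper function is illustrative):

	/* Illustrative helper, not kernel code: untag first, then validate. */
	static int check_user_range(struct mm_struct *mm, __u64 *start, __u64 len)
	{
		*start = untagged_addr(*start);		/* strip tag bits */

		if (*start & ~PAGE_MASK)		/* page alignment */
			return -EINVAL;
		if (!len || (len & ~PAGE_MASK))
			return -EINVAL;
		if (*start >= mm->task_size || len > mm->task_size - *start)
			return -EINVAL;
		return 0;	/* caller keeps using the untagged value */
	}
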
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 120ef99d09e8..21c243622a79 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -2097,7 +2097,7 @@ xfs_verify_magic(
2097 int idx; 2097 int idx;
2098 2098
2099 idx = xfs_sb_version_hascrc(&mp->m_sb); 2099 idx = xfs_sb_version_hascrc(&mp->m_sb);
2100 if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))) 2100 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2101 return false; 2101 return false;
2102 return dmagic == bp->b_ops->magic[idx]; 2102 return dmagic == bp->b_ops->magic[idx];
2103} 2103}
@@ -2115,7 +2115,7 @@ xfs_verify_magic16(
2115 int idx; 2115 int idx;
2116 2116
2117 idx = xfs_sb_version_hascrc(&mp->m_sb); 2117 idx = xfs_sb_version_hascrc(&mp->m_sb);
2118 if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))) 2118 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2119 return false; 2119 return false;
2120 return dmagic == bp->b_ops->magic16[idx]; 2120 return dmagic == bp->b_ops->magic16[idx];
2121} 2121}
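
Note: the unlikely() removals above (and the other "remove unlikely()" patches in
this series) work because WARN_ON() and friends already wrap their condition in
unlikely() internally, so the outer hint was redundant. A condensed sketch of the
generic definition (name shortened; see include/asm-generic/bug.h):

	#define MY_WARN_ON(condition) ({			\
		int __ret_warn_on = !!(condition);		\
		if (unlikely(__ret_warn_on))			\
			__WARN();				\
		unlikely(__ret_warn_on);  /* hint built in */	\
	})

	/* Redundant:  if (unlikely(WARN_ON(!bp->b_ops))) return false; */
	/* Preferred:  if (WARN_ON(!bp->b_ops))           return false; */
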
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7357a3c942a0..384b5c835ced 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
10#define BUGFLAG_WARNING (1 << 0) 10#define BUGFLAG_WARNING (1 << 0)
11#define BUGFLAG_ONCE (1 << 1) 11#define BUGFLAG_ONCE (1 << 1)
12#define BUGFLAG_DONE (1 << 2) 12#define BUGFLAG_DONE (1 << 2)
13#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
13#define BUGFLAG_TAINT(taint) ((taint) << 8) 14#define BUGFLAG_TAINT(taint) ((taint) << 8)
14#define BUG_GET_TAINT(bug) ((bug)->flags >> 8) 15#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
15#endif 16#endif
@@ -61,18 +62,6 @@ struct bug_entry {
61#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) 62#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
62#endif 63#endif
63 64
64#ifdef __WARN_FLAGS
65#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
66#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
67
68#define WARN_ON_ONCE(condition) ({ \
69 int __ret_warn_on = !!(condition); \
70 if (unlikely(__ret_warn_on)) \
71 __WARN_ONCE_TAINT(TAINT_WARN); \
72 unlikely(__ret_warn_on); \
73})
74#endif
75
76/* 65/*
77 * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report 66 * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
78 * significant kernel issues that need prompt attention if they should ever 67 * significant kernel issues that need prompt attention if they should ever
@@ -89,27 +78,27 @@ struct bug_entry {
89 * 78 *
90 * Use the versions with printk format strings to provide better diagnostics. 79 * Use the versions with printk format strings to provide better diagnostics.
91 */ 80 */
92#ifndef __WARN_TAINT 81#ifndef __WARN_FLAGS
93extern __printf(3, 4)
94void warn_slowpath_fmt(const char *file, const int line,
95 const char *fmt, ...);
96extern __printf(4, 5) 82extern __printf(4, 5)
97void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, 83void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
98 const char *fmt, ...); 84 const char *fmt, ...);
99extern void warn_slowpath_null(const char *file, const int line); 85#define __WARN() __WARN_printf(TAINT_WARN, NULL)
100#define WANT_WARN_ON_SLOWPATH 86#define __WARN_printf(taint, arg...) \
101#define __WARN() warn_slowpath_null(__FILE__, __LINE__) 87 warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
102#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
103#define __WARN_printf_taint(taint, arg...) \
104 warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
105#else 88#else
106extern __printf(1, 2) void __warn_printk(const char *fmt, ...); 89extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
107#define __WARN() do { \ 90#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
108 printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \ 91#define __WARN_printf(taint, arg...) do { \
109} while (0) 92 __warn_printk(arg); \
110#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg) 93 __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
111#define __WARN_printf_taint(taint, arg...) \ 94 } while (0)
112 do { __warn_printk(arg); __WARN_TAINT(taint); } while (0) 95#define WARN_ON_ONCE(condition) ({ \
96 int __ret_warn_on = !!(condition); \
97 if (unlikely(__ret_warn_on)) \
98 __WARN_FLAGS(BUGFLAG_ONCE | \
99 BUGFLAG_TAINT(TAINT_WARN)); \
100 unlikely(__ret_warn_on); \
101})
113#endif 102#endif
114 103
115/* used internally by panic.c */ 104/* used internally by panic.c */
@@ -132,7 +121,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
132#define WARN(condition, format...) ({ \ 121#define WARN(condition, format...) ({ \
133 int __ret_warn_on = !!(condition); \ 122 int __ret_warn_on = !!(condition); \
134 if (unlikely(__ret_warn_on)) \ 123 if (unlikely(__ret_warn_on)) \
135 __WARN_printf(format); \ 124 __WARN_printf(TAINT_WARN, format); \
136 unlikely(__ret_warn_on); \ 125 unlikely(__ret_warn_on); \
137}) 126})
138#endif 127#endif
@@ -140,7 +129,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
140#define WARN_TAINT(condition, taint, format...) ({ \ 129#define WARN_TAINT(condition, taint, format...) ({ \
141 int __ret_warn_on = !!(condition); \ 130 int __ret_warn_on = !!(condition); \
142 if (unlikely(__ret_warn_on)) \ 131 if (unlikely(__ret_warn_on)) \
143 __WARN_printf_taint(taint, format); \ 132 __WARN_printf(taint, format); \
144 unlikely(__ret_warn_on); \ 133 unlikely(__ret_warn_on); \
145}) 134})
146 135
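
Note: after this refactor both configurations share one __WARN_printf(taint, ...)
entry point, and the warn_slowpath_fmt_taint()/warn_slowpath_null() variants
disappear. Roughly, WARN(cond, fmt, ...) now expands along two paths (simplified
from the hunk above):

	/* Generic slow path (no __WARN_FLAGS): */
	if (unlikely(cond))
		warn_slowpath_fmt(__FILE__, __LINE__, TAINT_WARN, fmt, ...);

	/* Arch trap-based path (__WARN_FLAGS defined): */
	if (unlikely(cond)) {
		__warn_printk(fmt, ...);
		__WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(TAINT_WARN));
	}
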
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 6f8cc06ee44e..73f7421413cb 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
49 * @mm: the mm_struct of the current context 49 * @mm: the mm_struct of the current context
50 * @gfp: GFP flags to use for the allocation 50 * @gfp: GFP flags to use for the allocation
51 * 51 *
52 * Allocates a page and runs the pgtable_page_ctor(). 52 * Allocates a page and runs the pgtable_pte_page_ctor().
53 * 53 *
54 * This function is intended for architectures that need 54 * This function is intended for architectures that need
55 * anything beyond simple page allocation or must have custom GFP flags. 55 * anything beyond simple page allocation or must have custom GFP flags.
@@ -63,7 +63,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
63 pte = alloc_page(gfp); 63 pte = alloc_page(gfp);
64 if (!pte) 64 if (!pte)
65 return NULL; 65 return NULL;
66 if (!pgtable_page_ctor(pte)) { 66 if (!pgtable_pte_page_ctor(pte)) {
67 __free_page(pte); 67 __free_page(pte);
68 return NULL; 68 return NULL;
69 } 69 }
@@ -76,7 +76,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
76 * pte_alloc_one - allocate a page for PTE-level user page table 76 * pte_alloc_one - allocate a page for PTE-level user page table
77 * @mm: the mm_struct of the current context 77 * @mm: the mm_struct of the current context
78 * 78 *
79 * Allocates a page and runs the pgtable_page_ctor(). 79 * Allocates a page and runs the pgtable_pte_page_ctor().
80 * 80 *
81 * Return: `struct page` initialized as page table or %NULL on error 81 * Return: `struct page` initialized as page table or %NULL on error
82 */ 82 */
@@ -98,7 +98,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
98 */ 98 */
99static inline void pte_free(struct mm_struct *mm, struct page *pte_page) 99static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
100{ 100{
101 pgtable_page_dtor(pte_page); 101 pgtable_pte_page_dtor(pte_page);
102 __free_page(pte_page); 102 __free_page(pte_page);
103} 103}
104 104
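
Note: the rename makes explicit that these ctor/dtor helpers apply to PTE-level
page tables only (higher levels have separate accounting). A hedged sketch of how
an architecture might build on the generic helpers; the arch names are
illustrative, and __pte_alloc_one() already calls pgtable_pte_page_ctor() and
cleans up on failure:

	static inline pgtable_t my_arch_pte_alloc_one(struct mm_struct *mm)
	{
		/* GFP_PGTABLE_USER is what the generic pte_alloc_one() uses;
		 * an arch needing custom flags would OR them in here. */
		return __pte_alloc_one(mm, GFP_PGTABLE_USER);
	}

	static inline void my_arch_pte_free(struct mm_struct *mm, struct page *pte)
	{
		pgtable_pte_page_dtor(pte);	/* undo ctor before freeing */
		__free_page(pte);
	}
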
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b5a5a1ed9efd..78a73eba64dd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -200,8 +200,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
200 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 200 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
201#define for_each_cpu_wrap(cpu, mask, start) \ 201#define for_each_cpu_wrap(cpu, mask, start) \
202 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) 202 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
203#define for_each_cpu_and(cpu, mask, and) \ 203#define for_each_cpu_and(cpu, mask1, mask2) \
204 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) 204 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
205#else 205#else
206/** 206/**
207 * cpumask_first - get the first cpu in a cpumask 207 * cpumask_first - get the first cpu in a cpumask
@@ -290,20 +290,20 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
290/** 290/**
291 * for_each_cpu_and - iterate over every cpu in both masks 291 * for_each_cpu_and - iterate over every cpu in both masks
292 * @cpu: the (optionally unsigned) integer iterator 292 * @cpu: the (optionally unsigned) integer iterator
293 * @mask: the first cpumask pointer 293 * @mask1: the first cpumask pointer
294 * @and: the second cpumask pointer 294 * @mask2: the second cpumask pointer
295 * 295 *
296 * This saves a temporary CPU mask in many places. It is equivalent to: 296 * This saves a temporary CPU mask in many places. It is equivalent to:
297 * struct cpumask tmp; 297 * struct cpumask tmp;
298 * cpumask_and(&tmp, &mask, &and); 298 * cpumask_and(&tmp, &mask1, &mask2);
299 * for_each_cpu(cpu, &tmp) 299 * for_each_cpu(cpu, &tmp)
300 * ... 300 * ...
301 * 301 *
302 * After the loop, cpu is >= nr_cpu_ids. 302 * After the loop, cpu is >= nr_cpu_ids.
303 */ 303 */
304#define for_each_cpu_and(cpu, mask, and) \ 304#define for_each_cpu_and(cpu, mask1, mask2) \
305 for ((cpu) = -1; \ 305 for ((cpu) = -1; \
306 (cpu) = cpumask_next_and((cpu), (mask), (and)), \ 306 (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
307 (cpu) < nr_cpu_ids;) 307 (cpu) < nr_cpu_ids;)
308#endif /* SMP */ 308#endif /* SMP */
309 309
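
Note: the mask1/mask2 rename is cosmetic, but usage is worth illustrating, since
avoiding the temporary cpumask is the point of the macro ('my_mask' below is
illustrative):

	int cpu;

	/* Visit each CPU set in both masks, no scratch cpumask needed. */
	for_each_cpu_and(cpu, cpu_online_mask, my_mask)
		pr_info("cpu %d online and in my_mask\n", cpu);
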
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 855476145fe1..aaa8a0767aa3 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -30,26 +30,8 @@
30 \ 30 \
31/* Callbacks for augmented rbtree insert and remove */ \ 31/* Callbacks for augmented rbtree insert and remove */ \
32 \ 32 \
33static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ 33RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
34{ \ 34 ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
35 ITTYPE max = ITLAST(node), subtree_last; \
36 if (node->ITRB.rb_left) { \
37 subtree_last = rb_entry(node->ITRB.rb_left, \
38 ITSTRUCT, ITRB)->ITSUBTREE; \
39 if (max < subtree_last) \
40 max = subtree_last; \
41 } \
42 if (node->ITRB.rb_right) { \
43 subtree_last = rb_entry(node->ITRB.rb_right, \
44 ITSTRUCT, ITRB)->ITSUBTREE; \
45 if (max < subtree_last) \
46 max = subtree_last; \
47 } \
48 return max; \
49} \
50 \
51RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
52 ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
53 \ 35 \
54/* Insert / remove interval nodes from the tree */ \ 36/* Insert / remove interval nodes from the tree */ \
55 \ 37 \
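
Note: with the subtree-max walk folded into RB_DECLARE_CALLBACKS_MAX,
INTERVAL_TREE_DEFINE becomes a thin wrapper and its users are unchanged. A hedged
sketch of defining an interval tree ('struct my_node' and 'my_tree' are
illustrative; the parameters follow the usual ITSTRUCT, ITRB, ITTYPE, ITSUBTREE,
ITSTART, ITLAST, ITSTATIC, ITPREFIX order):

	struct my_node {
		struct rb_node rb;
		unsigned long start, last;	/* inclusive endpoints */
		unsigned long __subtree_last;	/* kept up to date for us */
	};

	#define NODE_START(n) ((n)->start)
	#define NODE_LAST(n)  ((n)->last)

	INTERVAL_TREE_DEFINE(struct my_node, rb, unsigned long, __subtree_last,
			     NODE_START, NODE_LAST, static, my_tree)
	/* Generates my_tree_insert(), my_tree_remove(),
	 * my_tree_iter_first(), my_tree_iter_next(). */
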
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0b809258ed3..cc162f3e6461 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -183,6 +183,8 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
183 bool get_value); 183 bool get_value);
184void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); 184void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
185 185
186int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
187 unsigned long buf_len);
186void * __weak arch_kexec_kernel_image_load(struct kimage *image); 188void * __weak arch_kexec_kernel_image_load(struct kimage *image);
187int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, 189int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
188 Elf_Shdr *section, 190 Elf_Shdr *section,
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fbf144aaa749..b072aeb1fd78 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -326,8 +326,10 @@ extern atomic_t kgdb_active;
326 (raw_smp_processor_id() == atomic_read(&kgdb_active)) 326 (raw_smp_processor_id() == atomic_read(&kgdb_active))
327extern bool dbg_is_early; 327extern bool dbg_is_early;
328extern void __init dbg_late_init(void); 328extern void __init dbg_late_init(void);
329extern void kgdb_panic(const char *msg);
329#else /* ! CONFIG_KGDB */ 330#else /* ! CONFIG_KGDB */
330#define in_dbg_master() (0) 331#define in_dbg_master() (0)
331#define dbg_late_init() 332#define dbg_late_init()
333static inline void kgdb_panic(const char *msg) {}
332#endif /* ! CONFIG_KGDB */ 334#endif /* ! CONFIG_KGDB */
333#endif /* _KGDB_H_ */ 335#endif /* _KGDB_H_ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 294a67b94147..cc292273e6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1949,7 +1949,7 @@ static inline void pgtable_init(void)
1949 pgtable_cache_init(); 1949 pgtable_cache_init();
1950} 1950}
1951 1951
1952static inline bool pgtable_page_ctor(struct page *page) 1952static inline bool pgtable_pte_page_ctor(struct page *page)
1953{ 1953{
1954 if (!ptlock_init(page)) 1954 if (!ptlock_init(page))
1955 return false; 1955 return false;
@@ -1958,7 +1958,7 @@ static inline bool pgtable_page_ctor(struct page *page)
1958 return true; 1958 return true;
1959} 1959}
1960 1960
1961static inline void pgtable_page_dtor(struct page *page) 1961static inline void pgtable_pte_page_dtor(struct page *page)
1962{ 1962{
1963 ptlock_free(page); 1963 ptlock_free(page);
1964 __ClearPageTable(page); 1964 __ClearPageTable(page);
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index d7016dcb245e..c1bc6731125c 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -36,6 +36,10 @@ struct vmacache {
36 struct vm_area_struct *vmas[VMACACHE_SIZE]; 36 struct vm_area_struct *vmas[VMACACHE_SIZE];
37}; 37};
38 38
39/*
40 * When updating this, please also update struct resident_page_types[] in
41 * kernel/fork.c
42 */
39enum { 43enum {
40 MM_FILEPAGES, /* Resident file mapping pages */ 44 MM_FILEPAGES, /* Resident file mapping pages */
41 MM_ANONPAGES, /* Resident anonymous pages */ 45 MM_ANONPAGES, /* Resident anonymous pages */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cefd374c47b1..c09d67edda3a 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -488,13 +488,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
488extern void print_hex_dump(const char *level, const char *prefix_str, 488extern void print_hex_dump(const char *level, const char *prefix_str,
489 int prefix_type, int rowsize, int groupsize, 489 int prefix_type, int rowsize, int groupsize,
490 const void *buf, size_t len, bool ascii); 490 const void *buf, size_t len, bool ascii);
491#if defined(CONFIG_DYNAMIC_DEBUG)
492#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
493 dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
494#else
495extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
496 const void *buf, size_t len);
497#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
498#else 491#else
499static inline void print_hex_dump(const char *level, const char *prefix_str, 492static inline void print_hex_dump(const char *level, const char *prefix_str,
500 int prefix_type, int rowsize, int groupsize, 493 int prefix_type, int rowsize, int groupsize,
@@ -526,4 +519,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
526} 519}
527#endif 520#endif
528 521
522/**
523 * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
524 * @prefix_str: string to prefix each line with;
525 * caller supplies trailing spaces for alignment if desired
526 * @prefix_type: controls whether prefix of an offset, address, or none
527 * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
528 * @buf: data blob to dump
529 * @len: number of bytes in the @buf
530 *
531 * Calls print_hex_dump(), with log level of KERN_DEBUG,
532 * rowsize of 16, groupsize of 1, and ASCII output included.
533 */
534#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
535 print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
536
529#endif 537#endif
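
Note: print_hex_dump_bytes() is now always a macro over print_hex_dump_debug(),
so it follows the same dynamic-debug/DEBUG gating as the other debug prints
instead of carrying its own #ifdef. Call sites are unchanged:

	u8 buf[64];

	memset(buf, 0xa5, sizeof(buf));
	/* KERN_DEBUG level, 16 bytes per row, 1-byte groups, ASCII column. */
	print_hex_dump_bytes("raw: ", DUMP_PREFIX_OFFSET, buf, sizeof(buf));
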
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 179faab29f52..fdd421b8d9ae 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,41 +60,87 @@ rb_insert_augmented_cached(struct rb_node *node,
60 rb_insert_augmented(node, &root->rb_root, augment); 60 rb_insert_augmented(node, &root->rb_root, augment);
61} 61}
62 62
63#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ 63/*
64 rbtype, rbaugmented, rbcompute) \ 64 * Template for declaring augmented rbtree callbacks (generic case)
65 *
66 * RBSTATIC: 'static' or empty
67 * RBNAME: name of the rb_augment_callbacks structure
68 * RBSTRUCT: struct type of the tree nodes
69 * RBFIELD: name of struct rb_node field within RBSTRUCT
70 * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
71 * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
72 */
73
74#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
75 RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
65static inline void \ 76static inline void \
66rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ 77RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
67{ \ 78{ \
68 while (rb != stop) { \ 79 while (rb != stop) { \
69 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ 80 RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
70 rbtype augmented = rbcompute(node); \ 81 if (RBCOMPUTE(node, true)) \
71 if (node->rbaugmented == augmented) \
72 break; \ 82 break; \
73 node->rbaugmented = augmented; \ 83 rb = rb_parent(&node->RBFIELD); \
74 rb = rb_parent(&node->rbfield); \
75 } \ 84 } \
76} \ 85} \
77static inline void \ 86static inline void \
78rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ 87RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
79{ \ 88{ \
80 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ 89 RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
81 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ 90 RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
82 new->rbaugmented = old->rbaugmented; \ 91 new->RBAUGMENTED = old->RBAUGMENTED; \
83} \ 92} \
84static void \ 93static void \
85rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ 94RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
86{ \ 95{ \
87 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ 96 RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
88 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ 97 RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
89 new->rbaugmented = old->rbaugmented; \ 98 new->RBAUGMENTED = old->RBAUGMENTED; \
90 old->rbaugmented = rbcompute(old); \ 99 RBCOMPUTE(old, false); \
91} \ 100} \
92rbstatic const struct rb_augment_callbacks rbname = { \ 101RBSTATIC const struct rb_augment_callbacks RBNAME = { \
93 .propagate = rbname ## _propagate, \ 102 .propagate = RBNAME ## _propagate, \
94 .copy = rbname ## _copy, \ 103 .copy = RBNAME ## _copy, \
95 .rotate = rbname ## _rotate \ 104 .rotate = RBNAME ## _rotate \
96}; 105};
97 106
107/*
108 * Template for declaring augmented rbtree callbacks,
109 * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
110 *
111 * RBSTATIC: 'static' or empty
112 * RBNAME: name of the rb_augment_callbacks structure
113 * RBSTRUCT: struct type of the tree nodes
114 * RBFIELD: name of struct rb_node field within RBSTRUCT
115 * RBTYPE: type of the RBAUGMENTED field
116 * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
117 * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
118 */
119
120#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
121 RBTYPE, RBAUGMENTED, RBCOMPUTE) \
122static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
123{ \
124 RBSTRUCT *child; \
125 RBTYPE max = RBCOMPUTE(node); \
126 if (node->RBFIELD.rb_left) { \
127 child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
128 if (child->RBAUGMENTED > max) \
129 max = child->RBAUGMENTED; \
130 } \
131 if (node->RBFIELD.rb_right) { \
132 child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
133 if (child->RBAUGMENTED > max) \
134 max = child->RBAUGMENTED; \
135 } \
136 if (exit && node->RBAUGMENTED == max) \
137 return true; \
138 node->RBAUGMENTED = max; \
139 return false; \
140} \
141RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
142 RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
143
98 144
99#define RB_RED 0 145#define RB_RED 0
100#define RB_BLACK 1 146#define RB_BLACK 1
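
Note: RB_DECLARE_CALLBACKS_MAX generates the whole propagate/copy/rotate callback
set from one per-node scalar function, which is what lets the interval tree and
drbd changes in this series delete their hand-rolled subtree-max helpers. A
hedged direct-use sketch ('struct item' and item_value() are illustrative):

	struct item {
		struct rb_node rb;
		unsigned long value;
		unsigned long subtree_max;	/* max value in this subtree */
	};

	static inline unsigned long item_value(struct item *i)
	{
		return i->value;
	}

	RB_DECLARE_CALLBACKS_MAX(static, item_cb, struct item, rb,
				 unsigned long, subtree_max, item_value)

	/* Insert/erase with rb_insert_augmented(&n->rb, &root, &item_cb)
	 * and rb_erase_augmented(&n->rb, &root, &item_cb). */
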
diff --git a/include/linux/string.h b/include/linux/string.h
index 4deb11f7976b..b2f9df7f0761 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -474,8 +474,9 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
474 * But this can lead to bugs due to typos, or if prefix is a pointer 474 * But this can lead to bugs due to typos, or if prefix is a pointer
475 * and not a constant. Instead use str_has_prefix(). 475 * and not a constant. Instead use str_has_prefix().
476 * 476 *
477 * Returns: 0 if @str does not start with @prefix 477 * Returns:
478 strlen(@prefix) if @str does start with @prefix 478 * * strlen(@prefix) if @str starts with @prefix
479 * * 0 if @str does not start with @prefix
479 */ 480 */
480static __always_inline size_t str_has_prefix(const char *str, const char *prefix) 481static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
481{ 482{
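
Note: besides fixing the kerneldoc markup, it is worth spelling out why the
return value is strlen(@prefix) rather than a bool: a non-zero result doubles as
the offset past the prefix:

	const char *opt = "mode=quiet";	/* illustrative option string */
	size_t len = str_has_prefix(opt, "mode=");

	if (len)
		pr_info("mode is '%s'\n", opt + len);	/* prints "quiet" */
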
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de2c67a33b7e..063c0c1e112b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu);
340extern void lru_add_drain_all(void); 340extern void lru_add_drain_all(void);
341extern void rotate_reclaimable_page(struct page *page); 341extern void rotate_reclaimable_page(struct page *page);
342extern void deactivate_file_page(struct page *page); 342extern void deactivate_file_page(struct page *page);
343extern void deactivate_page(struct page *page);
343extern void mark_page_lazyfree(struct page *page); 344extern void mark_page_lazyfree(struct page *page);
344extern void swap_setup(void); 345extern void swap_setup(void);
345 346
@@ -364,6 +365,7 @@ extern int vm_swappiness;
364extern int remove_mapping(struct address_space *mapping, struct page *page); 365extern int remove_mapping(struct address_space *mapping, struct page *page);
365extern unsigned long vm_total_pages; 366extern unsigned long vm_total_pages;
366 367
368extern unsigned long reclaim_pages(struct list_head *page_list);
367#ifdef CONFIG_NUMA 369#ifdef CONFIG_NUMA
368extern int node_reclaim_mode; 370extern int node_reclaim_mode;
369extern int sysctl_min_unmapped_ratio; 371extern int sysctl_min_unmapped_ratio;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 8d8821b3689a..659a4400517b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
134 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 134 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
135} 135}
136 136
137static __always_inline bool 137static __always_inline __must_check bool
138check_copy_size(const void *addr, size_t bytes, bool is_source) 138check_copy_size(const void *addr, size_t bytes, bool is_source)
139{ 139{
140 int sz = __compiletime_object_size(addr); 140 int sz = __compiletime_object_size(addr);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 34a038563d97..70bbdc38dc37 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -55,7 +55,7 @@
55 * as usual) and both source and destination can trigger faults. 55 * as usual) and both source and destination can trigger faults.
56 */ 56 */
57 57
58static __always_inline unsigned long 58static __always_inline __must_check unsigned long
59__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) 59__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
60{ 60{
61 kasan_check_write(to, n); 61 kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
63 return raw_copy_from_user(to, from, n); 63 return raw_copy_from_user(to, from, n);
64} 64}
65 65
66static __always_inline unsigned long 66static __always_inline __must_check unsigned long
67__copy_from_user(void *to, const void __user *from, unsigned long n) 67__copy_from_user(void *to, const void __user *from, unsigned long n)
68{ 68{
69 might_fault(); 69 might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
85 * The caller should also make sure he pins the user space address 85 * The caller should also make sure he pins the user space address
86 * so that we don't result in page fault and sleep. 86 * so that we don't result in page fault and sleep.
87 */ 87 */
88static __always_inline unsigned long 88static __always_inline __must_check unsigned long
89__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) 89__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
90{ 90{
91 kasan_check_read(from, n); 91 kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
93 return raw_copy_to_user(to, from, n); 93 return raw_copy_to_user(to, from, n);
94} 94}
95 95
96static __always_inline unsigned long 96static __always_inline __must_check unsigned long
97__copy_to_user(void __user *to, const void *from, unsigned long n) 97__copy_to_user(void __user *to, const void *from, unsigned long n)
98{ 98{
99 might_fault(); 99 might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
103} 103}
104 104
105#ifdef INLINE_COPY_FROM_USER 105#ifdef INLINE_COPY_FROM_USER
106static inline unsigned long 106static inline __must_check unsigned long
107_copy_from_user(void *to, const void __user *from, unsigned long n) 107_copy_from_user(void *to, const void __user *from, unsigned long n)
108{ 108{
109 unsigned long res = n; 109 unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
117 return res; 117 return res;
118} 118}
119#else 119#else
120extern unsigned long 120extern __must_check unsigned long
121_copy_from_user(void *, const void __user *, unsigned long); 121_copy_from_user(void *, const void __user *, unsigned long);
122#endif 122#endif
123 123
124#ifdef INLINE_COPY_TO_USER 124#ifdef INLINE_COPY_TO_USER
125static inline unsigned long 125static inline __must_check unsigned long
126_copy_to_user(void __user *to, const void *from, unsigned long n) 126_copy_to_user(void __user *to, const void *from, unsigned long n)
127{ 127{
128 might_fault(); 128 might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
133 return n; 133 return n;
134} 134}
135#else 135#else
136extern unsigned long 136extern __must_check unsigned long
137_copy_to_user(void __user *, const void *, unsigned long); 137_copy_to_user(void __user *, const void *, unsigned long);
138#endif 138#endif
139 139
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
222 222
223#ifndef ARCH_HAS_NOCACHE_UACCESS 223#ifndef ARCH_HAS_NOCACHE_UACCESS
224 224
225static inline unsigned long __copy_from_user_inatomic_nocache(void *to, 225static inline __must_check unsigned long
226 const void __user *from, unsigned long n) 226__copy_from_user_inatomic_nocache(void *to, const void __user *from,
227 unsigned long n)
227{ 228{
228 return __copy_from_user_inatomic(to, from, n); 229 return __copy_from_user_inatomic(to, from, n);
229} 230}
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 3a27335fce2c..c2ce6480b4b1 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template,
66 ), 66 ),
67 67
68 TP_fast_assign( 68 TP_fast_assign(
69 strncpy(__entry->name, 69 strscpy_pad(__entry->name,
70 mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32); 70 mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
71 32);
71 __entry->ino = mapping ? mapping->host->i_ino : 0; 72 __entry->ino = mapping ? mapping->host->i_ino : 0;
72 __entry->index = page->index; 73 __entry->index = page->index;
73 ), 74 ),
@@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
110 struct backing_dev_info *bdi = inode_to_bdi(inode); 111 struct backing_dev_info *bdi = inode_to_bdi(inode);
111 112
112 /* may be called for files on pseudo FSes w/ unregistered bdi */ 113 /* may be called for files on pseudo FSes w/ unregistered bdi */
113 strncpy(__entry->name, 114 strscpy_pad(__entry->name,
114 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); 115 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
115 __entry->ino = inode->i_ino; 116 __entry->ino = inode->i_ino;
116 __entry->state = inode->i_state; 117 __entry->state = inode->i_state;
117 __entry->flags = flags; 118 __entry->flags = flags;
@@ -316,8 +317,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
316 ), 317 ),
317 318
318 TP_fast_assign( 319 TP_fast_assign(
319 strncpy(__entry->name, 320 strscpy_pad(__entry->name,
320 dev_name(inode_to_bdi(inode)->dev), 32); 321 dev_name(inode_to_bdi(inode)->dev), 32);
321 __entry->ino = inode->i_ino; 322 __entry->ino = inode->i_ino;
322 __entry->sync_mode = wbc->sync_mode; 323 __entry->sync_mode = wbc->sync_mode;
323 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); 324 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -360,8 +361,9 @@ DECLARE_EVENT_CLASS(writeback_work_class,
360 __field(unsigned int, cgroup_ino) 361 __field(unsigned int, cgroup_ino)
361 ), 362 ),
362 TP_fast_assign( 363 TP_fast_assign(
363 strncpy(__entry->name, 364 strscpy_pad(__entry->name,
364 wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32); 365 wb->bdi->dev ? dev_name(wb->bdi->dev) :
366 "(unknown)", 32);
365 __entry->nr_pages = work->nr_pages; 367 __entry->nr_pages = work->nr_pages;
366 __entry->sb_dev = work->sb ? work->sb->s_dev : 0; 368 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
367 __entry->sync_mode = work->sync_mode; 369 __entry->sync_mode = work->sync_mode;
@@ -414,7 +416,7 @@ DECLARE_EVENT_CLASS(writeback_class,
414 __field(unsigned int, cgroup_ino) 416 __field(unsigned int, cgroup_ino)
415 ), 417 ),
416 TP_fast_assign( 418 TP_fast_assign(
417 strncpy(__entry->name, dev_name(wb->bdi->dev), 32); 419 strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
418 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); 420 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
419 ), 421 ),
420 TP_printk("bdi %s: cgroup_ino=%u", 422 TP_printk("bdi %s: cgroup_ino=%u",
@@ -436,7 +438,7 @@ TRACE_EVENT(writeback_bdi_register,
436 __array(char, name, 32) 438 __array(char, name, 32)
437 ), 439 ),
438 TP_fast_assign( 440 TP_fast_assign(
439 strncpy(__entry->name, dev_name(bdi->dev), 32); 441 strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
440 ), 442 ),
441 TP_printk("bdi %s", 443 TP_printk("bdi %s",
442 __entry->name 444 __entry->name
@@ -461,7 +463,7 @@ DECLARE_EVENT_CLASS(wbc_class,
461 ), 463 ),
462 464
463 TP_fast_assign( 465 TP_fast_assign(
464 strncpy(__entry->name, dev_name(bdi->dev), 32); 466 strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
465 __entry->nr_to_write = wbc->nr_to_write; 467 __entry->nr_to_write = wbc->nr_to_write;
466 __entry->pages_skipped = wbc->pages_skipped; 468 __entry->pages_skipped = wbc->pages_skipped;
467 __entry->sync_mode = wbc->sync_mode; 469 __entry->sync_mode = wbc->sync_mode;
@@ -512,7 +514,7 @@ TRACE_EVENT(writeback_queue_io,
512 ), 514 ),
513 TP_fast_assign( 515 TP_fast_assign(
514 unsigned long *older_than_this = work->older_than_this; 516 unsigned long *older_than_this = work->older_than_this;
515 strncpy(__entry->name, dev_name(wb->bdi->dev), 32); 517 strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
516 __entry->older = older_than_this ? *older_than_this : 0; 518 __entry->older = older_than_this ? *older_than_this : 0;
517 __entry->age = older_than_this ? 519 __entry->age = older_than_this ?
518 (jiffies - *older_than_this) * 1000 / HZ : -1; 520 (jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -598,7 +600,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
598 ), 600 ),
599 601
600 TP_fast_assign( 602 TP_fast_assign(
601 strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); 603 strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
602 __entry->write_bw = KBps(wb->write_bandwidth); 604 __entry->write_bw = KBps(wb->write_bandwidth);
603 __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); 605 __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
604 __entry->dirty_rate = KBps(dirty_rate); 606 __entry->dirty_rate = KBps(dirty_rate);
@@ -663,7 +665,7 @@ TRACE_EVENT(balance_dirty_pages,
663 665
664 TP_fast_assign( 666 TP_fast_assign(
665 unsigned long freerun = (thresh + bg_thresh) / 2; 667 unsigned long freerun = (thresh + bg_thresh) / 2;
666 strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); 668 strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
667 669
668 __entry->limit = global_wb_domain.dirty_limit; 670 __entry->limit = global_wb_domain.dirty_limit;
669 __entry->setpoint = (global_wb_domain.dirty_limit + 671 __entry->setpoint = (global_wb_domain.dirty_limit +
@@ -723,8 +725,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
723 ), 725 ),
724 726
725 TP_fast_assign( 727 TP_fast_assign(
726 strncpy(__entry->name, 728 strscpy_pad(__entry->name,
727 dev_name(inode_to_bdi(inode)->dev), 32); 729 dev_name(inode_to_bdi(inode)->dev), 32);
728 __entry->ino = inode->i_ino; 730 __entry->ino = inode->i_ino;
729 __entry->state = inode->i_state; 731 __entry->state = inode->i_state;
730 __entry->dirtied_when = inode->dirtied_when; 732 __entry->dirtied_when = inode->dirtied_when;
@@ -797,8 +799,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
797 ), 799 ),
798 800
799 TP_fast_assign( 801 TP_fast_assign(
800 strncpy(__entry->name, 802 strscpy_pad(__entry->name,
801 dev_name(inode_to_bdi(inode)->dev), 32); 803 dev_name(inode_to_bdi(inode)->dev), 32);
802 __entry->ino = inode->i_ino; 804 __entry->ino = inode->i_ino;
803 __entry->state = inode->i_state; 805 __entry->state = inode->i_state;
804 __entry->dirtied_when = inode->dirtied_when; 806 __entry->dirtied_when = inode->dirtied_when;
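
Note: strncpy() does not guarantee NUL termination when the source fills the
buffer, and strlcpy() does not zero the tail; strscpy_pad() always terminates and
zero-fills the remainder, so no stale stack bytes land in these fixed-size trace
fields. In brief:

	char name[32];
	const char *src = "bdi-name";	/* illustrative source */

	/* Returns chars copied (excluding NUL), or -E2BIG on truncation;
	 * the unused tail of 'name' is zeroed either way. */
	strscpy_pad(name, src, sizeof(name));
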
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 63b1f506ea67..c160a5354eb6 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -67,6 +67,9 @@
67#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ 67#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
68#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ 68#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
69 69
70#define MADV_COLD 20 /* deactivate these pages */
71#define MADV_PAGEOUT 21 /* reclaim these pages */
72
70/* compatibility flags */ 73/* compatibility flags */
71#define MAP_FILE 0 74#define MAP_FILE 0
72 75
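
Note: these are the userspace-visible values for the advice introduced earlier in
this series: MADV_COLD deactivates the pages (contents kept, but first in line
for reclaim), MADV_PAGEOUT reclaims them immediately. A userspace sketch
('buf'/'len' are an illustrative page-aligned mapping; the define may be needed
until libc headers catch up):

	#include <sys/mman.h>

	#ifndef MADV_COLD
	#define MADV_COLD 20
	#endif

	if (madvise(buf, len, MADV_COLD) != 0)
		perror("madvise(MADV_COLD)");
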
diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h
index e4a79f80b9a0..ab5c7e847eed 100644
--- a/include/uapi/linux/coff.h
+++ b/include/uapi/linux/coff.h
@@ -11,6 +11,9 @@
11 more information about COFF, then O'Reilly has a very excellent book. 11 more information about COFF, then O'Reilly has a very excellent book.
12*/ 12*/
13 13
14#ifndef _UAPI_LINUX_COFF_H
15#define _UAPI_LINUX_COFF_H
16
14#define E_SYMNMLEN 8 /* Number of characters in a symbol name */ 17#define E_SYMNMLEN 8 /* Number of characters in a symbol name */
15#define E_FILNMLEN 14 /* Number of characters in a file name */ 18#define E_FILNMLEN 14 /* Number of characters in a file name */
16#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */ 19#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */
@@ -350,3 +353,5 @@ struct COFF_reloc {
350 353
351/* For new sections we haven't heard of before */ 354/* For new sections we haven't heard of before */
352#define COFF_DEF_SECTION_ALIGNMENT 4 355#define COFF_DEF_SECTION_ALIGNMENT 4
356
357#endif /* _UAPI_LINUX_COFF_H */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7c15729d9d25..3d920ff15c80 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1240,15 +1240,14 @@ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
1240 1240
1241 /* create the notify skb */ 1241 /* create the notify skb */
1242 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1242 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1243 if (!nc) { 1243 if (!nc)
1244 ret = -ENOMEM; 1244 return -ENOMEM;
1245 goto out; 1245
1246 }
1247 if (copy_from_user(nc->data, 1246 if (copy_from_user(nc->data,
1248 notification->sigev_value.sival_ptr, 1247 notification->sigev_value.sival_ptr,
1249 NOTIFY_COOKIE_LEN)) { 1248 NOTIFY_COOKIE_LEN)) {
1250 ret = -EFAULT; 1249 ret = -EFAULT;
1251 goto out; 1250 goto free_skb;
1252 } 1251 }
1253 1252
1254 /* TODO: add a header? */ 1253 /* TODO: add a header? */
@@ -1264,8 +1263,7 @@ retry:
1264 fdput(f); 1263 fdput(f);
1265 if (IS_ERR(sock)) { 1264 if (IS_ERR(sock)) {
1266 ret = PTR_ERR(sock); 1265 ret = PTR_ERR(sock);
1267 sock = NULL; 1266 goto free_skb;
1268 goto out;
1269 } 1267 }
1270 1268
1271 timeo = MAX_SCHEDULE_TIMEOUT; 1269 timeo = MAX_SCHEDULE_TIMEOUT;
@@ -1274,11 +1272,8 @@ retry:
1274 sock = NULL; 1272 sock = NULL;
1275 goto retry; 1273 goto retry;
1276 } 1274 }
1277 if (ret) { 1275 if (ret)
1278 sock = NULL; 1276 return ret;
1279 nc = NULL;
1280 goto out;
1281 }
1282 } 1277 }
1283 } 1278 }
1284 1279
@@ -1333,7 +1328,8 @@ out_fput:
1333out: 1328out:
1334 if (sock) 1329 if (sock)
1335 netlink_detachskb(sock, nc); 1330 netlink_detachskb(sock, nc);
1336 else if (nc) 1331 else
1332free_skb:
1337 dev_kfree_skb(nc); 1333 dev_kfree_skb(nc);
1338 1334
1339 return ret; 1335 return ret;
diff --git a/ipc/sem.c b/ipc/sem.c
index 7da4504bcc7c..ec97a7072413 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1852,7 +1852,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1852{ 1852{
1853 struct sem_undo *un; 1853 struct sem_undo *un;
1854 1854
1855 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { 1855 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1856 spin_is_locked(&ulp->lock)) {
1856 if (un->semid == semid) 1857 if (un->semid == semid)
1857 return un; 1858 return un;
1858 } 1859 }
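
Note: the new fourth argument is the optional lockdep expression recently added
to list_for_each_entry_rcu(): the walk is legal either under rcu_read_lock() or
with ulp->lock held, and the condition documents the second case so RCU-lockdep
does not complain. The generic pattern ('dev' and handle() are illustrative):

	struct client *c;

	/* Safe under rcu_read_lock() OR with dev->lock held. */
	list_for_each_entry_rcu(c, &dev->clients, node,
				lockdep_is_held(&dev->lock))
		handle(c);
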
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 10f1187b3907..f76d6f77dd5e 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -893,30 +893,25 @@ static struct sysrq_key_op sysrq_dbg_op = {
893}; 893};
894#endif 894#endif
895 895
896static int kgdb_panic_event(struct notifier_block *self, 896void kgdb_panic(const char *msg)
897 unsigned long val,
898 void *data)
899{ 897{
898 if (!kgdb_io_module_registered)
899 return;
900
900 /* 901 /*
901 * Avoid entering the debugger if we were triggered due to a panic 902 * We don't want to get stuck waiting for input from user if
902 * We don't want to get stuck waiting for input from user in such case. 903 * "panic_timeout" indicates the system should automatically
903 * panic_timeout indicates the system should automatically
904 * reboot on panic. 904 * reboot on panic.
905 */ 905 */
906 if (panic_timeout) 906 if (panic_timeout)
907 return NOTIFY_DONE; 907 return;
908 908
909 if (dbg_kdb_mode) 909 if (dbg_kdb_mode)
910 kdb_printf("PANIC: %s\n", (char *)data); 910 kdb_printf("PANIC: %s\n", msg);
911
911 kgdb_breakpoint(); 912 kgdb_breakpoint();
912 return NOTIFY_DONE;
913} 913}
914 914
915static struct notifier_block kgdb_panic_event_nb = {
916 .notifier_call = kgdb_panic_event,
917 .priority = INT_MAX,
918};
919
920void __weak kgdb_arch_late(void) 915void __weak kgdb_arch_late(void)
921{ 916{
922} 917}
@@ -965,8 +960,6 @@ static void kgdb_register_callbacks(void)
965 kgdb_arch_late(); 960 kgdb_arch_late();
966 register_module_notifier(&dbg_module_load_nb); 961 register_module_notifier(&dbg_module_load_nb);
967 register_reboot_notifier(&dbg_reboot_notifier); 962 register_reboot_notifier(&dbg_reboot_notifier);
968 atomic_notifier_chain_register(&panic_notifier_list,
969 &kgdb_panic_event_nb);
970#ifdef CONFIG_MAGIC_SYSRQ 963#ifdef CONFIG_MAGIC_SYSRQ
971 register_sysrq_key('g', &sysrq_dbg_op); 964 register_sysrq_key('g', &sysrq_dbg_op);
972#endif 965#endif
@@ -980,16 +973,14 @@ static void kgdb_register_callbacks(void)
980static void kgdb_unregister_callbacks(void) 973static void kgdb_unregister_callbacks(void)
981{ 974{
982 /* 975 /*
983 * When this routine is called KGDB should unregister from the 976 * When this routine is called KGDB should unregister from
984 * panic handler and clean up, making sure it is not handling any 977 * handlers and clean up, making sure it is not handling any
985 * break exceptions at the time. 978 * break exceptions at the time.
986 */ 979 */
987 if (kgdb_io_module_registered) { 980 if (kgdb_io_module_registered) {
988 kgdb_io_module_registered = 0; 981 kgdb_io_module_registered = 0;
989 unregister_reboot_notifier(&dbg_reboot_notifier); 982 unregister_reboot_notifier(&dbg_reboot_notifier);
990 unregister_module_notifier(&dbg_module_load_nb); 983 unregister_module_notifier(&dbg_module_load_nb);
991 atomic_notifier_chain_unregister(&panic_notifier_list,
992 &kgdb_panic_event_nb);
993 kgdb_arch_exit(); 984 kgdb_arch_exit();
994#ifdef CONFIG_MAGIC_SYSRQ 985#ifdef CONFIG_MAGIC_SYSRQ
995 unregister_sysrq_key('g', &sysrq_dbg_op); 986 unregister_sysrq_key('g', &sysrq_dbg_op);
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8e0bd8..57fb4dcff434 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
3#include <linux/fs.h> 3#include <linux/fs.h>
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/binfmts.h> 5#include <linux/binfmts.h>
6#include <linux/elfcore.h>
6 7
7Elf_Half __weak elf_core_extra_phdrs(void) 8Elf_Half __weak elf_core_extra_phdrs(void)
8{ 9{
diff --git a/kernel/fork.c b/kernel/fork.c
index 5a0fd518e04e..60763c043aa3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -125,6 +125,15 @@ int nr_threads; /* The idle threads do not count.. */
125 125
126static int max_threads; /* tunable limit on nr_threads */ 126static int max_threads; /* tunable limit on nr_threads */
127 127
128#define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)
129
130static const char * const resident_page_types[] = {
131 NAMED_ARRAY_INDEX(MM_FILEPAGES),
132 NAMED_ARRAY_INDEX(MM_ANONPAGES),
133 NAMED_ARRAY_INDEX(MM_SWAPENTS),
134 NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
135};
136
128DEFINE_PER_CPU(unsigned long, process_counts) = 0; 137DEFINE_PER_CPU(unsigned long, process_counts) = 0;
129 138
130__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ 139__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
@@ -645,12 +654,15 @@ static void check_mm(struct mm_struct *mm)
645{ 654{
646 int i; 655 int i;
647 656
657 BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
658 "Please make sure 'struct resident_page_types[]' is updated as well");
659
648 for (i = 0; i < NR_MM_COUNTERS; i++) { 660 for (i = 0; i < NR_MM_COUNTERS; i++) {
649 long x = atomic_long_read(&mm->rss_stat.count[i]); 661 long x = atomic_long_read(&mm->rss_stat.count[i]);
650 662
651 if (unlikely(x)) 663 if (unlikely(x))
652 printk(KERN_ALERT "BUG: Bad rss-counter state " 664 pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
653 "mm:%p idx:%d val:%ld\n", mm, i, x); 665 mm, resident_page_types[i], x);
654 } 666 }
655 667
656 if (mm_pgtables_bytes(mm)) 668 if (mm_pgtables_bytes(mm))
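
NAMED_ARRAY_INDEX(x) expands to a designated initializer whose value is the stringified enumerator, e.g. [MM_ANONPAGES] = "MM_ANONPAGES", so check_mm() can report counter names instead of bare indices. A minimal userspace sketch of the same pattern (__stringify is assumed to be the kernel's usual two-level # macro):

	#include <stdio.h>

	#define __stringify_1(x)	#x
	#define __stringify(x)		__stringify_1(x)
	#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

	enum { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, MM_SHMEMPAGES,
	       NR_MM_COUNTERS };

	static const char * const resident_page_types[] = {
		NAMED_ARRAY_INDEX(MM_FILEPAGES),
		NAMED_ARRAY_INDEX(MM_ANONPAGES),
		NAMED_ARRAY_INDEX(MM_SWAPENTS),
		NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
	};

	int main(void)
	{
		/* prints "MM_SWAPENTS" rather than the bare index 2 */
		printf("%s\n", resident_page_types[MM_SWAPENTS]);
		return 0;
	}
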
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d5870723b8ad..15d70a90b50d 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
300{ 300{
301 struct page *pages; 301 struct page *pages;
302 302
303 if (fatal_signal_pending(current))
304 return NULL;
303 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order); 305 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
304 if (pages) { 306 if (pages) {
305 unsigned int count, i; 307 unsigned int count, i;
diff --git a/kernel/panic.c b/kernel/panic.c
index 057540b6eee9..47e8ebccc22b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -12,6 +12,7 @@
12#include <linux/debug_locks.h> 12#include <linux/debug_locks.h>
13#include <linux/sched/debug.h> 13#include <linux/sched/debug.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/kgdb.h>
15#include <linux/kmsg_dump.h> 16#include <linux/kmsg_dump.h>
16#include <linux/kallsyms.h> 17#include <linux/kallsyms.h>
17#include <linux/notifier.h> 18#include <linux/notifier.h>
@@ -220,6 +221,13 @@ void panic(const char *fmt, ...)
220#endif 221#endif
221 222
222 /* 223 /*
224 * If kgdb is enabled, give it a chance to run before we stop all
225 * the other CPUs or else we won't be able to debug processes left
226 * running on them.
227 */
228 kgdb_panic(buf);
229
230 /*
223 * If we have crashed and we have a crash kernel loaded let it handle 231 * If we have crashed and we have a crash kernel loaded let it handle
224 * everything else. 232 * everything else.
225 * If we want to run this after calling panic_notifiers, pass 233 * If we want to run this after calling panic_notifiers, pass
@@ -551,9 +559,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
551{ 559{
552 disable_trace_on_warning(); 560 disable_trace_on_warning();
553 561
554 if (args)
555 pr_warn(CUT_HERE);
556
557 if (file) 562 if (file)
558 pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n", 563 pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
559 raw_smp_processor_id(), current->pid, file, line, 564 raw_smp_processor_id(), current->pid, file, line,
@@ -591,37 +596,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
591 add_taint(taint, LOCKDEP_STILL_OK); 596 add_taint(taint, LOCKDEP_STILL_OK);
592} 597}
593 598
594#ifdef WANT_WARN_ON_SLOWPATH 599#ifndef __WARN_FLAGS
595void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...) 600void warn_slowpath_fmt(const char *file, int line, unsigned taint,
601 const char *fmt, ...)
596{ 602{
597 struct warn_args args; 603 struct warn_args args;
598 604
599 args.fmt = fmt; 605 pr_warn(CUT_HERE);
600 va_start(args.args, fmt);
601 __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
602 &args);
603 va_end(args.args);
604}
605EXPORT_SYMBOL(warn_slowpath_fmt);
606 606
607void warn_slowpath_fmt_taint(const char *file, int line, 607 if (!fmt) {
608 unsigned taint, const char *fmt, ...) 608 __warn(file, line, __builtin_return_address(0), taint,
609{ 609 NULL, NULL);
610 struct warn_args args; 610 return;
611 }
611 612
612 args.fmt = fmt; 613 args.fmt = fmt;
613 va_start(args.args, fmt); 614 va_start(args.args, fmt);
614 __warn(file, line, __builtin_return_address(0), taint, NULL, &args); 615 __warn(file, line, __builtin_return_address(0), taint, NULL, &args);
615 va_end(args.args); 616 va_end(args.args);
616} 617}
617EXPORT_SYMBOL(warn_slowpath_fmt_taint); 618EXPORT_SYMBOL(warn_slowpath_fmt);
618
619void warn_slowpath_null(const char *file, int line)
620{
621 pr_warn(CUT_HERE);
622 __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
623}
624EXPORT_SYMBOL(warn_slowpath_null);
625#else 619#else
626void __warn_printk(const char *fmt, ...) 620void __warn_printk(const char *fmt, ...)
627{ 621{
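
With the three warn_slowpath_* entry points collapsed into one, the WARN macros are expected to funnel every variant through warn_slowpath_fmt(), passing a NULL format where the old warn_slowpath_null() was used. The <asm-generic/bug.h> side is not in this hunk; a sketch of the assumed caller shape:

	/* sketch -- assumed <asm-generic/bug.h> counterpart, not shown here */
	#define __WARN()	__WARN_printf(TAINT_WARN, NULL)
	#define __WARN_printf(taint, arg...) \
		warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)

A NULL fmt reproduces the old warn_slowpath_null() behaviour: "cut here" is printed, then __warn() runs with no format arguments.
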
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b1b1703a646..93d97f9b0157 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -311,7 +311,7 @@ config HEADERS_CHECK
311 relevant for userspace, say 'Y'. 311 relevant for userspace, say 'Y'.
312 312
313config OPTIMIZE_INLINING 313config OPTIMIZE_INLINING
314 bool "Allow compiler to uninline functions marked 'inline'" 314 def_bool y
315 help 315 help
316 This option determines if the kernel forces gcc to inline the functions 316 This option determines if the kernel forces gcc to inline the functions
317 developers have marked 'inline'. Doing so takes away freedom from gcc to 317 developers have marked 'inline'. Doing so takes away freedom from gcc to
@@ -322,8 +322,6 @@ config OPTIMIZE_INLINING
322 decision will become the default in the future. Until then this option 322 decision will become the default in the future. Until then this option
323 is there to test gcc for this. 323 is there to test gcc for this.
324 324
325 If unsure, say N.
326
327config DEBUG_SECTION_MISMATCH 325config DEBUG_SECTION_MISMATCH
328 bool "Enable full Section mismatch analysis" 326 bool "Enable full Section mismatch analysis"
329 help 327 help
diff --git a/lib/bug.c b/lib/bug.c
index 1077366f496b..8c98af0bf585 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -181,6 +181,15 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
181 } 181 }
182 } 182 }
183 183
184 /*
185 * BUG() and WARN_ON() families don't print a custom debug message
186 * before triggering the exception handler, so we must add the
187 * "cut here" line now. WARN() issues its own "cut here" ahead of the
188 * extra debugging message it writes before triggering the handler.
189 */
190 if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
191 printk(KERN_DEFAULT CUT_HERE);
192
184 if (warning) { 193 if (warning) {
185 /* this is a WARN_ON rather than BUG/BUG_ON */ 194 /* this is a WARN_ON rather than BUG/BUG_ON */
186 __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs, 195 __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
@@ -188,8 +197,6 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
188 return BUG_TRAP_TYPE_WARN; 197 return BUG_TRAP_TYPE_WARN;
189 } 198 }
190 199
191 printk(KERN_DEFAULT CUT_HERE);
192
193 if (file) 200 if (file)
194 pr_crit("kernel BUG at %s:%u!\n", file, line); 201 pr_crit("kernel BUG at %s:%u!\n", file, line);
195 else 202 else
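
BUGFLAG_NO_CUT_HERE lets a bug entry whose reporting path has already emitted the marker (WARN() with a message) suppress the line printed here. Its definition lives in <asm-generic/bug.h>, outside this hunk; assuming it slots in after the existing flags, it would look like:

	/* sketch -- assumed flag, alongside BUGFLAG_WARNING/ONCE/DONE */
	#define BUGFLAG_NO_CUT_HERE	(1 << 3)	/* CUT_HERE already sent */
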
diff --git a/lib/extable.c b/lib/extable.c
index 25da4071122a..c3e59caf7ffa 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -10,6 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/sort.h> 11#include <linux/sort.h>
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/extable.h>
13 14
14#ifndef ARCH_HAS_RELATIVE_EXTABLE 15#ifndef ARCH_HAS_RELATIVE_EXTABLE
15#define ex_to_insn(x) ((x)->insn) 16#define ex_to_insn(x) ((x)->insn)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index a7bafc413730..ae25e2fa2187 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -36,12 +36,12 @@ static inline size_t genradix_depth_size(unsigned depth)
36#define GENRADIX_DEPTH_MASK \ 36#define GENRADIX_DEPTH_MASK \
37 ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) 37 ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
38 38
39unsigned genradix_root_to_depth(struct genradix_root *r) 39static inline unsigned genradix_root_to_depth(struct genradix_root *r)
40{ 40{
41 return (unsigned long) r & GENRADIX_DEPTH_MASK; 41 return (unsigned long) r & GENRADIX_DEPTH_MASK;
42} 42}
43 43
44struct genradix_node *genradix_root_to_node(struct genradix_root *r) 44static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
45{ 45{
46 return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK); 46 return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
47} 47}
diff --git a/lib/hexdump.c b/lib/hexdump.c
index b1d55b669ae2..147133f8eb2f 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -270,25 +270,4 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
270} 270}
271EXPORT_SYMBOL(print_hex_dump); 271EXPORT_SYMBOL(print_hex_dump);
272 272
273#if !defined(CONFIG_DYNAMIC_DEBUG)
274/**
275 * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
276 * @prefix_str: string to prefix each line with;
277 * caller supplies trailing spaces for alignment if desired
278 * @prefix_type: controls whether prefix of an offset, address, or none
279 * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
280 * @buf: data blob to dump
281 * @len: number of bytes in the @buf
282 *
283 * Calls print_hex_dump(), with log level of KERN_DEBUG,
284 * rowsize of 16, groupsize of 1, and ASCII output included.
285 */
286void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
287 const void *buf, size_t len)
288{
289 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
290 buf, len, true);
291}
292EXPORT_SYMBOL(print_hex_dump_bytes);
293#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
294#endif /* defined(CONFIG_PRINTK) */ 273#endif /* defined(CONFIG_PRINTK) */
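
Callers of the removed wrapper can get identical output from print_hex_dump() with the wrapper's old defaults (KERN_DEBUG level, rowsize 16, groupsize 1, ASCII on), assuming no replacement helper is introduced elsewhere in the series:

	/* equivalent of the removed
	 * print_hex_dump_bytes("raw: ", DUMP_PREFIX_OFFSET, buf, len) */
	print_hex_dump(KERN_DEBUG, "raw: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, true);
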
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index ba16c08e8cb9..717c940112f9 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -83,17 +83,19 @@ next:
83 ALIGN((uintptr_t)ir, 4)) && 83 ALIGN((uintptr_t)ir, 4)) &&
84 (ir < limit) && (*ir == 0)) 84 (ir < limit) && (*ir == 0))
85 ir++; 85 ir++;
86 for (; (ir + 4) <= limit; ir += 4) { 86 if (IS_ALIGNED((uintptr_t)ir, 4)) {
87 dv = *((u32 *)ir); 87 for (; (ir + 4) <= limit; ir += 4) {
88 if (dv) { 88 dv = *((u32 *)ir);
89 if (dv) {
89# if defined(__LITTLE_ENDIAN) 90# if defined(__LITTLE_ENDIAN)
90 ir += __builtin_ctz(dv) >> 3; 91 ir += __builtin_ctz(dv) >> 3;
91# elif defined(__BIG_ENDIAN) 92# elif defined(__BIG_ENDIAN)
92 ir += __builtin_clz(dv) >> 3; 93 ir += __builtin_clz(dv) >> 3;
93# else 94# else
94# error "missing endian definition" 95# error "missing endian definition"
95# endif 96# endif
96 break; 97 break;
98 }
97 } 99 }
98 } 100 }
99#endif 101#endif
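
The new IS_ALIGNED() guard matters because the loop body reads *ir as a u32: if the byte-wise scan above stopped on an unaligned pointer, the 4-byte load would be unaligned. For reference, the kernel defines the macro as:

	/* from <linux/kernel.h> */
	#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
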
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 62b8ee92643d..41ae3c7570d3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -77,26 +77,10 @@ static inline void erase_cached(struct test_node *node, struct rb_root_cached *r
77} 77}
78 78
79 79
80static inline u32 augment_recompute(struct test_node *node) 80#define NODE_VAL(node) ((node)->val)
81{
82 u32 max = node->val, child_augmented;
83 if (node->rb.rb_left) {
84 child_augmented = rb_entry(node->rb.rb_left, struct test_node,
85 rb)->augmented;
86 if (max < child_augmented)
87 max = child_augmented;
88 }
89 if (node->rb.rb_right) {
90 child_augmented = rb_entry(node->rb.rb_right, struct test_node,
91 rb)->augmented;
92 if (max < child_augmented)
93 max = child_augmented;
94 }
95 return max;
96}
97 81
98RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb, 82RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
99 u32, augmented, augment_recompute) 83 struct test_node, rb, u32, augmented, NODE_VAL)
100 84
101static void insert_augmented(struct test_node *node, 85static void insert_augmented(struct test_node *node,
102 struct rb_root_cached *root) 86 struct rb_root_cached *root)
@@ -238,7 +222,20 @@ static void check_augmented(int nr_nodes)
238 check(nr_nodes); 222 check(nr_nodes);
239 for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) { 223 for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
240 struct test_node *node = rb_entry(rb, struct test_node, rb); 224 struct test_node *node = rb_entry(rb, struct test_node, rb);
241 WARN_ON_ONCE(node->augmented != augment_recompute(node)); 225 u32 subtree, max = node->val;
226 if (node->rb.rb_left) {
227 subtree = rb_entry(node->rb.rb_left, struct test_node,
228 rb)->augmented;
229 if (max < subtree)
230 max = subtree;
231 }
232 if (node->rb.rb_right) {
233 subtree = rb_entry(node->rb.rb_right, struct test_node,
234 rb)->augmented;
235 if (max < subtree)
236 max = subtree;
237 }
238 WARN_ON_ONCE(node->augmented != max);
242 } 239 }
243} 240}
244 241
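
RB_DECLARE_CALLBACKS_MAX generates the propagate/copy/rotate callbacks and keeps the named field equal to the maximum of the compute expression over each node's subtree -- the same invariant check_augmented() re-verifies by hand above. A hypothetical user, to show the macro's shape:

	/* hypothetical example: track the subtree maximum of a node weight */
	struct wnode {
		struct rb_node	rb;
		u32		weight;
		u32		max_weight;	/* max of weight over subtree */
	};

	#define WNODE_WEIGHT(n)	((n)->weight)

	RB_DECLARE_CALLBACKS_MAX(static, wnode_callbacks,
				 struct wnode, rb, u32, max_weight,
				 WNODE_WEIGHT)

	/* link the node as usual, then:
	 *	rb_insert_augmented(&node->rb, root, &wnode_callbacks);
	 */
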
diff --git a/lib/string.c b/lib/string.c
index 461fb620f85f..cd7a10c19210 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -173,8 +173,9 @@ EXPORT_SYMBOL(strlcpy);
173 * doesn't unnecessarily force the tail of the destination buffer to be 173 * doesn't unnecessarily force the tail of the destination buffer to be
174 * zeroed. If zeroing is desired please use strscpy_pad(). 174 * zeroed. If zeroing is desired please use strscpy_pad().
175 * 175 *
176 * Return: The number of characters copied (not including the trailing 176 * Returns:
177 * %NUL) or -E2BIG if the destination buffer wasn't big enough. 177 * * The number of characters copied (not including the trailing %NUL)
178 * * -E2BIG if count is 0 or @src was truncated.
178 */ 179 */
179ssize_t strscpy(char *dest, const char *src, size_t count) 180ssize_t strscpy(char *dest, const char *src, size_t count)
180{ 181{
@@ -182,7 +183,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
182 size_t max = count; 183 size_t max = count;
183 long res = 0; 184 long res = 0;
184 185
185 if (count == 0) 186 if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
186 return -E2BIG; 187 return -E2BIG;
187 188
188#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 189#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -253,8 +254,9 @@ EXPORT_SYMBOL(strscpy);
253 * For full explanation of why you may want to consider using the 254 * For full explanation of why you may want to consider using the
254 * 'strscpy' functions please see the function docstring for strscpy(). 255 * 'strscpy' functions please see the function docstring for strscpy().
255 * 256 *
256 * Return: The number of characters copied (not including the trailing 257 * Returns:
257 * %NUL) or -E2BIG if the destination buffer wasn't big enough. 258 * * The number of characters copied (not including the trailing %NUL)
259 * * -E2BIG if count is 0 or @src was truncated.
258 */ 260 */
259ssize_t strscpy_pad(char *dest, const char *src, size_t count) 261ssize_t strscpy_pad(char *dest, const char *src, size_t count)
260{ 262{
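
A short sketch of the documented contract (buf and src are hypothetical):

	char buf[16];
	ssize_t n = strscpy(buf, src, sizeof(buf));

	if (n == -E2BIG) {
		/* src did not fit; buf holds a truncated, NUL-terminated copy */
	} else {
		/* n characters were copied, not counting the trailing NUL */
	}
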
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 023ba9f3b99f..dccb95af6003 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -6,6 +6,7 @@
6#include <linux/uaccess.h> 6#include <linux/uaccess.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/mm.h>
9 10
10#include <asm/byteorder.h> 11#include <asm/byteorder.h>
11#include <asm/word-at-a-time.h> 12#include <asm/word-at-a-time.h>
@@ -108,7 +109,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
108 return 0; 109 return 0;
109 110
110 max_addr = user_addr_max(); 111 max_addr = user_addr_max();
111 src_addr = (unsigned long)src; 112 src_addr = (unsigned long)untagged_addr(src);
112 if (likely(src_addr < max_addr)) { 113 if (likely(src_addr < max_addr)) {
113 unsigned long max = max_addr - src_addr; 114 unsigned long max = max_addr - src_addr;
114 long retval; 115 long retval;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 7f2db3fe311f..28ff554a1be8 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -2,6 +2,7 @@
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/export.h> 3#include <linux/export.h>
4#include <linux/uaccess.h> 4#include <linux/uaccess.h>
5#include <linux/mm.h>
5 6
6#include <asm/word-at-a-time.h> 7#include <asm/word-at-a-time.h>
7 8
@@ -109,7 +110,7 @@ long strnlen_user(const char __user *str, long count)
109 return 0; 110 return 0;
110 111
111 max_addr = user_addr_max(); 112 max_addr = user_addr_max();
112 src_addr = (unsigned long)str; 113 src_addr = (unsigned long)untagged_addr(str);
113 if (likely(src_addr < max_addr)) { 114 if (likely(src_addr < max_addr)) {
114 unsigned long max = max_addr - src_addr; 115 unsigned long max = max_addr - src_addr;
115 long retval; 116 long retval;
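
untagged_addr() strips any architecture-defined tag bits (e.g. arm64 top-byte-ignore tags) before the pointer is range-checked against user_addr_max(); without it, a tagged pointer would spuriously fail the src_addr < max_addr test. On architectures without pointer tagging it is expected to be a no-op; a sketch of the assumed generic fallback:

	/* sketch -- assumed <linux/mm.h> fallback; arm64 overrides this
	 * to sign-extend the tag away */
	#ifndef untagged_addr
	#define untagged_addr(addr)	(addr)
	#endif
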
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index c64dca6e27c2..c431ca81dad5 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -46,6 +46,8 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
46 if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) 46 if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
47 nr_frames = vec->nr_allocated; 47 nr_frames = vec->nr_allocated;
48 48
49 start = untagged_addr(start);
50
49 down_read(&mm->mmap_sem); 51 down_read(&mm->mmap_sem);
50 locked = 1; 52 locked = 1;
51 vma = find_vma_intersection(mm, start, start + 1); 53 vma = find_vma_intersection(mm, start, start + 1);
diff --git a/mm/gup.c b/mm/gup.c
index 60c3915c8ee6..23a9f9c9d377 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -788,6 +788,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
788 if (!nr_pages) 788 if (!nr_pages)
789 return 0; 789 return 0;
790 790
791 start = untagged_addr(start);
792
791 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); 793 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
792 794
793 /* 795 /*
@@ -950,6 +952,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
950 struct vm_area_struct *vma; 952 struct vm_area_struct *vma;
951 vm_fault_t ret, major = 0; 953 vm_fault_t ret, major = 0;
952 954
955 address = untagged_addr(address);
956
953 if (unlocked) 957 if (unlocked)
954 fault_flags |= FAULT_FLAG_ALLOW_RETRY; 958 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
955 959
diff --git a/mm/internal.h b/mm/internal.h
index e32390802fd3..0d5f720c75ab 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,7 +39,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
39void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, 39void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
40 unsigned long floor, unsigned long ceiling); 40 unsigned long floor, unsigned long ceiling);
41 41
42static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma) 42static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
43{ 43{
44 return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)); 44 return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
45} 45}
diff --git a/mm/madvise.c b/mm/madvise.c
index 68ab988ad433..2be9f3fdb05e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,6 +11,7 @@
11#include <linux/syscalls.h> 11#include <linux/syscalls.h>
12#include <linux/mempolicy.h> 12#include <linux/mempolicy.h>
13#include <linux/page-isolation.h> 13#include <linux/page-isolation.h>
14#include <linux/page_idle.h>
14#include <linux/userfaultfd_k.h> 15#include <linux/userfaultfd_k.h>
15#include <linux/hugetlb.h> 16#include <linux/hugetlb.h>
16#include <linux/falloc.h> 17#include <linux/falloc.h>
@@ -31,6 +32,11 @@
31 32
32#include "internal.h" 33#include "internal.h"
33 34
35struct madvise_walk_private {
36 struct mmu_gather *tlb;
37 bool pageout;
38};
39
34/* 40/*
35 * Any behaviour which results in changes to the vma->vm_flags needs to 41 * Any behaviour which results in changes to the vma->vm_flags needs to
36 * take mmap_sem for writing. Others, which simply traverse vmas, need 42 * take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -42,6 +48,8 @@ static int madvise_need_mmap_write(int behavior)
42 case MADV_REMOVE: 48 case MADV_REMOVE:
43 case MADV_WILLNEED: 49 case MADV_WILLNEED:
44 case MADV_DONTNEED: 50 case MADV_DONTNEED:
51 case MADV_COLD:
52 case MADV_PAGEOUT:
45 case MADV_FREE: 53 case MADV_FREE:
46 return 0; 54 return 0;
47 default: 55 default:
@@ -289,6 +297,254 @@ static long madvise_willneed(struct vm_area_struct *vma,
289 return 0; 297 return 0;
290} 298}
291 299
300static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
301 unsigned long addr, unsigned long end,
302 struct mm_walk *walk)
303{
304 struct madvise_walk_private *private = walk->private;
305 struct mmu_gather *tlb = private->tlb;
306 bool pageout = private->pageout;
307 struct mm_struct *mm = tlb->mm;
308 struct vm_area_struct *vma = walk->vma;
309 pte_t *orig_pte, *pte, ptent;
310 spinlock_t *ptl;
311 struct page *page = NULL;
312 LIST_HEAD(page_list);
313
314 if (fatal_signal_pending(current))
315 return -EINTR;
316
317#ifdef CONFIG_TRANSPARENT_HUGEPAGE
318 if (pmd_trans_huge(*pmd)) {
319 pmd_t orig_pmd;
320 unsigned long next = pmd_addr_end(addr, end);
321
322 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
323 ptl = pmd_trans_huge_lock(pmd, vma);
324 if (!ptl)
325 return 0;
326
327 orig_pmd = *pmd;
328 if (is_huge_zero_pmd(orig_pmd))
329 goto huge_unlock;
330
331 if (unlikely(!pmd_present(orig_pmd))) {
332 VM_BUG_ON(thp_migration_supported() &&
333 !is_pmd_migration_entry(orig_pmd));
334 goto huge_unlock;
335 }
336
337 page = pmd_page(orig_pmd);
338 if (next - addr != HPAGE_PMD_SIZE) {
339 int err;
340
341 if (page_mapcount(page) != 1)
342 goto huge_unlock;
343
344 get_page(page);
345 spin_unlock(ptl);
346 lock_page(page);
347 err = split_huge_page(page);
348 unlock_page(page);
349 put_page(page);
350 if (!err)
351 goto regular_page;
352 return 0;
353 }
354
355 if (pmd_young(orig_pmd)) {
356 pmdp_invalidate(vma, addr, pmd);
357 orig_pmd = pmd_mkold(orig_pmd);
358
359 set_pmd_at(mm, addr, pmd, orig_pmd);
360 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
361 }
362
363 ClearPageReferenced(page);
364 test_and_clear_page_young(page);
365 if (pageout) {
366 if (!isolate_lru_page(page))
367 list_add(&page->lru, &page_list);
368 } else
369 deactivate_page(page);
370huge_unlock:
371 spin_unlock(ptl);
372 if (pageout)
373 reclaim_pages(&page_list);
374 return 0;
375 }
376
377 if (pmd_trans_unstable(pmd))
378 return 0;
379regular_page:
380#endif
381 tlb_change_page_size(tlb, PAGE_SIZE);
382 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
383 flush_tlb_batched_pending(mm);
384 arch_enter_lazy_mmu_mode();
385 for (; addr < end; pte++, addr += PAGE_SIZE) {
386 ptent = *pte;
387
388 if (pte_none(ptent))
389 continue;
390
391 if (!pte_present(ptent))
392 continue;
393
394 page = vm_normal_page(vma, addr, ptent);
395 if (!page)
396 continue;
397
398 /*
399 * Creating a THP page is expensive, so split it only if we
400 * are sure it's worth it. Split it if we are the only owner.
401 */
402 if (PageTransCompound(page)) {
403 if (page_mapcount(page) != 1)
404 break;
405 get_page(page);
406 if (!trylock_page(page)) {
407 put_page(page);
408 break;
409 }
410 pte_unmap_unlock(orig_pte, ptl);
411 if (split_huge_page(page)) {
412 unlock_page(page);
413 put_page(page);
414 pte_offset_map_lock(mm, pmd, addr, &ptl);
415 break;
416 }
417 unlock_page(page);
418 put_page(page);
419 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
420 pte--;
421 addr -= PAGE_SIZE;
422 continue;
423 }
424
425 VM_BUG_ON_PAGE(PageTransCompound(page), page);
426
427 if (pte_young(ptent)) {
428 ptent = ptep_get_and_clear_full(mm, addr, pte,
429 tlb->fullmm);
430 ptent = pte_mkold(ptent);
431 set_pte_at(mm, addr, pte, ptent);
432 tlb_remove_tlb_entry(tlb, pte, addr);
433 }
434
435 /*
436 * We are deactivating this page to accelerate its reclaim.
437 * The VM cannot reclaim the page unless we clear PG_young.
438 * As a side effect, this confuses idle-page tracking,
439 * which will miss the recent reference history.
440 */
441 ClearPageReferenced(page);
442 test_and_clear_page_young(page);
443 if (pageout) {
444 if (!isolate_lru_page(page))
445 list_add(&page->lru, &page_list);
446 } else
447 deactivate_page(page);
448 }
449
450 arch_leave_lazy_mmu_mode();
451 pte_unmap_unlock(orig_pte, ptl);
452 if (pageout)
453 reclaim_pages(&page_list);
454 cond_resched();
455
456 return 0;
457}
458
459static const struct mm_walk_ops cold_walk_ops = {
460 .pmd_entry = madvise_cold_or_pageout_pte_range,
461};
462
463static void madvise_cold_page_range(struct mmu_gather *tlb,
464 struct vm_area_struct *vma,
465 unsigned long addr, unsigned long end)
466{
467 struct madvise_walk_private walk_private = {
468 .pageout = false,
469 .tlb = tlb,
470 };
471
472 tlb_start_vma(tlb, vma);
473 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
474 tlb_end_vma(tlb, vma);
475}
476
477static long madvise_cold(struct vm_area_struct *vma,
478 struct vm_area_struct **prev,
479 unsigned long start_addr, unsigned long end_addr)
480{
481 struct mm_struct *mm = vma->vm_mm;
482 struct mmu_gather tlb;
483
484 *prev = vma;
485 if (!can_madv_lru_vma(vma))
486 return -EINVAL;
487
488 lru_add_drain();
489 tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
490 madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
491 tlb_finish_mmu(&tlb, start_addr, end_addr);
492
493 return 0;
494}
495
496static void madvise_pageout_page_range(struct mmu_gather *tlb,
497 struct vm_area_struct *vma,
498 unsigned long addr, unsigned long end)
499{
500 struct madvise_walk_private walk_private = {
501 .pageout = true,
502 .tlb = tlb,
503 };
504
505 tlb_start_vma(tlb, vma);
506 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
507 tlb_end_vma(tlb, vma);
508}
509
510static inline bool can_do_pageout(struct vm_area_struct *vma)
511{
512 if (vma_is_anonymous(vma))
513 return true;
514 if (!vma->vm_file)
515 return false;
516 /*
517 * Page out pagecache only for non-anonymous mappings that correspond
518 * to files the calling process could (if it tried) open for writing;
519 * otherwise we'd be including shared non-exclusive mappings, which
520 * opens a side channel.
521 */
522 return inode_owner_or_capable(file_inode(vma->vm_file)) ||
523 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
524}
525
526static long madvise_pageout(struct vm_area_struct *vma,
527 struct vm_area_struct **prev,
528 unsigned long start_addr, unsigned long end_addr)
529{
530 struct mm_struct *mm = vma->vm_mm;
531 struct mmu_gather tlb;
532
533 *prev = vma;
534 if (!can_madv_lru_vma(vma))
535 return -EINVAL;
536
537 if (!can_do_pageout(vma))
538 return 0;
539
540 lru_add_drain();
541 tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
542 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
543 tlb_finish_mmu(&tlb, start_addr, end_addr);
544
545 return 0;
546}
547
292static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, 548static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
293 unsigned long end, struct mm_walk *walk) 549 unsigned long end, struct mm_walk *walk)
294 550
@@ -493,7 +749,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
493 int behavior) 749 int behavior)
494{ 750{
495 *prev = vma; 751 *prev = vma;
496 if (!can_madv_dontneed_vma(vma)) 752 if (!can_madv_lru_vma(vma))
497 return -EINVAL; 753 return -EINVAL;
498 754
499 if (!userfaultfd_remove(vma, start, end)) { 755 if (!userfaultfd_remove(vma, start, end)) {
@@ -515,7 +771,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
515 */ 771 */
516 return -ENOMEM; 772 return -ENOMEM;
517 } 773 }
518 if (!can_madv_dontneed_vma(vma)) 774 if (!can_madv_lru_vma(vma))
519 return -EINVAL; 775 return -EINVAL;
520 if (end > vma->vm_end) { 776 if (end > vma->vm_end) {
521 /* 777 /*
@@ -669,6 +925,10 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
669 return madvise_remove(vma, prev, start, end); 925 return madvise_remove(vma, prev, start, end);
670 case MADV_WILLNEED: 926 case MADV_WILLNEED:
671 return madvise_willneed(vma, prev, start, end); 927 return madvise_willneed(vma, prev, start, end);
928 case MADV_COLD:
929 return madvise_cold(vma, prev, start, end);
930 case MADV_PAGEOUT:
931 return madvise_pageout(vma, prev, start, end);
672 case MADV_FREE: 932 case MADV_FREE:
673 case MADV_DONTNEED: 933 case MADV_DONTNEED:
674 return madvise_dontneed_free(vma, prev, start, end, behavior); 934 return madvise_dontneed_free(vma, prev, start, end, behavior);
@@ -690,6 +950,8 @@ madvise_behavior_valid(int behavior)
690 case MADV_WILLNEED: 950 case MADV_WILLNEED:
691 case MADV_DONTNEED: 951 case MADV_DONTNEED:
692 case MADV_FREE: 952 case MADV_FREE:
953 case MADV_COLD:
954 case MADV_PAGEOUT:
693#ifdef CONFIG_KSM 955#ifdef CONFIG_KSM
694 case MADV_MERGEABLE: 956 case MADV_MERGEABLE:
695 case MADV_UNMERGEABLE: 957 case MADV_UNMERGEABLE:
@@ -784,6 +1046,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
784 size_t len; 1046 size_t len;
785 struct blk_plug plug; 1047 struct blk_plug plug;
786 1048
1049 start = untagged_addr(start);
1050
787 if (!madvise_behavior_valid(behavior)) 1051 if (!madvise_behavior_valid(behavior))
788 return error; 1052 return error;
789 1053
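
From userspace the two new hints are plain madvise() calls. A minimal sketch; the fallback defines match the asm-generic uapi values added by this series (some architectures, e.g. alpha/mips/parisc/xtensa, use different numbers):

	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MADV_COLD
	#define MADV_COLD	20	/* deactivate these pages */
	#define MADV_PAGEOUT	21	/* reclaim these pages */
	#endif

	static void hint_range(void *addr, size_t len, int pageout)
	{
		/* MADV_COLD only ages the range; MADV_PAGEOUT also reclaims */
		if (madvise(addr, len, pageout ? MADV_PAGEOUT : MADV_COLD))
			perror("madvise");
	}
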
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2156ef775d04..c313c49074ca 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2943,6 +2943,16 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2943 2943
2944 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2944 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2945 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2945 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2946
2947 /*
2948 * Enforce __GFP_NOFAIL allocation because callers are not
2949 * prepared to see failures and likely do not have any failure
2950 * handling code.
2951 */
2952 if (gfp & __GFP_NOFAIL) {
2953 page_counter_charge(&memcg->kmem, nr_pages);
2954 return 0;
2955 }
2946 cancel_charge(memcg, nr_pages); 2956 cancel_charge(memcg, nr_pages);
2947 return -ENOMEM; 2957 return -ENOMEM;
2948 } 2958 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 464406e8da91..de27d08b1ff8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1405,6 +1405,7 @@ static long kernel_mbind(unsigned long start, unsigned long len,
1405 int err; 1405 int err;
1406 unsigned short mode_flags; 1406 unsigned short mode_flags;
1407 1407
1408 start = untagged_addr(start);
1408 mode_flags = mode & MPOL_MODE_FLAGS; 1409 mode_flags = mode & MPOL_MODE_FLAGS;
1409 mode &= ~MPOL_MODE_FLAGS; 1410 mode &= ~MPOL_MODE_FLAGS;
1410 if (mode >= MPOL_MAX) 1411 if (mode >= MPOL_MAX)
@@ -1558,6 +1559,8 @@ static int kernel_get_mempolicy(int __user *policy,
1558 int uninitialized_var(pval); 1559 int uninitialized_var(pval);
1559 nodemask_t nodes; 1560 nodemask_t nodes;
1560 1561
1562 addr = untagged_addr(addr);
1563
1561 if (nmask != NULL && maxnode < nr_node_ids) 1564 if (nmask != NULL && maxnode < nr_node_ids)
1562 return -EINVAL; 1565 return -EINVAL;
1563 1566
diff --git a/mm/migrate.c b/mm/migrate.c
index 73d476d690b1..4fe45d1428c8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1612,7 +1612,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1612 goto out_flush; 1612 goto out_flush;
1613 if (get_user(node, nodes + i)) 1613 if (get_user(node, nodes + i))
1614 goto out_flush; 1614 goto out_flush;
1615 addr = (unsigned long)p; 1615 addr = (unsigned long)untagged_addr(p);
1616 1616
1617 err = -ENODEV; 1617 err = -ENODEV;
1618 if (node < 0 || node >= MAX_NUMNODES) 1618 if (node < 0 || node >= MAX_NUMNODES)
diff --git a/mm/mincore.c b/mm/mincore.c
index f9a9dbe8cd33..49b6fa2f6aa1 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -256,6 +256,8 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
256 unsigned long pages; 256 unsigned long pages;
257 unsigned char *tmp; 257 unsigned char *tmp;
258 258
259 start = untagged_addr(start);
260
259 /* Check the start address: needs to be page-aligned.. */ 261 /* Check the start address: needs to be page-aligned.. */
260 if (start & ~PAGE_MASK) 262 if (start & ~PAGE_MASK)
261 return -EINVAL; 263 return -EINVAL;
diff --git a/mm/mlock.c b/mm/mlock.c
index a90099da4fb4..a72c1eeded77 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -674,6 +674,8 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
674 unsigned long lock_limit; 674 unsigned long lock_limit;
675 int error = -ENOMEM; 675 int error = -ENOMEM;
676 676
677 start = untagged_addr(start);
678
677 if (!can_do_mlock()) 679 if (!can_do_mlock())
678 return -EPERM; 680 return -EPERM;
679 681
@@ -735,6 +737,8 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
735{ 737{
736 int ret; 738 int ret;
737 739
740 start = untagged_addr(start);
741
738 len = PAGE_ALIGN(len + (offset_in_page(start))); 742 len = PAGE_ALIGN(len + (offset_in_page(start)));
739 start &= PAGE_MASK; 743 start &= PAGE_MASK;
740 744
diff --git a/mm/mmap.c b/mm/mmap.c
index f1e8c7f93e04..a7d8c84d19b7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -201,6 +201,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
201 bool downgraded = false; 201 bool downgraded = false;
202 LIST_HEAD(uf); 202 LIST_HEAD(uf);
203 203
204 brk = untagged_addr(brk);
205
204 if (down_write_killable(&mm->mmap_sem)) 206 if (down_write_killable(&mm->mmap_sem))
205 return -EINTR; 207 return -EINTR;
206 208
@@ -289,9 +291,9 @@ out:
289 return retval; 291 return retval;
290} 292}
291 293
292static long vma_compute_subtree_gap(struct vm_area_struct *vma) 294static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
293{ 295{
294 unsigned long max, prev_end, subtree_gap; 296 unsigned long gap, prev_end;
295 297
296 /* 298 /*
297 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we 299 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
@@ -299,14 +301,21 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
299 * an unmapped area; whereas when expanding we only require one. 301 * an unmapped area; whereas when expanding we only require one.
300 * That's a little inconsistent, but keeps the code here simpler. 302 * That's a little inconsistent, but keeps the code here simpler.
301 */ 303 */
302 max = vm_start_gap(vma); 304 gap = vm_start_gap(vma);
303 if (vma->vm_prev) { 305 if (vma->vm_prev) {
304 prev_end = vm_end_gap(vma->vm_prev); 306 prev_end = vm_end_gap(vma->vm_prev);
305 if (max > prev_end) 307 if (gap > prev_end)
306 max -= prev_end; 308 gap -= prev_end;
307 else 309 else
308 max = 0; 310 gap = 0;
309 } 311 }
312 return gap;
313}
314
315#ifdef CONFIG_DEBUG_VM_RB
316static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
317{
318 unsigned long max = vma_compute_gap(vma), subtree_gap;
310 if (vma->vm_rb.rb_left) { 319 if (vma->vm_rb.rb_left) {
311 subtree_gap = rb_entry(vma->vm_rb.rb_left, 320 subtree_gap = rb_entry(vma->vm_rb.rb_left,
312 struct vm_area_struct, vm_rb)->rb_subtree_gap; 321 struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -322,7 +331,6 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
322 return max; 331 return max;
323} 332}
324 333
325#ifdef CONFIG_DEBUG_VM_RB
326static int browse_rb(struct mm_struct *mm) 334static int browse_rb(struct mm_struct *mm)
327{ 335{
328 struct rb_root *root = &mm->mm_rb; 336 struct rb_root *root = &mm->mm_rb;
@@ -428,8 +436,9 @@ static void validate_mm(struct mm_struct *mm)
428#define validate_mm(mm) do { } while (0) 436#define validate_mm(mm) do { } while (0)
429#endif 437#endif
430 438
431RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, 439RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
432 unsigned long, rb_subtree_gap, vma_compute_subtree_gap) 440 struct vm_area_struct, vm_rb,
441 unsigned long, rb_subtree_gap, vma_compute_gap)
433 442
434/* 443/*
435 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or 444 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
@@ -439,8 +448,8 @@ RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
439static void vma_gap_update(struct vm_area_struct *vma) 448static void vma_gap_update(struct vm_area_struct *vma)
440{ 449{
441 /* 450 /*
442 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback 451 * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
443 * function that does exactly what we want. 452 * a callback function that does exactly what we want.
444 */ 453 */
445 vma_gap_callbacks_propagate(&vma->vm_rb, NULL); 454 vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
446} 455}
@@ -1580,6 +1589,8 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1580 struct file *file = NULL; 1589 struct file *file = NULL;
1581 unsigned long retval; 1590 unsigned long retval;
1582 1591
1592 addr = untagged_addr(addr);
1593
1583 if (!(flags & MAP_ANONYMOUS)) { 1594 if (!(flags & MAP_ANONYMOUS)) {
1584 audit_mmap_fd(fd, flags); 1595 audit_mmap_fd(fd, flags);
1585 file = fget(fd); 1596 file = fget(fd);
@@ -2878,6 +2889,7 @@ EXPORT_SYMBOL(vm_munmap);
2878 2889
2879SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 2890SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2880{ 2891{
2892 addr = untagged_addr(addr);
2881 profile_munmap(addr); 2893 profile_munmap(addr);
2882 return __vm_munmap(addr, len, true); 2894 return __vm_munmap(addr, len, true);
2883} 2895}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 675e5d34a507..7967825f6d33 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -459,6 +459,8 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
459 const bool rier = (current->personality & READ_IMPLIES_EXEC) && 459 const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
460 (prot & PROT_READ); 460 (prot & PROT_READ);
461 461
462 start = untagged_addr(start);
463
462 prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); 464 prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
463 if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ 465 if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
464 return -EINVAL; 466 return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index fc241d23cd97..1fc8a29fbe3f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -606,6 +606,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
606 LIST_HEAD(uf_unmap_early); 606 LIST_HEAD(uf_unmap_early);
607 LIST_HEAD(uf_unmap); 607 LIST_HEAD(uf_unmap);
608 608
609 addr = untagged_addr(addr);
610 new_addr = untagged_addr(new_addr);
611
609 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) 612 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
610 return ret; 613 return ret;
611 614
diff --git a/mm/msync.c b/mm/msync.c
index ef30a429623a..c3bd3e75f687 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -37,6 +37,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
37 int unmapped_error = 0; 37 int unmapped_error = 0;
38 int error = -EINVAL; 38 int error = -EINVAL;
39 39
40 start = untagged_addr(start);
41
40 if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC)) 42 if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
41 goto out; 43 goto out;
42 if (offset_in_page(start)) 44 if (offset_in_page(start))
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c1d9496b4c43..71e3acea7817 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -523,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
523 set_bit(MMF_UNSTABLE, &mm->flags); 523 set_bit(MMF_UNSTABLE, &mm->flags);
524 524
525 for (vma = mm->mmap ; vma; vma = vma->vm_next) { 525 for (vma = mm->mmap ; vma; vma = vma->vm_next) {
526 if (!can_madv_dontneed_vma(vma)) 526 if (!can_madv_lru_vma(vma))
527 continue; 527 continue;
528 528
529 /* 529 /*
diff --git a/mm/swap.c b/mm/swap.c
index 784dc1620620..38c3fa4308e2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,6 +47,7 @@ int page_cluster;
47static DEFINE_PER_CPU(struct pagevec, lru_add_pvec); 47static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
48static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); 48static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
49static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); 49static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
50static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
50static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs); 51static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
51#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
52static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); 53static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
@@ -538,6 +539,22 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
538 update_page_reclaim_stat(lruvec, file, 0); 539 update_page_reclaim_stat(lruvec, file, 0);
539} 540}
540 541
542static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
543 void *arg)
544{
545 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
546 int file = page_is_file_cache(page);
547 int lru = page_lru_base_type(page);
548
549 del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
550 ClearPageActive(page);
551 ClearPageReferenced(page);
552 add_page_to_lru_list(page, lruvec, lru);
553
554 __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
555 update_page_reclaim_stat(lruvec, file, 0);
556 }
557}
541 558
542static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, 559static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
543 void *arg) 560 void *arg)
@@ -590,6 +607,10 @@ void lru_add_drain_cpu(int cpu)
590 if (pagevec_count(pvec)) 607 if (pagevec_count(pvec))
591 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); 608 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
592 609
610 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
611 if (pagevec_count(pvec))
612 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
613
593 pvec = &per_cpu(lru_lazyfree_pvecs, cpu); 614 pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
594 if (pagevec_count(pvec)) 615 if (pagevec_count(pvec))
595 pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); 616 pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
@@ -623,6 +644,26 @@ void deactivate_file_page(struct page *page)
623 } 644 }
624} 645}
625 646
647/**
648 * deactivate_page - deactivate a page
649 * @page: page to deactivate
650 *
651 * deactivate_page() moves @page to the inactive list if @page was on the active
652 * list and was not an unevictable page. This is done to accelerate the reclaim
653 * of @page.
654 */
655void deactivate_page(struct page *page)
656{
657 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
658 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
659
660 get_page(page);
661 if (!pagevec_add(pvec, page) || PageCompound(page))
662 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
663 put_cpu_var(lru_deactivate_pvecs);
664 }
665}
666
626/** 667/**
627 * mark_page_lazyfree - make an anon page lazyfree 668 * mark_page_lazyfree - make an anon page lazyfree
628 * @page: page to deactivate 669 * @page: page to deactivate
@@ -687,6 +728,7 @@ void lru_add_drain_all(void)
687 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || 728 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
688 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || 729 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
689 pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || 730 pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
731 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
690 pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) || 732 pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
691 need_activate_page_drain(cpu)) { 733 need_activate_page_drain(cpu)) {
692 INIT_WORK(work, lru_add_drain_per_cpu); 734 INIT_WORK(work, lru_add_drain_per_cpu);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fcadd3e25c0c..a3c70e275f4e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -396,9 +396,8 @@ compute_subtree_max_size(struct vmap_area *va)
396 get_subtree_max_size(va->rb_node.rb_right)); 396 get_subtree_max_size(va->rb_node.rb_right));
397} 397}
398 398
399RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb, 399RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
400 struct vmap_area, rb_node, unsigned long, subtree_max_size, 400 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
401 compute_subtree_max_size)
402 401
403static void purge_vmap_area_lazy(void); 402static void purge_vmap_area_lazy(void);
404static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); 403static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4911754c93b7..e5d52d6a24af 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1123,7 +1123,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1123 struct scan_control *sc, 1123 struct scan_control *sc,
1124 enum ttu_flags ttu_flags, 1124 enum ttu_flags ttu_flags,
1125 struct reclaim_stat *stat, 1125 struct reclaim_stat *stat,
1126 bool force_reclaim) 1126 bool ignore_references)
1127{ 1127{
1128 LIST_HEAD(ret_pages); 1128 LIST_HEAD(ret_pages);
1129 LIST_HEAD(free_pages); 1129 LIST_HEAD(free_pages);
@@ -1137,7 +1137,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1137 struct address_space *mapping; 1137 struct address_space *mapping;
1138 struct page *page; 1138 struct page *page;
1139 int may_enter_fs; 1139 int may_enter_fs;
1140 enum page_references references = PAGEREF_RECLAIM_CLEAN; 1140 enum page_references references = PAGEREF_RECLAIM;
1141 bool dirty, writeback; 1141 bool dirty, writeback;
1142 unsigned int nr_pages; 1142 unsigned int nr_pages;
1143 1143
@@ -1268,7 +1268,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1268 } 1268 }
1269 } 1269 }
1270 1270
1271 if (!force_reclaim) 1271 if (!ignore_references)
1272 references = page_check_references(page, sc); 1272 references = page_check_references(page, sc);
1273 1273
1274 switch (references) { 1274 switch (references) {
@@ -2145,6 +2145,62 @@ static void shrink_active_list(unsigned long nr_to_scan,
2145 nr_deactivate, nr_rotated, sc->priority, file); 2145 nr_deactivate, nr_rotated, sc->priority, file);
2146} 2146}
2147 2147
2148unsigned long reclaim_pages(struct list_head *page_list)
2149{
2150 int nid = -1;
2151 unsigned long nr_reclaimed = 0;
2152 LIST_HEAD(node_page_list);
2153 struct reclaim_stat dummy_stat;
2154 struct page *page;
2155 struct scan_control sc = {
2156 .gfp_mask = GFP_KERNEL,
2157 .priority = DEF_PRIORITY,
2158 .may_writepage = 1,
2159 .may_unmap = 1,
2160 .may_swap = 1,
2161 };
2162
2163 while (!list_empty(page_list)) {
2164 page = lru_to_page(page_list);
2165 if (nid == -1) {
2166 nid = page_to_nid(page);
2167 INIT_LIST_HEAD(&node_page_list);
2168 }
2169
2170 if (nid == page_to_nid(page)) {
2171 ClearPageActive(page);
2172 list_move(&page->lru, &node_page_list);
2173 continue;
2174 }
2175
2176 nr_reclaimed += shrink_page_list(&node_page_list,
2177 NODE_DATA(nid),
2178 &sc, 0,
2179 &dummy_stat, false);
2180 while (!list_empty(&node_page_list)) {
2181 page = lru_to_page(&node_page_list);
2182 list_del(&page->lru);
2183 putback_lru_page(page);
2184 }
2185
2186 nid = -1;
2187 }
2188
2189 if (!list_empty(&node_page_list)) {
2190 nr_reclaimed += shrink_page_list(&node_page_list,
2191 NODE_DATA(nid),
2192 &sc, 0,
2193 &dummy_stat, false);
2194 while (!list_empty(&node_page_list)) {
2195 page = lru_to_page(&node_page_list);
2196 list_del(&page->lru);
2197 putback_lru_page(page);
2198 }
2199 }
2200
2201 return nr_reclaimed;
2202}
2203
2148/* 2204/*
2149 * The inactive anon list should be small enough that the VM never has 2205 * The inactive anon list should be small enough that the VM never has
2150 * to do too much work. 2206 * to do too much work.
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 93a7edfe0f05..6fcc66afb088 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -62,6 +62,8 @@ my $conststructsfile = "$D/const_structs.checkpatch";
62my $typedefsfile = ""; 62my $typedefsfile = "";
63my $color = "auto"; 63my $color = "auto";
64my $allow_c99_comments = 1; # Can be overridden by --ignore C99_COMMENT_TOLERANCE 64my $allow_c99_comments = 1; # Can be overridden by --ignore C99_COMMENT_TOLERANCE
65# git output parsing needs US English output, so set the backtick child process LANGUAGE first
66my $git_command = 'export LANGUAGE=en_US.UTF-8; git';
65 67
66sub help { 68sub help {
67 my ($exitcode) = @_; 69 my ($exitcode) = @_;
@@ -904,7 +906,7 @@ sub seed_camelcase_includes {
904 $camelcase_seeded = 1; 906 $camelcase_seeded = 1;
905 907
906 if (-e ".git") { 908 if (-e ".git") {
907 my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`; 909 my $git_last_include_commit = `${git_command} log --no-merges --pretty=format:"%h%n" -1 -- include`;
908 chomp $git_last_include_commit; 910 chomp $git_last_include_commit;
909 $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit"; 911 $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
910 } else { 912 } else {
@@ -932,7 +934,7 @@ sub seed_camelcase_includes {
932 } 934 }
933 935
934 if (-e ".git") { 936 if (-e ".git") {
935 $files = `git ls-files "include/*.h"`; 937 $files = `${git_command} ls-files "include/*.h"`;
936 @include_files = split('\n', $files); 938 @include_files = split('\n', $files);
937 } 939 }
938 940
@@ -956,13 +958,13 @@ sub git_commit_info {
956 958
957 return ($id, $desc) if ((which("git") eq "") || !(-e ".git")); 959 return ($id, $desc) if ((which("git") eq "") || !(-e ".git"));
958 960
959 my $output = `git log --no-color --format='%H %s' -1 $commit 2>&1`; 961 my $output = `${git_command} log --no-color --format='%H %s' -1 $commit 2>&1`;
960 $output =~ s/^\s*//gm; 962 $output =~ s/^\s*//gm;
961 my @lines = split("\n", $output); 963 my @lines = split("\n", $output);
962 964
963 return ($id, $desc) if ($#lines < 0); 965 return ($id, $desc) if ($#lines < 0);
964 966
965 if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous\./) { 967 if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous/) {
966# Maybe one day convert this block of bash into something that returns 968# Maybe one day convert this block of bash into something that returns
967# all matching commit ids, but it's very slow... 969# all matching commit ids, but it's very slow...
968# 970#
@@ -1006,7 +1008,7 @@ if ($git) {
1006 } else { 1008 } else {
1007 $git_range = "-1 $commit_expr"; 1009 $git_range = "-1 $commit_expr";
1008 } 1010 }
1009 my $lines = `git log --no-color --no-merges --pretty=format:'%H %s' $git_range`; 1011 my $lines = `${git_command} log --no-color --no-merges --pretty=format:'%H %s' $git_range`;
1010 foreach my $line (split(/\n/, $lines)) { 1012 foreach my $line (split(/\n/, $lines)) {
1011 $line =~ /^([0-9a-fA-F]{40,40}) (.*)$/; 1013 $line =~ /^([0-9a-fA-F]{40,40}) (.*)$/;
1012 next if (!defined($1) || !defined($2)); 1014 next if (!defined($1) || !defined($2));
@@ -2725,8 +2727,10 @@ sub process {
2725 ($line =~ /^\s*(?:WARNING:|BUG:)/ || 2727 ($line =~ /^\s*(?:WARNING:|BUG:)/ ||
2726 $line =~ /^\s*\[\s*\d+\.\d{6,6}\s*\]/ || 2728 $line =~ /^\s*\[\s*\d+\.\d{6,6}\s*\]/ ||
2727 # timestamp 2729 # timestamp
2728 $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/)) { 2730 $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/) ||
2729 # stack dump address 2731 $line =~ /^(?:\s+\w+:\s+[0-9a-fA-F]+){3,3}/ ||
2732 $line =~ /^\s*\#\d+\s*\[[0-9a-fA-F]+\]\s*\w+ at [0-9a-fA-F]+/) {
2733 # stack dump address styles
2730 $commit_log_possible_stack_dump = 1; 2734 $commit_log_possible_stack_dump = 1;
2731 } 2735 }
2732 2736
@@ -2898,6 +2902,17 @@ sub process {
2898 } 2902 }
2899 } 2903 }
2900 2904
2905# check for invalid commit id
2906 if ($in_commit_log && $line =~ /(^fixes:|\bcommit)\s+([0-9a-f]{6,40})\b/i) {
2907 my $id;
2908 my $description;
2909 ($id, $description) = git_commit_info($2, undef, undef);
2910 if (!defined($id)) {
2911 WARN("UNKNOWN_COMMIT_ID",
2912 "Unknown commit id '$2', maybe rebased or not pulled?\n" . $herecurr);
2913 }
2914 }
2915
2901# ignore non-hunk lines and lines being removed 2916# ignore non-hunk lines and lines being removed
2902 next if (!$hunk_line || $line =~ /^-/); 2917 next if (!$hunk_line || $line =~ /^-/);
2903 2918
@@ -3069,21 +3084,21 @@ sub process {
3069# check SPDX comment style for .[chsS] files 3084# check SPDX comment style for .[chsS] files
3070 if ($realfile =~ /\.[chsS]$/ && 3085 if ($realfile =~ /\.[chsS]$/ &&
3071 $rawline =~ /SPDX-License-Identifier:/ && 3086 $rawline =~ /SPDX-License-Identifier:/ &&
3072 $rawline !~ /^\+\s*\Q$comment\E\s*/) { 3087 $rawline !~ m@^\+\s*\Q$comment\E\s*@) {
3073 WARN("SPDX_LICENSE_TAG", 3088 WARN("SPDX_LICENSE_TAG",
3074 "Improper SPDX comment style for '$realfile', please use '$comment' instead\n" . $herecurr); 3089 "Improper SPDX comment style for '$realfile', please use '$comment' instead\n" . $herecurr);
3075 } 3090 }
3076 3091
3077 if ($comment !~ /^$/ && 3092 if ($comment !~ /^$/ &&
3078 $rawline !~ /^\+\Q$comment\E SPDX-License-Identifier: /) { 3093 $rawline !~ m@^\+\Q$comment\E SPDX-License-Identifier: @) {
3079 WARN("SPDX_LICENSE_TAG", 3094 WARN("SPDX_LICENSE_TAG",
3080 "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr); 3095 "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr);
3081 } elsif ($rawline =~ /(SPDX-License-Identifier: .*)/) { 3096 } elsif ($rawline =~ /(SPDX-License-Identifier: .*)/) {
3082 my $spdx_license = $1; 3097 my $spdx_license = $1;
3083 if (!is_SPDX_License_valid($spdx_license)) { 3098 if (!is_SPDX_License_valid($spdx_license)) {
3084 WARN("SPDX_LICENSE_TAG", 3099 WARN("SPDX_LICENSE_TAG",
3085 "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr); 3100 "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
3086 } 3101 }
3087 } 3102 }
3088 } 3103 }
3089 } 3104 }
@@ -4660,7 +4675,7 @@ sub process {
4660 4675
4661# closing brace should have a space following it when it has anything 4676# closing brace should have a space following it when it has anything
4662# on the line 4677# on the line
4663 if ($line =~ /}(?!(?:,|;|\)))\S/) { 4678 if ($line =~ /}(?!(?:,|;|\)|\}))\S/) {
4664 if (ERROR("SPACING", 4679 if (ERROR("SPACING",
4665 "space required after that close brace '}'\n" . $herecurr) && 4680 "space required after that close brace '}'\n" . $herecurr) &&
4666 $fix) { 4681 $fix) {
@@ -5191,7 +5206,7 @@ sub process {
5191 next if ($arg =~ /\.\.\./); 5206 next if ($arg =~ /\.\.\./);
5192 next if ($arg =~ /^type$/i); 5207 next if ($arg =~ /^type$/i);
5193 my $tmp_stmt = $define_stmt; 5208 my $tmp_stmt = $define_stmt;
5194 $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; 5209 $tmp_stmt =~ s/\b(sizeof|typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
5195 $tmp_stmt =~ s/\#+\s*$arg\b//g; 5210 $tmp_stmt =~ s/\#+\s*$arg\b//g;
5196 $tmp_stmt =~ s/\b$arg\s*\#\#//g; 5211 $tmp_stmt =~ s/\b$arg\s*\#\#//g;
5197 my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g; 5212 my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
@@ -5873,6 +5888,18 @@ sub process {
5873 "__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr); 5888 "__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
5874 } 5889 }
5875 5890
5891# Check for __attribute__ section, prefer __section
5892 if ($realfile !~ m@\binclude/uapi/@ &&
5893 $line =~ /\b__attribute__\s*\(\s*\(.*_*section_*\s*\(\s*("[^"]*")/) {
5894 my $old = substr($rawline, $-[1], $+[1] - $-[1]);
5895 my $new = substr($old, 1, -1);
5896 if (WARN("PREFER_SECTION",
5897 "__section($new) is preferred over __attribute__((section($old)))\n" . $herecurr) &&
5898 $fix) {
5899 $fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*_*section_*\s*\(\s*\Q$old\E\s*\)\s*\)\s*\)/__section($new)/;
5900 }
5901 }
5902
5876# Check for __attribute__ format(printf, prefer __printf 5903# Check for __attribute__ format(printf, prefer __printf
5877 if ($realfile !~ m@\binclude/uapi/@ && 5904 if ($realfile !~ m@\binclude/uapi/@ &&
5878 $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) { 5905 $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
@@ -6480,6 +6507,12 @@ sub process {
6480 "Using $1 should generally have parentheses around the comparison\n" . $herecurr); 6507 "Using $1 should generally have parentheses around the comparison\n" . $herecurr);
6481 } 6508 }
6482 6509
6510# nested likely/unlikely calls
6511 if ($line =~ /\b(?:(?:un)?likely)\s*\(\s*!?\s*(IS_ERR(?:_OR_NULL|_VALUE)?|WARN)/) {
6512 WARN("LIKELY_MISUSE",
6513 "nested (un)?likely() calls, $1 already uses unlikely() internally\n" . $herecurr);
6514 }
6515
6483# whine mightly about in_atomic 6516# whine mightly about in_atomic
6484 if ($line =~ /\bin_atomic\s*\(/) { 6517 if ($line =~ /\bin_atomic\s*\(/) {
6485 if ($realfile =~ m@^drivers/@) { 6518 if ($realfile =~ m@^drivers/@) {
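
A minimal sketch of the pattern the new LIKELY_MISUSE check warns about
(hypothetical pointer):

	if (unlikely(IS_ERR(ptr)))	/* flagged: IS_ERR() already wraps its test in unlikely() */
		return PTR_ERR(ptr);

	if (IS_ERR(ptr))		/* preferred: same branch hint, less noise */
		return PTR_ERR(ptr);
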
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 2f5b95f09fa0..34e40e96dee2 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -77,12 +77,12 @@ lx-symbols command."""
         gdb.write("scanning for modules in {0}\n".format(path))
         for root, dirs, files in os.walk(path):
             for name in files:
-                if name.endswith(".ko"):
+                if name.endswith(".ko") or name.endswith(".ko.debug"):
                     self.module_files.append(root + "/" + name)
         self.module_files_updated = True
 
     def _get_module_file(self, module_name):
-        module_pattern = ".*/{0}\.ko$".format(
+        module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
             module_name.replace("_", r"[_\-]"))
         for name in self.module_files:
             if re.match(module_pattern, name) and os.path.exists(name):
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index d83763a5327c..e03b1ea23e0e 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -31,25 +31,9 @@ struct rb_root {
 	struct rb_node *rb_node;
 };
 
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
-	struct rb_root rb_root;
-	struct rb_node *rb_leftmost;
-};
-
 #define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
 #define RB_ROOT	(struct rb_root) { NULL, }
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)
@@ -71,12 +55,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);
 
-extern void rb_insert_color_cached(struct rb_node *,
-				   struct rb_root_cached *, bool);
-extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
-/* Same as rb_first(), but O(1) */
-#define rb_first_cached(root) (root)->rb_leftmost
-
 /* Postorder iteration - always visit the parent after its children */
 extern struct rb_node *rb_first_postorder(const struct rb_root *);
 extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -84,8 +62,6 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 			    struct rb_root *root);
-extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
-				   struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
 				struct rb_node **rb_link)
@@ -129,4 +105,51 @@ static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
 	rb_erase(n, root);
 	RB_CLEAR_NODE(n);
 }
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+	struct rb_root rb_root;
+	struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline void rb_insert_color_cached(struct rb_node *node,
+					  struct rb_root_cached *root,
+					  bool leftmost)
+{
+	if (leftmost)
+		root->rb_leftmost = node;
+	rb_insert_color(node, &root->rb_root);
+}
+
+static inline void rb_erase_cached(struct rb_node *node,
+				   struct rb_root_cached *root)
+{
+	if (root->rb_leftmost == node)
+		root->rb_leftmost = rb_next(node);
+	rb_erase(node, &root->rb_root);
+}
+
+static inline void rb_replace_node_cached(struct rb_node *victim,
+					  struct rb_node *new,
+					  struct rb_root_cached *root)
+{
+	if (root->rb_leftmost == victim)
+		root->rb_leftmost = new;
+	rb_replace_node(victim, new, &root->rb_root);
+}
+
 #endif /* __TOOLS_LINUX_PERF_RBTREE_H */
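
A usage sketch for the now-inline cached API (hypothetical struct and key): the
caller tracks whether the descent ever turned right, so rb_leftmost stays exact
without an extra lookup:

	struct item {
		unsigned long key;
		struct rb_node node;
	};

	static void item_insert(struct item *it, struct rb_root_cached *root)
	{
		struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*link) {
			struct item *cur = rb_entry(*link, struct item, node);

			parent = *link;
			if (it->key < cur->key) {
				link = &parent->rb_left;
			} else {
				link = &parent->rb_right;
				leftmost = false;	/* turned right: not the new minimum */
			}
		}
		rb_link_node(&it->node, parent, link);
		rb_insert_color_cached(&it->node, root, leftmost);
	}
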
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index ddd01006ece5..381aa948610d 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -32,17 +32,16 @@ struct rb_augment_callbacks {
 	void (*rotate)(struct rb_node *old, struct rb_node *new);
 };
 
-extern void __rb_insert_augmented(struct rb_node *node,
-				  struct rb_root *root,
-				  bool newleft, struct rb_node **leftmost,
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
 				  void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
 /*
  * Fixup the rbtree and update the augmented information when rebalancing.
  *
  * On insertion, the user must update the augmented information on the path
  * leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
  * a user provided function to update the augmented information on the
  * affected subtrees.
  */
@@ -50,7 +49,7 @@ static inline void
 rb_insert_augmented(struct rb_node *node, struct rb_root *root,
 		    const struct rb_augment_callbacks *augment)
 {
-	__rb_insert_augmented(node, root, false, NULL, augment->rotate);
+	__rb_insert_augmented(node, root, augment->rotate);
 }
 
 static inline void
@@ -58,45 +57,92 @@ rb_insert_augmented_cached(struct rb_node *node,
 			   struct rb_root_cached *root, bool newleft,
 			   const struct rb_augment_callbacks *augment)
 {
-	__rb_insert_augmented(node, &root->rb_root,
-			      newleft, &root->rb_leftmost, augment->rotate);
+	if (newleft)
+		root->rb_leftmost = node;
+	rb_insert_augmented(node, &root->rb_root, augment);
 }
 
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,	\
-			     rbtype, rbaugmented, rbcompute)		\
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,				\
+			     RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE)	\
 static inline void							\
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
 {									\
 	while (rb != stop) {						\
-		rbstruct *node = rb_entry(rb, rbstruct, rbfield);	\
-		rbtype augmented = rbcompute(node);			\
-		if (node->rbaugmented == augmented)			\
+		RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD);	\
+		if (RBCOMPUTE(node, true))				\
 			break;						\
-		node->rbaugmented = augmented;				\
-		rb = rb_parent(&node->rbfield);				\
+		rb = rb_parent(&node->RBFIELD);				\
 	}								\
 }									\
 static inline void							\
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)		\
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)		\
 {									\
-	rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);		\
-	rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);		\
-	new->rbaugmented = old->rbaugmented;				\
+	RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD);		\
+	RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD);		\
+	new->RBAUGMENTED = old->RBAUGMENTED;				\
 }									\
 static void								\
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)	\
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)	\
 {									\
-	rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);		\
-	rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);		\
-	new->rbaugmented = old->rbaugmented;				\
-	old->rbaugmented = rbcompute(old);				\
+	RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD);		\
+	RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD);		\
+	new->RBAUGMENTED = old->RBAUGMENTED;				\
+	RBCOMPUTE(old, false);						\
 }									\
-rbstatic const struct rb_augment_callbacks rbname = {			\
-	.propagate = rbname ## _propagate,				\
-	.copy = rbname ## _copy,					\
-	.rotate = rbname ## _rotate					\
+RBSTATIC const struct rb_augment_callbacks RBNAME = {			\
+	.propagate = RBNAME ## _propagate,				\
+	.copy = RBNAME ## _copy,					\
+	.rotate = RBNAME ## _rotate					\
 };
 
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD,	      \
+				 RBTYPE, RBAUGMENTED, RBCOMPUTE)	      \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit)	      \
+{									      \
+	RBSTRUCT *child;						      \
+	RBTYPE max = RBCOMPUTE(node);					      \
+	if (node->RBFIELD.rb_left) {					      \
+		child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD);   \
+		if (child->RBAUGMENTED > max)				      \
+			max = child->RBAUGMENTED;			      \
+	}								      \
+	if (node->RBFIELD.rb_right) {					      \
+		child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD);  \
+		if (child->RBAUGMENTED > max)				      \
+			max = child->RBAUGMENTED;			      \
+	}								      \
+	if (exit && node->RBAUGMENTED == max)				      \
+		return true;						      \
+	node->RBAUGMENTED = max;					      \
+	return false;							      \
+}									      \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,					      \
+		     RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
 
 #define	RB_RED		0
 #define	RB_BLACK	1
@@ -139,7 +185,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 
 static __always_inline struct rb_node *
 __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
-		     struct rb_node **leftmost,
 		     const struct rb_augment_callbacks *augment)
 {
 	struct rb_node *child = node->rb_right;
@@ -147,9 +192,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 	struct rb_node *parent, *rebalance;
 	unsigned long pc;
 
-	if (leftmost && node == *leftmost)
-		*leftmost = rb_next(node);
-
 	if (!tmp) {
 		/*
 		 * Case 1: node to erase has no more than 1 child (easy!)
@@ -249,8 +291,7 @@ static __always_inline void
 rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 		   const struct rb_augment_callbacks *augment)
 {
-	struct rb_node *rebalance = __rb_erase_augmented(node, root,
-							 NULL, augment);
+	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
 	if (rebalance)
 		__rb_erase_color(rebalance, root, augment->rotate);
 }
@@ -259,11 +300,9 @@ static __always_inline void
 rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
 			  const struct rb_augment_callbacks *augment)
 {
-	struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
-							 &root->rb_leftmost,
-							 augment);
-	if (rebalance)
-		__rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+	if (root->rb_leftmost == node)
+		root->rb_leftmost = rb_next(node);
+	rb_erase_augmented(node, &root->rb_root, augment);
 }
 
 #endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
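
A sketch of the new RB_DECLARE_CALLBACKS_MAX template in use (hypothetical
interval-style node): the generated interval_aug_compute_max() propagates the
largest endpoint up the tree, so no hand-written compute/propagate plumbing is
needed:

	struct interval {
		unsigned long start, end;
		unsigned long subtree_max_end;	/* RBAUGMENTED */
		struct rb_node rb;		/* RBFIELD */
	};

	static inline unsigned long interval_end(struct interval *n)	/* RBCOMPUTE */
	{
		return n->end;
	}

	RB_DECLARE_CALLBACKS_MAX(static, interval_aug, struct interval, rb,
				 unsigned long, subtree_max_end, interval_end)

	/* then insert/erase pass the callbacks, e.g.
	 * rb_insert_augmented(&node->rb, &root, &interval_aug);
	 */
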
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 804f145e3113..2548ff8c4d9c 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
 
 static __always_inline void
 __rb_insert(struct rb_node *node, struct rb_root *root,
-	    bool newleft, struct rb_node **leftmost,
 	    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
 
-	if (newleft)
-		*leftmost = node;
-
 	while (true) {
 		/*
 		 * Loop invariant: node is red.
@@ -436,34 +432,17 @@ static const struct rb_augment_callbacks dummy_callbacks = {
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
-	__rb_insert(node, root, false, NULL, dummy_rotate);
+	__rb_insert(node, root, dummy_rotate);
 }
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
 	struct rb_node *rebalance;
-	rebalance = __rb_erase_augmented(node, root,
-					 NULL, &dummy_callbacks);
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
 	if (rebalance)
 		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 
-void rb_insert_color_cached(struct rb_node *node,
-			    struct rb_root_cached *root, bool leftmost)
-{
-	__rb_insert(node, &root->rb_root, leftmost,
-		    &root->rb_leftmost, dummy_rotate);
-}
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
-	struct rb_node *rebalance;
-	rebalance = __rb_erase_augmented(node, &root->rb_root,
-					 &root->rb_leftmost, &dummy_callbacks);
-	if (rebalance)
-		____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-
 /*
  * Augmented rbtree manipulation functions.
  *
@@ -472,10 +451,9 @@ void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
  */
 
 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
-			   bool newleft, struct rb_node **leftmost,
 			   void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-	__rb_insert(node, root, newleft, leftmost, augment_rotate);
+	__rb_insert(node, root, augment_rotate);
 }
 
481/* 459/*
@@ -580,15 +558,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
580 __rb_change_child(victim, new, parent, root); 558 __rb_change_child(victim, new, parent, root);
581} 559}
582 560
583void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
584 struct rb_root_cached *root)
585{
586 rb_replace_node(victim, new, &root->rb_root);
587
588 if (root->rb_leftmost == victim)
589 root->rb_leftmost = new;
590}
591
592static struct rb_node *rb_left_deepest_node(const struct rb_node *node) 561static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
593{ 562{
594 for (;;) { 563 for (;;) {