diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 22:55:54 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 22:55:54 -0400 |
| commit | 0e06f5c0deeef0332a5da2ecb8f1fcf3e024d958 (patch) | |
| tree | e0f0af4aadf10c713c5cf1b65356844b3c9b3215 /include | |
| parent | f7816ad0f878dacd5f0120476f9b836ccf8699ea (diff) | |
| parent | 8f19b0c058d93a678a99dd6fec03af2e769943f2 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2
- most(?) of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (125 commits)
thp: fix comments of __pmd_trans_huge_lock()
cgroup: remove unnecessary 0 check from css_from_id()
cgroup: fix idr leak for the first cgroup root
mm: memcontrol: fix documentation for compound parameter
mm: memcontrol: remove BUG_ON in uncharge_list
mm: fix build warnings in <linux/compaction.h>
mm, thp: convert from optimistic swapin collapsing to conservative
mm, thp: fix comment inconsistency for swapin readahead functions
thp: update Documentation/{vm/transhuge,filesystems/proc}.txt
shmem: split huge pages beyond i_size under memory pressure
thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE
khugepaged: add support of collapse for tmpfs/shmem pages
shmem: make shmem_inode_info::lock irq-safe
khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()
thp: extract khugepaged from mm/huge_memory.c
shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings
shmem: add huge pages support
shmem: get_unmapped_area align huge page
shmem: prepare huge= mount option and sysfs knob
mm, rmap: account shmem thp pages
...
Diffstat (limited to 'include')
36 files changed, 455 insertions, 344 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 9dbb739cafa0..c6d667187608 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
| @@ -107,6 +107,12 @@ struct mmu_gather { | |||
| 107 | struct mmu_gather_batch local; | 107 | struct mmu_gather_batch local; |
| 108 | struct page *__pages[MMU_GATHER_BUNDLE]; | 108 | struct page *__pages[MMU_GATHER_BUNDLE]; |
| 109 | unsigned int batch_count; | 109 | unsigned int batch_count; |
| 110 | /* | ||
| 111 | * __tlb_adjust_range will track the new addr here, | ||
| 112 | * so that we can adjust the range after the flush | ||
| 113 | */ | ||
| 114 | unsigned long addr; | ||
| 115 | int page_size; | ||
| 110 | }; | 116 | }; |
| 111 | 117 | ||
| 112 | #define HAVE_GENERIC_MMU_GATHER | 118 | #define HAVE_GENERIC_MMU_GATHER |
| @@ -115,23 +121,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long | |||
| 115 | void tlb_flush_mmu(struct mmu_gather *tlb); | 121 | void tlb_flush_mmu(struct mmu_gather *tlb); |
| 116 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, | 122 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, |
| 117 | unsigned long end); | 123 | unsigned long end); |
| 118 | int __tlb_remove_page(struct mmu_gather *tlb, struct page *page); | 124 | extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, |
| 119 | 125 | int page_size); | |
| 120 | /* tlb_remove_page | ||
| 121 | * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when | ||
| 122 | * required. | ||
| 123 | */ | ||
| 124 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 125 | { | ||
| 126 | if (!__tlb_remove_page(tlb, page)) | ||
| 127 | tlb_flush_mmu(tlb); | ||
| 128 | } | ||
| 129 | 126 | ||
| 130 | static inline void __tlb_adjust_range(struct mmu_gather *tlb, | 127 | static inline void __tlb_adjust_range(struct mmu_gather *tlb, |
| 131 | unsigned long address) | 128 | unsigned long address) |
| 132 | { | 129 | { |
| 133 | tlb->start = min(tlb->start, address); | 130 | tlb->start = min(tlb->start, address); |
| 134 | tlb->end = max(tlb->end, address + PAGE_SIZE); | 131 | tlb->end = max(tlb->end, address + PAGE_SIZE); |
| 132 | /* | ||
| 133 | * Track the last address with which we adjusted the range. This | ||
| 134 | * will be used later to adjust again after a mmu_flush due to | ||
| 135 | * failed __tlb_remove_page | ||
| 136 | */ | ||
| 137 | tlb->addr = address; | ||
| 135 | } | 138 | } |
| 136 | 139 | ||
| 137 | static inline void __tlb_reset_range(struct mmu_gather *tlb) | 140 | static inline void __tlb_reset_range(struct mmu_gather *tlb) |
| @@ -144,6 +147,40 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) | |||
| 144 | } | 147 | } |
| 145 | } | 148 | } |
| 146 | 149 | ||
| 150 | static inline void tlb_remove_page_size(struct mmu_gather *tlb, | ||
| 151 | struct page *page, int page_size) | ||
| 152 | { | ||
| 153 | if (__tlb_remove_page_size(tlb, page, page_size)) { | ||
| 154 | tlb_flush_mmu(tlb); | ||
| 155 | tlb->page_size = page_size; | ||
| 156 | __tlb_adjust_range(tlb, tlb->addr); | ||
| 157 | __tlb_remove_page_size(tlb, page, page_size); | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 162 | { | ||
| 163 | return __tlb_remove_page_size(tlb, page, PAGE_SIZE); | ||
| 164 | } | ||
| 165 | |||
| 166 | /* tlb_remove_page | ||
| 167 | * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when | ||
| 168 | * required. | ||
| 169 | */ | ||
| 170 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 171 | { | ||
| 172 | return tlb_remove_page_size(tlb, page, PAGE_SIZE); | ||
| 173 | } | ||
| 174 | |||
| 175 | static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) | ||
| 176 | { | ||
| 177 | /* active->nr should be zero when we call this */ | ||
| 178 | VM_BUG_ON_PAGE(tlb->active->nr, page); | ||
| 179 | tlb->page_size = PAGE_SIZE; | ||
| 180 | __tlb_adjust_range(tlb, tlb->addr); | ||
| 181 | return __tlb_remove_page(tlb, page); | ||
| 182 | } | ||
| 183 | |||
| 147 | /* | 184 | /* |
| 148 | * In the case of tlb vma handling, we can optimise these away in the | 185 | * In the case of tlb vma handling, we can optimise these away in the |
| 149 | * case where we're doing a full MM flush. When we're doing a munmap, | 186 | * case where we're doing a full MM flush. When we're doing a munmap, |
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 9b0a15d06a4f..79542b2698ec 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | #include <linux/migrate.h> | 48 | #include <linux/migrate.h> |
| 49 | #include <linux/gfp.h> | 49 | #include <linux/gfp.h> |
| 50 | #include <linux/err.h> | 50 | #include <linux/err.h> |
| 51 | #include <linux/fs.h> | ||
| 51 | 52 | ||
| 52 | /* | 53 | /* |
| 53 | * Balloon device information descriptor. | 54 | * Balloon device information descriptor. |
| @@ -62,6 +63,7 @@ struct balloon_dev_info { | |||
| 62 | struct list_head pages; /* Pages enqueued & handled to Host */ | 63 | struct list_head pages; /* Pages enqueued & handled to Host */ |
| 63 | int (*migratepage)(struct balloon_dev_info *, struct page *newpage, | 64 | int (*migratepage)(struct balloon_dev_info *, struct page *newpage, |
| 64 | struct page *page, enum migrate_mode mode); | 65 | struct page *page, enum migrate_mode mode); |
| 66 | struct inode *inode; | ||
| 65 | }; | 67 | }; |
| 66 | 68 | ||
| 67 | extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); | 69 | extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); |
| @@ -73,45 +75,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) | |||
| 73 | spin_lock_init(&balloon->pages_lock); | 75 | spin_lock_init(&balloon->pages_lock); |
| 74 | INIT_LIST_HEAD(&balloon->pages); | 76 | INIT_LIST_HEAD(&balloon->pages); |
| 75 | balloon->migratepage = NULL; | 77 | balloon->migratepage = NULL; |
| 78 | balloon->inode = NULL; | ||
| 76 | } | 79 | } |
| 77 | 80 | ||
| 78 | #ifdef CONFIG_BALLOON_COMPACTION | 81 | #ifdef CONFIG_BALLOON_COMPACTION |
| 79 | extern bool balloon_page_isolate(struct page *page); | 82 | extern const struct address_space_operations balloon_aops; |
| 83 | extern bool balloon_page_isolate(struct page *page, | ||
| 84 | isolate_mode_t mode); | ||
| 80 | extern void balloon_page_putback(struct page *page); | 85 | extern void balloon_page_putback(struct page *page); |
| 81 | extern int balloon_page_migrate(struct page *newpage, | 86 | extern int balloon_page_migrate(struct address_space *mapping, |
| 87 | struct page *newpage, | ||
| 82 | struct page *page, enum migrate_mode mode); | 88 | struct page *page, enum migrate_mode mode); |
| 83 | 89 | ||
| 84 | /* | 90 | /* |
| 85 | * __is_movable_balloon_page - helper to perform @page PageBalloon tests | ||
| 86 | */ | ||
| 87 | static inline bool __is_movable_balloon_page(struct page *page) | ||
| 88 | { | ||
| 89 | return PageBalloon(page); | ||
| 90 | } | ||
| 91 | |||
| 92 | /* | ||
| 93 | * balloon_page_movable - test PageBalloon to identify balloon pages | ||
| 94 | * and PagePrivate to check that the page is not | ||
| 95 | * isolated and can be moved by compaction/migration. | ||
| 96 | * | ||
| 97 | * As we might return false positives in the case of a balloon page being just | ||
| 98 | * released under us, this need to be re-tested later, under the page lock. | ||
| 99 | */ | ||
| 100 | static inline bool balloon_page_movable(struct page *page) | ||
| 101 | { | ||
| 102 | return PageBalloon(page) && PagePrivate(page); | ||
| 103 | } | ||
| 104 | |||
| 105 | /* | ||
| 106 | * isolated_balloon_page - identify an isolated balloon page on private | ||
| 107 | * compaction/migration page lists. | ||
| 108 | */ | ||
| 109 | static inline bool isolated_balloon_page(struct page *page) | ||
| 110 | { | ||
| 111 | return PageBalloon(page); | ||
| 112 | } | ||
| 113 | |||
| 114 | /* | ||
| 115 | * balloon_page_insert - insert a page into the balloon's page list and make | 91 | * balloon_page_insert - insert a page into the balloon's page list and make |
| 116 | * the page->private assignment accordingly. | 92 | * the page->private assignment accordingly. |
| 117 | * @balloon : pointer to balloon device | 93 | * @balloon : pointer to balloon device |
| @@ -124,7 +100,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, | |||
| 124 | struct page *page) | 100 | struct page *page) |
| 125 | { | 101 | { |
| 126 | __SetPageBalloon(page); | 102 | __SetPageBalloon(page); |
| 127 | SetPagePrivate(page); | 103 | __SetPageMovable(page, balloon->inode->i_mapping); |
| 128 | set_page_private(page, (unsigned long)balloon); | 104 | set_page_private(page, (unsigned long)balloon); |
| 129 | list_add(&page->lru, &balloon->pages); | 105 | list_add(&page->lru, &balloon->pages); |
| 130 | } | 106 | } |
| @@ -140,11 +116,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, | |||
| 140 | static inline void balloon_page_delete(struct page *page) | 116 | static inline void balloon_page_delete(struct page *page) |
| 141 | { | 117 | { |
| 142 | __ClearPageBalloon(page); | 118 | __ClearPageBalloon(page); |
| 119 | __ClearPageMovable(page); | ||
| 143 | set_page_private(page, 0); | 120 | set_page_private(page, 0); |
| 144 | if (PagePrivate(page)) { | 121 | /* |
| 145 | ClearPagePrivate(page); | 122 | * No touch page.lru field once @page has been isolated |
| 123 | * because VM is using the field. | ||
| 124 | */ | ||
| 125 | if (!PageIsolated(page)) | ||
| 146 | list_del(&page->lru); | 126 | list_del(&page->lru); |
| 147 | } | ||
| 148 | } | 127 | } |
| 149 | 128 | ||
| 150 | /* | 129 | /* |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index a58c852a268f..1a02dab16646 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
| @@ -212,6 +212,7 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i | |||
| 212 | #endif /* CONFIG_COMPACTION */ | 212 | #endif /* CONFIG_COMPACTION */ |
| 213 | 213 | ||
| 214 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | 214 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) |
| 215 | struct node; | ||
| 215 | extern int compaction_register_node(struct node *node); | 216 | extern int compaction_register_node(struct node *node); |
| 216 | extern void compaction_unregister_node(struct node *node); | 217 | extern void compaction_unregister_node(struct node *node); |
| 217 | 218 | ||
diff --git a/include/linux/dax.h b/include/linux/dax.h index 43d5f0b799c7..9c6dc7704043 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
| @@ -14,7 +14,6 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, | |||
| 14 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | 14 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); |
| 15 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | 15 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); |
| 16 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); | 16 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); |
| 17 | int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); | ||
| 18 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); | 17 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
| 19 | void dax_wake_mapping_entry_waiter(struct address_space *mapping, | 18 | void dax_wake_mapping_entry_waiter(struct address_space *mapping, |
| 20 | pgoff_t index, bool wake_all); | 19 | pgoff_t index, bool wake_all); |
| @@ -46,19 +45,15 @@ static inline int __dax_zero_page_range(struct block_device *bdev, | |||
| 46 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) | 45 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) |
| 47 | int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, | 46 | int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, |
| 48 | unsigned int flags, get_block_t); | 47 | unsigned int flags, get_block_t); |
| 49 | int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, | ||
| 50 | unsigned int flags, get_block_t); | ||
| 51 | #else | 48 | #else |
| 52 | static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, | 49 | static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, |
| 53 | pmd_t *pmd, unsigned int flags, get_block_t gb) | 50 | pmd_t *pmd, unsigned int flags, get_block_t gb) |
| 54 | { | 51 | { |
| 55 | return VM_FAULT_FALLBACK; | 52 | return VM_FAULT_FALLBACK; |
| 56 | } | 53 | } |
| 57 | #define __dax_pmd_fault dax_pmd_fault | ||
| 58 | #endif | 54 | #endif |
| 59 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | 55 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); |
| 60 | #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) | 56 | #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) |
| 61 | #define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb) | ||
| 62 | 57 | ||
| 63 | static inline bool vma_is_dax(struct vm_area_struct *vma) | 58 | static inline bool vma_is_dax(struct vm_area_struct *vma) |
| 64 | { | 59 | { |
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 46056cb161fc..d82bf1994485 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h | |||
| @@ -38,7 +38,7 @@ struct debug_obj { | |||
| 38 | * @name: name of the object type | 38 | * @name: name of the object type |
| 39 | * @debug_hint: function returning address, which have associated | 39 | * @debug_hint: function returning address, which have associated |
| 40 | * kernel symbol, to allow identify the object | 40 | * kernel symbol, to allow identify the object |
| 41 | * @is_static_object return true if the obj is static, otherwise return false | 41 | * @is_static_object: return true if the obj is static, otherwise return false |
| 42 | * @fixup_init: fixup function, which is called when the init check | 42 | * @fixup_init: fixup function, which is called when the init check |
| 43 | * fails. All fixup functions must return true if fixup | 43 | * fails. All fixup functions must return true if fixup |
| 44 | * was successful, otherwise return false | 44 | * was successful, otherwise return false |
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index e65ef959546c..c46d2aa16d81 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
| 5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
| 6 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
| 7 | #include <linux/jump_label.h> | ||
| 7 | 8 | ||
| 8 | struct frontswap_ops { | 9 | struct frontswap_ops { |
| 9 | void (*init)(unsigned); /* this swap type was just swapon'ed */ | 10 | void (*init)(unsigned); /* this swap type was just swapon'ed */ |
| @@ -14,7 +15,6 @@ struct frontswap_ops { | |||
| 14 | struct frontswap_ops *next; /* private pointer to next ops */ | 15 | struct frontswap_ops *next; /* private pointer to next ops */ |
| 15 | }; | 16 | }; |
| 16 | 17 | ||
| 17 | extern bool frontswap_enabled; | ||
| 18 | extern void frontswap_register_ops(struct frontswap_ops *ops); | 18 | extern void frontswap_register_ops(struct frontswap_ops *ops); |
| 19 | extern void frontswap_shrink(unsigned long); | 19 | extern void frontswap_shrink(unsigned long); |
| 20 | extern unsigned long frontswap_curr_pages(void); | 20 | extern unsigned long frontswap_curr_pages(void); |
| @@ -30,7 +30,12 @@ extern void __frontswap_invalidate_page(unsigned, pgoff_t); | |||
| 30 | extern void __frontswap_invalidate_area(unsigned); | 30 | extern void __frontswap_invalidate_area(unsigned); |
| 31 | 31 | ||
| 32 | #ifdef CONFIG_FRONTSWAP | 32 | #ifdef CONFIG_FRONTSWAP |
| 33 | #define frontswap_enabled (1) | 33 | extern struct static_key_false frontswap_enabled_key; |
| 34 | |||
| 35 | static inline bool frontswap_enabled(void) | ||
| 36 | { | ||
| 37 | return static_branch_unlikely(&frontswap_enabled_key); | ||
| 38 | } | ||
| 34 | 39 | ||
| 35 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) | 40 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) |
| 36 | { | 41 | { |
| @@ -50,7 +55,10 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) | |||
| 50 | #else | 55 | #else |
| 51 | /* all inline routines become no-ops and all externs are ignored */ | 56 | /* all inline routines become no-ops and all externs are ignored */ |
| 52 | 57 | ||
| 53 | #define frontswap_enabled (0) | 58 | static inline bool frontswap_enabled(void) |
| 59 | { | ||
| 60 | return false; | ||
| 61 | } | ||
| 54 | 62 | ||
| 55 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) | 63 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) |
| 56 | { | 64 | { |
| @@ -70,37 +78,35 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) | |||
| 70 | 78 | ||
| 71 | static inline int frontswap_store(struct page *page) | 79 | static inline int frontswap_store(struct page *page) |
| 72 | { | 80 | { |
| 73 | int ret = -1; | 81 | if (frontswap_enabled()) |
| 82 | return __frontswap_store(page); | ||
| 74 | 83 | ||
| 75 | if (frontswap_enabled) | 84 | return -1; |
| 76 | ret = __frontswap_store(page); | ||
| 77 | return ret; | ||
| 78 | } | 85 | } |
| 79 | 86 | ||
| 80 | static inline int frontswap_load(struct page *page) | 87 | static inline int frontswap_load(struct page *page) |
| 81 | { | 88 | { |
| 82 | int ret = -1; | 89 | if (frontswap_enabled()) |
| 90 | return __frontswap_load(page); | ||
| 83 | 91 | ||
| 84 | if (frontswap_enabled) | 92 | return -1; |
| 85 | ret = __frontswap_load(page); | ||
| 86 | return ret; | ||
| 87 | } | 93 | } |
| 88 | 94 | ||
| 89 | static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) | 95 | static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) |
| 90 | { | 96 | { |
| 91 | if (frontswap_enabled) | 97 | if (frontswap_enabled()) |
| 92 | __frontswap_invalidate_page(type, offset); | 98 | __frontswap_invalidate_page(type, offset); |
| 93 | } | 99 | } |
| 94 | 100 | ||
| 95 | static inline void frontswap_invalidate_area(unsigned type) | 101 | static inline void frontswap_invalidate_area(unsigned type) |
| 96 | { | 102 | { |
| 97 | if (frontswap_enabled) | 103 | if (frontswap_enabled()) |
| 98 | __frontswap_invalidate_area(type); | 104 | __frontswap_invalidate_area(type); |
| 99 | } | 105 | } |
| 100 | 106 | ||
| 101 | static inline void frontswap_init(unsigned type, unsigned long *map) | 107 | static inline void frontswap_init(unsigned type, unsigned long *map) |
| 102 | { | 108 | { |
| 103 | if (frontswap_enabled) | 109 | if (frontswap_enabled()) |
| 104 | __frontswap_init(type, map); | 110 | __frontswap_init(type, map); |
| 105 | } | 111 | } |
| 106 | 112 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index dc488662ce0b..f2a69f20926f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -398,6 +398,8 @@ struct address_space_operations { | |||
| 398 | */ | 398 | */ |
| 399 | int (*migratepage) (struct address_space *, | 399 | int (*migratepage) (struct address_space *, |
| 400 | struct page *, struct page *, enum migrate_mode); | 400 | struct page *, struct page *, enum migrate_mode); |
| 401 | bool (*isolate_page)(struct page *, isolate_mode_t); | ||
| 402 | void (*putback_page)(struct page *); | ||
| 401 | int (*launder_page) (struct page *); | 403 | int (*launder_page) (struct page *); |
| 402 | int (*is_partially_uptodate) (struct page *, unsigned long, | 404 | int (*is_partially_uptodate) (struct page *, unsigned long, |
| 403 | unsigned long); | 405 | unsigned long); |
| @@ -661,6 +663,7 @@ struct inode { | |||
| 661 | #endif | 663 | #endif |
| 662 | struct list_head i_lru; /* inode LRU list */ | 664 | struct list_head i_lru; /* inode LRU list */ |
| 663 | struct list_head i_sb_list; | 665 | struct list_head i_sb_list; |
| 666 | struct list_head i_wb_list; /* backing dev writeback list */ | ||
| 664 | union { | 667 | union { |
| 665 | struct hlist_head i_dentry; | 668 | struct hlist_head i_dentry; |
| 666 | struct rcu_head i_rcu; | 669 | struct rcu_head i_rcu; |
| @@ -1444,6 +1447,9 @@ struct super_block { | |||
| 1444 | /* s_inode_list_lock protects s_inodes */ | 1447 | /* s_inode_list_lock protects s_inodes */ |
| 1445 | spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; | 1448 | spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; |
| 1446 | struct list_head s_inodes; /* all inodes */ | 1449 | struct list_head s_inodes; /* all inodes */ |
| 1450 | |||
| 1451 | spinlock_t s_inode_wblist_lock; | ||
| 1452 | struct list_head s_inodes_wb; /* writeback inodes */ | ||
| 1447 | }; | 1453 | }; |
| 1448 | 1454 | ||
| 1449 | extern struct timespec current_fs_time(struct super_block *sb); | 1455 | extern struct timespec current_fs_time(struct super_block *sb); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 570383a41853..c29e9d347bc6 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -78,8 +78,7 @@ struct vm_area_struct; | |||
| 78 | * __GFP_THISNODE forces the allocation to be satisified from the requested | 78 | * __GFP_THISNODE forces the allocation to be satisified from the requested |
| 79 | * node with no fallbacks or placement policy enforcements. | 79 | * node with no fallbacks or placement policy enforcements. |
| 80 | * | 80 | * |
| 81 | * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant | 81 | * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. |
| 82 | * to kmem allocations). | ||
| 83 | */ | 82 | */ |
| 84 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) | 83 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) |
| 85 | #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) | 84 | #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) |
| @@ -486,10 +485,6 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, | |||
| 486 | #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ | 485 | #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ |
| 487 | alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) | 486 | alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) |
| 488 | 487 | ||
| 489 | extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order); | ||
| 490 | extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, | ||
| 491 | unsigned int order); | ||
| 492 | |||
| 493 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); | 488 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); |
| 494 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); | 489 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); |
| 495 | 490 | ||
| @@ -513,9 +508,6 @@ extern void *__alloc_page_frag(struct page_frag_cache *nc, | |||
| 513 | unsigned int fragsz, gfp_t gfp_mask); | 508 | unsigned int fragsz, gfp_t gfp_mask); |
| 514 | extern void __free_page_frag(void *addr); | 509 | extern void __free_page_frag(void *addr); |
| 515 | 510 | ||
| 516 | extern void __free_kmem_pages(struct page *page, unsigned int order); | ||
| 517 | extern void free_kmem_pages(unsigned long addr, unsigned int order); | ||
| 518 | |||
| 519 | #define __free_page(page) __free_pages((page), 0) | 511 | #define __free_page(page) __free_pages((page), 0) |
| 520 | #define free_page(addr) free_pages((addr), 0) | 512 | #define free_page(addr) free_pages((addr), 0) |
| 521 | 513 | ||
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f0a7a0320300..92ce91c03cd0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
| @@ -1,20 +1,12 @@ | |||
| 1 | #ifndef _LINUX_HUGE_MM_H | 1 | #ifndef _LINUX_HUGE_MM_H |
| 2 | #define _LINUX_HUGE_MM_H | 2 | #define _LINUX_HUGE_MM_H |
| 3 | 3 | ||
| 4 | extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, | 4 | extern int do_huge_pmd_anonymous_page(struct fault_env *fe); |
| 5 | struct vm_area_struct *vma, | ||
| 6 | unsigned long address, pmd_t *pmd, | ||
| 7 | unsigned int flags); | ||
| 8 | extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | 5 | extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 9 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, | 6 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
| 10 | struct vm_area_struct *vma); | 7 | struct vm_area_struct *vma); |
| 11 | extern void huge_pmd_set_accessed(struct mm_struct *mm, | 8 | extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); |
| 12 | struct vm_area_struct *vma, | 9 | extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); |
| 13 | unsigned long address, pmd_t *pmd, | ||
| 14 | pmd_t orig_pmd, int dirty); | ||
| 15 | extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | ||
| 16 | unsigned long address, pmd_t *pmd, | ||
| 17 | pmd_t orig_pmd); | ||
| 18 | extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | 10 | extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
| 19 | unsigned long addr, | 11 | unsigned long addr, |
| 20 | pmd_t *pmd, | 12 | pmd_t *pmd, |
| @@ -49,6 +41,18 @@ enum transparent_hugepage_flag { | |||
| 49 | #endif | 41 | #endif |
| 50 | }; | 42 | }; |
| 51 | 43 | ||
| 44 | struct kobject; | ||
| 45 | struct kobj_attribute; | ||
| 46 | |||
| 47 | extern ssize_t single_hugepage_flag_store(struct kobject *kobj, | ||
| 48 | struct kobj_attribute *attr, | ||
| 49 | const char *buf, size_t count, | ||
| 50 | enum transparent_hugepage_flag flag); | ||
| 51 | extern ssize_t single_hugepage_flag_show(struct kobject *kobj, | ||
| 52 | struct kobj_attribute *attr, char *buf, | ||
| 53 | enum transparent_hugepage_flag flag); | ||
| 54 | extern struct kobj_attribute shmem_enabled_attr; | ||
| 55 | |||
| 52 | #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) | 56 | #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) |
| 53 | #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) | 57 | #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) |
| 54 | 58 | ||
| @@ -134,8 +138,7 @@ static inline int hpage_nr_pages(struct page *page) | |||
| 134 | return 1; | 138 | return 1; |
| 135 | } | 139 | } |
| 136 | 140 | ||
| 137 | extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | 141 | extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); |
| 138 | unsigned long addr, pmd_t pmd, pmd_t *pmdp); | ||
| 139 | 142 | ||
| 140 | extern struct page *huge_zero_page; | 143 | extern struct page *huge_zero_page; |
| 141 | 144 | ||
| @@ -152,6 +155,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) | |||
| 152 | struct page *get_huge_zero_page(void); | 155 | struct page *get_huge_zero_page(void); |
| 153 | void put_huge_zero_page(void); | 156 | void put_huge_zero_page(void); |
| 154 | 157 | ||
| 158 | #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) | ||
| 159 | |||
| 155 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | 160 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 156 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) | 161 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) |
| 157 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) | 162 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) |
| @@ -161,6 +166,8 @@ void put_huge_zero_page(void); | |||
| 161 | 166 | ||
| 162 | #define transparent_hugepage_enabled(__vma) 0 | 167 | #define transparent_hugepage_enabled(__vma) 0 |
| 163 | 168 | ||
| 169 | static inline void prep_transhuge_page(struct page *page) {} | ||
| 170 | |||
| 164 | #define transparent_hugepage_flags 0UL | 171 | #define transparent_hugepage_flags 0UL |
| 165 | static inline int | 172 | static inline int |
| 166 | split_huge_page_to_list(struct page *page, struct list_head *list) | 173 | split_huge_page_to_list(struct page *page, struct list_head *list) |
| @@ -196,8 +203,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, | |||
| 196 | return NULL; | 203 | return NULL; |
| 197 | } | 204 | } |
| 198 | 205 | ||
| 199 | static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | 206 | static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) |
| 200 | unsigned long addr, pmd_t pmd, pmd_t *pmdp) | ||
| 201 | { | 207 | { |
| 202 | return 0; | 208 | return 0; |
| 203 | } | 209 | } |
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index eeb307985715..1e032a1ddb3e 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h | |||
| @@ -4,6 +4,11 @@ | |||
| 4 | #include <linux/sched.h> /* MMF_VM_HUGEPAGE */ | 4 | #include <linux/sched.h> /* MMF_VM_HUGEPAGE */ |
| 5 | 5 | ||
| 6 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 6 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 7 | extern struct attribute_group khugepaged_attr_group; | ||
| 8 | |||
| 9 | extern int khugepaged_init(void); | ||
| 10 | extern void khugepaged_destroy(void); | ||
| 11 | extern int start_stop_khugepaged(void); | ||
| 7 | extern int __khugepaged_enter(struct mm_struct *mm); | 12 | extern int __khugepaged_enter(struct mm_struct *mm); |
| 8 | extern void __khugepaged_exit(struct mm_struct *mm); | 13 | extern void __khugepaged_exit(struct mm_struct *mm); |
| 9 | extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, | 14 | extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 7ae216a39c9e..481c8c4627ca 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
| @@ -43,8 +43,7 @@ static inline struct stable_node *page_stable_node(struct page *page) | |||
| 43 | static inline void set_page_stable_node(struct page *page, | 43 | static inline void set_page_stable_node(struct page *page, |
| 44 | struct stable_node *stable_node) | 44 | struct stable_node *stable_node) |
| 45 | { | 45 | { |
| 46 | page->mapping = (void *)stable_node + | 46 | page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); |
| 47 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | ||
| 48 | } | 47 | } |
| 49 | 48 | ||
| 50 | /* | 49 | /* |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 3106ac1c895e..6c14b6179727 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
| @@ -73,8 +73,8 @@ extern bool movable_node_enabled; | |||
| 73 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | 73 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
| 74 | 74 | ||
| 75 | phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, | 75 | phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, |
| 76 | phys_addr_t start, phys_addr_t end, | 76 | phys_addr_t start, phys_addr_t end, |
| 77 | int nid, ulong flags); | 77 | int nid, ulong flags); |
| 78 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, | 78 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, |
| 79 | phys_addr_t size, phys_addr_t align); | 79 | phys_addr_t size, phys_addr_t align); |
| 80 | phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); | 80 | phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); |
| @@ -110,7 +110,7 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, | |||
| 110 | phys_addr_t *out_end, int *out_nid); | 110 | phys_addr_t *out_end, int *out_nid); |
| 111 | 111 | ||
| 112 | void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, | 112 | void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, |
| 113 | phys_addr_t *out_end); | 113 | phys_addr_t *out_end); |
| 114 | 114 | ||
| 115 | /** | 115 | /** |
| 116 | * for_each_mem_range - iterate through memblock areas from type_a and not | 116 | * for_each_mem_range - iterate through memblock areas from type_a and not |
| @@ -148,7 +148,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, | |||
| 148 | p_start, p_end, p_nid) \ | 148 | p_start, p_end, p_nid) \ |
| 149 | for (i = (u64)ULLONG_MAX, \ | 149 | for (i = (u64)ULLONG_MAX, \ |
| 150 | __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ | 150 | __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ |
| 151 | p_start, p_end, p_nid); \ | 151 | p_start, p_end, p_nid); \ |
| 152 | i != (u64)ULLONG_MAX; \ | 152 | i != (u64)ULLONG_MAX; \ |
| 153 | __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ | 153 | __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ |
| 154 | p_start, p_end, p_nid)) | 154 | p_start, p_end, p_nid)) |
| @@ -163,8 +163,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, | |||
| 163 | * is initialized. | 163 | * is initialized. |
| 164 | */ | 164 | */ |
| 165 | #define for_each_reserved_mem_region(i, p_start, p_end) \ | 165 | #define for_each_reserved_mem_region(i, p_start, p_end) \ |
| 166 | for (i = 0UL, \ | 166 | for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ |
| 167 | __next_reserved_mem_region(&i, p_start, p_end); \ | ||
| 168 | i != (u64)ULLONG_MAX; \ | 167 | i != (u64)ULLONG_MAX; \ |
| 169 | __next_reserved_mem_region(&i, p_start, p_end)) | 168 | __next_reserved_mem_region(&i, p_start, p_end)) |
| 170 | 169 | ||
| @@ -403,15 +402,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo | |||
| 403 | } | 402 | } |
| 404 | 403 | ||
| 405 | #define for_each_memblock(memblock_type, region) \ | 404 | #define for_each_memblock(memblock_type, region) \ |
| 406 | for (region = memblock.memblock_type.regions; \ | 405 | for (region = memblock.memblock_type.regions; \ |
| 407 | region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ | 406 | region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ |
| 408 | region++) | 407 | region++) |
| 409 | 408 | ||
| 410 | #define for_each_memblock_type(memblock_type, rgn) \ | 409 | #define for_each_memblock_type(memblock_type, rgn) \ |
| 411 | idx = 0; \ | 410 | for (idx = 0, rgn = &memblock_type->regions[0]; \ |
| 412 | rgn = &memblock_type->regions[idx]; \ | 411 | idx < memblock_type->cnt; \ |
| 413 | for (idx = 0; idx < memblock_type->cnt; \ | 412 | idx++, rgn = &memblock_type->regions[idx]) |
| 414 | idx++,rgn = &memblock_type->regions[idx]) | ||
| 415 | 413 | ||
| 416 | #ifdef CONFIG_MEMTEST | 414 | #ifdef CONFIG_MEMTEST |
| 417 | extern void early_memtest(phys_addr_t start, phys_addr_t end); | 415 | extern void early_memtest(phys_addr_t start, phys_addr_t end); |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 56e6069d2452..71aff733a497 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -749,6 +749,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) | |||
| 749 | } | 749 | } |
| 750 | #endif | 750 | #endif |
| 751 | 751 | ||
| 752 | struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); | ||
| 753 | void memcg_kmem_put_cache(struct kmem_cache *cachep); | ||
| 754 | int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | ||
| 755 | struct mem_cgroup *memcg); | ||
| 756 | int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); | ||
| 757 | void memcg_kmem_uncharge(struct page *page, int order); | ||
| 758 | |||
| 752 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) | 759 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
| 753 | extern struct static_key_false memcg_kmem_enabled_key; | 760 | extern struct static_key_false memcg_kmem_enabled_key; |
| 754 | 761 | ||
| @@ -770,22 +777,6 @@ static inline bool memcg_kmem_enabled(void) | |||
| 770 | } | 777 | } |
| 771 | 778 | ||
| 772 | /* | 779 | /* |
| 773 | * In general, we'll do everything in our power to not incur in any overhead | ||
| 774 | * for non-memcg users for the kmem functions. Not even a function call, if we | ||
| 775 | * can avoid it. | ||
| 776 | * | ||
| 777 | * Therefore, we'll inline all those functions so that in the best case, we'll | ||
| 778 | * see that kmemcg is off for everybody and proceed quickly. If it is on, | ||
| 779 | * we'll still do most of the flag checking inline. We check a lot of | ||
| 780 | * conditions, but because they are pretty simple, they are expected to be | ||
| 781 | * fast. | ||
| 782 | */ | ||
| 783 | int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | ||
| 784 | struct mem_cgroup *memcg); | ||
| 785 | int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); | ||
| 786 | void __memcg_kmem_uncharge(struct page *page, int order); | ||
| 787 | |||
| 788 | /* | ||
| 789 | * helper for accessing a memcg's index. It will be used as an index in the | 780 | * helper for accessing a memcg's index. It will be used as an index in the |
| 790 | * child cache array in kmem_cache, and also to derive its name. This function | 781 | * child cache array in kmem_cache, and also to derive its name. This function |
| 791 | * will return -1 when this is not a kmem-limited memcg. | 782 | * will return -1 when this is not a kmem-limited memcg. |
| @@ -795,67 +786,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) | |||
| 795 | return memcg ? memcg->kmemcg_id : -1; | 786 | return memcg ? memcg->kmemcg_id : -1; |
| 796 | } | 787 | } |
| 797 | 788 | ||
| 798 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); | ||
| 799 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); | ||
| 800 | |||
| 801 | static inline bool __memcg_kmem_bypass(void) | ||
| 802 | { | ||
| 803 | if (!memcg_kmem_enabled()) | ||
| 804 | return true; | ||
| 805 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) | ||
| 806 | return true; | ||
| 807 | return false; | ||
| 808 | } | ||
| 809 | |||
| 810 | /** | ||
| 811 | * memcg_kmem_charge: charge a kmem page | ||
| 812 | * @page: page to charge | ||
| 813 | * @gfp: reclaim mode | ||
| 814 | * @order: allocation order | ||
| 815 | * | ||
| 816 | * Returns 0 on success, an error code on failure. | ||
| 817 | */ | ||
| 818 | static __always_inline int memcg_kmem_charge(struct page *page, | ||
| 819 | gfp_t gfp, int order) | ||
| 820 | { | ||
| 821 | if (__memcg_kmem_bypass()) | ||
| 822 | return 0; | ||
| 823 | if (!(gfp & __GFP_ACCOUNT)) | ||
| 824 | return 0; | ||
| 825 | return __memcg_kmem_charge(page, gfp, order); | ||
| 826 | } | ||
| 827 | |||
| 828 | /** | ||
| 829 | * memcg_kmem_uncharge: uncharge a kmem page | ||
| 830 | * @page: page to uncharge | ||
| 831 | * @order: allocation order | ||
| 832 | */ | ||
| 833 | static __always_inline void memcg_kmem_uncharge(struct page *page, int order) | ||
| 834 | { | ||
| 835 | if (memcg_kmem_enabled()) | ||
| 836 | __memcg_kmem_uncharge(page, order); | ||
| 837 | } | ||
| 838 | |||
| 839 | /** | ||
| 840 | * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation | ||
| 841 | * @cachep: the original global kmem cache | ||
| 842 | * | ||
| 843 | * All memory allocated from a per-memcg cache is charged to the owner memcg. | ||
| 844 | */ | ||
| 845 | static __always_inline struct kmem_cache * | ||
| 846 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | ||
| 847 | { | ||
| 848 | if (__memcg_kmem_bypass()) | ||
| 849 | return cachep; | ||
| 850 | return __memcg_kmem_get_cache(cachep, gfp); | ||
| 851 | } | ||
| 852 | |||
| 853 | static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | ||
| 854 | { | ||
| 855 | if (memcg_kmem_enabled()) | ||
| 856 | __memcg_kmem_put_cache(cachep); | ||
| 857 | } | ||
| 858 | |||
| 859 | /** | 789 | /** |
| 860 | * memcg_kmem_update_page_stat - update kmem page state statistics | 790 | * memcg_kmem_update_page_stat - update kmem page state statistics |
| 861 | * @page: the page | 791 | * @page: the page |
| @@ -878,15 +808,6 @@ static inline bool memcg_kmem_enabled(void) | |||
| 878 | return false; | 808 | return false; |
| 879 | } | 809 | } |
| 880 | 810 | ||
| 881 | static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | ||
| 882 | { | ||
| 883 | return 0; | ||
| 884 | } | ||
| 885 | |||
| 886 | static inline void memcg_kmem_uncharge(struct page *page, int order) | ||
| 887 | { | ||
| 888 | } | ||
| 889 | |||
| 890 | static inline int memcg_cache_id(struct mem_cgroup *memcg) | 811 | static inline int memcg_cache_id(struct mem_cgroup *memcg) |
| 891 | { | 812 | { |
| 892 | return -1; | 813 | return -1; |
| @@ -900,16 +821,6 @@ static inline void memcg_put_cache_ids(void) | |||
| 900 | { | 821 | { |
| 901 | } | 822 | } |
| 902 | 823 | ||
| 903 | static inline struct kmem_cache * | ||
| 904 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | ||
| 905 | { | ||
| 906 | return cachep; | ||
| 907 | } | ||
| 908 | |||
| 909 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | ||
| 910 | { | ||
| 911 | } | ||
| 912 | |||
| 913 | static inline void memcg_kmem_update_page_stat(struct page *page, | 824 | static inline void memcg_kmem_update_page_stat(struct page *page, |
| 914 | enum mem_cgroup_stat_index idx, int val) | 825 | enum mem_cgroup_stat_index idx, int val) |
| 915 | { | 826 | { |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 5145620ba48a..01033fadea47 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
| @@ -284,5 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, | |||
| 284 | unsigned long map_offset); | 284 | unsigned long map_offset); |
| 285 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, | 285 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, |
| 286 | unsigned long pnum); | 286 | unsigned long pnum); |
| 287 | extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, | ||
| 288 | enum zone_type target); | ||
| 287 | 289 | ||
| 288 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ | 290 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 9b50325e4ddf..ae8d475a9385 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
| @@ -37,6 +37,8 @@ extern int migrate_page(struct address_space *, | |||
| 37 | struct page *, struct page *, enum migrate_mode); | 37 | struct page *, struct page *, enum migrate_mode); |
| 38 | extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, | 38 | extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, |
| 39 | unsigned long private, enum migrate_mode mode, int reason); | 39 | unsigned long private, enum migrate_mode mode, int reason); |
| 40 | extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); | ||
| 41 | extern void putback_movable_page(struct page *page); | ||
| 40 | 42 | ||
| 41 | extern int migrate_prep(void); | 43 | extern int migrate_prep(void); |
| 42 | extern int migrate_prep_local(void); | 44 | extern int migrate_prep_local(void); |
| @@ -69,6 +71,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, | |||
| 69 | 71 | ||
| 70 | #endif /* CONFIG_MIGRATION */ | 72 | #endif /* CONFIG_MIGRATION */ |
| 71 | 73 | ||
| 74 | #ifdef CONFIG_COMPACTION | ||
| 75 | extern int PageMovable(struct page *page); | ||
| 76 | extern void __SetPageMovable(struct page *page, struct address_space *mapping); | ||
| 77 | extern void __ClearPageMovable(struct page *page); | ||
| 78 | #else | ||
| 79 | static inline int PageMovable(struct page *page) { return 0; }; | ||
| 80 | static inline void __SetPageMovable(struct page *page, | ||
| 81 | struct address_space *mapping) | ||
| 82 | { | ||
| 83 | } | ||
| 84 | static inline void __ClearPageMovable(struct page *page) | ||
| 85 | { | ||
| 86 | } | ||
| 87 | #endif | ||
| 88 | |||
| 72 | #ifdef CONFIG_NUMA_BALANCING | 89 | #ifdef CONFIG_NUMA_BALANCING |
| 73 | extern bool pmd_trans_migrating(pmd_t pmd); | 90 | extern bool pmd_trans_migrating(pmd_t pmd); |
| 74 | extern int migrate_misplaced_page(struct page *page, | 91 | extern int migrate_misplaced_page(struct page *page, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index ece042dfe23c..192c1bbe5fcd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -309,10 +309,34 @@ struct vm_fault { | |||
| 309 | * VM_FAULT_DAX_LOCKED and fill in | 309 | * VM_FAULT_DAX_LOCKED and fill in |
| 310 | * entry here. | 310 | * entry here. |
| 311 | */ | 311 | */ |
| 312 | /* for ->map_pages() only */ | 312 | }; |
| 313 | pgoff_t max_pgoff; /* map pages for offset from pgoff till | 313 | |
| 314 | * max_pgoff inclusive */ | 314 | /* |
| 315 | pte_t *pte; /* pte entry associated with ->pgoff */ | 315 | * Page fault context: passes though page fault handler instead of endless list |
| 316 | * of function arguments. | ||
| 317 | */ | ||
| 318 | struct fault_env { | ||
| 319 | struct vm_area_struct *vma; /* Target VMA */ | ||
| 320 | unsigned long address; /* Faulting virtual address */ | ||
| 321 | unsigned int flags; /* FAULT_FLAG_xxx flags */ | ||
| 322 | pmd_t *pmd; /* Pointer to pmd entry matching | ||
| 323 | * the 'address' | ||
| 324 | */ | ||
| 325 | pte_t *pte; /* Pointer to pte entry matching | ||
| 326 | * the 'address'. NULL if the page | ||
| 327 | * table hasn't been allocated. | ||
| 328 | */ | ||
| 329 | spinlock_t *ptl; /* Page table lock. | ||
| 330 | * Protects pte page table if 'pte' | ||
| 331 | * is not NULL, otherwise pmd. | ||
| 332 | */ | ||
| 333 | pgtable_t prealloc_pte; /* Pre-allocated pte page table. | ||
| 334 | * vm_ops->map_pages() calls | ||
| 335 | * alloc_set_pte() from atomic context. | ||
| 336 | * do_fault_around() pre-allocates | ||
| 337 | * page table to avoid allocation from | ||
| 338 | * atomic context. | ||
| 339 | */ | ||
| 316 | }; | 340 | }; |
| 317 | 341 | ||
| 318 | /* | 342 | /* |
| @@ -327,7 +351,8 @@ struct vm_operations_struct { | |||
| 327 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); | 351 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 328 | int (*pmd_fault)(struct vm_area_struct *, unsigned long address, | 352 | int (*pmd_fault)(struct vm_area_struct *, unsigned long address, |
| 329 | pmd_t *, unsigned int flags); | 353 | pmd_t *, unsigned int flags); |
| 330 | void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); | 354 | void (*map_pages)(struct fault_env *fe, |
| 355 | pgoff_t start_pgoff, pgoff_t end_pgoff); | ||
| 331 | 356 | ||
| 332 | /* notification that a previously read-only page is about to become | 357 | /* notification that a previously read-only page is about to become |
| 333 | * writable, if an error is returned it will cause a SIGBUS */ | 358 | * writable, if an error is returned it will cause a SIGBUS */ |
| @@ -537,7 +562,6 @@ void __put_page(struct page *page); | |||
| 537 | void put_pages_list(struct list_head *pages); | 562 | void put_pages_list(struct list_head *pages); |
| 538 | 563 | ||
| 539 | void split_page(struct page *page, unsigned int order); | 564 | void split_page(struct page *page, unsigned int order); |
| 540 | int split_free_page(struct page *page); | ||
| 541 | 565 | ||
| 542 | /* | 566 | /* |
| 543 | * Compound pages have a destructor function. Provide a | 567 | * Compound pages have a destructor function. Provide a |
| @@ -601,8 +625,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) | |||
| 601 | return pte; | 625 | return pte; |
| 602 | } | 626 | } |
| 603 | 627 | ||
| 604 | void do_set_pte(struct vm_area_struct *vma, unsigned long address, | 628 | int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, |
| 605 | struct page *page, pte_t *pte, bool write, bool anon); | 629 | struct page *page); |
| 606 | #endif | 630 | #endif |
| 607 | 631 | ||
| 608 | /* | 632 | /* |
| @@ -1035,6 +1059,7 @@ static inline pgoff_t page_file_index(struct page *page) | |||
| 1035 | } | 1059 | } |
| 1036 | 1060 | ||
| 1037 | bool page_mapped(struct page *page); | 1061 | bool page_mapped(struct page *page); |
| 1062 | struct address_space *page_mapping(struct page *page); | ||
| 1038 | 1063 | ||
| 1039 | /* | 1064 | /* |
| 1040 | * Return true only if the page has been allocated with | 1065 | * Return true only if the page has been allocated with |
| @@ -1215,15 +1240,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page); | |||
| 1215 | int invalidate_inode_page(struct page *page); | 1240 | int invalidate_inode_page(struct page *page); |
| 1216 | 1241 | ||
| 1217 | #ifdef CONFIG_MMU | 1242 | #ifdef CONFIG_MMU |
| 1218 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | 1243 | extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, |
| 1219 | unsigned long address, unsigned int flags); | 1244 | unsigned int flags); |
| 1220 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | 1245 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, |
| 1221 | unsigned long address, unsigned int fault_flags, | 1246 | unsigned long address, unsigned int fault_flags, |
| 1222 | bool *unlocked); | 1247 | bool *unlocked); |
| 1223 | #else | 1248 | #else |
| 1224 | static inline int handle_mm_fault(struct mm_struct *mm, | 1249 | static inline int handle_mm_fault(struct vm_area_struct *vma, |
| 1225 | struct vm_area_struct *vma, unsigned long address, | 1250 | unsigned long address, unsigned int flags) |
| 1226 | unsigned int flags) | ||
| 1227 | { | 1251 | { |
| 1228 | /* should never happen if there's no MMU */ | 1252 | /* should never happen if there's no MMU */ |
| 1229 | BUG(); | 1253 | BUG(); |
| @@ -2063,7 +2087,8 @@ extern void truncate_inode_pages_final(struct address_space *); | |||
| 2063 | 2087 | ||
| 2064 | /* generic vm_area_ops exported for stackable file systems */ | 2088 | /* generic vm_area_ops exported for stackable file systems */ |
| 2065 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); | 2089 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); |
| 2066 | extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); | 2090 | extern void filemap_map_pages(struct fault_env *fe, |
| 2091 | pgoff_t start_pgoff, pgoff_t end_pgoff); | ||
| 2067 | extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 2092 | extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 2068 | 2093 | ||
| 2069 | /* mm/page-writeback.c */ | 2094 | /* mm/page-writeback.c */ |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 917f2b6a0cde..79472b22d23f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -60,51 +60,52 @@ struct page { | |||
| 60 | }; | 60 | }; |
| 61 | 61 | ||
| 62 | /* Second double word */ | 62 | /* Second double word */ |
| 63 | struct { | 63 | union { |
| 64 | union { | 64 | pgoff_t index; /* Our offset within mapping. */ |
| 65 | pgoff_t index; /* Our offset within mapping. */ | 65 | void *freelist; /* sl[aou]b first free object */ |
| 66 | void *freelist; /* sl[aou]b first free object */ | 66 | /* page_deferred_list().prev -- second tail page */ |
| 67 | /* page_deferred_list().prev -- second tail page */ | 67 | }; |
| 68 | }; | ||
| 69 | 68 | ||
| 70 | union { | 69 | union { |
| 71 | #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ | 70 | #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ |
| 72 | defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) | 71 | defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) |
| 73 | /* Used for cmpxchg_double in slub */ | 72 | /* Used for cmpxchg_double in slub */ |
| 74 | unsigned long counters; | 73 | unsigned long counters; |
| 75 | #else | 74 | #else |
| 76 | /* | 75 | /* |
| 77 | * Keep _refcount separate from slub cmpxchg_double | 76 | * Keep _refcount separate from slub cmpxchg_double data. |
| 78 | * data. As the rest of the double word is protected by | 77 | * As the rest of the double word is protected by slab_lock |
| 79 | * slab_lock but _refcount is not. | 78 | * but _refcount is not. |
| 80 | */ | 79 | */ |
| 81 | unsigned counters; | 80 | unsigned counters; |
| 82 | #endif | 81 | #endif |
| 82 | struct { | ||
| 83 | 83 | ||
| 84 | struct { | 84 | union { |
| 85 | |||
| 86 | union { | ||
| 87 | /* | ||
| 88 | * Count of ptes mapped in mms, to show | ||
| 89 | * when page is mapped & limit reverse | ||
| 90 | * map searches. | ||
| 91 | */ | ||
| 92 | atomic_t _mapcount; | ||
| 93 | |||
| 94 | struct { /* SLUB */ | ||
| 95 | unsigned inuse:16; | ||
| 96 | unsigned objects:15; | ||
| 97 | unsigned frozen:1; | ||
| 98 | }; | ||
| 99 | int units; /* SLOB */ | ||
| 100 | }; | ||
| 101 | /* | 85 | /* |
| 102 | * Usage count, *USE WRAPPER FUNCTION* | 86 | * Count of ptes mapped in mms, to show when |
| 103 | * when manual accounting. See page_ref.h | 87 | * page is mapped & limit reverse map searches. |
| 88 | * | ||
| 89 | * Extra information about page type may be | ||
| 90 | * stored here for pages that are never mapped, | ||
| 91 | * in which case the value MUST BE <= -2. | ||
| 92 | * See page-flags.h for more details. | ||
| 104 | */ | 93 | */ |
| 105 | atomic_t _refcount; | 94 | atomic_t _mapcount; |
| 95 | |||
| 96 | unsigned int active; /* SLAB */ | ||
| 97 | struct { /* SLUB */ | ||
| 98 | unsigned inuse:16; | ||
| 99 | unsigned objects:15; | ||
| 100 | unsigned frozen:1; | ||
| 101 | }; | ||
| 102 | int units; /* SLOB */ | ||
| 106 | }; | 103 | }; |
| 107 | unsigned int active; /* SLAB */ | 104 | /* |
| 105 | * Usage count, *USE WRAPPER FUNCTION* when manual | ||
| 106 | * accounting. See page_ref.h | ||
| 107 | */ | ||
| 108 | atomic_t _refcount; | ||
| 108 | }; | 109 | }; |
| 109 | }; | 110 | }; |
| 110 | 111 | ||
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index de7be78c6f0e..451a811f48f2 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h | |||
| @@ -39,6 +39,7 @@ void dump_mm(const struct mm_struct *mm); | |||
| 39 | #define VM_WARN_ON(cond) WARN_ON(cond) | 39 | #define VM_WARN_ON(cond) WARN_ON(cond) |
| 40 | #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) | 40 | #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) |
| 41 | #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) | 41 | #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) |
| 42 | #define VM_WARN(cond, format...) WARN(cond, format) | ||
| 42 | #else | 43 | #else |
| 43 | #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) | 44 | #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) |
| 44 | #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) | 45 | #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) |
| @@ -47,6 +48,7 @@ void dump_mm(const struct mm_struct *mm); | |||
| 47 | #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) | 48 | #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) |
| 48 | #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) | 49 | #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) |
| 49 | #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) | 50 | #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) |
| 51 | #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) | ||
| 50 | #endif | 52 | #endif |
| 51 | 53 | ||
| 52 | #ifdef CONFIG_DEBUG_VIRTUAL | 54 | #ifdef CONFIG_DEBUG_VIRTUAL |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 02069c23486d..19425e988bdc 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -140,6 +140,9 @@ enum zone_stat_item { | |||
| 140 | NR_DIRTIED, /* page dirtyings since bootup */ | 140 | NR_DIRTIED, /* page dirtyings since bootup */ |
| 141 | NR_WRITTEN, /* page writings since bootup */ | 141 | NR_WRITTEN, /* page writings since bootup */ |
| 142 | NR_PAGES_SCANNED, /* pages scanned since last reclaim */ | 142 | NR_PAGES_SCANNED, /* pages scanned since last reclaim */ |
| 143 | #if IS_ENABLED(CONFIG_ZSMALLOC) | ||
| 144 | NR_ZSPAGES, /* allocated in zsmalloc */ | ||
| 145 | #endif | ||
| 143 | #ifdef CONFIG_NUMA | 146 | #ifdef CONFIG_NUMA |
| 144 | NUMA_HIT, /* allocated in intended node */ | 147 | NUMA_HIT, /* allocated in intended node */ |
| 145 | NUMA_MISS, /* allocated in non intended node */ | 148 | NUMA_MISS, /* allocated in non intended node */ |
| @@ -151,7 +154,9 @@ enum zone_stat_item { | |||
| 151 | WORKINGSET_REFAULT, | 154 | WORKINGSET_REFAULT, |
| 152 | WORKINGSET_ACTIVATE, | 155 | WORKINGSET_ACTIVATE, |
| 153 | WORKINGSET_NODERECLAIM, | 156 | WORKINGSET_NODERECLAIM, |
| 154 | NR_ANON_TRANSPARENT_HUGEPAGES, | 157 | NR_ANON_THPS, |
| 158 | NR_SHMEM_THPS, | ||
| 159 | NR_SHMEM_PMDMAPPED, | ||
| 155 | NR_FREE_CMA_PAGES, | 160 | NR_FREE_CMA_PAGES, |
| 156 | NR_VM_ZONE_STAT_ITEMS }; | 161 | NR_VM_ZONE_STAT_ITEMS }; |
| 157 | 162 | ||
| @@ -524,7 +529,6 @@ struct zone { | |||
| 524 | 529 | ||
| 525 | enum zone_flags { | 530 | enum zone_flags { |
| 526 | ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ | 531 | ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
| 527 | ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ | ||
| 528 | ZONE_CONGESTED, /* zone has many dirty pages backed by | 532 | ZONE_CONGESTED, /* zone has many dirty pages backed by |
| 529 | * a congested BDI | 533 | * a congested BDI |
| 530 | */ | 534 | */ |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 83469522690a..606137b3b778 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -23,6 +23,9 @@ struct oom_control { | |||
| 23 | /* Used to determine mempolicy */ | 23 | /* Used to determine mempolicy */ |
| 24 | nodemask_t *nodemask; | 24 | nodemask_t *nodemask; |
| 25 | 25 | ||
| 26 | /* Memory cgroup in which oom is invoked, or NULL for global oom */ | ||
| 27 | struct mem_cgroup *memcg; | ||
| 28 | |||
| 26 | /* Used to determine cpuset and node locality requirement */ | 29 | /* Used to determine cpuset and node locality requirement */ |
| 27 | const gfp_t gfp_mask; | 30 | const gfp_t gfp_mask; |
| 28 | 31 | ||
| @@ -83,14 +86,13 @@ extern unsigned long oom_badness(struct task_struct *p, | |||
| 83 | 86 | ||
| 84 | extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, | 87 | extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, |
| 85 | unsigned int points, unsigned long totalpages, | 88 | unsigned int points, unsigned long totalpages, |
| 86 | struct mem_cgroup *memcg, const char *message); | 89 | const char *message); |
| 87 | 90 | ||
| 88 | extern void check_panic_on_oom(struct oom_control *oc, | 91 | extern void check_panic_on_oom(struct oom_control *oc, |
| 89 | enum oom_constraint constraint, | 92 | enum oom_constraint constraint); |
| 90 | struct mem_cgroup *memcg); | ||
| 91 | 93 | ||
| 92 | extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, | 94 | extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, |
| 93 | struct task_struct *task, unsigned long totalpages); | 95 | struct task_struct *task); |
| 94 | 96 | ||
| 95 | extern bool out_of_memory(struct oom_control *oc); | 97 | extern bool out_of_memory(struct oom_control *oc); |
| 96 | 98 | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index e5a32445f930..74e4dda91238 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -129,6 +129,9 @@ enum pageflags { | |||
| 129 | 129 | ||
| 130 | /* Compound pages. Stored in first tail page's flags */ | 130 | /* Compound pages. Stored in first tail page's flags */ |
| 131 | PG_double_map = PG_private_2, | 131 | PG_double_map = PG_private_2, |
| 132 | |||
| 133 | /* non-lru isolated movable page */ | ||
| 134 | PG_isolated = PG_reclaim, | ||
| 132 | }; | 135 | }; |
| 133 | 136 | ||
| 134 | #ifndef __GENERATING_BOUNDS_H | 137 | #ifndef __GENERATING_BOUNDS_H |
| @@ -292,11 +295,11 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) | |||
| 292 | */ | 295 | */ |
| 293 | TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) | 296 | TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) |
| 294 | TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) | 297 | TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) |
| 295 | PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND) | 298 | PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) |
| 296 | 299 | ||
| 297 | /* PG_readahead is only used for reads; PG_reclaim is only for writes */ | 300 | /* PG_readahead is only used for reads; PG_reclaim is only for writes */ |
| 298 | PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND) | 301 | PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) |
| 299 | TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND) | 302 | TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) |
| 300 | PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) | 303 | PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) |
| 301 | TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) | 304 | TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) |
| 302 | 305 | ||
| @@ -357,29 +360,37 @@ PAGEFLAG(Idle, idle, PF_ANY) | |||
| 357 | * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. | 360 | * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. |
| 358 | * | 361 | * |
| 359 | * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, | 362 | * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, |
| 360 | * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; | 363 | * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON |
| 361 | * and then page->mapping points, not to an anon_vma, but to a private | 364 | * bit; and then page->mapping points, not to an anon_vma, but to a private |
| 362 | * structure which KSM associates with that merged page. See ksm.h. | 365 | * structure which KSM associates with that merged page. See ksm.h. |
| 363 | * | 366 | * |
| 364 | * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. | 367 | * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable |
| 368 | * page and then page->mapping points a struct address_space. | ||
| 365 | * | 369 | * |
| 366 | * Please note that, confusingly, "page_mapping" refers to the inode | 370 | * Please note that, confusingly, "page_mapping" refers to the inode |
| 367 | * address_space which maps the page from disk; whereas "page_mapped" | 371 | * address_space which maps the page from disk; whereas "page_mapped" |
| 368 | * refers to user virtual address space into which the page is mapped. | 372 | * refers to user virtual address space into which the page is mapped. |
| 369 | */ | 373 | */ |
| 370 | #define PAGE_MAPPING_ANON 1 | 374 | #define PAGE_MAPPING_ANON 0x1 |
| 371 | #define PAGE_MAPPING_KSM 2 | 375 | #define PAGE_MAPPING_MOVABLE 0x2 |
| 372 | #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) | 376 | #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) |
| 377 | #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) | ||
| 373 | 378 | ||
| 374 | static __always_inline int PageAnonHead(struct page *page) | 379 | static __always_inline int PageMappingFlags(struct page *page) |
| 375 | { | 380 | { |
| 376 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; | 381 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; |
| 377 | } | 382 | } |
| 378 | 383 | ||
| 379 | static __always_inline int PageAnon(struct page *page) | 384 | static __always_inline int PageAnon(struct page *page) |
| 380 | { | 385 | { |
| 381 | page = compound_head(page); | 386 | page = compound_head(page); |
| 382 | return PageAnonHead(page); | 387 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; |
| 388 | } | ||
| 389 | |||
| 390 | static __always_inline int __PageMovable(struct page *page) | ||
| 391 | { | ||
| 392 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == | ||
| 393 | PAGE_MAPPING_MOVABLE; | ||
| 383 | } | 394 | } |
| 384 | 395 | ||
| 385 | #ifdef CONFIG_KSM | 396 | #ifdef CONFIG_KSM |
| @@ -393,7 +404,7 @@ static __always_inline int PageKsm(struct page *page) | |||
| 393 | { | 404 | { |
| 394 | page = compound_head(page); | 405 | page = compound_head(page); |
| 395 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == | 406 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == |
| 396 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | 407 | PAGE_MAPPING_KSM; |
| 397 | } | 408 | } |
| 398 | #else | 409 | #else |
| 399 | TESTPAGEFLAG_FALSE(Ksm) | 410 | TESTPAGEFLAG_FALSE(Ksm) |
| @@ -570,6 +581,17 @@ static inline int PageDoubleMap(struct page *page) | |||
| 570 | return PageHead(page) && test_bit(PG_double_map, &page[1].flags); | 581 | return PageHead(page) && test_bit(PG_double_map, &page[1].flags); |
| 571 | } | 582 | } |
| 572 | 583 | ||
| 584 | static inline void SetPageDoubleMap(struct page *page) | ||
| 585 | { | ||
| 586 | VM_BUG_ON_PAGE(!PageHead(page), page); | ||
| 587 | set_bit(PG_double_map, &page[1].flags); | ||
| 588 | } | ||
| 589 | |||
| 590 | static inline void ClearPageDoubleMap(struct page *page) | ||
| 591 | { | ||
| 592 | VM_BUG_ON_PAGE(!PageHead(page), page); | ||
| 593 | clear_bit(PG_double_map, &page[1].flags); | ||
| 594 | } | ||
| 573 | static inline int TestSetPageDoubleMap(struct page *page) | 595 | static inline int TestSetPageDoubleMap(struct page *page) |
| 574 | { | 596 | { |
| 575 | VM_BUG_ON_PAGE(!PageHead(page), page); | 597 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| @@ -587,59 +609,59 @@ TESTPAGEFLAG_FALSE(TransHuge) | |||
| 587 | TESTPAGEFLAG_FALSE(TransCompound) | 609 | TESTPAGEFLAG_FALSE(TransCompound) |
| 588 | TESTPAGEFLAG_FALSE(TransCompoundMap) | 610 | TESTPAGEFLAG_FALSE(TransCompoundMap) |
| 589 | TESTPAGEFLAG_FALSE(TransTail) | 611 | TESTPAGEFLAG_FALSE(TransTail) |
| 590 | TESTPAGEFLAG_FALSE(DoubleMap) | 612 | PAGEFLAG_FALSE(DoubleMap) |
| 591 | TESTSETFLAG_FALSE(DoubleMap) | 613 | TESTSETFLAG_FALSE(DoubleMap) |
| 592 | TESTCLEARFLAG_FALSE(DoubleMap) | 614 | TESTCLEARFLAG_FALSE(DoubleMap) |
| 593 | #endif | 615 | #endif |
| 594 | 616 | ||
| 595 | /* | 617 | /* |
| 618 | * For pages that are never mapped to userspace, page->mapcount may be | ||
| 619 | * used for storing extra information about page type. Any value used | ||
| 620 | * for this purpose must be <= -2, but it's better start not too close | ||
| 621 | * to -2 so that an underflow of the page_mapcount() won't be mistaken | ||
| 622 | * for a special page. | ||
| 623 | */ | ||
| 624 | #define PAGE_MAPCOUNT_OPS(uname, lname) \ | ||
| 625 | static __always_inline int Page##uname(struct page *page) \ | ||
| 626 | { \ | ||
| 627 | return atomic_read(&page->_mapcount) == \ | ||
| 628 | PAGE_##lname##_MAPCOUNT_VALUE; \ | ||
| 629 | } \ | ||
| 630 | static __always_inline void __SetPage##uname(struct page *page) \ | ||
| 631 | { \ | ||
| 632 | VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \ | ||
| 633 | atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \ | ||
| 634 | } \ | ||
| 635 | static __always_inline void __ClearPage##uname(struct page *page) \ | ||
| 636 | { \ | ||
| 637 | VM_BUG_ON_PAGE(!Page##uname(page), page); \ | ||
| 638 | atomic_set(&page->_mapcount, -1); \ | ||
| 639 | } | ||
| 640 | |||
| 641 | /* | ||
| 596 | * PageBuddy() indicate that the page is free and in the buddy system | 642 | * PageBuddy() indicate that the page is free and in the buddy system |
| 597 | * (see mm/page_alloc.c). | 643 | * (see mm/page_alloc.c). |
| 598 | * | ||
| 599 | * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to | ||
| 600 | * -2 so that an underflow of the page_mapcount() won't be mistaken | ||
| 601 | * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very | ||
| 602 | * efficiently by most CPU architectures. | ||
| 603 | */ | 644 | */ |
| 604 | #define PAGE_BUDDY_MAPCOUNT_VALUE (-128) | 645 | #define PAGE_BUDDY_MAPCOUNT_VALUE (-128) |
| 605 | 646 | PAGE_MAPCOUNT_OPS(Buddy, BUDDY) | |
| 606 | static inline int PageBuddy(struct page *page) | ||
| 607 | { | ||
| 608 | return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; | ||
| 609 | } | ||
| 610 | 647 | ||
| 611 | static inline void __SetPageBuddy(struct page *page) | 648 | /* |
| 612 | { | 649 | * PageBalloon() is set on pages that are on the balloon page list |
| 613 | VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); | 650 | * (see mm/balloon_compaction.c). |
| 614 | atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); | 651 | */ |
| 615 | } | 652 | #define PAGE_BALLOON_MAPCOUNT_VALUE (-256) |
| 653 | PAGE_MAPCOUNT_OPS(Balloon, BALLOON) | ||
| 616 | 654 | ||
| 617 | static inline void __ClearPageBuddy(struct page *page) | 655 | /* |
| 618 | { | 656 | * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on |
| 619 | VM_BUG_ON_PAGE(!PageBuddy(page), page); | 657 | * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. |
| 620 | atomic_set(&page->_mapcount, -1); | 658 | */ |
| 621 | } | 659 | #define PAGE_KMEMCG_MAPCOUNT_VALUE (-512) |
| 660 | PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG) | ||
| 622 | 661 | ||
| 623 | extern bool is_free_buddy_page(struct page *page); | 662 | extern bool is_free_buddy_page(struct page *page); |
| 624 | 663 | ||
| 625 | #define PAGE_BALLOON_MAPCOUNT_VALUE (-256) | 664 | __PAGEFLAG(Isolated, isolated, PF_ANY); |
| 626 | |||
| 627 | static inline int PageBalloon(struct page *page) | ||
| 628 | { | ||
| 629 | return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; | ||
| 630 | } | ||
| 631 | |||
| 632 | static inline void __SetPageBalloon(struct page *page) | ||
| 633 | { | ||
| 634 | VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); | ||
| 635 | atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); | ||
| 636 | } | ||
| 637 | |||
| 638 | static inline void __ClearPageBalloon(struct page *page) | ||
| 639 | { | ||
| 640 | VM_BUG_ON_PAGE(!PageBalloon(page), page); | ||
| 641 | atomic_set(&page->_mapcount, -1); | ||
| 642 | } | ||
| 643 | 665 | ||
| 644 | /* | 666 | /* |
| 645 | * If network-based swap is enabled, sl*b must keep track of whether pages | 667 | * If network-based swap is enabled, sl*b must keep track of whether pages |
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index e1fe7cf5bddf..03f2a3e7d76d 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <linux/stacktrace.h> | 5 | #include <linux/stacktrace.h> |
| 6 | #include <linux/stackdepot.h> | ||
| 6 | 7 | ||
| 7 | struct pglist_data; | 8 | struct pglist_data; |
| 8 | struct page_ext_operations { | 9 | struct page_ext_operations { |
| @@ -44,9 +45,8 @@ struct page_ext { | |||
| 44 | #ifdef CONFIG_PAGE_OWNER | 45 | #ifdef CONFIG_PAGE_OWNER |
| 45 | unsigned int order; | 46 | unsigned int order; |
| 46 | gfp_t gfp_mask; | 47 | gfp_t gfp_mask; |
| 47 | unsigned int nr_entries; | ||
| 48 | int last_migrate_reason; | 48 | int last_migrate_reason; |
| 49 | unsigned long trace_entries[8]; | 49 | depot_stack_handle_t handle; |
| 50 | #endif | 50 | #endif |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 46f1b939948c..30583ab0ffb1 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h | |||
| @@ -10,7 +10,7 @@ extern struct page_ext_operations page_owner_ops; | |||
| 10 | extern void __reset_page_owner(struct page *page, unsigned int order); | 10 | extern void __reset_page_owner(struct page *page, unsigned int order); |
| 11 | extern void __set_page_owner(struct page *page, | 11 | extern void __set_page_owner(struct page *page, |
| 12 | unsigned int order, gfp_t gfp_mask); | 12 | unsigned int order, gfp_t gfp_mask); |
| 13 | extern gfp_t __get_page_owner_gfp(struct page *page); | 13 | extern void __split_page_owner(struct page *page, unsigned int order); |
| 14 | extern void __copy_page_owner(struct page *oldpage, struct page *newpage); | 14 | extern void __copy_page_owner(struct page *oldpage, struct page *newpage); |
| 15 | extern void __set_page_owner_migrate_reason(struct page *page, int reason); | 15 | extern void __set_page_owner_migrate_reason(struct page *page, int reason); |
| 16 | extern void __dump_page_owner(struct page *page); | 16 | extern void __dump_page_owner(struct page *page); |
| @@ -28,12 +28,10 @@ static inline void set_page_owner(struct page *page, | |||
| 28 | __set_page_owner(page, order, gfp_mask); | 28 | __set_page_owner(page, order, gfp_mask); |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | static inline gfp_t get_page_owner_gfp(struct page *page) | 31 | static inline void split_page_owner(struct page *page, unsigned int order) |
| 32 | { | 32 | { |
| 33 | if (static_branch_unlikely(&page_owner_inited)) | 33 | if (static_branch_unlikely(&page_owner_inited)) |
| 34 | return __get_page_owner_gfp(page); | 34 | __split_page_owner(page, order); |
| 35 | else | ||
| 36 | return 0; | ||
| 37 | } | 35 | } |
| 38 | static inline void copy_page_owner(struct page *oldpage, struct page *newpage) | 36 | static inline void copy_page_owner(struct page *oldpage, struct page *newpage) |
| 39 | { | 37 | { |
| @@ -58,9 +56,9 @@ static inline void set_page_owner(struct page *page, | |||
| 58 | unsigned int order, gfp_t gfp_mask) | 56 | unsigned int order, gfp_t gfp_mask) |
| 59 | { | 57 | { |
| 60 | } | 58 | } |
| 61 | static inline gfp_t get_page_owner_gfp(struct page *page) | 59 | static inline void split_page_owner(struct page *page, |
| 60 | unsigned int order) | ||
| 62 | { | 61 | { |
| 63 | return 0; | ||
| 64 | } | 62 | } |
| 65 | static inline void copy_page_owner(struct page *oldpage, struct page *newpage) | 63 | static inline void copy_page_owner(struct page *oldpage, struct page *newpage) |
| 66 | { | 64 | { |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 97354102794d..81363b834900 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -209,10 +209,10 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x) | |||
| 209 | return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); | 209 | return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | static inline struct page *page_cache_alloc_readahead(struct address_space *x) | 212 | static inline gfp_t readahead_gfp_mask(struct address_space *x) |
| 213 | { | 213 | { |
| 214 | return __page_cache_alloc(mapping_gfp_mask(x) | | 214 | return mapping_gfp_mask(x) | |
| 215 | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); | 215 | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; |
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | typedef int filler_t(void *, struct page *); | 218 | typedef int filler_t(void *, struct page *); |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index eca6f626c16e..cbfee507c839 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -291,6 +291,7 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, | |||
| 291 | unsigned long first_index, unsigned int max_items); | 291 | unsigned long first_index, unsigned int max_items); |
| 292 | int radix_tree_preload(gfp_t gfp_mask); | 292 | int radix_tree_preload(gfp_t gfp_mask); |
| 293 | int radix_tree_maybe_preload(gfp_t gfp_mask); | 293 | int radix_tree_maybe_preload(gfp_t gfp_mask); |
| 294 | int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); | ||
| 294 | void radix_tree_init(void); | 295 | void radix_tree_init(void); |
| 295 | void *radix_tree_tag_set(struct radix_tree_root *root, | 296 | void *radix_tree_tag_set(struct radix_tree_root *root, |
| 296 | unsigned long index, unsigned int tag); | 297 | unsigned long index, unsigned int tag); |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 2b0fad83683f..b46bb5620a76 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -165,7 +165,7 @@ void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, | |||
| 165 | unsigned long, int); | 165 | unsigned long, int); |
| 166 | void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, | 166 | void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, |
| 167 | unsigned long, bool); | 167 | unsigned long, bool); |
| 168 | void page_add_file_rmap(struct page *); | 168 | void page_add_file_rmap(struct page *, bool); |
| 169 | void page_remove_rmap(struct page *, bool); | 169 | void page_remove_rmap(struct page *, bool); |
| 170 | 170 | ||
| 171 | void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, | 171 | void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 4d4780c00d34..ff078e7043b6 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
| @@ -16,8 +16,9 @@ struct shmem_inode_info { | |||
| 16 | unsigned long flags; | 16 | unsigned long flags; |
| 17 | unsigned long alloced; /* data pages alloced to file */ | 17 | unsigned long alloced; /* data pages alloced to file */ |
| 18 | unsigned long swapped; /* subtotal assigned to swap */ | 18 | unsigned long swapped; /* subtotal assigned to swap */ |
| 19 | struct shared_policy policy; /* NUMA memory alloc policy */ | 19 | struct list_head shrinklist; /* shrinkable hpage inodes */ |
| 20 | struct list_head swaplist; /* chain of maybes on swap */ | 20 | struct list_head swaplist; /* chain of maybes on swap */ |
| 21 | struct shared_policy policy; /* NUMA memory alloc policy */ | ||
| 21 | struct simple_xattrs xattrs; /* list of xattrs */ | 22 | struct simple_xattrs xattrs; /* list of xattrs */ |
| 22 | struct inode vfs_inode; | 23 | struct inode vfs_inode; |
| 23 | }; | 24 | }; |
| @@ -28,10 +29,14 @@ struct shmem_sb_info { | |||
| 28 | unsigned long max_inodes; /* How many inodes are allowed */ | 29 | unsigned long max_inodes; /* How many inodes are allowed */ |
| 29 | unsigned long free_inodes; /* How many are left for allocation */ | 30 | unsigned long free_inodes; /* How many are left for allocation */ |
| 30 | spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ | 31 | spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ |
| 32 | umode_t mode; /* Mount mode for root directory */ | ||
| 33 | unsigned char huge; /* Whether to try for hugepages */ | ||
| 31 | kuid_t uid; /* Mount uid for root directory */ | 34 | kuid_t uid; /* Mount uid for root directory */ |
| 32 | kgid_t gid; /* Mount gid for root directory */ | 35 | kgid_t gid; /* Mount gid for root directory */ |
| 33 | umode_t mode; /* Mount mode for root directory */ | ||
| 34 | struct mempolicy *mpol; /* default memory policy for mappings */ | 36 | struct mempolicy *mpol; /* default memory policy for mappings */ |
| 37 | spinlock_t shrinklist_lock; /* Protects shrinklist */ | ||
| 38 | struct list_head shrinklist; /* List of shinkable inodes */ | ||
| 39 | unsigned long shrinklist_len; /* Length of shrinklist */ | ||
| 35 | }; | 40 | }; |
| 36 | 41 | ||
| 37 | static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) | 42 | static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) |
| @@ -49,6 +54,8 @@ extern struct file *shmem_file_setup(const char *name, | |||
| 49 | extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, | 54 | extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, |
| 50 | unsigned long flags); | 55 | unsigned long flags); |
| 51 | extern int shmem_zero_setup(struct vm_area_struct *); | 56 | extern int shmem_zero_setup(struct vm_area_struct *); |
| 57 | extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, | ||
| 58 | unsigned long len, unsigned long pgoff, unsigned long flags); | ||
| 52 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); | 59 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); |
| 53 | extern bool shmem_mapping(struct address_space *mapping); | 60 | extern bool shmem_mapping(struct address_space *mapping); |
| 54 | extern void shmem_unlock_mapping(struct address_space *mapping); | 61 | extern void shmem_unlock_mapping(struct address_space *mapping); |
| @@ -61,6 +68,19 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); | |||
| 61 | extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, | 68 | extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, |
| 62 | pgoff_t start, pgoff_t end); | 69 | pgoff_t start, pgoff_t end); |
| 63 | 70 | ||
| 71 | /* Flag allocation requirements to shmem_getpage */ | ||
| 72 | enum sgp_type { | ||
| 73 | SGP_READ, /* don't exceed i_size, don't allocate page */ | ||
| 74 | SGP_CACHE, /* don't exceed i_size, may allocate page */ | ||
| 75 | SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ | ||
| 76 | SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ | ||
| 77 | SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ | ||
| 78 | SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ | ||
| 79 | }; | ||
| 80 | |||
| 81 | extern int shmem_getpage(struct inode *inode, pgoff_t index, | ||
| 82 | struct page **pagep, enum sgp_type sgp); | ||
| 83 | |||
| 64 | static inline struct page *shmem_read_mapping_page( | 84 | static inline struct page *shmem_read_mapping_page( |
| 65 | struct address_space *mapping, pgoff_t index) | 85 | struct address_space *mapping, pgoff_t index) |
| 66 | { | 86 | { |
| @@ -68,6 +88,18 @@ static inline struct page *shmem_read_mapping_page( | |||
| 68 | mapping_gfp_mask(mapping)); | 88 | mapping_gfp_mask(mapping)); |
| 69 | } | 89 | } |
| 70 | 90 | ||
| 91 | static inline bool shmem_file(struct file *file) | ||
| 92 | { | ||
| 93 | if (!IS_ENABLED(CONFIG_SHMEM)) | ||
| 94 | return false; | ||
| 95 | if (!file || !file->f_mapping) | ||
| 96 | return false; | ||
| 97 | return shmem_mapping(file->f_mapping); | ||
| 98 | } | ||
| 99 | |||
| 100 | extern bool shmem_charge(struct inode *inode, long pages); | ||
| 101 | extern void shmem_uncharge(struct inode *inode, long pages); | ||
| 102 | |||
| 71 | #ifdef CONFIG_TMPFS | 103 | #ifdef CONFIG_TMPFS |
| 72 | 104 | ||
| 73 | extern int shmem_add_seals(struct file *file, unsigned int seals); | 105 | extern int shmem_add_seals(struct file *file, unsigned int seals); |
| @@ -83,4 +115,13 @@ static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) | |||
| 83 | 115 | ||
| 84 | #endif | 116 | #endif |
| 85 | 117 | ||
| 118 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE | ||
| 119 | extern bool shmem_huge_enabled(struct vm_area_struct *vma); | ||
| 120 | #else | ||
| 121 | static inline bool shmem_huge_enabled(struct vm_area_struct *vma) | ||
| 122 | { | ||
| 123 | return false; | ||
| 124 | } | ||
| 125 | #endif | ||
| 126 | |||
| 86 | #endif | 127 | #endif |
diff --git a/include/linux/slab.h b/include/linux/slab.h index aeb3e6d00a66..1a4ea551aae5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -565,6 +565,8 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) | |||
| 565 | { | 565 | { |
| 566 | if (size != 0 && n > SIZE_MAX / size) | 566 | if (size != 0 && n > SIZE_MAX / size) |
| 567 | return NULL; | 567 | return NULL; |
| 568 | if (__builtin_constant_p(n) && __builtin_constant_p(size)) | ||
| 569 | return kmalloc(n * size, flags); | ||
| 568 | return __kmalloc(n * size, flags); | 570 | return __kmalloc(n * size, flags); |
| 569 | } | 571 | } |
| 570 | 572 | ||
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8694f7a5d92b..339ba027ade9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
| @@ -81,7 +81,7 @@ struct kmem_cache { | |||
| 81 | #endif | 81 | #endif |
| 82 | 82 | ||
| 83 | #ifdef CONFIG_SLAB_FREELIST_RANDOM | 83 | #ifdef CONFIG_SLAB_FREELIST_RANDOM |
| 84 | void *random_seq; | 84 | unsigned int *random_seq; |
| 85 | #endif | 85 | #endif |
| 86 | 86 | ||
| 87 | struct kmem_cache_node *node[MAX_NUMNODES]; | 87 | struct kmem_cache_node *node[MAX_NUMNODES]; |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d1faa019c02a..5624c1f3eb0a 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -99,6 +99,11 @@ struct kmem_cache { | |||
| 99 | */ | 99 | */ |
| 100 | int remote_node_defrag_ratio; | 100 | int remote_node_defrag_ratio; |
| 101 | #endif | 101 | #endif |
| 102 | |||
| 103 | #ifdef CONFIG_SLAB_FREELIST_RANDOM | ||
| 104 | unsigned int *random_seq; | ||
| 105 | #endif | ||
| 106 | |||
| 102 | struct kmem_cache_node *node[MAX_NUMNODES]; | 107 | struct kmem_cache_node *node[MAX_NUMNODES]; |
| 103 | }; | 108 | }; |
| 104 | 109 | ||
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 587480ad41b7..dd66a952e8cd 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h | |||
| @@ -27,8 +27,7 @@ | |||
| 27 | #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) | 27 | #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) |
| 28 | #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) | 28 | #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) |
| 29 | 29 | ||
| 30 | extern int handle_userfault(struct vm_area_struct *vma, unsigned long address, | 30 | extern int handle_userfault(struct fault_env *fe, unsigned long reason); |
| 31 | unsigned int flags, unsigned long reason); | ||
| 32 | 31 | ||
| 33 | extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, | 32 | extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, |
| 34 | unsigned long src_start, unsigned long len); | 33 | unsigned long src_start, unsigned long len); |
| @@ -56,10 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) | |||
| 56 | #else /* CONFIG_USERFAULTFD */ | 55 | #else /* CONFIG_USERFAULTFD */ |
| 57 | 56 | ||
| 58 | /* mm helpers */ | 57 | /* mm helpers */ |
| 59 | static inline int handle_userfault(struct vm_area_struct *vma, | 58 | static inline int handle_userfault(struct fault_env *fe, unsigned long reason) |
| 60 | unsigned long address, | ||
| 61 | unsigned int flags, | ||
| 62 | unsigned long reason) | ||
| 63 | { | 59 | { |
| 64 | return VM_FAULT_SIGBUS; | 60 | return VM_FAULT_SIGBUS; |
| 65 | } | 61 | } |
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ec084321fe09..42604173f122 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
| @@ -70,6 +70,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 70 | THP_FAULT_FALLBACK, | 70 | THP_FAULT_FALLBACK, |
| 71 | THP_COLLAPSE_ALLOC, | 71 | THP_COLLAPSE_ALLOC, |
| 72 | THP_COLLAPSE_ALLOC_FAILED, | 72 | THP_COLLAPSE_ALLOC_FAILED, |
| 73 | THP_FILE_ALLOC, | ||
| 74 | THP_FILE_MAPPED, | ||
| 73 | THP_SPLIT_PAGE, | 75 | THP_SPLIT_PAGE, |
| 74 | THP_SPLIT_PAGE_FAILED, | 76 | THP_SPLIT_PAGE_FAILED, |
| 75 | THP_DEFERRED_SPLIT_PAGE, | 77 | THP_DEFERRED_SPLIT_PAGE, |
| @@ -100,4 +102,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 100 | NR_VM_EVENT_ITEMS | 102 | NR_VM_EVENT_ITEMS |
| 101 | }; | 103 | }; |
| 102 | 104 | ||
| 105 | #ifndef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 106 | #define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) | ||
| 107 | #define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) | ||
| 108 | #endif | ||
| 109 | |||
| 103 | #endif /* VM_EVENT_ITEM_H_INCLUDED */ | 110 | #endif /* VM_EVENT_ITEM_H_INCLUDED */ |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d0b5ca5d4e08..717e6149e753 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -384,4 +384,7 @@ void tag_pages_for_writeback(struct address_space *mapping, | |||
| 384 | 384 | ||
| 385 | void account_page_redirty(struct page *page); | 385 | void account_page_redirty(struct page *page); |
| 386 | 386 | ||
| 387 | void sb_mark_inode_writeback(struct inode *inode); | ||
| 388 | void sb_clear_inode_writeback(struct inode *inode); | ||
| 389 | |||
| 387 | #endif /* WRITEBACK_H */ | 390 | #endif /* WRITEBACK_H */ |
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 551ba4acde4d..04f58acda8e8 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ | 13 | EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ |
| 14 | EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ | 14 | EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ |
| 15 | EM( SCAN_PAGE_RO, "no_writable_page") \ | 15 | EM( SCAN_PAGE_RO, "no_writable_page") \ |
| 16 | EM( SCAN_NO_REFERENCED_PAGE, "no_referenced_page") \ | 16 | EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \ |
| 17 | EM( SCAN_PAGE_NULL, "page_null") \ | 17 | EM( SCAN_PAGE_NULL, "page_null") \ |
| 18 | EM( SCAN_SCAN_ABORT, "scan_aborted") \ | 18 | EM( SCAN_SCAN_ABORT, "scan_aborted") \ |
| 19 | EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ | 19 | EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ |
| @@ -28,7 +28,9 @@ | |||
| 28 | EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \ | 28 | EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \ |
| 29 | EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ | 29 | EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ |
| 30 | EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ | 30 | EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ |
| 31 | EMe( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") | 31 | EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ |
| 32 | EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ | ||
| 33 | EMe(SCAN_TRUNCATED, "truncated") \ | ||
| 32 | 34 | ||
| 33 | #undef EM | 35 | #undef EM |
| 34 | #undef EMe | 36 | #undef EMe |
| @@ -45,17 +47,18 @@ SCAN_STATUS | |||
| 45 | TRACE_EVENT(mm_khugepaged_scan_pmd, | 47 | TRACE_EVENT(mm_khugepaged_scan_pmd, |
| 46 | 48 | ||
| 47 | TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, | 49 | TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, |
| 48 | bool referenced, int none_or_zero, int status), | 50 | int referenced, int none_or_zero, int status, int unmapped), |
| 49 | 51 | ||
| 50 | TP_ARGS(mm, page, writable, referenced, none_or_zero, status), | 52 | TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped), |
| 51 | 53 | ||
| 52 | TP_STRUCT__entry( | 54 | TP_STRUCT__entry( |
| 53 | __field(struct mm_struct *, mm) | 55 | __field(struct mm_struct *, mm) |
| 54 | __field(unsigned long, pfn) | 56 | __field(unsigned long, pfn) |
| 55 | __field(bool, writable) | 57 | __field(bool, writable) |
| 56 | __field(bool, referenced) | 58 | __field(int, referenced) |
| 57 | __field(int, none_or_zero) | 59 | __field(int, none_or_zero) |
| 58 | __field(int, status) | 60 | __field(int, status) |
| 61 | __field(int, unmapped) | ||
| 59 | ), | 62 | ), |
| 60 | 63 | ||
| 61 | TP_fast_assign( | 64 | TP_fast_assign( |
| @@ -65,15 +68,17 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, | |||
| 65 | __entry->referenced = referenced; | 68 | __entry->referenced = referenced; |
| 66 | __entry->none_or_zero = none_or_zero; | 69 | __entry->none_or_zero = none_or_zero; |
| 67 | __entry->status = status; | 70 | __entry->status = status; |
| 71 | __entry->unmapped = unmapped; | ||
| 68 | ), | 72 | ), |
| 69 | 73 | ||
| 70 | TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s", | 74 | TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d", |
| 71 | __entry->mm, | 75 | __entry->mm, |
| 72 | __entry->pfn, | 76 | __entry->pfn, |
| 73 | __entry->writable, | 77 | __entry->writable, |
| 74 | __entry->referenced, | 78 | __entry->referenced, |
| 75 | __entry->none_or_zero, | 79 | __entry->none_or_zero, |
| 76 | __print_symbolic(__entry->status, SCAN_STATUS)) | 80 | __print_symbolic(__entry->status, SCAN_STATUS), |
| 81 | __entry->unmapped) | ||
| 77 | ); | 82 | ); |
| 78 | 83 | ||
| 79 | TRACE_EVENT(mm_collapse_huge_page, | 84 | TRACE_EVENT(mm_collapse_huge_page, |
| @@ -103,14 +108,14 @@ TRACE_EVENT(mm_collapse_huge_page, | |||
| 103 | TRACE_EVENT(mm_collapse_huge_page_isolate, | 108 | TRACE_EVENT(mm_collapse_huge_page_isolate, |
| 104 | 109 | ||
| 105 | TP_PROTO(struct page *page, int none_or_zero, | 110 | TP_PROTO(struct page *page, int none_or_zero, |
| 106 | bool referenced, bool writable, int status), | 111 | int referenced, bool writable, int status), |
| 107 | 112 | ||
| 108 | TP_ARGS(page, none_or_zero, referenced, writable, status), | 113 | TP_ARGS(page, none_or_zero, referenced, writable, status), |
| 109 | 114 | ||
| 110 | TP_STRUCT__entry( | 115 | TP_STRUCT__entry( |
| 111 | __field(unsigned long, pfn) | 116 | __field(unsigned long, pfn) |
| 112 | __field(int, none_or_zero) | 117 | __field(int, none_or_zero) |
| 113 | __field(bool, referenced) | 118 | __field(int, referenced) |
| 114 | __field(bool, writable) | 119 | __field(bool, writable) |
| 115 | __field(int, status) | 120 | __field(int, status) |
| 116 | ), | 121 | ), |
| @@ -131,5 +136,32 @@ TRACE_EVENT(mm_collapse_huge_page_isolate, | |||
| 131 | __print_symbolic(__entry->status, SCAN_STATUS)) | 136 | __print_symbolic(__entry->status, SCAN_STATUS)) |
| 132 | ); | 137 | ); |
| 133 | 138 | ||
| 139 | TRACE_EVENT(mm_collapse_huge_page_swapin, | ||
| 140 | |||
| 141 | TP_PROTO(struct mm_struct *mm, int swapped_in, int referenced, int ret), | ||
| 142 | |||
| 143 | TP_ARGS(mm, swapped_in, referenced, ret), | ||
| 144 | |||
| 145 | TP_STRUCT__entry( | ||
| 146 | __field(struct mm_struct *, mm) | ||
| 147 | __field(int, swapped_in) | ||
| 148 | __field(int, referenced) | ||
| 149 | __field(int, ret) | ||
| 150 | ), | ||
| 151 | |||
| 152 | TP_fast_assign( | ||
| 153 | __entry->mm = mm; | ||
| 154 | __entry->swapped_in = swapped_in; | ||
| 155 | __entry->referenced = referenced; | ||
| 156 | __entry->ret = ret; | ||
| 157 | ), | ||
| 158 | |||
| 159 | TP_printk("mm=%p, swapped_in=%d, referenced=%d, ret=%d", | ||
| 160 | __entry->mm, | ||
| 161 | __entry->swapped_in, | ||
| 162 | __entry->referenced, | ||
| 163 | __entry->ret) | ||
| 164 | ); | ||
| 165 | |||
| 134 | #endif /* __HUGE_MEMORY_H */ | 166 | #endif /* __HUGE_MEMORY_H */ |
| 135 | #include <trace/define_trace.h> | 167 | #include <trace/define_trace.h> |
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 73614ce1d204..531f5811ff6b 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
| @@ -696,7 +696,7 @@ DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, | |||
| 696 | TP_ARGS(inode, wbc, nr_to_write) | 696 | TP_ARGS(inode, wbc, nr_to_write) |
| 697 | ); | 697 | ); |
| 698 | 698 | ||
| 699 | DECLARE_EVENT_CLASS(writeback_lazytime_template, | 699 | DECLARE_EVENT_CLASS(writeback_inode_template, |
| 700 | TP_PROTO(struct inode *inode), | 700 | TP_PROTO(struct inode *inode), |
| 701 | 701 | ||
| 702 | TP_ARGS(inode), | 702 | TP_ARGS(inode), |
| @@ -723,25 +723,39 @@ DECLARE_EVENT_CLASS(writeback_lazytime_template, | |||
| 723 | show_inode_state(__entry->state), __entry->mode) | 723 | show_inode_state(__entry->state), __entry->mode) |
| 724 | ); | 724 | ); |
| 725 | 725 | ||
| 726 | DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime, | 726 | DEFINE_EVENT(writeback_inode_template, writeback_lazytime, |
| 727 | TP_PROTO(struct inode *inode), | 727 | TP_PROTO(struct inode *inode), |
| 728 | 728 | ||
| 729 | TP_ARGS(inode) | 729 | TP_ARGS(inode) |
| 730 | ); | 730 | ); |
| 731 | 731 | ||
| 732 | DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput, | 732 | DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput, |
| 733 | TP_PROTO(struct inode *inode), | 733 | TP_PROTO(struct inode *inode), |
| 734 | 734 | ||
| 735 | TP_ARGS(inode) | 735 | TP_ARGS(inode) |
| 736 | ); | 736 | ); |
| 737 | 737 | ||
| 738 | DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue, | 738 | DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue, |
| 739 | 739 | ||
| 740 | TP_PROTO(struct inode *inode), | 740 | TP_PROTO(struct inode *inode), |
| 741 | 741 | ||
| 742 | TP_ARGS(inode) | 742 | TP_ARGS(inode) |
| 743 | ); | 743 | ); |
| 744 | 744 | ||
| 745 | /* | ||
| 746 | * Inode writeback list tracking. | ||
| 747 | */ | ||
| 748 | |||
| 749 | DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback, | ||
| 750 | TP_PROTO(struct inode *inode), | ||
| 751 | TP_ARGS(inode) | ||
| 752 | ); | ||
| 753 | |||
| 754 | DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback, | ||
| 755 | TP_PROTO(struct inode *inode), | ||
| 756 | TP_ARGS(inode) | ||
| 757 | ); | ||
| 758 | |||
| 745 | #endif /* _TRACE_WRITEBACK_H */ | 759 | #endif /* _TRACE_WRITEBACK_H */ |
| 746 | 760 | ||
| 747 | /* This part must be outside protection */ | 761 | /* This part must be outside protection */ |
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 546b38886e11..e398beac67b8 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h | |||
| @@ -80,5 +80,7 @@ | |||
| 80 | #define BPF_FS_MAGIC 0xcafe4a11 | 80 | #define BPF_FS_MAGIC 0xcafe4a11 |
| 81 | /* Since UDF 2.01 is ISO 13346 based... */ | 81 | /* Since UDF 2.01 is ISO 13346 based... */ |
| 82 | #define UDF_SUPER_MAGIC 0x15013346 | 82 | #define UDF_SUPER_MAGIC 0x15013346 |
| 83 | #define BALLOON_KVM_MAGIC 0x13661366 | ||
| 84 | #define ZSMALLOC_MAGIC 0x58295829 | ||
| 83 | 85 | ||
| 84 | #endif /* __LINUX_MAGIC_H__ */ | 86 | #endif /* __LINUX_MAGIC_H__ */ |
