diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 13:31:36 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 13:31:36 -0500 |
| commit | 8dcd175bc3d50b78413c56d5b17d4bddd77412ef (patch) | |
| tree | 2c2fb25759b43f2e73830f07ef3b444d76825280 /include | |
| parent | afe6fe7036c6efdcb46cabc64bec9b6e4a005210 (diff) | |
| parent | fff04900ea79915939ef6a3aad78fca6511a3034 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- a few misc things
- ocfs2 updates
- most of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (159 commits)
tools/testing/selftests/proc/proc-self-syscall.c: remove duplicate include
proc: more robust bulk read test
proc: test /proc/*/maps, smaps, smaps_rollup, statm
proc: use seq_puts() everywhere
proc: read kernel cpu stat pointer once
proc: remove unused argument in proc_pid_lookup()
fs/proc/thread_self.c: code cleanup for proc_setup_thread_self()
fs/proc/self.c: code cleanup for proc_setup_self()
proc: return exit code 4 for skipped tests
mm,mremap: bail out earlier in mremap_to under map pressure
mm/sparse: fix a bad comparison
mm/memory.c: do_fault: avoid usage of stale vm_area_struct
writeback: fix inode cgroup switching comment
mm/huge_memory.c: fix "orig_pud" set but not used
mm/hotplug: fix an imbalance with DEBUG_PAGEALLOC
mm/memcontrol.c: fix bad line in comment
mm/cma.c: cma_declare_contiguous: correct err handling
mm/page_ext.c: fix an imbalance with kmemleak
mm/compaction: pass pgdat to too_many_isolated() instead of zone
mm: remove zone_lru_lock() function, access ->lru_lock directly
...
Diffstat (limited to 'include')
30 files changed, 305 insertions, 119 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 05e61e6c843f..fa782fba51ee 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
| @@ -606,7 +606,7 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd) | |||
| 606 | return 0; | 606 | return 0; |
| 607 | } | 607 | } |
| 608 | 608 | ||
| 609 | static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, | 609 | static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma, |
| 610 | unsigned long addr, | 610 | unsigned long addr, |
| 611 | pte_t *ptep) | 611 | pte_t *ptep) |
| 612 | { | 612 | { |
| @@ -615,10 +615,10 @@ static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, | |||
| 615 | * non-present, preventing the hardware from asynchronously | 615 | * non-present, preventing the hardware from asynchronously |
| 616 | * updating it. | 616 | * updating it. |
| 617 | */ | 617 | */ |
| 618 | return ptep_get_and_clear(mm, addr, ptep); | 618 | return ptep_get_and_clear(vma->vm_mm, addr, ptep); |
| 619 | } | 619 | } |
| 620 | 620 | ||
| 621 | static inline void __ptep_modify_prot_commit(struct mm_struct *mm, | 621 | static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, |
| 622 | unsigned long addr, | 622 | unsigned long addr, |
| 623 | pte_t *ptep, pte_t pte) | 623 | pte_t *ptep, pte_t pte) |
| 624 | { | 624 | { |
| @@ -626,7 +626,7 @@ static inline void __ptep_modify_prot_commit(struct mm_struct *mm, | |||
| 626 | * The pte is non-present, so there's no hardware state to | 626 | * The pte is non-present, so there's no hardware state to |
| 627 | * preserve. | 627 | * preserve. |
| 628 | */ | 628 | */ |
| 629 | set_pte_at(mm, addr, ptep, pte); | 629 | set_pte_at(vma->vm_mm, addr, ptep, pte); |
| 630 | } | 630 | } |
| 631 | 631 | ||
| 632 | #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION | 632 | #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION |
| @@ -644,22 +644,22 @@ static inline void __ptep_modify_prot_commit(struct mm_struct *mm, | |||
| 644 | * queue the update to be done at some later time. The update must be | 644 | * queue the update to be done at some later time. The update must be |
| 645 | * actually committed before the pte lock is released, however. | 645 | * actually committed before the pte lock is released, however. |
| 646 | */ | 646 | */ |
| 647 | static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, | 647 | static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, |
| 648 | unsigned long addr, | 648 | unsigned long addr, |
| 649 | pte_t *ptep) | 649 | pte_t *ptep) |
| 650 | { | 650 | { |
| 651 | return __ptep_modify_prot_start(mm, addr, ptep); | 651 | return __ptep_modify_prot_start(vma, addr, ptep); |
| 652 | } | 652 | } |
| 653 | 653 | ||
| 654 | /* | 654 | /* |
| 655 | * Commit an update to a pte, leaving any hardware-controlled bits in | 655 | * Commit an update to a pte, leaving any hardware-controlled bits in |
| 656 | * the PTE unmodified. | 656 | * the PTE unmodified. |
| 657 | */ | 657 | */ |
| 658 | static inline void ptep_modify_prot_commit(struct mm_struct *mm, | 658 | static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, |
| 659 | unsigned long addr, | 659 | unsigned long addr, |
| 660 | pte_t *ptep, pte_t pte) | 660 | pte_t *ptep, pte_t old_pte, pte_t pte) |
| 661 | { | 661 | { |
| 662 | __ptep_modify_prot_commit(mm, addr, ptep, pte); | 662 | __ptep_modify_prot_commit(vma, addr, ptep, pte); |
| 663 | } | 663 | } |
| 664 | #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ | 664 | #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ |
| 665 | #endif /* CONFIG_MMU */ | 665 | #endif /* CONFIG_MMU */ |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index c28a47cbe355..f9b029180241 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -365,7 +365,7 @@ unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) | |||
| 365 | rcu_read_lock(); | 365 | rcu_read_lock(); |
| 366 | 366 | ||
| 367 | /* | 367 | /* |
| 368 | * Paired with store_release in inode_switch_wb_work_fn() and | 368 | * Paired with store_release in inode_switch_wbs_work_fn() and |
| 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. | 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. |
| 370 | */ | 370 | */ |
| 371 | cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; | 371 | cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; |
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 53051f3d8f25..f111c780ef1d 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h | |||
| @@ -4,15 +4,18 @@ | |||
| 4 | * | 4 | * |
| 5 | * Common interface definitions for making balloon pages movable by compaction. | 5 | * Common interface definitions for making balloon pages movable by compaction. |
| 6 | * | 6 | * |
| 7 | * Despite being perfectly possible to perform ballooned pages migration, they | 7 | * Balloon page migration makes use of the general non-lru movable page |
| 8 | * make a special corner case to compaction scans because balloon pages are not | 8 | * feature. |
| 9 | * enlisted at any LRU list like the other pages we do compact / migrate. | 9 | * |
| 10 | * page->private is used to reference the responsible balloon device. | ||
| 11 | * page->mapping is used in context of non-lru page migration to reference | ||
| 12 | * the address space operations for page isolation/migration/compaction. | ||
| 10 | * | 13 | * |
| 11 | * As the page isolation scanning step a compaction thread does is a lockless | 14 | * As the page isolation scanning step a compaction thread does is a lockless |
| 12 | * procedure (from a page standpoint), it might bring some racy situations while | 15 | * procedure (from a page standpoint), it might bring some racy situations while |
| 13 | * performing balloon page compaction. In order to sort out these racy scenarios | 16 | * performing balloon page compaction. In order to sort out these racy scenarios |
| 14 | * and safely perform balloon's page compaction and migration we must, always, | 17 | * and safely perform balloon's page compaction and migration we must, always, |
| 15 | * ensure following these three simple rules: | 18 | * ensure following these simple rules: |
| 16 | * | 19 | * |
| 17 | * i. when updating a balloon's page ->mapping element, strictly do it under | 20 | * i. when updating a balloon's page ->mapping element, strictly do it under |
| 18 | * the following lock order, independently of the far superior | 21 | * the following lock order, independently of the far superior |
| @@ -21,19 +24,8 @@ | |||
| 21 | * +--spin_lock_irq(&b_dev_info->pages_lock); | 24 | * +--spin_lock_irq(&b_dev_info->pages_lock); |
| 22 | * ... page->mapping updates here ... | 25 | * ... page->mapping updates here ... |
| 23 | * | 26 | * |
| 24 | * ii. before isolating or dequeueing a balloon page from the balloon device | 27 | * ii. isolation or dequeueing procedure must remove the page from balloon |
| 25 | * pages list, the page reference counter must be raised by one and the | 28 | * device page list under b_dev_info->pages_lock. |
| 26 | * extra refcount must be dropped when the page is enqueued back into | ||
| 27 | * the balloon device page list, thus a balloon page keeps its reference | ||
| 28 | * counter raised only while it is under our special handling; | ||
| 29 | * | ||
| 30 | * iii. after the lockless scan step have selected a potential balloon page for | ||
| 31 | * isolation, re-test the PageBalloon mark and the PagePrivate flag | ||
| 32 | * under the proper page lock, to ensure isolating a valid balloon page | ||
| 33 | * (not yet isolated, nor under release procedure) | ||
| 34 | * | ||
| 35 | * iv. isolation or dequeueing procedure must clear PagePrivate flag under | ||
| 36 | * page lock together with removing page from balloon device page list. | ||
| 37 | * | 29 | * |
| 38 | * The functions provided by this interface are placed to help on coping with | 30 | * The functions provided by this interface are placed to help on coping with |
| 39 | * the aforementioned balloon page corner case, as well as to ensure the simple | 31 | * the aforementioned balloon page corner case, as well as to ensure the simple |
| @@ -103,7 +95,7 @@ extern int balloon_page_migrate(struct address_space *mapping, | |||
| 103 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, | 95 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, |
| 104 | struct page *page) | 96 | struct page *page) |
| 105 | { | 97 | { |
| 106 | __SetPageBalloon(page); | 98 | __SetPageOffline(page); |
| 107 | __SetPageMovable(page, balloon->inode->i_mapping); | 99 | __SetPageMovable(page, balloon->inode->i_mapping); |
| 108 | set_page_private(page, (unsigned long)balloon); | 100 | set_page_private(page, (unsigned long)balloon); |
| 109 | list_add(&page->lru, &balloon->pages); | 101 | list_add(&page->lru, &balloon->pages); |
| @@ -119,7 +111,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, | |||
| 119 | */ | 111 | */ |
| 120 | static inline void balloon_page_delete(struct page *page) | 112 | static inline void balloon_page_delete(struct page *page) |
| 121 | { | 113 | { |
| 122 | __ClearPageBalloon(page); | 114 | __ClearPageOffline(page); |
| 123 | __ClearPageMovable(page); | 115 | __ClearPageMovable(page); |
| 124 | set_page_private(page, 0); | 116 | set_page_private(page, 0); |
| 125 | /* | 117 | /* |
| @@ -149,13 +141,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void) | |||
| 149 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, | 141 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, |
| 150 | struct page *page) | 142 | struct page *page) |
| 151 | { | 143 | { |
| 152 | __SetPageBalloon(page); | 144 | __SetPageOffline(page); |
| 153 | list_add(&page->lru, &balloon->pages); | 145 | list_add(&page->lru, &balloon->pages); |
| 154 | } | 146 | } |
| 155 | 147 | ||
| 156 | static inline void balloon_page_delete(struct page *page) | 148 | static inline void balloon_page_delete(struct page *page) |
| 157 | { | 149 | { |
| 158 | __ClearPageBalloon(page); | 150 | __ClearPageOffline(page); |
| 159 | list_del(&page->lru); | 151 | list_del(&page->lru); |
| 160 | } | 152 | } |
| 161 | 153 | ||
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8fcbae1b8db0..aad3babef007 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
| @@ -32,6 +32,7 @@ struct kernfs_node; | |||
| 32 | struct kernfs_ops; | 32 | struct kernfs_ops; |
| 33 | struct kernfs_open_file; | 33 | struct kernfs_open_file; |
| 34 | struct seq_file; | 34 | struct seq_file; |
| 35 | struct poll_table_struct; | ||
| 35 | 36 | ||
| 36 | #define MAX_CGROUP_TYPE_NAMELEN 32 | 37 | #define MAX_CGROUP_TYPE_NAMELEN 32 |
| 37 | #define MAX_CGROUP_ROOT_NAMELEN 64 | 38 | #define MAX_CGROUP_ROOT_NAMELEN 64 |
| @@ -574,6 +575,9 @@ struct cftype { | |||
| 574 | ssize_t (*write)(struct kernfs_open_file *of, | 575 | ssize_t (*write)(struct kernfs_open_file *of, |
| 575 | char *buf, size_t nbytes, loff_t off); | 576 | char *buf, size_t nbytes, loff_t off); |
| 576 | 577 | ||
| 578 | __poll_t (*poll)(struct kernfs_open_file *of, | ||
| 579 | struct poll_table_struct *pt); | ||
| 580 | |||
| 577 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 581 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 578 | struct lock_class_key lockdep_key; | 582 | struct lock_class_key lockdep_key; |
| 579 | #endif | 583 | #endif |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 68250a57aace..9569e7c786d3 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
| @@ -88,14 +88,13 @@ extern int sysctl_compact_memory; | |||
| 88 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, | 88 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, |
| 89 | void __user *buffer, size_t *length, loff_t *ppos); | 89 | void __user *buffer, size_t *length, loff_t *ppos); |
| 90 | extern int sysctl_extfrag_threshold; | 90 | extern int sysctl_extfrag_threshold; |
| 91 | extern int sysctl_extfrag_handler(struct ctl_table *table, int write, | ||
| 92 | void __user *buffer, size_t *length, loff_t *ppos); | ||
| 93 | extern int sysctl_compact_unevictable_allowed; | 91 | extern int sysctl_compact_unevictable_allowed; |
| 94 | 92 | ||
| 95 | extern int fragmentation_index(struct zone *zone, unsigned int order); | 93 | extern int fragmentation_index(struct zone *zone, unsigned int order); |
| 96 | extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, | 94 | extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, |
| 97 | unsigned int order, unsigned int alloc_flags, | 95 | unsigned int order, unsigned int alloc_flags, |
| 98 | const struct alloc_context *ac, enum compact_priority prio); | 96 | const struct alloc_context *ac, enum compact_priority prio, |
| 97 | struct page **page); | ||
| 99 | extern void reset_isolation_suitable(pg_data_t *pgdat); | 98 | extern void reset_isolation_suitable(pg_data_t *pgdat); |
| 100 | extern enum compact_result compaction_suitable(struct zone *zone, int order, | 99 | extern enum compact_result compaction_suitable(struct zone *zone, int order, |
| 101 | unsigned int alloc_flags, int classzone_idx); | 100 | unsigned int alloc_flags, int classzone_idx); |
| @@ -227,8 +226,8 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i | |||
| 227 | 226 | ||
| 228 | #endif /* CONFIG_COMPACTION */ | 227 | #endif /* CONFIG_COMPACTION */ |
| 229 | 228 | ||
| 230 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | ||
| 231 | struct node; | 229 | struct node; |
| 230 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | ||
| 232 | extern int compaction_register_node(struct node *node); | 231 | extern int compaction_register_node(struct node *node); |
| 233 | extern void compaction_unregister_node(struct node *node); | 232 | extern void compaction_unregister_node(struct node *node); |
| 234 | 233 | ||
diff --git a/include/linux/device.h b/include/linux/device.h index 6cb4640b6160..4d2f13e8c540 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -1095,7 +1095,7 @@ static inline void set_dev_node(struct device *dev, int node) | |||
| 1095 | #else | 1095 | #else |
| 1096 | static inline int dev_to_node(struct device *dev) | 1096 | static inline int dev_to_node(struct device *dev) |
| 1097 | { | 1097 | { |
| 1098 | return -1; | 1098 | return NUMA_NO_NODE; |
| 1099 | } | 1099 | } |
| 1100 | static inline void set_dev_node(struct device *dev, int node) | 1100 | static inline void set_dev_node(struct device *dev, int node) |
| 1101 | { | 1101 | { |
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 011965c08b93..6d775984905b 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h | |||
| @@ -7,6 +7,13 @@ | |||
| 7 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
| 8 | #include <linux/jump_label.h> | 8 | #include <linux/jump_label.h> |
| 9 | 9 | ||
| 10 | /* | ||
| 11 | * Return code to denote that requested number of | ||
| 12 | * frontswap pages are unused (moved to page cache). | ||
| 13 | * Used in shmem_unuse and try_to_unuse. | ||
| 14 | */ | ||
| 15 | #define FRONTSWAP_PAGES_UNUSED 2 | ||
| 16 | |||
| 10 | struct frontswap_ops { | 17 | struct frontswap_ops { |
| 11 | void (*init)(unsigned); /* this swap type was just swapon'ed */ | 18 | void (*init)(unsigned); /* this swap type was just swapon'ed */ |
| 12 | int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ | 19 | int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index fd423fec8d83..08f26046233e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2091,7 +2091,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) | |||
| 2091 | * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to | 2091 | * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to |
| 2092 | * synchronize competing switching instances and to tell | 2092 | * synchronize competing switching instances and to tell |
| 2093 | * wb stat updates to grab the i_pages lock. See | 2093 | * wb stat updates to grab the i_pages lock. See |
| 2094 | * inode_switch_wb_work_fn() for details. | 2094 | * inode_switch_wbs_work_fn() for details. |
| 2095 | * | 2095 | * |
| 2096 | * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper | 2096 | * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper |
| 2097 | * and work dirs among overlayfs mounts. | 2097 | * and work dirs among overlayfs mounts. |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 5f5e25fd6149..fdab7de7490d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -24,21 +24,21 @@ struct vm_area_struct; | |||
| 24 | #define ___GFP_HIGH 0x20u | 24 | #define ___GFP_HIGH 0x20u |
| 25 | #define ___GFP_IO 0x40u | 25 | #define ___GFP_IO 0x40u |
| 26 | #define ___GFP_FS 0x80u | 26 | #define ___GFP_FS 0x80u |
| 27 | #define ___GFP_WRITE 0x100u | 27 | #define ___GFP_ZERO 0x100u |
| 28 | #define ___GFP_NOWARN 0x200u | 28 | #define ___GFP_ATOMIC 0x200u |
| 29 | #define ___GFP_RETRY_MAYFAIL 0x400u | 29 | #define ___GFP_DIRECT_RECLAIM 0x400u |
| 30 | #define ___GFP_NOFAIL 0x800u | 30 | #define ___GFP_KSWAPD_RECLAIM 0x800u |
| 31 | #define ___GFP_NORETRY 0x1000u | 31 | #define ___GFP_WRITE 0x1000u |
| 32 | #define ___GFP_MEMALLOC 0x2000u | 32 | #define ___GFP_NOWARN 0x2000u |
| 33 | #define ___GFP_COMP 0x4000u | 33 | #define ___GFP_RETRY_MAYFAIL 0x4000u |
| 34 | #define ___GFP_ZERO 0x8000u | 34 | #define ___GFP_NOFAIL 0x8000u |
| 35 | #define ___GFP_NOMEMALLOC 0x10000u | 35 | #define ___GFP_NORETRY 0x10000u |
| 36 | #define ___GFP_HARDWALL 0x20000u | 36 | #define ___GFP_MEMALLOC 0x20000u |
| 37 | #define ___GFP_THISNODE 0x40000u | 37 | #define ___GFP_COMP 0x40000u |
| 38 | #define ___GFP_ATOMIC 0x80000u | 38 | #define ___GFP_NOMEMALLOC 0x80000u |
| 39 | #define ___GFP_ACCOUNT 0x100000u | 39 | #define ___GFP_HARDWALL 0x100000u |
| 40 | #define ___GFP_DIRECT_RECLAIM 0x200000u | 40 | #define ___GFP_THISNODE 0x200000u |
| 41 | #define ___GFP_KSWAPD_RECLAIM 0x400000u | 41 | #define ___GFP_ACCOUNT 0x400000u |
| 42 | #ifdef CONFIG_LOCKDEP | 42 | #ifdef CONFIG_LOCKDEP |
| 43 | #define ___GFP_NOLOCKDEP 0x800000u | 43 | #define ___GFP_NOLOCKDEP 0x800000u |
| 44 | #else | 44 | #else |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 087fd5f48c91..ea35263eb76b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -371,6 +371,8 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, | |||
| 371 | nodemask_t *nmask); | 371 | nodemask_t *nmask); |
| 372 | struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, | 372 | struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, |
| 373 | unsigned long address); | 373 | unsigned long address); |
| 374 | struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, | ||
| 375 | int nid, nodemask_t *nmask); | ||
| 374 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, | 376 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, |
| 375 | pgoff_t idx); | 377 | pgoff_t idx); |
| 376 | 378 | ||
| @@ -493,17 +495,54 @@ static inline pgoff_t basepage_index(struct page *page) | |||
| 493 | extern int dissolve_free_huge_page(struct page *page); | 495 | extern int dissolve_free_huge_page(struct page *page); |
| 494 | extern int dissolve_free_huge_pages(unsigned long start_pfn, | 496 | extern int dissolve_free_huge_pages(unsigned long start_pfn, |
| 495 | unsigned long end_pfn); | 497 | unsigned long end_pfn); |
| 496 | static inline bool hugepage_migration_supported(struct hstate *h) | 498 | |
| 497 | { | ||
| 498 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION | 499 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
| 500 | #ifndef arch_hugetlb_migration_supported | ||
| 501 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) | ||
| 502 | { | ||
| 499 | if ((huge_page_shift(h) == PMD_SHIFT) || | 503 | if ((huge_page_shift(h) == PMD_SHIFT) || |
| 500 | (huge_page_shift(h) == PGDIR_SHIFT)) | 504 | (huge_page_shift(h) == PUD_SHIFT) || |
| 505 | (huge_page_shift(h) == PGDIR_SHIFT)) | ||
| 501 | return true; | 506 | return true; |
| 502 | else | 507 | else |
| 503 | return false; | 508 | return false; |
| 509 | } | ||
| 510 | #endif | ||
| 504 | #else | 511 | #else |
| 512 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) | ||
| 513 | { | ||
| 505 | return false; | 514 | return false; |
| 515 | } | ||
| 506 | #endif | 516 | #endif |
| 517 | |||
| 518 | static inline bool hugepage_migration_supported(struct hstate *h) | ||
| 519 | { | ||
| 520 | return arch_hugetlb_migration_supported(h); | ||
| 521 | } | ||
| 522 | |||
| 523 | /* | ||
| 524 | * Movability check is different as compared to migration check. | ||
| 525 | * It determines whether or not a huge page should be placed on | ||
| 526 | * movable zone or not. Movability of any huge page should be | ||
| 527 | * required only if huge page size is supported for migration. | ||
| 528 | * There wont be any reason for the huge page to be movable if | ||
| 529 | * it is not migratable to start with. Also the size of the huge | ||
| 530 | * page should be large enough to be placed under a movable zone | ||
| 531 | * and still feasible enough to be migratable. Just the presence | ||
| 532 | * in movable zone does not make the migration feasible. | ||
| 533 | * | ||
| 534 | * So even though large huge page sizes like the gigantic ones | ||
| 535 | * are migratable they should not be movable because its not | ||
| 536 | * feasible to migrate them from movable zone. | ||
| 537 | */ | ||
| 538 | static inline bool hugepage_movable_supported(struct hstate *h) | ||
| 539 | { | ||
| 540 | if (!hugepage_migration_supported(h)) | ||
| 541 | return false; | ||
| 542 | |||
| 543 | if (hstate_is_gigantic(h)) | ||
| 544 | return false; | ||
| 545 | return true; | ||
| 507 | } | 546 | } |
| 508 | 547 | ||
| 509 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | 548 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
| @@ -543,6 +582,26 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr | |||
| 543 | set_huge_pte_at(mm, addr, ptep, pte); | 582 | set_huge_pte_at(mm, addr, ptep, pte); |
| 544 | } | 583 | } |
| 545 | #endif | 584 | #endif |
| 585 | |||
| 586 | #ifndef huge_ptep_modify_prot_start | ||
| 587 | #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start | ||
| 588 | static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, | ||
| 589 | unsigned long addr, pte_t *ptep) | ||
| 590 | { | ||
| 591 | return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); | ||
| 592 | } | ||
| 593 | #endif | ||
| 594 | |||
| 595 | #ifndef huge_ptep_modify_prot_commit | ||
| 596 | #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit | ||
| 597 | static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, | ||
| 598 | unsigned long addr, pte_t *ptep, | ||
| 599 | pte_t old_pte, pte_t pte) | ||
| 600 | { | ||
| 601 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte); | ||
| 602 | } | ||
| 603 | #endif | ||
| 604 | |||
| 546 | #else /* CONFIG_HUGETLB_PAGE */ | 605 | #else /* CONFIG_HUGETLB_PAGE */ |
| 547 | struct hstate {}; | 606 | struct hstate {}; |
| 548 | #define alloc_huge_page(v, a, r) NULL | 607 | #define alloc_huge_page(v, a, r) NULL |
| @@ -602,6 +661,11 @@ static inline bool hugepage_migration_supported(struct hstate *h) | |||
| 602 | return false; | 661 | return false; |
| 603 | } | 662 | } |
| 604 | 663 | ||
| 664 | static inline bool hugepage_movable_supported(struct hstate *h) | ||
| 665 | { | ||
| 666 | return false; | ||
| 667 | } | ||
| 668 | |||
| 605 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | 669 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
| 606 | struct mm_struct *mm, pte_t *pte) | 670 | struct mm_struct *mm, pte_t *pte) |
| 607 | { | 671 | { |
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index d314150658a4..a61dc075e2ce 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #ifndef _LINUX_KASAN_CHECKS_H | 2 | #ifndef _LINUX_KASAN_CHECKS_H |
| 3 | #define _LINUX_KASAN_CHECKS_H | 3 | #define _LINUX_KASAN_CHECKS_H |
| 4 | 4 | ||
| 5 | #ifdef CONFIG_KASAN | 5 | #if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL) |
| 6 | void kasan_check_read(const volatile void *p, unsigned int size); | 6 | void kasan_check_read(const volatile void *p, unsigned int size); |
| 7 | void kasan_check_write(const volatile void *p, unsigned int size); | 7 | void kasan_check_write(const volatile void *p, unsigned int size); |
| 8 | #else | 8 | #else |
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5b36b1287a5a..0cac1207bb00 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
| @@ -25,6 +25,7 @@ struct seq_file; | |||
| 25 | struct vm_area_struct; | 25 | struct vm_area_struct; |
| 26 | struct super_block; | 26 | struct super_block; |
| 27 | struct file_system_type; | 27 | struct file_system_type; |
| 28 | struct poll_table_struct; | ||
| 28 | 29 | ||
| 29 | struct kernfs_open_node; | 30 | struct kernfs_open_node; |
| 30 | struct kernfs_iattrs; | 31 | struct kernfs_iattrs; |
| @@ -261,6 +262,9 @@ struct kernfs_ops { | |||
| 261 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, | 262 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, |
| 262 | loff_t off); | 263 | loff_t off); |
| 263 | 264 | ||
| 265 | __poll_t (*poll)(struct kernfs_open_file *of, | ||
| 266 | struct poll_table_struct *pt); | ||
| 267 | |||
| 264 | int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); | 268 | int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); |
| 265 | 269 | ||
| 266 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 270 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| @@ -350,6 +354,8 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, | |||
| 350 | int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, | 354 | int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, |
| 351 | const char *new_name, const void *new_ns); | 355 | const char *new_name, const void *new_ns); |
| 352 | int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); | 356 | int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); |
| 357 | __poll_t kernfs_generic_poll(struct kernfs_open_file *of, | ||
| 358 | struct poll_table_struct *pt); | ||
| 353 | void kernfs_notify(struct kernfs_node *kn); | 359 | void kernfs_notify(struct kernfs_node *kn); |
| 354 | 360 | ||
| 355 | const void *kernfs_super_ns(struct super_block *sb); | 361 | const void *kernfs_super_ns(struct super_block *sb); |
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 161e8164abcf..e48b1e453ff5 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
| @@ -53,6 +53,8 @@ struct page *ksm_might_need_to_copy(struct page *page, | |||
| 53 | 53 | ||
| 54 | void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); | 54 | void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); |
| 55 | void ksm_migrate_page(struct page *newpage, struct page *oldpage); | 55 | void ksm_migrate_page(struct page *newpage, struct page *oldpage); |
| 56 | bool reuse_ksm_page(struct page *page, | ||
| 57 | struct vm_area_struct *vma, unsigned long address); | ||
| 56 | 58 | ||
| 57 | #else /* !CONFIG_KSM */ | 59 | #else /* !CONFIG_KSM */ |
| 58 | 60 | ||
| @@ -86,6 +88,11 @@ static inline void rmap_walk_ksm(struct page *page, | |||
| 86 | static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) | 88 | static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) |
| 87 | { | 89 | { |
| 88 | } | 90 | } |
| 91 | static inline bool reuse_ksm_page(struct page *page, | ||
| 92 | struct vm_area_struct *vma, unsigned long address) | ||
| 93 | { | ||
| 94 | return false; | ||
| 95 | } | ||
| 89 | #endif /* CONFIG_MMU */ | 96 | #endif /* CONFIG_MMU */ |
| 90 | #endif /* !CONFIG_KSM */ | 97 | #endif /* !CONFIG_KSM */ |
| 91 | 98 | ||
diff --git a/include/linux/list.h b/include/linux/list.h index edb7628e46ed..79626b5ab36c 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
| @@ -207,6 +207,17 @@ static inline void list_bulk_move_tail(struct list_head *head, | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | /** | 209 | /** |
| 210 | * list_is_first -- tests whether @list is the first entry in list @head | ||
| 211 | * @list: the entry to test | ||
| 212 | * @head: the head of the list | ||
| 213 | */ | ||
| 214 | static inline int list_is_first(const struct list_head *list, | ||
| 215 | const struct list_head *head) | ||
| 216 | { | ||
| 217 | return list->prev == head; | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 210 | * list_is_last - tests whether @list is the last entry in list @head | 221 | * list_is_last - tests whether @list is the last entry in list @head |
| 211 | * @list: the entry to test | 222 | * @list: the entry to test |
| 212 | * @head: the head of the list | 223 | * @head: the head of the list |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 83ae11cbd12c..1f3d880b7ca1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -429,6 +429,11 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) | |||
| 429 | } | 429 | } |
| 430 | struct mem_cgroup *mem_cgroup_from_id(unsigned short id); | 430 | struct mem_cgroup *mem_cgroup_from_id(unsigned short id); |
| 431 | 431 | ||
| 432 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) | ||
| 433 | { | ||
| 434 | return mem_cgroup_from_css(seq_css(m)); | ||
| 435 | } | ||
| 436 | |||
| 432 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) | 437 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
| 433 | { | 438 | { |
| 434 | struct mem_cgroup_per_node *mz; | 439 | struct mem_cgroup_per_node *mz; |
| @@ -937,6 +942,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) | |||
| 937 | return NULL; | 942 | return NULL; |
| 938 | } | 943 | } |
| 939 | 944 | ||
| 945 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) | ||
| 946 | { | ||
| 947 | return NULL; | ||
| 948 | } | ||
| 949 | |||
| 940 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) | 950 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
| 941 | { | 951 | { |
| 942 | return NULL; | 952 | return NULL; |
| @@ -1273,12 +1283,12 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) | |||
| 1273 | 1283 | ||
| 1274 | struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); | 1284 | struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); |
| 1275 | void memcg_kmem_put_cache(struct kmem_cache *cachep); | 1285 | void memcg_kmem_put_cache(struct kmem_cache *cachep); |
| 1276 | int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | ||
| 1277 | struct mem_cgroup *memcg); | ||
| 1278 | 1286 | ||
| 1279 | #ifdef CONFIG_MEMCG_KMEM | 1287 | #ifdef CONFIG_MEMCG_KMEM |
| 1280 | int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); | 1288 | int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); |
| 1281 | void memcg_kmem_uncharge(struct page *page, int order); | 1289 | void __memcg_kmem_uncharge(struct page *page, int order); |
| 1290 | int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | ||
| 1291 | struct mem_cgroup *memcg); | ||
| 1282 | 1292 | ||
| 1283 | extern struct static_key_false memcg_kmem_enabled_key; | 1293 | extern struct static_key_false memcg_kmem_enabled_key; |
| 1284 | extern struct workqueue_struct *memcg_kmem_cache_wq; | 1294 | extern struct workqueue_struct *memcg_kmem_cache_wq; |
| @@ -1300,6 +1310,26 @@ static inline bool memcg_kmem_enabled(void) | |||
| 1300 | return static_branch_unlikely(&memcg_kmem_enabled_key); | 1310 | return static_branch_unlikely(&memcg_kmem_enabled_key); |
| 1301 | } | 1311 | } |
| 1302 | 1312 | ||
| 1313 | static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | ||
| 1314 | { | ||
| 1315 | if (memcg_kmem_enabled()) | ||
| 1316 | return __memcg_kmem_charge(page, gfp, order); | ||
| 1317 | return 0; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | static inline void memcg_kmem_uncharge(struct page *page, int order) | ||
| 1321 | { | ||
| 1322 | if (memcg_kmem_enabled()) | ||
| 1323 | __memcg_kmem_uncharge(page, order); | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, | ||
| 1327 | int order, struct mem_cgroup *memcg) | ||
| 1328 | { | ||
| 1329 | if (memcg_kmem_enabled()) | ||
| 1330 | return __memcg_kmem_charge_memcg(page, gfp, order, memcg); | ||
| 1331 | return 0; | ||
| 1332 | } | ||
| 1303 | /* | 1333 | /* |
| 1304 | * helper for accessing a memcg's index. It will be used as an index in the | 1334 | * helper for accessing a memcg's index. It will be used as an index in the |
| 1305 | * child cache array in kmem_cache, and also to derive its name. This function | 1335 | * child cache array in kmem_cache, and also to derive its name. This function |
| @@ -1325,6 +1355,15 @@ static inline void memcg_kmem_uncharge(struct page *page, int order) | |||
| 1325 | { | 1355 | { |
| 1326 | } | 1356 | } |
| 1327 | 1357 | ||
| 1358 | static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | ||
| 1359 | { | ||
| 1360 | return 0; | ||
| 1361 | } | ||
| 1362 | |||
| 1363 | static inline void __memcg_kmem_uncharge(struct page *page, int order) | ||
| 1364 | { | ||
| 1365 | } | ||
| 1366 | |||
| 1328 | #define for_each_memcg_cache_index(_idx) \ | 1367 | #define for_each_memcg_cache_index(_idx) \ |
| 1329 | for (; NULL; ) | 1368 | for (; NULL; ) |
| 1330 | 1369 | ||
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 368267c1b71b..52869d6d38b3 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
| @@ -89,7 +89,7 @@ extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
| 89 | unsigned long *valid_start, unsigned long *valid_end); | 89 | unsigned long *valid_start, unsigned long *valid_end); |
| 90 | extern void __offline_isolated_pages(unsigned long, unsigned long); | 90 | extern void __offline_isolated_pages(unsigned long, unsigned long); |
| 91 | 91 | ||
| 92 | typedef void (*online_page_callback_t)(struct page *page); | 92 | typedef void (*online_page_callback_t)(struct page *page, unsigned int order); |
| 93 | 93 | ||
| 94 | extern int set_online_page_callback(online_page_callback_t callback); | 94 | extern int set_online_page_callback(online_page_callback_t callback); |
| 95 | extern int restore_online_page_callback(online_page_callback_t callback); | 95 | extern int restore_online_page_callback(online_page_callback_t callback); |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 80bb6408fe73..20ec56f8e2bb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1536,7 +1536,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, | |||
| 1536 | unsigned int gup_flags, struct page **pages, int *locked); | 1536 | unsigned int gup_flags, struct page **pages, int *locked); |
| 1537 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | 1537 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 1538 | struct page **pages, unsigned int gup_flags); | 1538 | struct page **pages, unsigned int gup_flags); |
| 1539 | #ifdef CONFIG_FS_DAX | 1539 | |
| 1540 | #if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA) | ||
| 1540 | long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, | 1541 | long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, |
| 1541 | unsigned int gup_flags, struct page **pages, | 1542 | unsigned int gup_flags, struct page **pages, |
| 1542 | struct vm_area_struct **vmas); | 1543 | struct vm_area_struct **vmas); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0a36a22228e7..ab9b48420200 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -80,7 +80,7 @@ struct page { | |||
| 80 | struct { /* Page cache and anonymous pages */ | 80 | struct { /* Page cache and anonymous pages */ |
| 81 | /** | 81 | /** |
| 82 | * @lru: Pageout list, eg. active_list protected by | 82 | * @lru: Pageout list, eg. active_list protected by |
| 83 | * zone_lru_lock. Sometimes used as a generic list | 83 | * pgdat->lru_lock. Sometimes used as a generic list |
| 84 | * by the page owner. | 84 | * by the page owner. |
| 85 | */ | 85 | */ |
| 86 | struct list_head lru; | 86 | struct list_head lru; |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 842f9189537b..fba7741533be 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -480,6 +480,8 @@ struct zone { | |||
| 480 | unsigned long compact_cached_free_pfn; | 480 | unsigned long compact_cached_free_pfn; |
| 481 | /* pfn where async and sync compaction migration scanner should start */ | 481 | /* pfn where async and sync compaction migration scanner should start */ |
| 482 | unsigned long compact_cached_migrate_pfn[2]; | 482 | unsigned long compact_cached_migrate_pfn[2]; |
| 483 | unsigned long compact_init_migrate_pfn; | ||
| 484 | unsigned long compact_init_free_pfn; | ||
| 483 | #endif | 485 | #endif |
| 484 | 486 | ||
| 485 | #ifdef CONFIG_COMPACTION | 487 | #ifdef CONFIG_COMPACTION |
| @@ -728,10 +730,6 @@ typedef struct pglist_data { | |||
| 728 | 730 | ||
| 729 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 731 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
| 730 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) | 732 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) |
| 731 | static inline spinlock_t *zone_lru_lock(struct zone *zone) | ||
| 732 | { | ||
| 733 | return &zone->zone_pgdat->lru_lock; | ||
| 734 | } | ||
| 735 | 733 | ||
| 736 | static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) | 734 | static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) |
| 737 | { | 735 | { |
| @@ -1299,7 +1297,7 @@ void memory_present(int nid, unsigned long start, unsigned long end); | |||
| 1299 | 1297 | ||
| 1300 | /* | 1298 | /* |
| 1301 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we | 1299 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we |
| 1302 | * need to check pfn validility within that MAX_ORDER_NR_PAGES block. | 1300 | * need to check pfn validity within that MAX_ORDER_NR_PAGES block. |
| 1303 | * pfn_valid_within() should be used in this case; we optimise this away | 1301 | * pfn_valid_within() should be used in this case; we optimise this away |
| 1304 | * when we have no holes within a MAX_ORDER_NR_PAGES block. | 1302 | * when we have no holes within a MAX_ORDER_NR_PAGES block. |
| 1305 | */ | 1303 | */ |
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 5a30ad594ccc..27e7fa36f707 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h | |||
| @@ -444,8 +444,8 @@ static inline int next_memory_node(int nid) | |||
| 444 | return next_node(nid, node_states[N_MEMORY]); | 444 | return next_node(nid, node_states[N_MEMORY]); |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | extern int nr_node_ids; | 447 | extern unsigned int nr_node_ids; |
| 448 | extern int nr_online_nodes; | 448 | extern unsigned int nr_online_nodes; |
| 449 | 449 | ||
| 450 | static inline void node_set_online(int nid) | 450 | static inline void node_set_online(int nid) |
| 451 | { | 451 | { |
| @@ -485,8 +485,8 @@ static inline int num_node_state(enum node_states state) | |||
| 485 | #define first_online_node 0 | 485 | #define first_online_node 0 |
| 486 | #define first_memory_node 0 | 486 | #define first_memory_node 0 |
| 487 | #define next_online_node(nid) (MAX_NUMNODES) | 487 | #define next_online_node(nid) (MAX_NUMNODES) |
| 488 | #define nr_node_ids 1 | 488 | #define nr_node_ids 1U |
| 489 | #define nr_online_nodes 1 | 489 | #define nr_online_nodes 1U |
| 490 | 490 | ||
| 491 | #define node_set_online(node) node_set_state((node), N_ONLINE) | 491 | #define node_set_online(node) node_set_state((node), N_ONLINE) |
| 492 | #define node_set_offline(node) node_clear_state((node), N_ONLINE) | 492 | #define node_set_offline(node) node_clear_state((node), N_ONLINE) |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 39b4494e29f1..9f8712a4b1a5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -17,8 +17,37 @@ | |||
| 17 | /* | 17 | /* |
| 18 | * Various page->flags bits: | 18 | * Various page->flags bits: |
| 19 | * | 19 | * |
| 20 | * PG_reserved is set for special pages, which can never be swapped out. Some | 20 | * PG_reserved is set for special pages. The "struct page" of such a page |
| 21 | * of them might not even exist... | 21 | * should in general not be touched (e.g. set dirty) except by its owner. |
| 22 | * Pages marked as PG_reserved include: | ||
| 23 | * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS, | ||
| 24 | * initrd, HW tables) | ||
| 25 | * - Pages reserved or allocated early during boot (before the page allocator | ||
| 26 | * was initialized). This includes (depending on the architecture) the | ||
| 27 | * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much | ||
| 28 | * much more. Once (if ever) freed, PG_reserved is cleared and they will | ||
| 29 | * be given to the page allocator. | ||
| 30 | * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying | ||
| 31 | * to read/write these pages might end badly. Don't touch! | ||
| 32 | * - The zero page(s) | ||
| 33 | * - Pages not added to the page allocator when onlining a section because | ||
| 34 | * they were excluded via the online_page_callback() or because they are | ||
| 35 | * PG_hwpoison. | ||
| 36 | * - Pages allocated in the context of kexec/kdump (loaded kernel image, | ||
| 37 | * control pages, vmcoreinfo) | ||
| 38 | * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are | ||
| 39 | * not marked PG_reserved (as they might be in use by somebody else who does | ||
| 40 | * not respect the caching strategy). | ||
| 41 | * - Pages part of an offline section (struct pages of offline sections should | ||
| 42 | * not be trusted as they will be initialized when first onlined). | ||
| 43 | * - MCA pages on ia64 | ||
| 44 | * - Pages holding CPU notes for POWER Firmware Assisted Dump | ||
| 45 | * - Device memory (e.g. PMEM, DAX, HMM) | ||
| 46 | * Some PG_reserved pages will be excluded from the hibernation image. | ||
| 47 | * PG_reserved does in general not hinder anybody from dumping or swapping | ||
| 48 | * and is no longer required for remap_pfn_range(). ioremap might require it. | ||
| 49 | * Consequently, PG_reserved for a page mapped into user space can indicate | ||
| 50 | * the zero page, the vDSO, MMIO pages or device memory. | ||
| 22 | * | 51 | * |
| 23 | * The PG_private bitflag is set on pagecache pages if they contain filesystem | 52 | * The PG_private bitflag is set on pagecache pages if they contain filesystem |
| 24 | * specific data (which is normally at page->private). It can be used by | 53 | * specific data (which is normally at page->private). It can be used by |
| @@ -671,7 +700,7 @@ PAGEFLAG_FALSE(DoubleMap) | |||
| 671 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ | 700 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ |
| 672 | #define PAGE_MAPCOUNT_RESERVE -128 | 701 | #define PAGE_MAPCOUNT_RESERVE -128 |
| 673 | #define PG_buddy 0x00000080 | 702 | #define PG_buddy 0x00000080 |
| 674 | #define PG_balloon 0x00000100 | 703 | #define PG_offline 0x00000100 |
| 675 | #define PG_kmemcg 0x00000200 | 704 | #define PG_kmemcg 0x00000200 |
| 676 | #define PG_table 0x00000400 | 705 | #define PG_table 0x00000400 |
| 677 | 706 | ||
| @@ -706,10 +735,13 @@ static __always_inline void __ClearPage##uname(struct page *page) \ | |||
| 706 | PAGE_TYPE_OPS(Buddy, buddy) | 735 | PAGE_TYPE_OPS(Buddy, buddy) |
| 707 | 736 | ||
| 708 | /* | 737 | /* |
| 709 | * PageBalloon() is true for pages that are on the balloon page list | 738 | * PageOffline() indicates that the page is logically offline although the |
| 710 | * (see mm/balloon_compaction.c). | 739 | * containing section is online. (e.g. inflated in a balloon driver or |
| 740 | * not onlined when onlining the section). | ||
| 741 | * The content of these pages is effectively stale. Such pages should not | ||
| 742 | * be touched (read/write/dump/save) except by their owner. | ||
| 711 | */ | 743 | */ |
| 712 | PAGE_TYPE_OPS(Balloon, balloon) | 744 | PAGE_TYPE_OPS(Offline, offline) |
| 713 | 745 | ||
| 714 | /* | 746 | /* |
| 715 | * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on | 747 | * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e2d7039af6a3..b477a70cc2e4 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -164,7 +164,7 @@ void release_pages(struct page **pages, int nr); | |||
| 164 | * will find the page or it will not. Likewise, the old find_get_page could run | 164 | * will find the page or it will not. Likewise, the old find_get_page could run |
| 165 | * either before the insertion or afterwards, depending on timing. | 165 | * either before the insertion or afterwards, depending on timing. |
| 166 | */ | 166 | */ |
| 167 | static inline int page_cache_get_speculative(struct page *page) | 167 | static inline int __page_cache_add_speculative(struct page *page, int count) |
| 168 | { | 168 | { |
| 169 | #ifdef CONFIG_TINY_RCU | 169 | #ifdef CONFIG_TINY_RCU |
| 170 | # ifdef CONFIG_PREEMPT_COUNT | 170 | # ifdef CONFIG_PREEMPT_COUNT |
| @@ -180,10 +180,10 @@ static inline int page_cache_get_speculative(struct page *page) | |||
| 180 | * SMP requires. | 180 | * SMP requires. |
| 181 | */ | 181 | */ |
| 182 | VM_BUG_ON_PAGE(page_count(page) == 0, page); | 182 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
| 183 | page_ref_inc(page); | 183 | page_ref_add(page, count); |
| 184 | 184 | ||
| 185 | #else | 185 | #else |
| 186 | if (unlikely(!get_page_unless_zero(page))) { | 186 | if (unlikely(!page_ref_add_unless(page, count, 0))) { |
| 187 | /* | 187 | /* |
| 188 | * Either the page has been freed, or will be freed. | 188 | * Either the page has been freed, or will be freed. |
| 189 | * In either case, retry here and the caller should | 189 | * In either case, retry here and the caller should |
| @@ -197,27 +197,14 @@ static inline int page_cache_get_speculative(struct page *page) | |||
| 197 | return 1; | 197 | return 1; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | /* | 200 | static inline int page_cache_get_speculative(struct page *page) |
| 201 | * Same as above, but add instead of inc (could just be merged) | ||
| 202 | */ | ||
| 203 | static inline int page_cache_add_speculative(struct page *page, int count) | ||
| 204 | { | 201 | { |
| 205 | VM_BUG_ON(in_interrupt()); | 202 | return __page_cache_add_speculative(page, 1); |
| 206 | 203 | } | |
| 207 | #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) | ||
| 208 | # ifdef CONFIG_PREEMPT_COUNT | ||
| 209 | VM_BUG_ON(!in_atomic() && !irqs_disabled()); | ||
| 210 | # endif | ||
| 211 | VM_BUG_ON_PAGE(page_count(page) == 0, page); | ||
| 212 | page_ref_add(page, count); | ||
| 213 | |||
| 214 | #else | ||
| 215 | if (unlikely(!page_ref_add_unless(page, count, 0))) | ||
| 216 | return 0; | ||
| 217 | #endif | ||
| 218 | VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); | ||
| 219 | 204 | ||
| 220 | return 1; | 205 | static inline int page_cache_add_speculative(struct page *page, int count) |
| 206 | { | ||
| 207 | return __page_cache_add_speculative(page, count); | ||
| 221 | } | 208 | } |
| 222 | 209 | ||
| 223 | #ifdef CONFIG_NUMA | 210 | #ifdef CONFIG_NUMA |
diff --git a/include/linux/poison.h b/include/linux/poison.h index 15927ebc22f2..5046bad0c1c5 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | */ | 30 | */ |
| 31 | #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) | 31 | #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) |
| 32 | 32 | ||
| 33 | /********** mm/debug-pagealloc.c **********/ | 33 | /********** mm/page_poison.c **********/ |
| 34 | #ifdef CONFIG_PAGE_POISONING_ZERO | 34 | #ifdef CONFIG_PAGE_POISONING_ZERO |
| 35 | #define PAGE_POISON 0x00 | 35 | #define PAGE_POISON 0x00 |
| 36 | #else | 36 | #else |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 903ef29b62c3..f073bd59df32 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -48,6 +48,7 @@ struct pid_namespace; | |||
| 48 | struct pipe_inode_info; | 48 | struct pipe_inode_info; |
| 49 | struct rcu_node; | 49 | struct rcu_node; |
| 50 | struct reclaim_state; | 50 | struct reclaim_state; |
| 51 | struct capture_control; | ||
| 51 | struct robust_list_head; | 52 | struct robust_list_head; |
| 52 | struct sched_attr; | 53 | struct sched_attr; |
| 53 | struct sched_param; | 54 | struct sched_param; |
| @@ -950,6 +951,9 @@ struct task_struct { | |||
| 950 | 951 | ||
| 951 | struct io_context *io_context; | 952 | struct io_context *io_context; |
| 952 | 953 | ||
| 954 | #ifdef CONFIG_COMPACTION | ||
| 955 | struct capture_control *capture_control; | ||
| 956 | #endif | ||
| 953 | /* Ptrace state: */ | 957 | /* Ptrace state: */ |
| 954 | unsigned long ptrace_message; | 958 | unsigned long ptrace_message; |
| 955 | kernel_siginfo_t *last_siginfo; | 959 | kernel_siginfo_t *last_siginfo; |
| @@ -1395,6 +1399,7 @@ extern struct pid *cad_pid; | |||
| 1395 | #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ | 1399 | #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ |
| 1396 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ | 1400 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
| 1397 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1401 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
| 1402 | #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ | ||
| 1398 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ | 1403 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
| 1399 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ | 1404 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ |
| 1400 | 1405 | ||
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 3bfa6a0cbba4..0cd9f10423fb 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
| @@ -148,17 +148,25 @@ static inline bool in_vfork(struct task_struct *tsk) | |||
| 148 | * Applies per-task gfp context to the given allocation flags. | 148 | * Applies per-task gfp context to the given allocation flags. |
| 149 | * PF_MEMALLOC_NOIO implies GFP_NOIO | 149 | * PF_MEMALLOC_NOIO implies GFP_NOIO |
| 150 | * PF_MEMALLOC_NOFS implies GFP_NOFS | 150 | * PF_MEMALLOC_NOFS implies GFP_NOFS |
| 151 | * PF_MEMALLOC_NOCMA implies no allocation from CMA region. | ||
| 151 | */ | 152 | */ |
| 152 | static inline gfp_t current_gfp_context(gfp_t flags) | 153 | static inline gfp_t current_gfp_context(gfp_t flags) |
| 153 | { | 154 | { |
| 154 | /* | 155 | if (unlikely(current->flags & |
| 155 | * NOIO implies both NOIO and NOFS and it is a weaker context | 156 | (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) { |
| 156 | * so always make sure it makes precedence | 157 | /* |
| 157 | */ | 158 | * NOIO implies both NOIO and NOFS and it is a weaker context |
| 158 | if (unlikely(current->flags & PF_MEMALLOC_NOIO)) | 159 | * so always make sure it makes precedence |
| 159 | flags &= ~(__GFP_IO | __GFP_FS); | 160 | */ |
| 160 | else if (unlikely(current->flags & PF_MEMALLOC_NOFS)) | 161 | if (current->flags & PF_MEMALLOC_NOIO) |
| 161 | flags &= ~__GFP_FS; | 162 | flags &= ~(__GFP_IO | __GFP_FS); |
| 163 | else if (current->flags & PF_MEMALLOC_NOFS) | ||
| 164 | flags &= ~__GFP_FS; | ||
| 165 | #ifdef CONFIG_CMA | ||
| 166 | if (current->flags & PF_MEMALLOC_NOCMA) | ||
| 167 | flags &= ~__GFP_MOVABLE; | ||
| 168 | #endif | ||
| 169 | } | ||
| 162 | return flags; | 170 | return flags; |
| 163 | } | 171 | } |
| 164 | 172 | ||
| @@ -248,6 +256,30 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) | |||
| 248 | current->flags = (current->flags & ~PF_MEMALLOC) | flags; | 256 | current->flags = (current->flags & ~PF_MEMALLOC) | flags; |
| 249 | } | 257 | } |
| 250 | 258 | ||
| 259 | #ifdef CONFIG_CMA | ||
| 260 | static inline unsigned int memalloc_nocma_save(void) | ||
| 261 | { | ||
| 262 | unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; | ||
| 263 | |||
| 264 | current->flags |= PF_MEMALLOC_NOCMA; | ||
| 265 | return flags; | ||
| 266 | } | ||
| 267 | |||
| 268 | static inline void memalloc_nocma_restore(unsigned int flags) | ||
| 269 | { | ||
| 270 | current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; | ||
| 271 | } | ||
| 272 | #else | ||
| 273 | static inline unsigned int memalloc_nocma_save(void) | ||
| 274 | { | ||
| 275 | return 0; | ||
| 276 | } | ||
| 277 | |||
| 278 | static inline void memalloc_nocma_restore(unsigned int flags) | ||
| 279 | { | ||
| 280 | } | ||
| 281 | #endif | ||
| 282 | |||
| 251 | #ifdef CONFIG_MEMCG | 283 | #ifdef CONFIG_MEMCG |
| 252 | /** | 284 | /** |
| 253 | * memalloc_use_memcg - Starts the remote memcg charging scope. | 285 | * memalloc_use_memcg - Starts the remote memcg charging scope. |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f155dc607112..f3fb1edb3526 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
| @@ -72,7 +72,8 @@ extern void shmem_unlock_mapping(struct address_space *mapping); | |||
| 72 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, | 72 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
| 73 | pgoff_t index, gfp_t gfp_mask); | 73 | pgoff_t index, gfp_t gfp_mask); |
| 74 | extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); | 74 | extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); |
| 75 | extern int shmem_unuse(swp_entry_t entry, struct page *page); | 75 | extern int shmem_unuse(unsigned int type, bool frontswap, |
| 76 | unsigned long *fs_pages_to_unuse); | ||
| 76 | 77 | ||
| 77 | extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); | 78 | extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); |
| 78 | extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, | 79 | extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 3a1a1dbc6f49..d2153789bd9f 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -81,12 +81,12 @@ struct kmem_cache_order_objects { | |||
| 81 | */ | 81 | */ |
| 82 | struct kmem_cache { | 82 | struct kmem_cache { |
| 83 | struct kmem_cache_cpu __percpu *cpu_slab; | 83 | struct kmem_cache_cpu __percpu *cpu_slab; |
| 84 | /* Used for retriving partial slabs etc */ | 84 | /* Used for retrieving partial slabs, etc. */ |
| 85 | slab_flags_t flags; | 85 | slab_flags_t flags; |
| 86 | unsigned long min_partial; | 86 | unsigned long min_partial; |
| 87 | unsigned int size; /* The size of an object including meta data */ | 87 | unsigned int size; /* The size of an object including metadata */ |
| 88 | unsigned int object_size;/* The size of an object without meta data */ | 88 | unsigned int object_size;/* The size of an object without metadata */ |
| 89 | unsigned int offset; /* Free pointer offset. */ | 89 | unsigned int offset; /* Free pointer offset */ |
| 90 | #ifdef CONFIG_SLUB_CPU_PARTIAL | 90 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 91 | /* Number of per cpu partial objects to keep around */ | 91 | /* Number of per cpu partial objects to keep around */ |
| 92 | unsigned int cpu_partial; | 92 | unsigned int cpu_partial; |
| @@ -110,7 +110,7 @@ struct kmem_cache { | |||
| 110 | #endif | 110 | #endif |
| 111 | #ifdef CONFIG_MEMCG | 111 | #ifdef CONFIG_MEMCG |
| 112 | struct memcg_cache_params memcg_params; | 112 | struct memcg_cache_params memcg_params; |
| 113 | /* for propagation, maximum size of a stored attr */ | 113 | /* For propagation, maximum size of a stored attr */ |
| 114 | unsigned int max_attr_size; | 114 | unsigned int max_attr_size; |
| 115 | #ifdef CONFIG_SYSFS | 115 | #ifdef CONFIG_SYSFS |
| 116 | struct kset *memcg_kset; | 116 | struct kset *memcg_kset; |
| @@ -151,7 +151,7 @@ struct kmem_cache { | |||
| 151 | #else | 151 | #else |
| 152 | #define slub_cpu_partial(s) (0) | 152 | #define slub_cpu_partial(s) (0) |
| 153 | #define slub_set_cpu_partial(s, n) | 153 | #define slub_set_cpu_partial(s, n) |
| 154 | #endif // CONFIG_SLUB_CPU_PARTIAL | 154 | #endif /* CONFIG_SLUB_CPU_PARTIAL */ |
| 155 | 155 | ||
| 156 | #ifdef CONFIG_SYSFS | 156 | #ifdef CONFIG_SYSFS |
| 157 | #define SLAB_SUPPORTS_SYSFS | 157 | #define SLAB_SUPPORTS_SYSFS |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 622025ac1461..fc50e21b3b88 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -307,7 +307,7 @@ struct vma_swap_readahead { | |||
| 307 | }; | 307 | }; |
| 308 | 308 | ||
| 309 | /* linux/mm/workingset.c */ | 309 | /* linux/mm/workingset.c */ |
| 310 | void *workingset_eviction(struct address_space *mapping, struct page *page); | 310 | void *workingset_eviction(struct page *page); |
| 311 | void workingset_refault(struct page *page, void *shadow); | 311 | void workingset_refault(struct page *page, void *shadow); |
| 312 | void workingset_activation(struct page *page); | 312 | void workingset_activation(struct page *page); |
| 313 | 313 | ||
| @@ -625,7 +625,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) | |||
| 625 | return vm_swappiness; | 625 | return vm_swappiness; |
| 626 | 626 | ||
| 627 | /* root ? */ | 627 | /* root ? */ |
| 628 | if (mem_cgroup_disabled() || !memcg->css.parent) | 628 | if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) |
| 629 | return vm_swappiness; | 629 | return vm_swappiness; |
| 630 | 630 | ||
| 631 | return memcg->swappiness; | 631 | return memcg->swappiness; |
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index 6448cdd9a350..a2f8658f1c55 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ | 41 | #define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ |
| 42 | #define F_SEAL_GROW 0x0004 /* prevent file from growing */ | 42 | #define F_SEAL_GROW 0x0004 /* prevent file from growing */ |
| 43 | #define F_SEAL_WRITE 0x0008 /* prevent writes */ | 43 | #define F_SEAL_WRITE 0x0008 /* prevent writes */ |
| 44 | #define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */ | ||
| 44 | /* (1U << 31) is reserved for signed error codes */ | 45 | /* (1U << 31) is reserved for signed error codes */ |
| 45 | 46 | ||
| 46 | /* | 47 | /* |
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h index 21b9113c69da..6f2f2720f3ac 100644 --- a/include/uapi/linux/kernel-page-flags.h +++ b/include/uapi/linux/kernel-page-flags.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | #define KPF_KSM 21 | 33 | #define KPF_KSM 21 |
| 34 | #define KPF_THP 22 | 34 | #define KPF_THP 22 |
| 35 | #define KPF_BALLOON 23 | 35 | #define KPF_OFFLINE 23 |
| 36 | #define KPF_ZERO_PAGE 24 | 36 | #define KPF_ZERO_PAGE 24 |
| 37 | #define KPF_IDLE 25 | 37 | #define KPF_IDLE 25 |
| 38 | #define KPF_PGTABLE 26 | 38 | #define KPF_PGTABLE 26 |
