author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 22:42:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 22:42:40 -0500
commit		7c225c69f86c934e3be9be63ecde754e286838d7 (patch)
tree		ff2df419b0c4886b37407235f7d21215e4cf45e4 /include/linux/mm.h
parent		6363b3f3ac5be096d08c8c504128befa0c033529 (diff)
parent		1b7176aea0a924ac59c6a283129d3e8eb00aa915 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2 updates
- almost all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (131 commits)
memory hotplug: fix comments when adding section
mm: make alloc_node_mem_map a void call if we don't have CONFIG_FLAT_NODE_MEM_MAP
mm: simplify nodemask printing
mm,oom_reaper: remove pointless kthread_run() error check
mm/page_ext.c: check if page_ext is not prepared
writeback: remove unused function parameter
mm: do not rely on preempt_count in print_vma_addr
mm, sparse: do not swamp log with huge vmemmap allocation failures
mm/hmm: remove redundant variable align_end
mm/list_lru.c: mark expected switch fall-through
mm/shmem.c: mark expected switch fall-through
mm/page_alloc.c: broken deferred calculation
mm: don't warn about allocations which stall for too long
fs: fuse: account fuse_inode slab memory as reclaimable
mm, page_alloc: fix potential false positive in __zone_watermark_ok
mm: mlock: remove lru_add_drain_all()
mm, sysctl: make NUMA stats configurable
shmem: convert shmem_init_inodecache() to void
Unify migrate_pages and move_pages access checks
mm, pagevec: rename pagevec drained field
...
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	83
1 file changed, 66 insertions(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91b46f99b4d2..c7b1d617dff6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -96,6 +96,15 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #endif
 
 /*
+ * On some architectures it is expensive to call memset() for small sizes.
+ * Those architectures should provide their own implementation of "struct page"
+ * zeroing by defining this macro in <asm/pgtable.h>.
+ */
+#ifndef mm_zero_struct_page
+#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
+#endif
+
+/*
  * Default maximum number of active map areas, this limits the number of vmas
  * per mm struct. Users can overwrite this number by sysctl but there is a
  * problem.
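
The mm_zero_struct_page() hook added above exists so that architectures where a small memset() call is expensive can zero "struct page" with their own code. A minimal sketch of what an override in an architecture's <asm/pgtable.h> might look like; the 64-byte size and the store-unrolling are illustrative assumptions, not code from this merge:

	/* Hypothetical override: zero struct page with eight explicit
	 * word stores instead of a memset() call.  Assumes this
	 * architecture has sizeof(struct page) == 64. */
	#define mm_zero_struct_page(pp) do {			\
		unsigned long *_p = (unsigned long *)(pp);	\
								\
		BUILD_BUG_ON(sizeof(struct page) != 64);	\
		_p[0] = 0; _p[1] = 0; _p[2] = 0; _p[3] = 0;	\
		_p[4] = 0; _p[5] = 0; _p[6] = 0; _p[7] = 0;	\
	} while (0)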
@@ -1431,7 +1440,13 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 			  struct bdi_writeback *wb);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
-void cancel_dirty_page(struct page *page);
+void __cancel_dirty_page(struct page *page);
+static inline void cancel_dirty_page(struct page *page)
+{
+	/* Avoid atomic ops, locking, etc. when not actually needed. */
+	if (PageDirty(page))
+		__cancel_dirty_page(page);
+}
 int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
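
This is the familiar fast-path split: cancel_dirty_page() keeps only the cheap PageDirty() test inline, and clean pages never reach the out-of-line __cancel_dirty_page() with its atomic ops and locking. A sketch of a call site under that assumption (my_drop_page() is a hypothetical helper, not from this merge):

	/* Hypothetical caller: an unconditional cancel_dirty_page() is
	 * now nearly free for clean pages. */
	static void my_drop_page(struct page *page)
	{
		cancel_dirty_page(page);	/* inline no-op unless PageDirty */
		ClearPageUptodate(page);
	}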
@@ -1599,26 +1614,32 @@ static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
 #endif
 
-#ifdef __PAGETABLE_PUD_FOLDED
+#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
 						unsigned long address)
 {
 	return 0;
 }
+static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
+
 #else
 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
-#endif
 
-#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
-						unsigned long address)
+static inline void mm_inc_nr_puds(struct mm_struct *mm)
 {
-	return 0;
+	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 
-static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm)
+{
+	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
+}
+#endif
 
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+						unsigned long address)
 {
 	return 0;
 }
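
In the folded-PUD and !CONFIG_MMU branch the new mm_inc_nr_puds()/mm_dec_nr_puds() helpers are empty stubs, so callers need no #ifdef of their own; in the real branch each call accounts one full PUD table. On x86-64, for instance, PTRS_PER_PUD is 512 and sizeof(pud_t) is 8, so each helper adds or removes 512 * 8 = 4096 bytes, exactly one 4 KiB page, from mm->pgtables_bytes.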
@@ -1629,25 +1650,47 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
 #else
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 
-static inline void mm_nr_pmds_init(struct mm_struct *mm)
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
 {
-	atomic_long_set(&mm->nr_pmds, 0);
+	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 {
-	return atomic_long_read(&mm->nr_pmds);
+	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
+#endif
 
-static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+#ifdef CONFIG_MMU
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
 {
-	atomic_long_inc(&mm->nr_pmds);
+	atomic_long_set(&mm->pgtables_bytes, 0);
 }
 
-static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->pgtables_bytes);
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm)
+{
+	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
+}
+
+static inline void mm_dec_nr_ptes(struct mm_struct *mm)
 {
-	atomic_long_dec(&mm->nr_pmds);
+	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
 }
+#else
+
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
+static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
 #endif
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
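
Together with the PUD hunk above, this replaces the old PMD-only counter (mm->nr_pmds, bumped with atomic_long_inc()/atomic_long_dec()) by one byte-granular counter, mm->pgtables_bytes, covering the PTE, PMD, and PUD levels uniformly: each helper adds or subtracts a whole table's worth of bytes, and mm_pgtables_bytes() reads back a single total without caring which levels the architecture folds. A simplified sketch of how an allocation path pairs allocation with the new accounting (hypothetical function; the locking and populated re-check of the real __pte_alloc() are omitted):

	/* Hypothetical PTE-table installer: account the table as it is
	 * hooked into the page-table tree; the free path would call
	 * mm_dec_nr_ptes() to un-account it. */
	static int my_install_pte_table(struct mm_struct *mm, pmd_t *pmd,
					unsigned long addr)
	{
		pgtable_t new = pte_alloc_one(mm, addr);

		if (!new)
			return -ENOMEM;
		mm_inc_nr_ptes(mm);	/* pgtables_bytes += PTRS_PER_PTE * sizeof(pte_t) */
		pmd_populate(mm, pmd, new);
		return 0;
	}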
@@ -2002,6 +2045,12 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state);
 #endif
 
+#ifdef CONFIG_HAVE_MEMBLOCK
+void zero_resv_unavail(void);
+#else
+static inline void zero_resv_unavail(void) {}
+#endif
+
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
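
zero_resv_unavail() follows the usual conditional-feature pattern: a real prototype when CONFIG_HAVE_MEMBLOCK is set and an empty inline stub otherwise, so early-init code can call it unconditionally. A sketch of such a call site (my_mem_init() is a hypothetical init path, not from this merge):

	/* Hypothetical init path: with memblock disabled, the stub
	 * compiles away and this call costs nothing. */
	void __init my_mem_init(void)
	{
		zero_resv_unavail();	/* zero struct pages of unavailable ranges */
		free_all_bootmem();
	}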