Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  83
1 file changed, 66 insertions(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91b46f99b4d2..c7b1d617dff6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -96,6 +96,15 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #endif
 
 /*
+ * On some architectures it is expensive to call memset() for small sizes.
+ * Those architectures should provide their own implementation of "struct page"
+ * zeroing by defining this macro in <asm/pgtable.h>.
+ */
+#ifndef mm_zero_struct_page
+#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
+#endif
+
+/*
  * Default maximum number of active map areas, this limits the number of vmas
  * per mm struct. Users can overwrite this number by sysctl but there is a
  * problem.
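
The hunk above adds an override hook: an architecture for which memset() is expensive at such small sizes can define its own mm_zero_struct_page() in <asm/pgtable.h>, and the generic memset()-based version only applies when the macro is still undefined. A minimal userspace sketch of this ifndef-override pattern (the struct page stand-in below is illustrative, not the kernel's definition):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in; the real struct page lives in mm_types.h. */
struct page { unsigned long flags; unsigned long pad[7]; };

/* An arch header may have defined this already; this is only the fallback. */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

int main(void)
{
	struct page p = { .flags = ~0UL };

	mm_zero_struct_page(&p);
	printf("flags after zeroing: %lu\n", p.flags);	/* prints 0 */
	return 0;
}
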
@@ -1431,7 +1440,13 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 			  struct bdi_writeback *wb);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
-void cancel_dirty_page(struct page *page);
+void __cancel_dirty_page(struct page *page);
+static inline void cancel_dirty_page(struct page *page)
+{
+	/* Avoid atomic ops, locking, etc. when not actually needed. */
+	if (PageDirty(page))
+		__cancel_dirty_page(page);
+}
 int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
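
This hunk splits cancel_dirty_page() into a cheap inline PageDirty() test and an out-of-line __cancel_dirty_page() slow path, so pages that are not dirty never pay for the atomic ops and locking. A compilable userspace analogue of the test-before-slow-path pattern (fake_page and its flag are stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for struct page's dirty bit. */
struct fake_page { atomic_bool dirty; };

/* Slow path: imagine locking and writeback accounting here. */
static void slow_cancel_dirty(struct fake_page *p)
{
	atomic_store(&p->dirty, false);
	puts("took the slow path");
}

/* Fast path mirrors the new inline: test the flag, call out only if set. */
static inline void cancel_dirty(struct fake_page *p)
{
	if (atomic_load(&p->dirty))	/* analogous to PageDirty(page) */
		slow_cancel_dirty(p);
}

int main(void)
{
	struct fake_page clean = { .dirty = false };
	struct fake_page dirty = { .dirty = true };

	cancel_dirty(&clean);	/* no output: slow path skipped entirely */
	cancel_dirty(&dirty);	/* prints "took the slow path" */
	return 0;
}
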
@@ -1599,26 +1614,32 @@ static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
 #endif
 
-#ifdef __PAGETABLE_PUD_FOLDED
+#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
 						unsigned long address)
 {
 	return 0;
 }
+static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
+
 #else
 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
-#endif
 
-#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
-						unsigned long address)
+static inline void mm_inc_nr_puds(struct mm_struct *mm)
 {
-	return 0;
+	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 
-static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm)
+{
+	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
+}
+#endif
 
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+						unsigned long address)
 {
 	return 0;
 }
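
With the PUD level folded (or without an MMU) the new mm_inc_nr_puds()/mm_dec_nr_puds() helpers compile to empty stubs; otherwise each call accounts one PUD table's worth of bytes, PTRS_PER_PUD * sizeof(pud_t). A quick check of that arithmetic with typical x86-64 values (assumed for illustration; other architectures differ):

#include <stdio.h>

/* Typical x86-64 geometry, assumed for illustration. */
#define PTRS_PER_PUD 512
typedef struct { unsigned long pud; } pud_t;

int main(void)
{
	/* One mm_inc_nr_puds() call accounts exactly one 4 KiB table page. */
	printf("bytes per PUD table: %zu\n",
	       (size_t)PTRS_PER_PUD * sizeof(pud_t));	/* prints 4096 */
	return 0;
}
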
@@ -1629,25 +1650,47 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
 #else
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 
-static inline void mm_nr_pmds_init(struct mm_struct *mm)
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
 {
-	atomic_long_set(&mm->nr_pmds, 0);
+	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 {
-	return atomic_long_read(&mm->nr_pmds);
+	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
+#endif
 
-static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+#ifdef CONFIG_MMU
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
 {
-	atomic_long_inc(&mm->nr_pmds);
+	atomic_long_set(&mm->pgtables_bytes, 0);
 }
 
-static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->pgtables_bytes);
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm)
+{
+	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
+}
+
+static inline void mm_dec_nr_ptes(struct mm_struct *mm)
 {
-	atomic_long_dec(&mm->nr_pmds);
+	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
 }
+#else
+
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
+static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
 #endif
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
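
The hunk above retires the dedicated nr_pmds counter: every page-table level now adds or subtracts its table size in bytes from a single mm->pgtables_bytes, and readers fetch the total through mm_pgtables_bytes(). A self-contained userspace sketch of that single-byte-counter scheme (constants and helper names mirror the patch but are stand-ins here):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for the kernel's per-level table geometry (x86-64-ish). */
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;

/* One byte counter for all levels, as mm->pgtables_bytes in the patch. */
static atomic_long pgtables_bytes;

static void inc_nr_ptes(void)
{
	atomic_fetch_add(&pgtables_bytes, PTRS_PER_PTE * sizeof(pte_t));
}

static void dec_nr_pmds(void)
{
	atomic_fetch_sub(&pgtables_bytes, PTRS_PER_PMD * sizeof(pmd_t));
}

int main(void)
{
	inc_nr_ptes();	/* allocate one PTE table: +4096 bytes */
	inc_nr_ptes();	/* and another:            +4096 bytes */
	dec_nr_pmds();	/* free one PMD table:     -4096 bytes */

	/* Net result: 4096 bytes still accounted. */
	printf("pgtables_bytes = %ld\n", atomic_load(&pgtables_bytes));
	return 0;
}
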
@@ -2002,6 +2045,12 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state);
 #endif
 
+#ifdef CONFIG_HAVE_MEMBLOCK
+void zero_resv_unavail(void);
+#else
+static inline void zero_resv_unavail(void) {}
+#endif
+
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 					unsigned long, enum memmap_context);
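
zero_resv_unavail() gets a real declaration only under CONFIG_HAVE_MEMBLOCK; the empty static inline stub in the other branch lets callers invoke it unconditionally, with no #ifdef at the call site. A toy illustration of that stub-when-compiled-out pattern (HAVE_MEMBLOCK below is a stand-in for the Kconfig symbol):

#include <stdio.h>

/* Stand-in for CONFIG_HAVE_MEMBLOCK; flip to 0 to get the empty stub. */
#define HAVE_MEMBLOCK 1

#if HAVE_MEMBLOCK
static void zero_resv_unavail(void)
{
	puts("zeroing struct pages of reserved/unavailable ranges");
}
#else
static inline void zero_resv_unavail(void) { }	/* compiled out: no-op */
#endif

int main(void)
{
	/* The call site needs no #ifdef either way. */
	zero_resv_unavail();
	return 0;
}
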