Diffstat (limited to 'include/linux/mm.h')
 -rw-r--r--  include/linux/mm.h | 115
 1 file changed, 88 insertions(+), 27 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 35527173cf50..c1b7414c7bef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -57,6 +58,15 @@ extern int sysctl_legacy_va_layout;
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
+extern int sysctl_overcommit_memory;
+extern int sysctl_overcommit_ratio;
+extern unsigned long sysctl_overcommit_kbytes;
+
+extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
+				    size_t *, loff_t *);
+extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
+				     size_t *, loff_t *);
+
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /* to align the pointer to the (next) page boundary */
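The two handler prototypes match the proc_handler signature of this era, so they can be wired straight into a sysctl table. A minimal sketch of how they would typically be registered (table and entry layout here are illustrative; the real entries live in kernel/sysctl.c):

static struct ctl_table overcommit_table_sketch[] = {
	{
		.procname	= "overcommit_ratio",
		.data		= &sysctl_overcommit_ratio,
		.maxlen		= sizeof(sysctl_overcommit_ratio),
		.mode		= 0644,
		.proc_handler	= overcommit_ratio_handler,
	},
	{
		.procname	= "overcommit_kbytes",
		.data		= &sysctl_overcommit_kbytes,
		.maxlen		= sizeof(sysctl_overcommit_kbytes),
		.mode		= 0644,
		.proc_handler	= overcommit_kbytes_handler,
	},
	{ }	/* sentinel */
};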
@@ -165,7 +175,7 @@ extern unsigned int kobjsize(const void *objp);
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
 /*
  * mapping from the currently active vm_flags protection bits (the
@@ -294,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 	return atomic_dec_and_test(&page->_count);
 }
 
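VM_BUG_ON_PAGE() is supplied by the <linux/mmdebug.h> include added at the top of this diff. A plausible shape for the macro (hedged; see mmdebug.h in this series) shows why the mechanical conversions throughout this file are worthwhile: on a CONFIG_DEBUG_VM kernel the offending page is dumped before the BUG fires, instead of leaving only a bare backtrace.

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_PAGE(cond, page)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_page(page);	/* state before BUG */	\
			BUG();						\
		}							\
	} while (0)
#else
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
#endif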
@@ -355,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -363,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -389,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
 
 static inline struct page *compound_head(struct page *page)
 {
-	if (unlikely(PageTail(page)))
-		return page->first_page;
+	if (unlikely(PageTail(page))) {
+		struct page *head = page->first_page;
+
+		/*
+		 * page->first_page may be a dangling pointer to an old
+		 * compound page, so recheck that it is still a tail
+		 * page before returning.
+		 */
+		smp_rmb();
+		if (likely(PageTail(page)))
+			return head;
+	}
 	return page;
 }
 
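The recheck guards against a compound-page split racing with this lookup: first_page can already point at a freed or reused head by the time it is read. The smp_rmb() orders the first_page load against the second PageTail() test, pairing with the write side in __split_huge_page_refcount(). The same lockless read-then-revalidate pattern in miniature (type and field names hypothetical):

struct node { struct node *head; bool is_tail; };

/* Only trust a pointer fetched while is_tail was set if is_tail is
 * still set after the fetch. */
static struct node *node_head(struct node *n)
{
	if (n->is_tail) {
		struct node *head = n->head;

		smp_rmb();	/* order the ->head load vs. the recheck */
		if (n->is_tail)
			return head;
	}
	return n;
}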
@@ -414,15 +434,44 @@ static inline int page_count(struct page *page)
 	return atomic_read(&compound_head(page)->_count);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+extern int PageHeadHuge(struct page *page_head);
+#else /* CONFIG_HUGETLB_PAGE */
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static inline bool __compound_tail_refcounted(struct page *page)
+{
+	return !PageSlab(page) && !PageHeadHuge(page);
+}
+
+/*
+ * This takes a head page as parameter and tells if the
+ * tail page reference counting can be skipped.
+ *
+ * For this to be safe, PageSlab and PageHeadHuge must remain true on
+ * any given page where they return true here, until all tail pins
+ * have been released.
+ */
+static inline bool compound_tail_refcounted(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageHead(page), page);
+	return __compound_tail_refcounted(page);
+}
+
 static inline void get_huge_page_tail(struct page *page)
 {
 	/*
-	 * __split_huge_page_refcount() cannot run
-	 * from under us.
+	 * __split_huge_page_refcount() cannot run from under us.
 	 */
-	VM_BUG_ON(page_mapcount(page) < 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
-	atomic_inc(&page->_mapcount);
+	VM_BUG_ON_PAGE(!PageTail(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
+	if (compound_tail_refcounted(page->first_page))
+		atomic_inc(&page->_mapcount);
 }
 
 extern bool __get_page_tail(struct page *page);
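compound_tail_refcounted() is what lets get_huge_page_tail() skip the per-tail _mapcount accounting for slab and hugetlbfs compound pages, whose tails are never pinned individually. A minimal sketch of a caller honoring the new rule (hypothetical helper; the real callers are the fast-GUP paths in arch code and mm/):

/* Pin a tail page whose head the caller already holds a reference
 * on, mirroring get_huge_page_tail() above. */
static inline void pin_tail_sketch(struct page *tail)
{
	struct page *head = tail->first_page;

	/* THP tails are refcounted via _mapcount; slab and hugetlbfs
	 * tails are not, so the increment must be skipped there. */
	if (compound_tail_refcounted(head))
		atomic_inc(&tail->_mapcount);
}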
@@ -436,7 +485,7 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON(atomic_read(&page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 	atomic_inc(&page->_count);
 }
 
@@ -473,13 +522,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-	VM_BUG_ON(!PageBuddy(page));
+	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 	atomic_set(&page->_mapcount, -1);
 }
 
@@ -718,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-	return xchg(&page->_last_cpupid, cpupid);
+	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 }
 
 static inline int page_cpupid_last(struct page *page)
@@ -727,7 +776,7 @@ static inline int page_cpupid_last(struct page *page)
 }
 static inline void page_cpupid_reset_last(struct page *page)
 {
-	page->_last_cpupid = -1;
+	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 }
 #else
 static inline int page_cpupid_last(struct page *page)
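With LAST_CPUPID_NOT_IN_PAGE_FLAGS the cpupid lives in a dedicated field narrower than an int, so both the stored value and the reset value must be reduced by LAST_CPUPID_MASK; otherwise a reset written as plain -1 could never compare equal to a value read back through the mask. Illustrative arithmetic, assuming a hypothetical 8-bit field:

#define DEMO_CPUPID_MASK 0xff

static inline bool demo_cpupid_unset(int last_cpupid)
{
	/* -1 & 0xff == 0xff: the masked reset value, not -1 itself,
	 * is what a later masked store can actually match. */
	return last_cpupid == (-1 & DEMO_CPUPID_MASK);
}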
@@ -846,11 +895,14 @@ static __always_inline void *lowmem_page_address(const struct page *page)
 #endif
 
 #if defined(WANT_PAGE_VIRTUAL)
-#define page_address(page) ((page)->virtual)
-#define set_page_address(page, address)			\
-	do {						\
-		(page)->virtual = (address);		\
-	} while(0)
+static inline void *page_address(const struct page *page)
+{
+	return page->virtual;
+}
+static inline void set_page_address(struct page *page, void *address)
+{
+	page->virtual = address;
+}
 #define page_address_init() do { } while(0)
 #endif
 
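Converting the WANT_PAGE_VIRTUAL accessors from macros to inlines changes no behavior but buys type checking: the argument must really be a struct page pointer, and page_address() now carries the same const-qualified prototype as the lowmem variant above. A trivial caller, just to show the checked signature (hypothetical function name):

static inline void *first_byte_of(const struct page *page)
{
	return page_address(page);	/* argument type is now enforced */
}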
@@ -984,7 +1036,6 @@ extern void pagefault_out_of_memory(void);
  * various contexts.
  */
 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
-#define SHOW_MEM_FILTER_PAGE_COUNT	(0x0002u)	/* page type count */
 
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
@@ -1318,6 +1369,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 
 #if USE_SPLIT_PTE_PTLOCKS
 #if ALLOC_SPLIT_PTLOCKS
+void __init ptlock_cache_init(void);
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1326,6 +1378,10 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 	return page->ptl;
 }
 #else /* ALLOC_SPLIT_PTLOCKS */
+static inline void ptlock_cache_init(void)
+{
+}
+
 static inline bool ptlock_alloc(struct page *page)
 {
 	return true;
@@ -1356,7 +1412,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
@@ -1378,10 +1434,17 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
 	return &mm->page_table_lock;
 }
+static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct page *page) { return true; }
 static inline void pte_lock_deinit(struct page *page) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
 
+static inline void pgtable_init(void)
+{
+	ptlock_cache_init();
+	pgtable_cache_init();
+}
+
 static inline bool pgtable_page_ctor(struct page *page)
 {
 	inc_zone_page_state(page, NR_PAGETABLE);
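pgtable_init() gives boot code a single entry point covering both the new ptlock kmem cache and the long-standing arch hook pgtable_cache_init(). A hedged sketch of the intended call site (in this series it is expected to replace the bare pgtable_cache_init() call in init/main.c's mm_init(); surrounding calls shown for orientation only):

static void __init mm_init_sketch(void)
{
	mem_init();
	kmem_cache_init();
	percpu_init_late();
	pgtable_init();		/* was: pgtable_cache_init() */
	vmalloc_init();
}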
@@ -1440,7 +1503,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(page->pmd_huge_pte);
+	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
 	ptlock_free(page);
 }
@@ -1842,7 +1905,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 #endif
 
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
 unsigned long change_prot_numa(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 #endif
@@ -1977,8 +2040,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr,
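The dump_page() declaration removed in the last hunk is not dropped from the kernel: with <linux/mmdebug.h> now included at the top of this header, the prototype is expected to move there alongside VM_BUG_ON_PAGE() (hedged, per this series):

/* Presumed new home, in include/linux/mmdebug.h: */
extern void dump_page(struct page *page);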