Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 114
 1 file changed, 54 insertions(+), 60 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91c08f6f0dc9..00bad7793788 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -139,6 +139,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
+#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
@@ -202,6 +203,9 @@ extern unsigned int kobjsize(const void *objp);
 /* This mask defines which mm->def_flags a process can inherit its parent */
 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
 
+/* This mask is used to clear all the VMA flags used by mlock */
+#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))
+
 /*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
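
As context for the two mlock additions above (VM_LOCKONFAULT and VM_LOCKED_CLEAR_MASK), here is a minimal sketch of how a caller is expected to drop a VMA's mlock state. It is illustration only, not part of the diff, and the helper name is made up:

#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration: clearing mlock state now means
 * dropping both VM_LOCKED and VM_LOCKONFAULT at once, which is exactly
 * what VM_LOCKED_CLEAR_MASK expresses.
 */
static void example_vma_clear_mlock(struct vm_area_struct *vma)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
}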
@@ -426,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page,
 #endif
 }
 
-static inline struct page *compound_head_by_tail(struct page *tail)
-{
-	struct page *head = tail->first_page;
-
-	/*
-	 * page->first_page may be a dangling pointer to an old
-	 * compound page, so recheck that it is still a tail
-	 * page before returning.
-	 */
-	smp_rmb();
-	if (likely(PageTail(tail)))
-		return head;
-	return tail;
-}
-
-/*
- * Since either compound page could be dismantled asynchronously in THP
- * or we access asynchronously arbitrary positioned struct page, there
- * would be tail flag race. To handle this race, we should call
- * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
- */
-static inline struct page *compound_head(struct page *page)
-{
-	if (unlikely(PageTail(page)))
-		return compound_head_by_tail(page);
-	return page;
-}
-
-/*
- * If we access compound page synchronously such as access to
- * allocated page, there is no need to handle tail flag race, so we can
- * check tail flag directly without any synchronization primitive.
- */
-static inline struct page *compound_head_fast(struct page *page)
-{
-	if (unlikely(PageTail(page)))
-		return page->first_page;
-	return page;
-}
-
 /*
  * The atomic page->_mapcount, starts from -1: so that transitions
  * both from it and to it can be tracked, using atomic_inc_and_test
@@ -514,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page)
 	VM_BUG_ON_PAGE(!PageTail(page), page);
 	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
-	if (compound_tail_refcounted(page->first_page))
+	if (compound_tail_refcounted(compound_head(page)))
 		atomic_inc(&page->_mapcount);
 }
 
@@ -537,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x)
 {
 	struct page *page = virt_to_page(x);
 
-	/*
-	 * We don't need to worry about synchronization of tail flag
-	 * when we call virt_to_head_page() since it is only called for
-	 * already allocated page and this page won't be freed until
-	 * this virt_to_head_page() is finished. So use _fast variant.
-	 */
-	return compound_head_fast(page);
+	return compound_head(page);
 }
 
 /*
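
The three helpers removed above, and the virt_to_head_page() simplification, depend on compound_head() itself being reimplemented elsewhere in this series (in include/linux/page-flags.h): tail pages now store an encoded pointer to their head in page->compound_head with bit 0 set, so no smp_rmb()/recheck dance is needed. Roughly, and only as a sketch for context:

/*
 * Sketch of the replacement compound_head().  A tail page keeps
 * "head pointer | 1" in page->compound_head; head and order-0 pages
 * keep bit 0 clear, so a single test distinguishes the two cases.
 */
static inline struct page *example_compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *)(head - 1);
	return page;
}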
@@ -564,28 +522,42 @@ int split_free_page(struct page *page);
 /*
  * Compound pages have a destructor function.  Provide a
  * prototype for that function and accessor functions.
- * These are _only_ valid on the head of a PG_compound page.
+ * These are _only_ valid on the head of a compound page.
  */
+typedef void compound_page_dtor(struct page *);
+
+/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
+enum compound_dtor_id {
+	NULL_COMPOUND_DTOR,
+	COMPOUND_PAGE_DTOR,
+#ifdef CONFIG_HUGETLB_PAGE
+	HUGETLB_PAGE_DTOR,
+#endif
+	NR_COMPOUND_DTORS,
+};
+extern compound_page_dtor * const compound_page_dtors[];
 
 static inline void set_compound_page_dtor(struct page *page,
-						compound_page_dtor *dtor)
+		enum compound_dtor_id compound_dtor)
 {
-	page[1].compound_dtor = dtor;
+	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
+	page[1].compound_dtor = compound_dtor;
 }
 
 static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 {
-	return page[1].compound_dtor;
+	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
+	return compound_page_dtors[page[1].compound_dtor];
 }
 
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
 {
 	if (!PageHead(page))
 		return 0;
 	return page[1].compound_order;
 }
 
-static inline void set_compound_order(struct page *page, unsigned long order)
+static inline void set_compound_order(struct page *page, unsigned int order)
 {
 	page[1].compound_order = order;
 }
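
For context, a sketch of how the new destructor indirection is meant to be wired up and used. The table mirrors the "keep in sync" comment in the enum; the concrete destructors named here (free_compound_page, free_huge_page) and the caller functions are assumptions for illustration, not something this hunk defines:

/* Sketch only: the lookup table, indexed by enum compound_dtor_id. */
compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR]	= NULL,
	[COMPOUND_PAGE_DTOR]	= free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR]	= free_huge_page,
#endif
};

/*
 * Hypothetical users: register a destructor by index when the compound
 * page is assembled, then look the function pointer back up when it is
 * torn down.  A small index packs into struct page far more tightly
 * than a raw function pointer.
 */
static void example_prep_compound(struct page *page)
{
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
}

static void example_destroy_compound(struct page *page)
{
	get_compound_page_dtor(page)(page);
}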
@@ -905,6 +877,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 #endif
 }
 
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+	return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+	page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+	return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
 /*
  * Some inline functions in vmstat.h depend on page_zone()
  */
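
The page_memcg()/set_page_memcg() pair added above gives generic code a CONFIG_MEMCG-agnostic way to read and hand over page->mem_cgroup. A small sketch of the kind of caller this enables (the helper is hypothetical):

/*
 * Hypothetical caller, for illustration: move the memcg association from
 * an old page to its replacement.  No #ifdef CONFIG_MEMCG is needed here;
 * the !CONFIG_MEMCG stubs above compile down to nothing.
 */
static void example_transfer_memcg(struct page *newpage, struct page *oldpage)
{
	set_page_memcg(newpage, page_memcg(oldpage));
	set_page_memcg(oldpage, NULL);
}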
@@ -1547,8 +1540,7 @@ static inline bool ptlock_init(struct page *page)
 	 * with 0. Make sure nobody took it in use in between.
 	 *
 	 * It can happen if arch try to use slab for page table allocation:
-	 * slab code uses page->slab_cache and page->first_page (for tail
-	 * pages), which share storage with page->ptl.
+	 * slab code uses page->slab_cache, which share storage with page->ptl.
 	 */
 	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
@@ -1585,8 +1577,10 @@ static inline void pgtable_init(void)
 
 static inline bool pgtable_page_ctor(struct page *page)
 {
+	if (!ptlock_init(page))
+		return false;
 	inc_zone_page_state(page, NR_PAGETABLE);
-	return ptlock_init(page);
+	return true;
 }
 
 static inline void pgtable_page_dtor(struct page *page)
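
With the reordered pgtable_page_ctor() above, a ptlock allocation failure is reported before NR_PAGETABLE is incremented, so a failed constructor no longer leaves the counter bumped for a page table that was never set up. A sketch of the usual arch-side caller (the function name and GFP flags are illustrative, not taken from this diff):

/*
 * Illustrative arch-style allocation path: if the constructor fails,
 * the page is simply freed and there is no counter to unwind.
 */
static struct page *example_pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}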
@@ -1816,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 
 extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+		const char *fmt, ...);
 
 extern void setup_per_cpu_pageset(void);
 
@@ -2015,8 +2010,6 @@ void page_cache_async_readahead(struct address_space *mapping,
 			pgoff_t offset,
 			unsigned long size);
 
-unsigned long max_sane_readahead(unsigned long nr);
-
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -2116,6 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
+#define FOLL_MLOCK	0x1000	/* lock present pages */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
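
Finally, a sketch of the kind of caller the new FOLL_MLOCK bit is aimed at: a walk over a VM_LOCKED VMA that wants pages locked as they are looked up. The wrapper is hypothetical and only illustrates passing the flag through follow_page(); the exact mlock behaviour is up to the GUP implementation:

/*
 * Hypothetical wrapper, for illustration: look up one present page,
 * take a reference (FOLL_GET) and ask the GUP code to lock it
 * (FOLL_MLOCK) if the VMA is mlocked.
 */
static struct page *example_follow_and_mlock(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return follow_page(vma, addr, FOLL_GET | FOLL_MLOCK);
}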