Diffstat (limited to 'include/linux/mm.h'):
 include/linux/mm.h | 163 +++++++++++++++++++++++++++----------------------
 1 file changed, 83 insertions(+), 80 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 66e2f7c61e5c..7acc9dc73c9f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -87,6 +87,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
+#define VM_POPULATE	0x00001000
 #define VM_LOCKED	0x00002000
 #define VM_IO		0x00004000	/* Memory mapped I/O or similar */
 
@@ -114,6 +115,8 @@ extern unsigned int kobjsize(const void *objp);
 # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 #elif defined(CONFIG_PARISC)
 # define VM_GROWSUP	VM_ARCH_1
+#elif defined(CONFIG_METAG)
+# define VM_GROWSUP	VM_ARCH_1
 #elif defined(CONFIG_IA64)
 # define VM_GROWSUP	VM_ARCH_1
 #elif !defined(CONFIG_MMU)
@@ -366,7 +369,7 @@ static inline struct page *compound_head(struct page *page)
  * both from it and to it can be tracked, using atomic_inc_and_test
  * and atomic_add_negative(-1).
  */
-static inline void reset_page_mapcount(struct page *page)
+static inline void page_mapcount_reset(struct page *page)
 {
 	atomic_set(&(page)->_mapcount, -1);
 }
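
Note: the rename puts the object (page_mapcount) first, matching the other page helpers. As the comment above says, the -1 bias is what lets rmap observe the first and last mapping atomically; a minimal sketch of that pattern, loosely following the mm/rmap.c shape (accounting elided, illustrative only):

	/* Sketch only: the -1 bias makes the 0-crossing observable. */
	static void rmap_add_sketch(struct page *page)
	{
		/* -1 -> 0 on the first mapping: fires exactly once */
		if (atomic_inc_and_test(&page->_mapcount)) {
			/* first mapper: account the page as mapped */
		}
	}

	static void rmap_remove_sketch(struct page *page)
	{
		/* 0 -> -1 on the last unmapping: fires exactly once */
		if (atomic_add_negative(-1, &page->_mapcount)) {
			/* last mapper gone: unaccount */
		}
	}
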
@@ -580,50 +583,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  * sets it, so none of the operations on it need to be atomic.
  */
 
-
-/*
- * page->flags layout:
- *
- * There are three possibilities for how page->flags get
- * laid out. The first is for the normal case, without
- * sparsemem. The second is for sparsemem when there is
- * plenty of space for node and section. The last is when
- * we have run out of space and have to fall back to an
- * alternate (slower) way of determining the node.
- *
- * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
- * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
- * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
- */
-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
-#define SECTIONS_WIDTH		SECTIONS_SHIFT
-#else
-#define SECTIONS_WIDTH		0
-#endif
-
-#define ZONES_WIDTH		ZONES_SHIFT
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
-#define NODES_WIDTH		NODES_SHIFT
-#else
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#error "Vmemmap: No space for nodes field in page flags"
-#endif
-#define NODES_WIDTH		0
-#endif
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */
 #define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
-
-/*
- * We are going to use the flags for the page to node mapping if its in
- * there.  This includes the case where there is no node, so it is implicit.
- */
-#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
-#define NODE_NOT_IN_PAGE_FLAGS
-#endif
+#define LAST_NID_PGOFF		(ZONES_PGOFF - LAST_NID_WIDTH)
 
 /*
  * Define the bit shifts to access each section.  For non-existent
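
Note: the WIDTH plumbing deleted here does not disappear; it moves out of mm.h (into linux/page-flags-layout.h in this series), and the layout comment gains the optional LAST_NID field. To make the PGOFF arithmetic concrete, a purely illustrative 64-bit example with made-up widths:

	/*
	 * Illustration only (widths are hypothetical):
	 *   SECTIONS_WIDTH = 19, NODES_WIDTH = 6,
	 *   ZONES_WIDTH = 2, LAST_NID_WIDTH = 6
	 *
	 *   SECTIONS_PGOFF = 64 - 19 = 45   -> section  in bits 45..63
	 *   NODES_PGOFF    = 45 -  6 = 39   -> node     in bits 39..44
	 *   ZONES_PGOFF    = 39 -  2 = 37   -> zone     in bits 37..38
	 *   LAST_NID_PGOFF = 37 -  6 = 31   -> last_nid in bits 31..36
	 *
	 * Every field is then read the same way:
	 *   (page->flags >> FIELD_PGSHIFT) & FIELD_MASK
	 * leaving the low NR_PAGEFLAGS bits for the page flag bits themselves.
	 */
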
@@ -633,6 +597,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_NID_PGSHIFT	(LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -654,6 +619,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_NID_MASK		((1UL << LAST_NID_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -661,6 +627,10 @@ static inline enum zone_type page_zonenum(const struct page *page)
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#define SECTION_IN_PAGE_FLAGS
+#endif
+
 /*
  * The identification function is only used by the buddy allocator for
  * determining if two pages could be buddies. We are not really
@@ -693,31 +663,48 @@ static inline int page_to_nid(const struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
-static inline int page_xchg_last_nid(struct page *page, int nid)
+#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
+static inline int page_nid_xchg_last(struct page *page, int nid)
 {
 	return xchg(&page->_last_nid, nid);
 }
 
-static inline int page_last_nid(struct page *page)
+static inline int page_nid_last(struct page *page)
 {
 	return page->_last_nid;
 }
-static inline void reset_page_last_nid(struct page *page)
+static inline void page_nid_reset_last(struct page *page)
 {
 	page->_last_nid = -1;
 }
 #else
-static inline int page_xchg_last_nid(struct page *page, int nid)
+static inline int page_nid_last(struct page *page)
+{
+	return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
+}
+
+extern int page_nid_xchg_last(struct page *page, int nid);
+
+static inline void page_nid_reset_last(struct page *page)
+{
+	int nid = (1 << LAST_NID_SHIFT) - 1;
+
+	page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
+	page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+}
+#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
+#else
+static inline int page_nid_xchg_last(struct page *page, int nid)
 {
 	return page_to_nid(page);
 }
 
-static inline int page_last_nid(struct page *page)
+static inline int page_nid_last(struct page *page)
 {
 	return page_to_nid(page);
 }
 
-static inline void reset_page_last_nid(struct page *page)
+static inline void page_nid_reset_last(struct page *page)
 {
 }
 #endif
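
Note: once _last_nid is folded into page->flags there is no dedicated word to xchg(), so the exchange helper is declared extern and implemented out of line (in mm/mmzone.c in this series). The natural shape is a cmpxchg loop over page->flags; a hedged sketch of that implementation:

	/* Sketch of the out-of-line helper; mm/mmzone.c has the real one. */
	int page_nid_xchg_last(struct page *page, int nid)
	{
		unsigned long old_flags, flags;
		int last_nid;

		do {
			old_flags = flags = page->flags;
			last_nid = page_nid_last(page);

			flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
			flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
			/* retry if another flags update raced with us */
		} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

		return last_nid;
	}
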
@@ -727,7 +714,7 @@ static inline struct zone *page_zone(const struct page *page)
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
 
-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#ifdef SECTION_IN_PAGE_FLAGS
 static inline void set_page_section(struct page *page, unsigned long section)
 {
 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -757,7 +744,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 {
 	set_page_zone(page, zone);
 	set_page_node(page, node);
-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#ifdef SECTION_IN_PAGE_FLAGS
 	set_page_section(page, pfn_to_section_nr(pfn));
 #endif
 }
@@ -817,18 +804,7 @@ void page_address_init(void);
 #define PAGE_MAPPING_KSM	2
 #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
-extern struct address_space swapper_space;
-static inline struct address_space *page_mapping(struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-
-	VM_BUG_ON(PageSlab(page));
-	if (unlikely(PageSwapCache(page)))
-		mapping = &swapper_space;
-	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
-		mapping = NULL;
-	return mapping;
-}
+extern struct address_space *page_mapping(struct page *page);
 
 /* Neutral page->mapping pointer to address_space or anon_vma or other */
 static inline void *page_rmapping(struct page *page)
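
Note: page_mapping() goes out of line because the single global swapper_space is replaced by per-swap-area address spaces in this series, so the inline can no longer name one fixed object. A hedged sketch of the out-of-line version (swap_address_space() is the new per-area lookup; the exact body lives in mm/util.c and may differ in detail):

	struct address_space *page_mapping(struct page *page)
	{
		struct address_space *mapping = page->mapping;

		VM_BUG_ON(PageSlab(page));
		if (unlikely(PageSwapCache(page))) {
			swp_entry_t entry;

			entry.val = page_private(page);
			mapping = swap_address_space(entry); /* per swap area */
		} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
			mapping = NULL;
		return mapping;
	}
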
@@ -1035,18 +1011,18 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, int write);
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int nr_pages, int write, int force,
-			struct page **pages, struct vm_area_struct **vmas);
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking);
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 struct kvec;
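
Note: the int -> long conversion matters on 64-bit, where a pin request can exceed 2^31 pages. The return value is still "number of pages pinned so far", so callers must handle partial success; an illustrative (hypothetical) caller:

	/* Hypothetical caller, for illustration of the long-based API. */
	static long pin_user_range_sketch(struct task_struct *tsk,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long nr_pages,
					  struct page **pages)
	{
		long pinned = 0;

		down_read(&mm->mmap_sem);
		while (pinned < nr_pages) {
			long ret = get_user_pages(tsk, mm,
						  start + pinned * PAGE_SIZE,
						  nr_pages - pinned,
						  1 /* write */, 0 /* force */,
						  pages + pinned, NULL);
			if (ret <= 0)
				break;	/* error or no progress: stop */
			pinned += ret;	/* partial success: keep going */
		}
		up_read(&mm->mmap_sem);
		return pinned;
	}
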
@@ -1386,7 +1362,6 @@ extern void __init mmap_init(void);
 extern void show_mem(unsigned int flags);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
-extern int after_bootmem;
 
 extern __printf(3, 4)
 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
@@ -1396,6 +1371,9 @@ extern void setup_per_cpu_pageset(void);
 extern void zone_pcp_update(struct zone *zone);
 extern void zone_pcp_reset(struct zone *zone);
 
+/* page_alloc.c */
+extern int min_free_kbytes;
+
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
@@ -1473,13 +1451,24 @@ extern int install_special_mapping(struct mm_struct *mm,
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long flags,
-	vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
-        unsigned long, unsigned long,
-        unsigned long, unsigned long);
+	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot, unsigned long flags,
+	unsigned long pgoff, unsigned long *populate);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
 
+#ifdef CONFIG_MMU
+extern int __mm_populate(unsigned long addr, unsigned long len,
+			 int ignore_errors);
+static inline void mm_populate(unsigned long addr, unsigned long len)
+{
+	/* Ignore errors */
+	(void) __mm_populate(addr, len, 1);
+}
+#else
+static inline void mm_populate(unsigned long addr, unsigned long len) {}
+#endif
+
 /* These take the mm semaphore themselves */
 extern unsigned long vm_brk(unsigned long, unsigned long);
 extern int vm_munmap(unsigned long, size_t);
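
Note: do_mmap_pgoff() no longer populates the mapping itself; it reports, through the new populate out-parameter, how much the caller should fault in once mmap_sem has been dropped. A sketch of the intended calling pattern, close to vm_mmap_pgoff() in this series (security hooks and error handling elided):

	static unsigned long vm_mmap_pgoff_sketch(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long prot, unsigned long flag,
				unsigned long pgoff)
	{
		struct mm_struct *mm = current->mm;
		unsigned long populate = 0;
		unsigned long ret;

		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);	/* drop the lock first... */
		if (populate)
			mm_populate(ret, populate); /* ...then fault pages in */
		return ret;
	}
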
@@ -1624,8 +1613,17 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 
-struct page *follow_page(struct vm_area_struct *, unsigned long address,
-			unsigned int foll_flags);
+struct page *follow_page_mask(struct vm_area_struct *vma,
+			      unsigned long address, unsigned int foll_flags,
+			      unsigned int *page_mask);
+
+static inline struct page *follow_page(struct vm_area_struct *vma,
+		unsigned long address, unsigned int foll_flags)
+{
+	unsigned int unused_page_mask;
+	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
 #define FOLL_WRITE	0x01	/* check pte is writable */
 #define FOLL_TOUCH	0x02	/* mark page accessed */
 #define FOLL_GET	0x04	/* do get_page on page */
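
Note: the page_mask out-parameter lets a caller such as __get_user_pages() learn that it landed inside a huge page and account all remaining subpages in one step, instead of re-walking the page tables once per 4K page. An illustrative fragment of that accounting (function and variable names hypothetical):

	/*
	 * page_mask is 0 for a base page and, e.g., HPAGE_PMD_NR - 1 when
	 * the walk returned a THP, so page_increm is the number of
	 * PTE-sized pages from 'start' to the end of the huge page.
	 */
	static unsigned long gup_advance_sketch(struct vm_area_struct *vma,
						unsigned long start,
						unsigned int foll_flags,
						unsigned long *nr_pages)
	{
		unsigned int page_mask;
		struct page *page;

		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (page) {
			unsigned int page_increm =
				1 + (~(start >> PAGE_SHIFT) & page_mask);

			*nr_pages -= page_increm;
			start += page_increm * PAGE_SIZE;
		}
		return start;
	}
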
@@ -1637,6 +1635,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
+#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
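
Note: FOLL_MIGRATION changes what the walker does when it hits a PTE that is really a migration entry: instead of failing, it waits for migration to finish and retries (added so KSM can recognize a page even mid-migration). A hedged sketch of the non-present-PTE case inside the follow_page() walk; labels and exact guards are assumptions about the mm/memory.c shape:

	/* Fragment: inside the PTE walk, 'pte' is not present. */
	if (!pte_present(pte)) {
		swp_entry_t entry;

		if (!(flags & FOLL_MIGRATION))
			goto no_page;		/* old behaviour: give up */
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);	/* block until moved */
		goto retry;			/* then redo the walk */
	}
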
@@ -1708,7 +1707,11 @@ int vmemmap_populate_basepages(struct page *start_page,
 			       unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
-
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_free(struct page *memmap, unsigned long nr_pages);
+#endif
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+				  unsigned long size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
@@ -1721,7 +1724,7 @@ extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
-extern atomic_long_t mce_bad_pages;
+extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
 extern void dump_page(struct page *page);