diff options
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 55 |
1 file changed, 36 insertions, 19 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index f6385fc17ad4..692dbae6ffa7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -151,6 +151,7 @@ extern pgprot_t protection_map[16]; | |||
151 | #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ | 151 | #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ |
152 | #define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ | 152 | #define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ |
153 | #define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ | 153 | #define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ |
154 | #define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ | ||
154 | 155 | ||
155 | /* | 156 | /* |
156 | * This interface is used by x86 PAT code to identify a pfn mapping that is | 157 | * This interface is used by x86 PAT code to identify a pfn mapping that is |
@@ -402,16 +403,23 @@ static inline void init_page_count(struct page *page) | |||
402 | /* | 403 | /* |
403 | * PageBuddy() indicate that the page is free and in the buddy system | 404 | * PageBuddy() indicate that the page is free and in the buddy system |
404 | * (see mm/page_alloc.c). | 405 | * (see mm/page_alloc.c). |
406 | * | ||
407 | * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to | ||
408 | * -2 so that an underflow of the page_mapcount() won't be mistaken | ||
409 | * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very | ||
410 | * efficiently by most CPU architectures. | ||
405 | */ | 411 | */ |
412 | #define PAGE_BUDDY_MAPCOUNT_VALUE (-128) | ||
413 | |||
406 | static inline int PageBuddy(struct page *page) | 414 | static inline int PageBuddy(struct page *page) |
407 | { | 415 | { |
408 | return atomic_read(&page->_mapcount) == -2; | 416 | return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; |
409 | } | 417 | } |
410 | 418 | ||
411 | static inline void __SetPageBuddy(struct page *page) | 419 | static inline void __SetPageBuddy(struct page *page) |
412 | { | 420 | { |
413 | VM_BUG_ON(atomic_read(&page->_mapcount) != -1); | 421 | VM_BUG_ON(atomic_read(&page->_mapcount) != -1); |
414 | atomic_set(&page->_mapcount, -2); | 422 | atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); |
415 | } | 423 | } |
416 | 424 | ||
417 | static inline void __ClearPageBuddy(struct page *page) | 425 | static inline void __ClearPageBuddy(struct page *page) |
@@ -600,7 +608,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) | |||
600 | #endif | 608 | #endif |
601 | 609 | ||
602 | /* | 610 | /* |
603 | * Define the bit shifts to access each section. For non-existant | 611 | * Define the bit shifts to access each section. For non-existent |
604 | * sections we define the shift as 0; that plus a 0 mask ensures | 612 | * sections we define the shift as 0; that plus a 0 mask ensures |
605 | * the compiler will optimise away reference to them. | 613 | * the compiler will optimise away reference to them. |
606 | */ | 614 | */ |
@@ -852,7 +860,14 @@ extern void pagefault_out_of_memory(void); | |||
852 | 860 | ||
853 | #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) | 861 | #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) |
854 | 862 | ||
863 | /* | ||
864 | * Flags passed to show_mem() and __show_free_areas() to suppress output in | ||
865 | * various contexts. | ||
866 | */ | ||
867 | #define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */ | ||
868 | |||
855 | extern void show_free_areas(void); | 869 | extern void show_free_areas(void); |
870 | extern void __show_free_areas(unsigned int flags); | ||
856 | 871 | ||
857 | int shmem_lock(struct file *file, int lock, struct user_struct *user); | 872 | int shmem_lock(struct file *file, int lock, struct user_struct *user); |
858 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); | 873 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); |
@@ -899,6 +914,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb, | |||
899 | * @pgd_entry: if set, called for each non-empty PGD (top-level) entry | 914 | * @pgd_entry: if set, called for each non-empty PGD (top-level) entry |
900 | * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry | 915 | * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry |
901 | * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry | 916 | * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry |
917 | * this handler is required to be able to handle | ||
918 | * pmd_trans_huge() pmds. They may simply choose to | ||
919 | * split_huge_page() instead of handling it explicitly. | ||
902 | * @pte_entry: if set, called for each non-empty PTE (4th-level) entry | 920 | * @pte_entry: if set, called for each non-empty PTE (4th-level) entry |
903 | * @pte_hole: if set, called for each hole at all levels | 921 | * @pte_hole: if set, called for each hole at all levels |
904 | * @hugetlb_entry: if set, called for each hugetlb entry | 922 | * @hugetlb_entry: if set, called for each hugetlb entry |
@@ -964,7 +982,13 @@ static inline int handle_mm_fault(struct mm_struct *mm, | |||
964 | 982 | ||
965 | extern int make_pages_present(unsigned long addr, unsigned long end); | 983 | extern int make_pages_present(unsigned long addr, unsigned long end); |
966 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); | 984 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); |
985 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, | ||
986 | void *buf, int len, int write); | ||
967 | 987 | ||
988 | int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | ||
989 | unsigned long start, int len, unsigned int foll_flags, | ||
990 | struct page **pages, struct vm_area_struct **vmas, | ||
991 | int *nonblocking); | ||
968 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 992 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
969 | unsigned long start, int nr_pages, int write, int force, | 993 | unsigned long start, int nr_pages, int write, int force, |
970 | struct page **pages, struct vm_area_struct **vmas); | 994 | struct page **pages, struct vm_area_struct **vmas); |
@@ -1309,8 +1333,6 @@ int add_from_early_node_map(struct range *range, int az, | |||
1309 | int nr_range, int nid); | 1333 | int nr_range, int nid); |
1310 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, | 1334 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, |
1311 | u64 goal, u64 limit); | 1335 | u64 goal, u64 limit); |
1312 | void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, | ||
1313 | u64 goal, u64 limit); | ||
1314 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); | 1336 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); |
1315 | extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); | 1337 | extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); |
1316 | extern void sparse_memory_present_with_active_regions(int nid); | 1338 | extern void sparse_memory_present_with_active_regions(int nid); |
@@ -1338,7 +1360,7 @@ extern void setup_per_zone_wmarks(void); | |||
1338 | extern void calculate_zone_inactive_ratio(struct zone *zone); | 1360 | extern void calculate_zone_inactive_ratio(struct zone *zone); |
1339 | extern void mem_init(void); | 1361 | extern void mem_init(void); |
1340 | extern void __init mmap_init(void); | 1362 | extern void __init mmap_init(void); |
1341 | extern void show_mem(void); | 1363 | extern void show_mem(unsigned int flags); |
1342 | extern void si_meminfo(struct sysinfo * val); | 1364 | extern void si_meminfo(struct sysinfo * val); |
1343 | extern void si_meminfo_node(struct sysinfo *val, int nid); | 1365 | extern void si_meminfo_node(struct sysinfo *val, int nid); |
1344 | extern int after_bootmem; | 1366 | extern int after_bootmem; |
@@ -1528,8 +1550,11 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, | |||
1528 | #define FOLL_GET 0x04 /* do get_page on page */ | 1550 | #define FOLL_GET 0x04 /* do get_page on page */ |
1529 | #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ | 1551 | #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ |
1530 | #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ | 1552 | #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ |
1553 | #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO | ||
1554 | * and return without waiting upon it */ | ||
1531 | #define FOLL_MLOCK 0x40 /* mark page as mlocked */ | 1555 | #define FOLL_MLOCK 0x40 /* mark page as mlocked */ |
1532 | #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ | 1556 | #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ |
1557 | #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ | ||
1533 | 1558 | ||
1534 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | 1559 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, |
1535 | void *data); | 1560 | void *data); |
@@ -1568,13 +1593,13 @@ static inline bool kernel_page_present(struct page *page) { return true; } | |||
1568 | #endif /* CONFIG_HIBERNATION */ | 1593 | #endif /* CONFIG_HIBERNATION */ |
1569 | #endif | 1594 | #endif |
1570 | 1595 | ||
1571 | extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk); | 1596 | extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); |
1572 | #ifdef __HAVE_ARCH_GATE_AREA | 1597 | #ifdef __HAVE_ARCH_GATE_AREA |
1573 | int in_gate_area_no_task(unsigned long addr); | 1598 | int in_gate_area_no_mm(unsigned long addr); |
1574 | int in_gate_area(struct task_struct *task, unsigned long addr); | 1599 | int in_gate_area(struct mm_struct *mm, unsigned long addr); |
1575 | #else | 1600 | #else |
1576 | int in_gate_area_no_task(unsigned long addr); | 1601 | int in_gate_area_no_mm(unsigned long addr); |
1577 | #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);}) | 1602 | #define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);}) |
1578 | #endif /* __HAVE_ARCH_GATE_AREA */ | 1603 | #endif /* __HAVE_ARCH_GATE_AREA */ |
1579 | 1604 | ||
1580 | int drop_caches_sysctl_handler(struct ctl_table *, int, | 1605 | int drop_caches_sysctl_handler(struct ctl_table *, int, |
@@ -1622,14 +1647,6 @@ extern int sysctl_memory_failure_recovery; | |||
1622 | extern void shake_page(struct page *p, int access); | 1647 | extern void shake_page(struct page *p, int access); |
1623 | extern atomic_long_t mce_bad_pages; | 1648 | extern atomic_long_t mce_bad_pages; |
1624 | extern int soft_offline_page(struct page *page, int flags); | 1649 | extern int soft_offline_page(struct page *page, int flags); |
1625 | #ifdef CONFIG_MEMORY_FAILURE | ||
1626 | int is_hwpoison_address(unsigned long addr); | ||
1627 | #else | ||
1628 | static inline int is_hwpoison_address(unsigned long addr) | ||
1629 | { | ||
1630 | return 0; | ||
1631 | } | ||
1632 | #endif | ||
1633 | 1650 | ||
1634 | extern void dump_page(struct page *page); | 1651 | extern void dump_page(struct page *page); |
1635 | 1652 | ||