Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 53 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 47 insertions(+), 6 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 709f6728fc90..721f451c3029 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE	0x00000080

 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP	0x00000200
+#else
+#define VM_GROWSUP	0x00000000
+#endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

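Defining VM_GROWSUP as 0 on architectures whose stacks never grow upwards lets generic code test the flag unconditionally; the compiler folds the dead branch away. A minimal sketch of the pattern this enables (the helper name is hypothetical, not part of this change):

static inline int vma_grows_up(struct vm_area_struct *vma)
{
	/* Constant 0 at compile time unless CONFIG_STACK_GROWSUP/CONFIG_IA64. */
	return vma->vm_flags & VM_GROWSUP;
}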
@@ -140,6 +144,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 #define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */

 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
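FAULT_FLAG_ALLOW_RETRY pairs with the VM_FAULT_RETRY return code added further down: it tells the fault path that the caller can tolerate mmap_sem being dropped while waiting for page I/O, instead of the fault sleeping with the lock held. A hedged caller-side sketch, assuming the handle_mm_fault() flow of this era; a real fault path must also fully re-validate the vma after reacquiring mmap_sem:

unsigned int flags = FAULT_FLAG_ALLOW_RETRY | (write ? FAULT_FLAG_WRITE : 0);
int fault = handle_mm_fault(mm, vma, address, flags);

if (fault & VM_FAULT_RETRY) {
	/* mmap_sem was released while waiting for the page; retry once,
	 * this time blocking (no ALLOW_RETRY). */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);	/* re-lookup before retrying */
	fault = handle_mm_fault(mm, vma, address,
				flags & ~FAULT_FLAG_ALLOW_RETRY);
}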
@@ -493,8 +498,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 #define NODES_PGSHIFT	(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT	(ZONES_PGOFF * (ZONES_WIDTH != 0))

-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */
-#ifdef NODE_NOT_IN_PAGEFLAGS
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
 #define ZONEID_SHIFT	(SECTIONS_SHIFT + ZONES_SHIFT)
 #define ZONEID_PGOFF	((SECTIONS_PGOFF < ZONES_PGOFF)? \
 		SECTIONS_PGOFF : ZONES_PGOFF)
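These shift/offset macros are consumed a little later in this same header by page_zone_id(), which reads the NODE:ZONE (or SECTION:ZONE) pair back out of page->flags as a single comparable id:

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}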
@@ -714,12 +719,21 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_SIGBUS	0x0002
 #define VM_FAULT_MAJOR	0x0004
 #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
-#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned page */
+#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */

 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
+
+#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+			 VM_FAULT_HWPOISON_LARGE)

-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

 /*
  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
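The HINDEX macros round-trip a 4-bit huge-page-size index through bits 12-15 of the fault code (the VM_FAULT_HWPOISON_LARGE_MASK range), so a fault handler can tell how large the poisoned mapping was. A sketch of both ends of that encoding; the function names here are illustrative, not part of the commit:

/* Producer (e.g. a hugetlb fault path): report poison plus page size. */
static int report_hwpoison_huge(int hstate_idx)
{
	return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_idx);
}

/* Consumer: recover the index; the & 0xf keeps it within the mask. */
static int hwpoison_hstate_idx(int fault)
{
	return VM_FAULT_GET_HINDEX(fault);
}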
@@ -856,10 +870,17 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);

+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
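vma_stack_continue() answers whether the mapping ending at addr is itself a piece of the stack, which matters for stack-guard-page logic: the first page of a grows-down vma is the guard page only when the vma just below it is not a stack continuation. A sketch mirroring that use (stack_guard_page() is illustrative here, in the style of mm/mlock.c, not part of this hunk):

static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_stack_continue(vma->vm_prev, addr);
}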
@@ -1013,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *);

 int vma_wants_writenotify(struct vm_area_struct *vma);

-extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+			       spinlock_t **ptl);
+static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+				    spinlock_t **ptl)
+{
+	pte_t *ptep;
+	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
+	return ptep;
+}

 #ifdef __PAGETABLE_PUD_FOLDED
 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
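Splitting get_locked_pte() into a real __get_locked_pte() plus an inline wrapper exists so that sparse can see the conditional lock acquisition through __cond_lock(). Callers keep the old contract: on success the pte is mapped and *ptl is held. A hypothetical caller, not from this commit:

static int set_one_pte(struct mm_struct *mm, unsigned long addr, pte_t pteval)
{
	spinlock_t *ptl;
	pte_t *pte = get_locked_pte(mm, addr, &ptl);

	if (!pte)
		return -ENOMEM;
	set_pte_at(mm, addr, pte, pteval);
	pte_unmap_unlock(pte, ptl);	/* pairs with the lock taken above */
	return 0;
}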
@@ -1165,6 +1194,8 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
+					u64 goal, u64 limit);
 void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
 				 u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
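find_memory_core_early() exposes the per-node range search for early-boot callers that want an address without committing to an allocation through __alloc_memory_core_early(). A hypothetical use; the -1ULL failure value is an assumption based on similar early allocators, not stated by this hunk:

static u64 __init pick_early_range(int nid, u64 goal, u64 limit)
{
	/* Ask for 1 MiB, page aligned, on this node. */
	u64 addr = find_memory_core_early(nid, 1ULL << 20, PAGE_SIZE,
					  goal, limit);

	if (addr == -1ULL)	/* assumed failure convention */
		pr_warn("node %d: no suitable early memory range\n", nid);
	return addr;
}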
@@ -1330,8 +1361,10 @@ unsigned long ra_submit(struct file_ra_state *ra,

 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+  #define expand_upwards(vma, address) do { } while (0)
 #endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
 				  unsigned long address);
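With the no-op #else branch (and VM_GROWSUP now always defined, per the first hunk), generic code can mention expand_upwards() without carrying its own CONFIG_IA64 guards. Note that the stub is a statement macro, not an expression, so callers on !VM_GROWSUP builds must not consume a return value. An illustrative caller:

static void maybe_grow_up(struct vm_area_struct *vma, unsigned long address)
{
	if (vma->vm_flags & VM_GROWSUP)	/* constant 0 on most arches */
		expand_upwards(vma, address);
}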
@@ -1357,7 +1390,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }

+#ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(0);
+}
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t);
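The !CONFIG_MMU stub lets common mapping code compute a protection value on both build types; returning an empty pgprot_t is harmless on nommu, where page protections are not enforced at this level. A hypothetical shared caller:

static void init_vma_prot(struct vm_area_struct *vma)
{
	/* Compiles and behaves sanely on both MMU and !MMU builds. */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}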
