Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--    include/linux/mm.h    29
1 files changed, 24 insertions, 5 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7687228dd3b7..721f451c3029 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -144,6 +144,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 #define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
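
The new FAULT_FLAG_ALLOW_RETRY bit tells a ->fault implementation that the caller is prepared to drop mmap_sem and retry the fault rather than sleep inside the handler. A minimal sketch of how a handler might honour it is below; example_fault is hypothetical, not code from this patch, and VM_FAULT_RETRY is the matching return code added in a later hunk of this same diff.

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = find_get_page(vma->vm_file->f_mapping, vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;		/* sketch: no read-in path */

	if (!trylock_page(page)) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			/* Caller will drop mmap_sem and retry the fault. */
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		lock_page(page);	/* old behaviour: block on the page lock */
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}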
@@ -497,8 +498,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 #define NODES_PGSHIFT	(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT	(ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */
-#ifdef NODE_NOT_IN_PAGEFLAGS
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
 #define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
 #define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
 						SECTIONS_PGOFF : ZONES_PGOFF)
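
For context, the zone ID that this comment describes is extracted from page->flags a little further down in mm.h (unchanged by this patch); the hunk above only fixes the comment spelling and the #ifdef symbol name. Roughly:

static inline int page_zone_id(struct page *page)
{
	/* NODE:ZONE or SECTION:ZONE packed into the upper page flag bits */
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}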
@@ -718,12 +719,21 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_SIGBUS	0x0002
 #define VM_FAULT_MAJOR	0x0004
 #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
-#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned page */
+#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
 
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
+
+#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000	/* encodes hpage index for large hwpoison */
+
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+			 VM_FAULT_HWPOISON_LARGE)
 
-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
 
 /*
  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
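
The HINDEX macros pack a 4-bit huge page (hstate) index into bits 12-15 of the fault return value, so that the code reporting a poisoned large page can tell the architecture fault handler which huge page size was hit. A sketch of the two sides, using only the macros added above; the function names are illustrative, not taken from this patch:

/* Producer: report a poisoned huge page together with its hstate index. */
static int example_report_poison(int hstate_idx)
{
	return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_idx);
}

/* Consumer: recover the hstate index from the fault code, 0 otherwise. */
static int example_extract_index(unsigned int fault)
{
	if (fault & VM_FAULT_HWPOISON_LARGE)
		return VM_FAULT_GET_HINDEX(fault);	/* 0..15 */
	return 0;
}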
@@ -860,6 +870,7 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
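
account_page_writeback() is only declared here; its body lives elsewhere (mm/page-writeback.c). A plausible definition, assuming it mirrors account_page_dirtied() and simply bumps the per-zone writeback counter, would be:

void account_page_writeback(struct page *page)
{
	/* Assumed sketch: account the page in the zone's writeback statistics. */
	inc_zone_page_state(page, NR_WRITEBACK);
}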
@@ -1023,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *);
 
 int vma_wants_writenotify(struct vm_area_struct *vma);
 
-extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+			       spinlock_t **ptl);
+static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+				    spinlock_t **ptl)
+{
+	pte_t *ptep;
+	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
+	return ptep;
+}
 
 #ifdef __PAGETABLE_PUD_FOLDED
 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
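
The get_locked_pte() change above does not alter behaviour for callers: it splits the function into an out-of-line __get_locked_pte() plus an inline wrapper whose __cond_lock() annotation lets sparse see that *ptl is held exactly when a non-NULL pte is returned. A typical caller pattern, for reference (a sketch, not code from this patch), looks like:

static int example_touch_pte(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = get_locked_pte(mm, addr, &ptl);

	if (!pte)
		return -ENOMEM;
	/* *pte may be inspected or modified here; the PTE lock is held. */
	pte_unmap_unlock(pte, ptl);
	return 0;
}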