author		Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
commit		5e34437840d33554f69380584311743b39e8fbeb (patch)
tree		e081135619ee146af5efb9ee883afca950df5757 /include/linux/mm.h
parent		77d05632baee21b1cef8730d7c06aa69601e4dca (diff)
parent		d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into core/softlockup
Conflicts:
kernel/sysctl.c
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	30
1 file changed, 24 insertions, 6 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 323561582c10..bff1f0d475c7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 #define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -134,6 +135,7 @@ extern pgprot_t protection_map[16];
 
 #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
+#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -145,7 +147,7 @@ extern pgprot_t protection_map[16];
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-	return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+	return (vma->vm_flags & VM_PFN_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)
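The new VM_PFN_AT_MMAP flag lets is_linear_pfn_mapping() test an explicit bit rather than inferring linearity from vm_pgoff. For context, a hedged sketch of the kind of mapping this concerns: a device mmap handler that remaps the whole VMA with remap_pfn_range() at mmap time. foodev_mmap() and foodev_phys_base() are illustrative names, not part of this commit, and the flag itself is expected to be managed inside remap_pfn_range(), not by the driver.

#include <linux/fs.h>
#include <linux/mm.h>

extern phys_addr_t foodev_phys_base(struct file *file);	/* hypothetical helper */

/*
 * Illustrative device mmap: the entire VMA is backed by a contiguous
 * physical range established at mmap time.  A VMA set up this way is
 * what VM_PFN_AT_MMAP / is_linear_pfn_mapping() now describe.
 */
static int foodev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn  = foodev_phys_base(file) >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}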
@@ -186,7 +188,7 @@ struct vm_operations_struct {
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
-	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* called by access_process_vm when get_user_pages() fails, typically
 	 * for use by special VMAs that can switch between memory and hardware
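->page_mkwrite now takes a struct vm_fault like ->fault, so implementations read the faulting page from vmf->page. A minimal sketch of a callback against the new prototype; foo_page_mkwrite and foo_vm_ops are illustrative names, and real callbacks do more validation than this.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hedged sketch of a ->page_mkwrite implementation for the new
 * prototype: the faulting page arrives as vmf->page instead of a
 * separate struct page * argument.
 */
static int foo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* prepare the page for a write: reserve space, mark it dirty, ... */
	set_page_dirty(page);
	unlock_page(page);

	return 0;	/* success; an error return ends up as SIGBUS, per the comment above */
}

static struct vm_operations_struct foo_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= foo_page_mkwrite,
};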
@@ -833,6 +835,7 @@ int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
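account_page_dirtied() becomes a declared helper for the dirty accounting done on the set_page_dirty paths. A hedged sketch of how a set_page_dirty-style path might use it, loosely modelled on __set_page_dirty_nobuffers() with the locking and mapping checks simplified; foo_set_page_dirty is an illustrative name.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static int foo_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (TestSetPageDirty(page))
		return 0;		/* already dirty */

	if (mapping) {
		spin_lock_irq(&mapping->tree_lock);
		account_page_dirtied(page, mapping);	/* zone/bdi/task dirty accounting */
		radix_tree_tag_set(&mapping->page_tree, page_index(page),
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irq(&mapping->tree_lock);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return 1;
}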
@@ -1041,10 +1044,23 @@ extern void free_bootmem_with_active_regions(int nid,
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
-extern int early_pfn_to_nid(unsigned long pfn);
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+
+#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+	!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
+static inline int __early_pfn_to_nid(unsigned long pfn)
+{
+	return 0;
+}
+#else
+/* please see mm/page_alloc.c */
+extern int __meminit early_pfn_to_nid(unsigned long pfn);
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+/* there is a per-arch backend function. */
+extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif
+
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
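The early_pfn_to_nid() declarations are restructured: a trivial __early_pfn_to_nid() stub when neither CONFIG_ARCH_POPULATES_NODE_MAP nor CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID is set, and extern declarations otherwise, with an optional per-arch backend. A hedged sketch of what such an arch backend might look like; foo_nid_range[] and its contents are illustrative, not from this commit.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Hypothetical per-arch backend: map a boot-time pfn to a node id,
 * returning -1 when the pfn is not covered, so that the generic
 * early_pfn_to_nid() wrapper (see mm/page_alloc.c, per the comment
 * above) is expected to fall back to a default node.
 */
struct foo_nid_range {
	unsigned long start_pfn, end_pfn;
	int nid;
};
static struct foo_nid_range foo_nid_range[4] __meminitdata;	/* illustrative arch table */

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_nid_range); i++)
		if (pfn >= foo_nid_range[i].start_pfn &&
		    pfn <  foo_nid_range[i].end_pfn)
			return foo_nid_range[i].nid;
	return -1;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */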
@@ -1063,7 +1079,7 @@ static inline void setup_per_cpu_pageset(void) {}
 #endif
 
 /* nommu.c */
-extern atomic_t mmap_pages_allocated;
+extern atomic_long_t mmap_pages_allocated;
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
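With mmap_pages_allocated widened from atomic_t to atomic_long_t on nommu, its users go through the atomic_long_*() accessors. A minimal sketch under that assumption; foo_account_mapping() and foo_mapped_pages() are illustrative callers, not code from this commit.

#include <linux/mm.h>

static void foo_account_mapping(long npages, int add)
{
	if (add)
		atomic_long_add(npages, &mmap_pages_allocated);
	else
		atomic_long_sub(npages, &mmap_pages_allocated);
}

/* e.g. a reporting path would now read the counter with atomic_long_read() */
static unsigned long foo_mapped_pages(void)
{
	return atomic_long_read(&mmap_pages_allocated);
}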
@@ -1159,6 +1175,7 @@ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
 
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
+void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
 #define VM_MAX_READAHEAD	128	/* kbytes */
@@ -1304,5 +1321,6 @@ void vmemmap_populate_print_last(void);
 
 extern void *alloc_locked_buffer(size_t size);
 extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
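Finally, release_locked_buffer() is declared alongside the existing locked-buffer helpers. A hedged usage sketch, assuming the buffer is charged against the caller's locked-memory accounting, that free_locked_buffer() drops the charge and frees the buffer, and that release_locked_buffer() drops only the charge; foo_use_locked_buffer() is an illustrative caller, not code from this commit.

#include <linux/mm.h>

static int foo_use_locked_buffer(size_t size)
{
	void *buf = alloc_locked_buffer(size);	/* allocates and charges locked memory */

	if (!buf)
		return -ENOMEM;

	/* ... fill and consume buf ... */

	free_locked_buffer(buf, size);		/* drops the charge and frees the buffer */
	return 0;
}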