Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	58

1 file changed, 41 insertions, 17 deletions

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b695875d63e3..8b7f4a5d4f6a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
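The new VM_MIXEDMAP flag marks a vma in which some ptes map ordinary struct-page-backed memory while others map raw PFNs with no struct page behind them. As a rough illustration only (the driver, its vm_ops, and the mmap method below are hypothetical, not part of this patch), a driver would opt in from its mmap method and then install ptes with the vm_insert_mixed() helper added later in this diff:

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* some ptes in this vma will map struct pages, others raw PFNs */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &my_dev_vm_ops;   /* hypothetical vm_operations_struct */
        return 0;
}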
@@ -164,8 +165,6 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-	struct page *(*nopage)(struct vm_area_struct *area,
-			unsigned long address, int *type);
 	unsigned long (*nopfn)(struct vm_area_struct *area,
 			unsigned long address);
 
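With ->nopage gone, drivers are expected to implement ->fault instead. A minimal sketch of the replacement pattern, assuming a hypothetical my_dev structure that tracks preallocated pages; note that errors are reported with VM_FAULT_* codes rather than the old NOPAGE_* sentinels, which this patch also removes:

static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct my_dev *dev = vma->vm_private_data;      /* hypothetical */
        struct page *page;

        if (vmf->pgoff >= dev->nr_pages)
                return VM_FAULT_SIGBUS;         /* was: return NOPAGE_SIGBUS; */

        page = dev->pages[vmf->pgoff];
        get_page(page);                         /* core MM expects a reference */
        vmf->page = page;
        return 0;                               /* core MM maps vmf->page */
}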
@@ -173,7 +172,25 @@ struct vm_operations_struct {
 	 * writable, if an error is returned it will cause a SIGBUS */
 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_NUMA
+	/*
+	 * set_policy() op must add a reference to any non-NULL @new mempolicy
+	 * to hold the policy upon return.  Caller should pass NULL @new to
+	 * remove a policy and fall back to surrounding context--i.e. do not
+	 * install a MPOL_DEFAULT policy, nor the task or system default
+	 * mempolicy.
+	 */
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+	/*
+	 * get_policy() op must add reference [mpol_get()] to any policy at
+	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
+	 * in mm/mempolicy.c will do this automatically.
+	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+	 * must return NULL--i.e., do not "fallback" to task or system default
+	 * policy.
+	 */
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 					unsigned long addr);
 	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
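As a sketch of the get_policy() contract spelled out above, modeled loosely on the shmem implementation (the my_obj container and its struct shared_policy field are hypothetical): mpol_shared_policy_lookup() returns a referenced policy for MPOL_SHARED entries and NULL when nothing is installed, which is exactly the behavior the comment requires.

static struct mempolicy *my_get_policy(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        struct my_obj *obj = vma->vm_private_data;      /* hypothetical */
        unsigned long idx;

        /* translate the faulting address into an object page index */
        idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        /* referenced policy for MPOL_SHARED entries, NULL otherwise --
         * never fall back to the task or system default here */
        return mpol_shared_policy_lookup(&obj->policy, idx);
}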
@@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
  * we have run out of space and have to fall back to an
  * alternate (slower) way of determining the node.
  *
- * No sparsemem:			|       NODE     | ZONE | ... | FLAGS |
- * with space for node:			| SECTION | NODE | ZONE | ... | FLAGS |
- * no space for node:			| SECTION |     ZONE    | ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap:	|       NODE     | ZONE | ... | FLAGS |
+ * classic sparse with space for node:	| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node:	| SECTION |     ZONE    | ... | FLAGS |
  */
-#ifdef CONFIG_SPARSEMEM
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTIONS_WIDTH		SECTIONS_SHIFT
 #else
 #define SECTIONS_WIDTH		0
@@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 
 #define ZONES_WIDTH		ZONES_SHIFT
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
 #define NODES_WIDTH		NODES_SHIFT
 #else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
 #define NODES_WIDTH		0
 #endif
 
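To make the layout above concrete, here is a small standalone illustration of the packing arithmetic. The widths are made up for the example (real values come from SECTIONS_SHIFT, NODES_SHIFT, ZONES_SHIFT, and NR_PAGEFLAGS in a given config); note how the fields are packed downward from the top of the word while the flag bits stay at the bottom:

#include <stdio.h>

#define BITS_PER_LONG   64      /* illustrative */
#define SECTIONS_WIDTH  19      /* illustrative */
#define NODES_WIDTH     6       /* illustrative */
#define ZONES_WIDTH     2       /* illustrative */

/* mirrors the SECTIONS_PGOFF/NODES_PGOFF/ZONES_PGOFF scheme in mm.h */
#define SECTIONS_PGOFF  (BITS_PER_LONG - SECTIONS_WIDTH)
#define NODES_PGOFF     (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF     (NODES_PGOFF - ZONES_WIDTH)

int main(void)
{
        unsigned long flags = 0;

        /* encode section 5, node 3, zone 1 */
        flags |= 5UL << SECTIONS_PGOFF;
        flags |= 3UL << NODES_PGOFF;
        flags |= 1UL << ZONES_PGOFF;

        printf("section=%lu node=%lu zone=%lu\n",
               (flags >> SECTIONS_PGOFF) & ((1UL << SECTIONS_WIDTH) - 1),
               (flags >> NODES_PGOFF) & ((1UL << NODES_WIDTH) - 1),
               (flags >> ZONES_PGOFF) & ((1UL << ZONES_WIDTH) - 1));
        return 0;
}

With these illustrative numbers and, say, NR_PAGEFLAGS = 20, the test above reads 19+6+2 = 27 <= 64-20 = 44, so the node field fits. On a 32-bit box only 12 bits would remain, NODES_WIDTH would fall back to 0, and a vmemmap config would now trip the new #error instead of silently losing the node field.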
@@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 
 #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 #endif
 
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
@@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page)
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 static inline unsigned long page_to_section(struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
+#endif
 
 static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
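The reason page_to_section() can disappear under CONFIG_SPARSEMEM_VMEMMAP is that the virtually contiguous mem_map makes the page-to-pfn translation plain pointer arithmetic, so no section number needs to live in page->flags. A simplified restatement of the two paths (these are not the kernel's exact macros, and my_page_to_pfn is a made-up name):

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline unsigned long my_page_to_pfn(struct page *page)
{
        return page - vmemmap;          /* contiguous virtual mem_map */
}
#else   /* classic sparsemem */
static inline unsigned long my_page_to_pfn(struct page *page)
{
        /* must recover the section from page->flags first */
        struct mem_section *sec = __nr_to_section(page_to_section(page));

        return page - __section_mem_map_addr(sec);
}
#endif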
@@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page)
 	struct address_space *mapping = page->mapping;
 
 	VM_BUG_ON(PageSlab(page));
+#ifdef CONFIG_SWAP
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else
+#endif
+	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
 }
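Callers of page_mapping() still see the same three outcomes after this change; a hedged sketch (inspect_page is a hypothetical helper, and with CONFIG_SWAP=n the swapcache branch above compiles away entirely, so only the other two cases can occur):

static void inspect_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (!mapping)
                return;         /* anonymous: ->mapping carried PAGE_MAPPING_ANON */
        if (mapping == &swapper_space)
                return;         /* swapcache page, CONFIG_SWAP=y only */
        /* otherwise: a file-backed page with a real address_space */
}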
@@ -649,12 +674,6 @@ static inline int page_mapped(struct page *page)
 }
 
 /*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS	(NULL)
-#define NOPAGE_OOM	((struct page *) (-1))
-
-/*
  * Error return values for the *_nopfn functions
  */
 #define NOPFN_SIGBUS	((unsigned long) -1)
@@ -720,7 +739,9 @@ struct zap_details {
 	unsigned long truncate_count;	/* Compare vm_truncate_count */
 };
 
-struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+		pte_t pte);
+
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -1149,6 +1170,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
 			unsigned int foll_flags);
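vm_insert_mixed() pairs with the new VM_MIXEDMAP flag shown earlier: unlike vm_insert_pfn(), it may be called on a pfn that does have a struct page behind it. A hedged sketch of fault-time use (my_dev and its base_pfn field are hypothetical):

static int my_dev_fault_pfn(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct my_dev *dev = vma->vm_private_data;      /* hypothetical */
        unsigned long addr = (unsigned long)vmf->virtual_address;
        int err;

        err = vm_insert_mixed(vma, addr, dev->base_pfn + vmf->pgoff);
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;         /* pte installed by hand, no vmf->page */
}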
@@ -1229,6 +1252,7 @@ void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */