Diffstat (limited to 'include/linux/mm.h')

 include/linux/mm.h | 127 +++++++++++++++++++++++++-----------------------
 1 file changed, 64 insertions(+), 63 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 856f0ee7e84a..7b703b6d4358 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -16,6 +16,7 @@
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
 #include <linux/backing-dev.h>
+#include <linux/mm_types.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -198,6 +199,7 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
+	unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address);
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 
 	/* notification that a previously read-only page is about to become
@@ -215,62 +217,6 @@ struct vm_operations_struct {
 struct mmu_gather;
 struct inode;
 
-/*
- * Each physical page in the system has a struct page associated with
- * it to keep track of whatever it is we are using the page for at the
- * moment.  Note that we have no way to track which tasks are using
- * a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it.
- */
-struct page {
-	unsigned long flags;		/* Atomic flags, some possibly
-					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
-	atomic_t _mapcount;		/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
-					 */
-	union {
-	    struct {
-		unsigned long private;	/* Mapping-private opaque data:
-					 * usually used for buffer_heads
-					 * if PagePrivate set; used for
-					 * swp_entry_t if PageSwapCache;
-					 * indicates order in the buddy
-					 * system if PG_buddy is set.
-					 */
-		struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
-	    };
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-	    spinlock_t ptl;
-#endif
-	};
-	pgoff_t index;			/* Our offset within mapping. */
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
-	/*
-	 * On machines where all RAM is mapped into kernel address space,
-	 * we can simply calculate the virtual address. On machines with
-	 * highmem some memory is mapped into kernel virtual memory
-	 * dynamically, so we need a place to store that address.
-	 * Note that this field could be 16 bits on x86 ... ;)
-	 *
-	 * Architectures with slow multiplication can define
-	 * WANT_PAGE_VIRTUAL in asm/page.h
-	 */
-#if defined(WANT_PAGE_VIRTUAL)
-	void *virtual;			/* Kernel virtual address (NULL if
-					   not kmapped, ie. highmem) */
-#endif /* WANT_PAGE_VIRTUAL */
-};
-
 #define page_private(page)		((page)->private)
 #define set_page_private(page, v)	((page)->private = (v))
 
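
The struct page definition removed above is not deleted outright; it moves to the new <linux/mm_types.h> header pulled in by this patch's first hunk, so the page_private()/set_page_private() accessors that follow keep compiling. As a reminder of what the relocated comment on the `private` field means in practice, here is a hedged sketch of how the buddy allocator records a free block's order there; example_set_page_order() is an illustrative name, the real helper lives in mm/page_alloc.c:

/* Sketch only: roughly what mm/page_alloc.c's set_page_order() does.
 * The order of a free buddy block is stashed in page->private, and
 * PG_buddy is set so the allocator can recognise the page later, per
 * the "indicates order in the buddy system" note in the comment above. */
static inline void example_set_page_order(struct page *page, int order)
{
	set_page_private(page, order);	/* order kept in ->private */
	__SetPageBuddy(page);		/* mark page as free in the buddy system */
}
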
@@ -501,7 +447,11 @@ static inline struct zone *page_zone(struct page *page)
 
 static inline unsigned long zone_to_nid(struct zone *zone)
 {
-	return zone->zone_pgdat->node_id;
+#ifdef CONFIG_NUMA
+	return zone->node;
+#else
+	return 0;
+#endif
 }
 
 static inline unsigned long page_to_nid(struct page *page)
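
The rewritten zone_to_nid() stops chasing zone->zone_pgdat->node_id and instead returns a node id cached in struct zone itself, collapsing to a constant 0 on !CONFIG_NUMA builds. This only works if a companion change, not visible in this header, adds that field; a minimal sketch of the assumed layout:

/* Illustrative fragment (assumption: a companion <linux/mmzone.h> change
 * in this patch series). struct zone caches its NUMA node id so that
 * zone_to_nid(), and page_to_nid() built on it, avoid the extra pgdat
 * dereference on hot paths. Surrounding fields elided. */
struct pglist_data;

struct zone_fragment {			/* hypothetical name; really struct zone */
#ifdef CONFIG_NUMA
	int node;			/* NUMA node this zone belongs to */
#endif
	struct pglist_data *zone_pgdat;	/* unchanged back-pointer to the node */
};
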
@@ -546,11 +496,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
  */
 #include <linux/vmstat.h>
 
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
-extern struct page *mem_map;
-#endif
-
 static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
@@ -650,6 +595,12 @@ static inline int page_mapped(struct page *page)
 #define NOPAGE_OOM	((struct page *) (-1))
 
 /*
+ * Error return values for the *_nopfn functions
+ */
+#define NOPFN_SIGBUS	((unsigned long) -1)
+#define NOPFN_OOM	((unsigned long) -2)
+
+/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
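
Taken together with the new ->nopfn hook added to vm_operations_struct earlier in this patch, these constants define the error contract for drivers that map raw page frames with no backing struct page: the handler returns the page frame number on success, or one of the NOPFN_* values on failure. A hedged sketch of such a handler for a hypothetical device region; example_nopfn, example_region_pfn and EXAMPLE_REGION_PAGES are invented names:

/* Sketch of a ->nopfn handler (hypothetical driver). On success it
 * returns the PFN backing the faulting address; the core fault path
 * installs the mapping from there. */
static unsigned long example_nopfn(struct vm_area_struct *vma,
				   unsigned long address)
{
	unsigned long pgoff = (address - vma->vm_start) >> PAGE_SHIFT;

	if (pgoff >= EXAMPLE_REGION_PAGES)
		return NOPFN_SIGBUS;		/* fault beyond the region */

	return example_region_pfn + pgoff;	/* PFN of the backing frame */
}

static struct vm_operations_struct example_vm_ops = {
	.nopfn	= example_nopfn,
};
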
@@ -937,6 +888,56 @@ extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
 	unsigned long * zones_size, unsigned long zone_start_pfn,
 	unsigned long *zholes_size);
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+/*
+ * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in a more
+ * architecture-independent manner. This is a substitute for creating the
+ * zone_sizes[] and zholes_size[] arrays and passing them to
+ * free_area_init_node()
+ *
+ * An architecture is expected to register ranges of page frames backed by
+ * physical memory with add_active_range() before calling
+ * free_area_init_nodes(), passing in the PFN each zone ends at. For basic
+ * usage, an architecture is expected to do something like
+ *
+ *	unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
+ *						     max_highmem_pfn};
+ *	for_each_valid_physical_page_range()
+ *		add_active_range(node_id, start_pfn, end_pfn)
+ *	free_area_init_nodes(max_zone_pfns);
+ *
+ * If the architecture guarantees that there are no holes in the ranges
+ * registered with add_active_range(), free_bootmem_with_active_regions()
+ * will call free_bootmem_node() for each registered physical page range.
+ * Similarly sparse_memory_present_with_active_regions() calls
+ * memory_present() for each range when SPARSEMEM is enabled.
+ *
+ * See mm/page_alloc.c for more information on each function exposed by
+ * CONFIG_ARCH_POPULATES_NODE_MAP
+ */
+extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+extern void add_active_range(unsigned int nid, unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
+					unsigned long new_end_pfn);
+extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void remove_all_active_ranges(void);
+extern unsigned long absent_pages_in_range(unsigned long start_pfn,
+						unsigned long end_pfn);
+extern void get_pfn_range_for_nid(unsigned int nid,
+			unsigned long *start_pfn, unsigned long *end_pfn);
+extern unsigned long find_min_pfn_with_active_regions(void);
+extern unsigned long find_max_pfn_with_active_regions(void);
+extern void free_bootmem_with_active_regions(int nid,
+						unsigned long max_low_pfn);
+extern void sparse_memory_present_with_active_regions(int nid);
+#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+extern int early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
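
The block comment above spells out the expected calling sequence. Under stated assumptions, a hypothetical architecture whose boot code knows its RAM ranges as ram_regions[] (an invented name, standing in for the comment's for_each_valid_physical_page_range()), that pseudocode expands to roughly:

/* Hedged sketch of an architecture's init path using the new API.
 * ram_regions[], nr_ram_regions, max_dma_pfn and max_low_pfn are
 * illustrative; MAX_NR_ZONES, ZONE_DMA and ZONE_NORMAL are the real
 * zone indices, and add_active_range()/free_area_init_nodes() are the
 * functions declared in the hunk above. */
static void __init example_zone_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
	int i;

	/* Register every PFN range actually backed by physical memory. */
	for (i = 0; i < nr_ram_regions; i++)
		add_active_range(ram_regions[i].nid,
				 ram_regions[i].start_pfn,
				 ram_regions[i].end_pfn);

	/* Tell the core where each zone ends; it sizes the zones,
	 * allocates the backing mem_map and accounts for holes. */
	max_zone_pfns[ZONE_DMA]    = max_dma_pfn;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);
}
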
@@ -1130,7 +1131,7 @@ void drop_slab(void);
 extern int randomize_va_space;
 #endif
 
-const char *arch_vma_name(struct vm_area_struct *vma);
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
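
Marking the arch_vma_name() declaration weak lets generic code reference the symbol while leaving the definition optional per architecture, for example to label special mappings such as the vDSO in /proc/pid/maps. A minimal sketch of such an override; example_vdso_base is a hypothetical stand-in for however the architecture tracks that address:

/* Sketch: an architecture naming its vDSO mapping. Returning NULL
 * means the vma gets no special name. */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_start == example_vdso_base)
		return "[vdso]";
	return NULL;
}
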
