Diffstat (limited to 'include/linux/mm.h')

 -rw-r--r--  include/linux/mm.h | 263
 1 file changed, 166 insertions(+), 97 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3b09444121d9..7b703b6d4358 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,9 @@
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
+#include <linux/mm_types.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -36,7 +39,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/atomic.h>
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
@@ -145,7 +147,6 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 #define VM_GROWSUP	0x00000200
-#define VM_SHM		0x00000000	/* Means nothing: delete it later */
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
@@ -198,6 +199,7 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
+	unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address);
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 
 	/* notification that a previously read-only page is about to become
@@ -207,67 +209,14 @@ struct vm_operations_struct {
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 					unsigned long addr);
+	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+		const nodemask_t *to, unsigned long flags);
 #endif
 };
 
 struct mmu_gather;
 struct inode;
 
-/*
- * Each physical page in the system has a struct page associated with
- * it to keep track of whatever it is we are using the page for at the
- * moment. Note that we have no way to track which tasks are using
- * a page.
- */
-struct page {
-	unsigned long flags;		/* Atomic flags, some possibly
-					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
-	atomic_t _mapcount;		/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
-					 */
-	union {
-	    struct {
-		unsigned long private;		/* Mapping-private opaque data:
-						 * usually used for buffer_heads
-						 * if PagePrivate set; used for
-						 * swp_entry_t if PageSwapCache;
-						 * indicates order in the buddy
-						 * system if PG_buddy is set.
-						 */
-		struct address_space *mapping;	/* If low bit clear, points to
-						 * inode address_space, or NULL.
-						 * If page mapped as anonymous
-						 * memory, low bit is set, and
-						 * it points to anon_vma object:
-						 * see PAGE_MAPPING_ANON below.
-						 */
-	    };
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-	    spinlock_t ptl;
-#endif
-	};
-	pgoff_t index;			/* Our offset within mapping. */
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
-	/*
-	 * On machines where all RAM is mapped into kernel address space,
-	 * we can simply calculate the virtual address. On machines with
-	 * highmem some memory is mapped into kernel virtual memory
-	 * dynamically, so we need a place to store that address.
-	 * Note that this field could be 16 bits on x86 ... ;)
-	 *
-	 * Architectures with slow multiplication can define
-	 * WANT_PAGE_VIRTUAL in asm/page.h
-	 */
-#if defined(WANT_PAGE_VIRTUAL)
-	void *virtual;			/* Kernel virtual address (NULL if
-					   not kmapped, ie. highmem) */
-#endif /* WANT_PAGE_VIRTUAL */
-};
-
 #define page_private(page)		((page)->private)
 #define set_page_private(page, v)	((page)->private = (v))
 
@@ -277,6 +226,12 @@ struct page {
  */
 #include <linux/page-flags.h>
 
+#ifdef CONFIG_DEBUG_VM
+#define VM_BUG_ON(cond) BUG_ON(cond)
+#else
+#define VM_BUG_ON(condition) do { } while(0)
+#endif
+
 /*
  * Methods to modify the page usage count.
  *
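The VM_BUG_ON() added above gives mm code assertions that compile away entirely when CONFIG_DEBUG_VM is not set. A minimal sketch of the intended pattern (the wrapper function below is illustrative; the assertion itself mirrors put_page_testzero() in the next hunk):

/* Sketch: a sanity check that costs nothing in non-debug builds. */
static inline void check_page_has_users(struct page *page)
{
	/* CONFIG_DEBUG_VM=y: expands to BUG_ON(); otherwise: empty statement */
	VM_BUG_ON(atomic_read(&page->_count) == 0);
}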
@@ -291,12 +246,11 @@ struct page {
  */
 
 /*
- * Drop a ref, return true if the logical refcount fell to zero (the page has
- * no users)
+ * Drop a ref, return true if the refcount fell to zero (the page has no users)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -306,11 +260,10 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
+	VM_BUG_ON(PageCompound(page));
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
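get_page_unless_zero() exists for speculative references, where the page may be freed or reused at any moment; the caller only proceeds if the count was still non-zero. A hedged sketch of the usual shape of such a caller (the surrounding function is hypothetical):

/* Sketch: take a reference only if the page still has users. */
static int try_inspect_page(struct page *page)
{
	if (!get_page_unless_zero(page))
		return -EAGAIN;		/* page was already heading back to the allocator */

	/* ... examine the page while our reference pins it ... */

	put_page(page);
	return 0;
}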
@@ -322,6 +275,7 @@ static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
 
@@ -335,6 +289,7 @@ static inline void init_page_count(struct page *page)
 }
 
 void put_page(struct page *page);
+void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
 
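The new put_pages_list() drops one reference on each page linked through page->lru, unlinking each page as it goes. A sketch of a caller handing back a batch of pages, assuming they were gathered onto a private list (the helper below is illustrative):

/* Sketch: release a batch of pages collected on a local list. */
static void release_page_batch(struct page **pages, int nr)
{
	LIST_HEAD(batch);
	int i;

	for (i = 0; i < nr; i++)
		list_add_tail(&pages[i]->lru, &batch);

	put_pages_list(&batch);		/* puts each page and empties the list */
}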
@@ -347,43 +302,55 @@ void split_page(struct page *page, unsigned int order);
  * For the non-reserved pages, page_count(page) denotes a reference count.
  * page_count() == 0 means the page is free. page->lru is then used for
  * freelist management in the buddy allocator.
- * page_count() == 1 means the page is used for exactly one purpose
- * (e.g. a private data page of one process).
+ * page_count() > 0 means the page has been allocated.
+ *
+ * Pages are allocated by the slab allocator in order to provide memory
+ * to kmalloc and kmem_cache_alloc. In this case, the management of the
+ * page, and the fields in 'struct page' are the responsibility of mm/slab.c
+ * unless a particular usage is carefully commented. (the responsibility of
+ * freeing the kmalloc memory is the caller's, of course).
  *
- * A page may be used for kmalloc() or anyone else who does a
- * __get_free_page(). In this case the page_count() is at least 1, and
- * all other fields are unused but should be 0 or NULL. The
- * management of this page is the responsibility of the one who uses
- * it.
+ * A page may be used by anyone else who does a __get_free_page().
+ * In this case, page_count still tracks the references, and should only
+ * be used through the normal accessor functions. The top bits of page->flags
+ * and page->virtual store page management information, but all other fields
+ * are unused and could be used privately, carefully. The management of this
+ * page is the responsibility of the one who allocated it, and those who have
+ * subsequently been given references to it.
  *
- * The other pages (we may call them "process pages") are completely
+ * The other pages (we may call them "pagecache pages") are completely
  * managed by the Linux memory manager: I/O, buffers, swapping etc.
  * The following discussion applies only to them.
  *
- * A page may belong to an inode's memory mapping. In this case,
- * page->mapping is the pointer to the inode, and page->index is the
- * file offset of the page, in units of PAGE_CACHE_SIZE.
+ * A pagecache page contains an opaque `private' member, which belongs to the
+ * page's address_space. Usually, this is the address of a circular list of
+ * the page's disk buffers. PG_private must be set to tell the VM to call
+ * into the filesystem to release these pages.
  *
- * A page contains an opaque `private' member, which belongs to the
- * page's address_space. Usually, this is the address of a circular
- * list of the page's disk buffers.
+ * A page may belong to an inode's memory mapping. In this case, page->mapping
+ * is the pointer to the inode, and page->index is the file offset of the page,
+ * in units of PAGE_CACHE_SIZE.
  *
- * For pages belonging to inodes, the page_count() is the number of
- * attaches, plus 1 if `private' contains something, plus one for
- * the page cache itself.
+ * If pagecache pages are not associated with an inode, they are said to be
+ * anonymous pages. These may become associated with the swapcache, and in that
+ * case PG_swapcache is set, and page->private is an offset into the swapcache.
  *
- * Instead of keeping dirty/clean pages in per address-space lists, we instead
- * now tag pages as dirty/under writeback in the radix tree.
+ * In either case (swapcache or inode backed), the pagecache itself holds one
+ * reference to the page. Setting PG_private should also increment the
+ * refcount. The each user mapping also has a reference to the page.
  *
- * There is also a per-mapping radix tree mapping index to the page
- * in memory if present. The tree is rooted at mapping->root.
+ * The pagecache pages are stored in a per-mapping radix tree, which is
+ * rooted at mapping->page_tree, and indexed by offset.
+ * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
+ * lists, we instead now tag pages as dirty/writeback in the radix tree.
  *
- * All process pages can do I/O:
+ * All pagecache pages may be subject to I/O:
  * - inode pages may need to be read from disk,
  * - inode pages which have been modified and are MAP_SHARED may need
- *   to be written to disk,
- * - private pages which have been modified may need to be swapped out
- *   to swap space and (later) to be read back into memory.
+ *   to be written back to the inode on disk,
+ * - anonymous pages (including MAP_PRIVATE file mappings) which have been
+ *   modified may need to be swapped out to swap space and (later) to be read
+ *   back into memory.
  */
 
 /*
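The rewritten comment boils down to this rule: every user of a pagecache page must hold its own reference, because the pagecache's reference (and the mappings') can disappear underneath it. A small sketch of the balanced lookup pattern it implies, using find_get_page(), which returns with a reference already taken:

/* Sketch: probe the pagecache and drop the reference we were handed. */
static int offset_is_cached(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return 0;
	/* ... inspect the page under our own reference ... */
	page_cache_release(page);	/* same as put_page() for pagecache pages */
	return 1;
}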
@@ -461,7 +428,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -478,23 +445,33 @@ static inline struct zone *page_zone(struct page *page)
 	return zone_table[page_zone_id(page)];
 }
 
+static inline unsigned long zone_to_nid(struct zone *zone)
+{
+#ifdef CONFIG_NUMA
+	return zone->node;
+#else
+	return 0;
+#endif
+}
+
 static inline unsigned long page_to_nid(struct page *page)
 {
 	if (FLAGS_HAS_NODE)
 		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 	else
-		return page_zone(page)->zone_pgdat->node_id;
+		return zone_to_nid(page_zone(page));
 }
 static inline unsigned long page_to_section(struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
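zone_to_nid() hides the fact that struct zone only records its node id on NUMA builds, so page_to_nid() no longer needs to chase zone->zone_pgdat. A sketch of the kind of caller this serves, with a hypothetical helper that allocates bookkeeping near the page it describes:

/* Sketch: allocate on the same node as an existing page. */
static void *alloc_near_page(struct page *page, size_t size)
{
	int nid = page_to_nid(page);	/* from page->flags or the page's zone */

	return kmalloc_node(size, GFP_KERNEL, nid);
}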
@@ -506,7 +483,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
@@ -514,10 +491,10 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 	set_page_section(page, pfn_to_section_nr(pfn));
 }
 
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
-extern struct page *mem_map;
-#endif
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
@@ -618,6 +595,12 @@ static inline int page_mapped(struct page *page)
 #define NOPAGE_OOM	((struct page *) (-1))
 
 /*
+ * Error return values for the *_nopfn functions
+ */
+#define NOPFN_SIGBUS	((unsigned long) -1)
+#define NOPFN_OOM	((unsigned long) -2)
+
+/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
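These NOPFN_* values pair with the ->nopfn operation added to vm_operations_struct earlier in this diff: for PFN-based mappings the handler returns a raw page frame number (the fault core installs the PTE) or one of the error codes. A hedged sketch of such a handler for a hypothetical device, where mydev, base_pfn and nr_pages are illustrative:

/* Sketch: fault handler for a mapping backed by device memory, no struct page. */
static unsigned long mydev_nopfn(struct vm_area_struct *vma, unsigned long address)
{
	struct mydev *dev = vma->vm_private_data;	/* hypothetical driver state */
	unsigned long offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (offset >= dev->nr_pages)
		return NOPFN_SIGBUS;			/* outside the device aperture */

	return dev->base_pfn + offset;			/* PFN for the core to map */
}

static struct vm_operations_struct mydev_vm_ops = {
	.nopfn	= mydev_nopfn,
};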
@@ -795,6 +778,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappigns will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+	unsigned int vm_flags = vma->vm_flags;
+
+	/* If it was private or non-writable, the write bit is already clear */
+	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+		return 0;
+
+	/* The backer wishes to know when pages are first written to? */
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		return 1;
+
+	/* The open routine did something to the protections already? */
+	if (pgprot_val(vma->vm_page_prot) !=
+	    pgprot_val(protection_map[vm_flags &
+		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+		return 0;
+
+	/* Specialty mapping? */
+	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+		return 0;
+
+	/* Can the mapping track the dirty pages? */
+	return vma->vm_file && vma->vm_file->f_mapping &&
+		mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
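vma_wants_writenotify() is what lets the mmap path map shared, writable pages read-only so that the first write faults and can be noticed (page_mkwrite, dirty accounting). A sketch of how a caller applies it, using the same protection_map[] indexing the helper itself relies on; the wrapper function is illustrative:

/* Sketch: downgrade protections when write notifications are wanted. */
static void apply_write_notify(struct vm_area_struct *vma)
{
	/*
	 * Index protection_map[] without VM_SHARED: the private variant,
	 * so the first store traps instead of going straight through.
	 */
	if (vma_wants_writenotify(vma))
		vma->vm_page_prot =
			protection_map[vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
}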
@@ -872,6 +888,56 @@ extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
 	unsigned long * zones_size, unsigned long zone_start_pfn,
 	unsigned long *zholes_size);
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+/*
+ * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in a more
+ * architecture independent manner. This is a substitute for creating the
+ * zone_sizes[] and zholes_size[] arrays and passing them to
+ * free_area_init_node()
+ *
+ * An architecture is expected to register range of page frames backed by
+ * physical memory with add_active_range() before calling
+ * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
+ * usage, an architecture is expected to do something like
+ *
+ * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
+ * 							 max_highmem_pfn};
+ * for_each_valid_physical_page_range()
+ * 	add_active_range(node_id, start_pfn, end_pfn)
+ * free_area_init_nodes(max_zone_pfns);
+ *
+ * If the architecture guarantees that there are no holes in the ranges
+ * registered with add_active_range(), free_bootmem_active_regions()
+ * will call free_bootmem_node() for each registered physical page range.
+ * Similarly sparse_memory_present_with_active_regions() calls
+ * memory_present() for each range when SPARSEMEM is enabled.
+ *
+ * See mm/page_alloc.c for more information on each function exposed by
+ * CONFIG_ARCH_POPULATES_NODE_MAP
+ */
+extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+extern void add_active_range(unsigned int nid, unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
+					unsigned long new_end_pfn);
+extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void remove_all_active_ranges(void);
+extern unsigned long absent_pages_in_range(unsigned long start_pfn,
+					unsigned long end_pfn);
+extern void get_pfn_range_for_nid(unsigned int nid,
+			unsigned long *start_pfn, unsigned long *end_pfn);
+extern unsigned long find_min_pfn_with_active_regions(void);
+extern unsigned long find_max_pfn_with_active_regions(void);
+extern void free_bootmem_with_active_regions(int nid,
+						unsigned long max_low_pfn);
+extern void sparse_memory_present_with_active_regions(int nid);
+#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+extern int early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
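The block comment above gives the registration sequence in pseudo-code; in concrete terms an architecture's boot code might look roughly like the following sketch, where the firmware-derived ram_range[] table and the zone limits are illustrative:

/* Sketch: register RAM ranges, then let the core build zones and mem_map. */
static void __init example_arch_zone_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int i;

	for (i = 0; i < nr_ram_ranges; i++)		/* hypothetical firmware table */
		add_active_range(ram_range[i].nid,
				 ram_range[i].start_pfn,
				 ram_range[i].end_pfn);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;	/* illustrative zone limits */
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);		/* builds zones and mem_map */
}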
@@ -1006,6 +1072,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags);
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 struct page *vmalloc_to_page(void *addr);
 unsigned long vmalloc_to_pfn(void *addr);
@@ -1034,8 +1101,8 @@ static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!PageHighMem(page) && !enable)
-		mutex_debug_check_no_locks_freed(page_address(page),
-						numpages * PAGE_SIZE);
+		debug_check_no_locks_freed(page_address(page),
+					numpages * PAGE_SIZE);
 }
 #endif
 
@@ -1064,5 +1131,7 @@ void drop_slab(void);
 extern int randomize_va_space;
 #endif
 
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
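arch_vma_name() is declared weak so that an architecture can provide its own definition and label special mappings in /proc/<pid>/maps. A sketch of such an override; whether mm->context carries a vdso pointer is architecture-specific, so treat the check below as illustrative:

/* Sketch: per-arch override naming the vDSO mapping. */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm &&
	    vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}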
