Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--   include/linux/mm.h | 128
1 file changed, 93 insertions, 35 deletions

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 224178a000d2..856f0ee7e84a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -218,7 +219,8 @@ struct inode;
  * Each physical page in the system has a struct page associated with
  * it to keep track of whatever it is we are using the page for at the
  * moment. Note that we have no way to track which tasks are using
- * a page.
+ * a page, though if it is a pagecache page, rmap structures can tell us
+ * who is mapping it.
  */
 struct page {
         unsigned long flags;            /* Atomic flags, some possibly
@@ -278,6 +280,12 @@ struct page {
  */
 #include <linux/page-flags.h>
 
+#ifdef CONFIG_DEBUG_VM
+#define VM_BUG_ON(cond) BUG_ON(cond)
+#else
+#define VM_BUG_ON(condition) do { } while(0)
+#endif
+
 /*
  * Methods to modify the page usage count.
  *
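
The VM_BUG_ON() introduced above behaves like BUG_ON() only when the kernel is configured with CONFIG_DEBUG_VM; otherwise it expands to an empty statement, so the extra sanity checks in the refcounting helpers below cost nothing in production builds. A minimal sketch of the intended usage pattern (the helper and its invariant are illustrative, not part of this patch):

/* Illustrative only: document a precondition with VM_BUG_ON(). With
 * CONFIG_DEBUG_VM=y a violation triggers BUG(); without it, the condition
 * is not even evaluated.
 */
static inline void example_touch_page(struct page *page)
{
        VM_BUG_ON(atomic_read(&page->_count) == 0); /* caller must already hold a reference */
        /* ... operate on the page ... */
}
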
@@ -292,12 +300,11 @@ struct page {
  */
 
 /*
- * Drop a ref, return true if the logical refcount fell to zero (the page has
- * no users)
+ * Drop a ref, return true if the refcount fell to zero (the page has no users)
  */
 static inline int put_page_testzero(struct page *page)
 {
-        BUG_ON(atomic_read(&page->_count) == 0);
+        VM_BUG_ON(atomic_read(&page->_count) == 0);
         return atomic_dec_and_test(&page->_count);
 }
 
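
put_page_testzero() only drops a reference and reports whether it was the last one; it does not free the page. The caller is expected to hand the page back to the allocator when it returns true, roughly along these lines (a simplified sketch of the pattern, not the real release path, which also handles LRU removal and compound pages):

/* Sketch: drop a reference and free the page if no users remain. */
static inline void example_put_page(struct page *page)
{
        if (put_page_testzero(page))
                free_hot_page(page);    /* return the page to the buddy allocator */
}
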
@@ -307,11 +314,10 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
+        VM_BUG_ON(PageCompound(page));
         return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
         if (unlikely(PageCompound(page)))
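
get_page_unless_zero() takes a reference only if the count has not already dropped to zero, which is what makes speculative, lockless lookups safe: a caller can race with the page being freed, so it must re-validate its pointer after the reference is obtained. The new VM_BUG_ON() documents that this helper must not be used on compound (higher-order) pages. A hedged sketch of that pattern (the slot re-check is illustrative and not part of this header):

/* Sketch of a speculative reference: take the page only if it still has users,
 * then confirm the slot still points at it; otherwise drop the reference and
 * retry.
 */
static struct page *example_speculative_get(struct page **slot)
{
        struct page *page;

again:
        page = *slot;
        if (!page || !get_page_unless_zero(page))
                return NULL;            /* empty slot, or page already on its way out */
        if (unlikely(page != *slot)) {  /* lost a race with a concurrent update */
                put_page(page);
                goto again;
        }
        return page;
}
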
@@ -323,6 +329,7 @@ static inline void get_page(struct page *page)
 {
         if (unlikely(PageCompound(page)))
                 page = (struct page *)page_private(page);
+        VM_BUG_ON(atomic_read(&page->_count) == 0);
         atomic_inc(&page->_count);
 }
 
@@ -349,43 +356,55 @@ void split_page(struct page *page, unsigned int order);
  * For the non-reserved pages, page_count(page) denotes a reference count.
  * page_count() == 0 means the page is free. page->lru is then used for
  * freelist management in the buddy allocator.
- * page_count() == 1 means the page is used for exactly one purpose
- * (e.g. a private data page of one process).
+ * page_count() > 0 means the page has been allocated.
+ *
+ * Pages are allocated by the slab allocator in order to provide memory
+ * to kmalloc and kmem_cache_alloc. In this case, the management of the
+ * page, and the fields in 'struct page' are the responsibility of mm/slab.c
+ * unless a particular usage is carefully commented. (the responsibility of
+ * freeing the kmalloc memory is the caller's, of course).
  *
- * A page may be used for kmalloc() or anyone else who does a
- * __get_free_page(). In this case the page_count() is at least 1, and
- * all other fields are unused but should be 0 or NULL. The
- * management of this page is the responsibility of the one who uses
- * it.
+ * A page may be used by anyone else who does a __get_free_page().
+ * In this case, page_count still tracks the references, and should only
+ * be used through the normal accessor functions. The top bits of page->flags
+ * and page->virtual store page management information, but all other fields
+ * are unused and could be used privately, carefully. The management of this
+ * page is the responsibility of the one who allocated it, and those who have
+ * subsequently been given references to it.
  *
- * The other pages (we may call them "process pages") are completely
+ * The other pages (we may call them "pagecache pages") are completely
  * managed by the Linux memory manager: I/O, buffers, swapping etc.
  * The following discussion applies only to them.
  *
- * A page may belong to an inode's memory mapping. In this case,
- * page->mapping is the pointer to the inode, and page->index is the
- * file offset of the page, in units of PAGE_CACHE_SIZE.
+ * A pagecache page contains an opaque `private' member, which belongs to the
+ * page's address_space. Usually, this is the address of a circular list of
+ * the page's disk buffers. PG_private must be set to tell the VM to call
+ * into the filesystem to release these pages.
  *
- * A page contains an opaque `private' member, which belongs to the
- * page's address_space. Usually, this is the address of a circular
- * list of the page's disk buffers.
+ * A page may belong to an inode's memory mapping. In this case, page->mapping
+ * is the pointer to the inode, and page->index is the file offset of the page,
+ * in units of PAGE_CACHE_SIZE.
  *
- * For pages belonging to inodes, the page_count() is the number of
- * attaches, plus 1 if `private' contains something, plus one for
- * the page cache itself.
+ * If pagecache pages are not associated with an inode, they are said to be
+ * anonymous pages. These may become associated with the swapcache, and in that
+ * case PG_swapcache is set, and page->private is an offset into the swapcache.
  *
- * Instead of keeping dirty/clean pages in per address-space lists, we instead
- * now tag pages as dirty/under writeback in the radix tree.
+ * In either case (swapcache or inode backed), the pagecache itself holds one
+ * reference to the page. Setting PG_private should also increment the
+ * refcount. Each user mapping also has a reference to the page.
  *
- * There is also a per-mapping radix tree mapping index to the page
- * in memory if present. The tree is rooted at mapping->root.
+ * The pagecache pages are stored in a per-mapping radix tree, which is
+ * rooted at mapping->page_tree, and indexed by offset.
+ * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
+ * lists, we instead now tag pages as dirty/writeback in the radix tree.
  *
- * All process pages can do I/O:
+ * All pagecache pages may be subject to I/O:
  *  - inode pages may need to be read from disk,
  *  - inode pages which have been modified and are MAP_SHARED may need
- *    to be written to disk,
- *  - private pages which have been modified may need to be swapped out
- *    to swap space and (later) to be read back into memory.
+ *    to be written back to the inode on disk,
+ *  - anonymous pages (including MAP_PRIVATE file mappings) which have been
+ *    modified may need to be swapped out to swap space and (later) to be read
+ *    back into memory.
  */
 
 /*
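
To make the accounting described above concrete: under this scheme a file-backed page that sits in the pagecache, has buffer heads attached (PG_private set) and is mapped into two address spaces should show page_count() == 4, one reference for the pagecache radix tree, one for the buffers and one per user mapping. A minimal sketch of a debug check built on that rule (the helper is illustrative; real code would also have to tolerate transient references from get_user_pages and the LRU handling):

/* Illustrative: the minimum reference count one would expect for a mapped
 * pagecache page, following the comment above. Not part of this patch.
 */
static inline int example_min_expected_count(struct page *page)
{
        int count = 1;                          /* the pagecache itself */

        if (PagePrivate(page))
                count++;                        /* buffer heads / fs private data */
        count += page_mapcount(page);           /* one per user mapping */
        return count;
}
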
@@ -463,7 +482,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK          ((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
         return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -480,23 +499,29 @@ static inline struct zone *page_zone(struct page *page)
         return zone_table[page_zone_id(page)];
 }
 
+static inline unsigned long zone_to_nid(struct zone *zone)
+{
+        return zone->zone_pgdat->node_id;
+}
+
 static inline unsigned long page_to_nid(struct page *page)
 {
         if (FLAGS_HAS_NODE)
                 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
         else
-                return page_zone(page)->zone_pgdat->node_id;
+                return zone_to_nid(page_zone(page));
 }
 static inline unsigned long page_to_section(struct page *page)
 {
         return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
         page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
         page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
         page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
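
zone_to_nid() simply names the zone->zone_pgdat->node_id chain that page_to_nid() previously open-coded; page_to_nid() still prefers the node bits packed into page->flags when they fit (FLAGS_HAS_NODE) and only falls back to the zone otherwise. A small sketch of a caller using these accessors to test whether a page is local to the executing CPU's node (the helper is illustrative, not part of this patch):

/* Sketch: is this page backed by memory on the node we are running on? */
static inline int example_page_is_local(struct page *page)
{
        return page_to_nid(page) == numa_node_id();
}
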
@@ -508,7 +533,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
         page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
         unsigned long node, unsigned long pfn)
 {
         set_page_zone(page, zone);
@@ -802,6 +827,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+        unsigned int vm_flags = vma->vm_flags;
+
+        /* If it was private or non-writable, the write bit is already clear */
+        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+                return 0;
+
+        /* The backer wishes to know when pages are first written to? */
+        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+                return 1;
+
+        /* The open routine did something to the protections already? */
+        if (pgprot_val(vma->vm_page_prot) !=
+            pgprot_val(protection_map[vm_flags &
+                    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+                return 0;
+
+        /* Specialty mapping? */
+        if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+                return 0;
+
+        /* Can the mapping track the dirty pages? */
+        return vma->vm_file && vma->vm_file->f_mapping &&
+                mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
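
vma_wants_writenotify() is only a predicate: it decides whether a shared, writable mapping should start out with the hardware write bit clear so that the first store to each page faults and can be observed (for page_mkwrite callbacks and dirty accounting). The downgrade of vm_page_prot described in its comment is done by the caller. A hedged sketch of how mmap-time code might use it (the surrounding helper is illustrative; the real call site lives in mm/mmap.c):

/* Sketch: when write notification is wanted, fall back to the private
 * protections (protection_map[] without VM_SHARED) so first writes fault.
 */
static void example_apply_writenotify(struct vm_area_struct *vma)
{
        if (vma_wants_writenotify(vma))
                vma->vm_page_prot = protection_map[vma->vm_flags &
                                        (VM_READ|VM_WRITE|VM_EXEC)];
}
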