Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	41
1 file changed, 17 insertions(+), 24 deletions(-)
@@ -21,27 +21,21 @@
  * Lock ordering in mm:
  *
  * inode->i_mutex	(while writing or truncating, not reading or faulting)
- *   inode->i_alloc_sem
- *
- * When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
- * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
- * taken together; in truncation, i_mutex is taken outermost.
- *
- * mm->mmap_sem
- *   page->flags PG_locked (lock_page)
- *     mapping->i_mmap_lock
- *       anon_vma->lock
- *         mm->page_table_lock or pte_lock
- *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
- *           swap_lock (in swap_duplicate, swap_info_get)
- *             mmlist_lock (in mmput, drain_mmlist and others)
- *             mapping->private_lock (in __set_page_dirty_buffers)
- *             inode_lock (in set_page_dirty's __mark_inode_dirty)
- *               sb_lock (within inode_lock in fs/fs-writeback.c)
- *               mapping->tree_lock (widely used, in set_page_dirty,
- *                         in arch-dependent flush_dcache_mmap_lock,
- *                         within inode_lock in __sync_single_inode)
+ *   inode->i_alloc_sem (vmtruncate_range)
+ *   mm->mmap_sem
+ *     page->flags PG_locked (lock_page)
+ *       mapping->i_mmap_lock
+ *         anon_vma->lock
+ *           mm->page_table_lock or pte_lock
+ *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *             swap_lock (in swap_duplicate, swap_info_get)
+ *               mmlist_lock (in mmput, drain_mmlist and others)
+ *               mapping->private_lock (in __set_page_dirty_buffers)
+ *               inode_lock (in set_page_dirty's __mark_inode_dirty)
+ *                 sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                 mapping->tree_lock (widely used, in set_page_dirty,
+ *                           in arch-dependent flush_dcache_mmap_lock,
+ *                           within inode_lock in __sync_single_inode)
  */
 
 #include <linux/mm.h>
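
The rewritten comment drops the prose paragraph about fault/msync nesting and folds mm->mmap_sem into the single nesting tree, with inode->i_mutex outermost. As a minimal sketch of the documented order (illustrative only, not code from this commit; the helper names are invented), a path that needs both locks takes them outermost-first and releases in reverse:

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/mutex.h>
	#include <linux/sched.h>

	/* Hypothetical helpers honouring the comment's ordering. */
	static void lock_for_write(struct inode *inode, struct mm_struct *mm)
	{
		mutex_lock(&inode->i_mutex);	/* outermost: writing or truncating */
		down_read(&mm->mmap_sem);	/* nests inside i_mutex */
	}

	static void unlock_for_write(struct inode *inode, struct mm_struct *mm)
	{
		up_read(&mm->mmap_sem);		/* release in reverse order */
		mutex_unlock(&inode->i_mutex);
	}
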
@@ -576,15 +570,14 @@ void page_add_file_rmap(struct page *page)
 void page_remove_rmap(struct page *page)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
-#ifdef CONFIG_DEBUG_VM
 		if (unlikely(page_mapcount(page) < 0)) {
 			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
 			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
 			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
 			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
+			BUG();
 		}
-#endif
-		BUG_ON(page_mapcount(page) < 0);
+
 		/*
 		 * It would be tidy to reset the PageAnon mapping here,
 		 * but that might overwrite a racing page_add_anon_rmap
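
With the #ifdef gone, the negative-mapcount check is always built in, and the bare BUG_ON() is replaced by a BUG() that fires only after the diagnostics above have been printed, so kernels without CONFIG_DEBUG_VM no longer die without any page state in the log. The trigger itself relies on the _mapcount convention: the field is -1 while a page is unmapped, so atomic_add_negative(-1, &page->_mapcount) returns true exactly on the final unmap. A tiny userspace model of that convention (plain int and the function names here are stand-ins for the kernel's atomic_t API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Models atomic_add_negative(): add i, report whether the result is negative. */
	static bool add_negative(int i, int *v)
	{
		*v += i;
		return *v < 0;
	}

	int main(void)
	{
		int mapcount = -1;			/* unmapped page starts at -1 */

		add_negative(1, &mapcount);		/* first page_add_*_rmap: 0 */
		add_negative(1, &mapcount);		/* second mapping: 1 */

		printf("%d\n", add_negative(-1, &mapcount));	/* 0: mappings remain */
		printf("%d\n", add_negative(-1, &mapcount));	/* 1: last unmap detected */
		return 0;
	}
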
