Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 67
1 file changed, 60 insertions(+), 7 deletions(-)
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
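The SLAB_CTOR_CONSTRUCTOR test becomes redundant once the slab allocator only ever invokes a cache's constructor to construct new objects, so the constructor body can run unconditionally. For context, the cache that uses this constructor is registered in anon_vma_init(); the sketch below is an approximate, era-specific reconstruction for illustration, not part of the hunk above.

/* Sketch only; assumes <linux/slab.h> and the anon_vma definitions from rmap.h. */
static struct kmem_cache *anon_vma_cachep;

void __init anon_vma_init(void)
{
	/*
	 * The constructor runs when a slab of objects is created, not on
	 * every kmem_cache_alloc(), so the lock and list head must stay in
	 * a valid state even while an anon_vma is free (SLAB_DESTROY_BY_RCU
	 * allows readers to touch the object after it has been freed).
	 */
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU | SLAB_PANIC, anon_vma_ctor, NULL);
}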
@@ -505,6 +503,7 @@ int page_mkclean(struct page *page)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(page_mkclean);
 
 /**
  * page_set_anon_rmap - setup new anonymous rmap
@@ -531,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page,
 }
 
 /**
+ * page_set_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
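The reworded comment and the two new VM_BUG_ON()s spell out the calling convention that the anonymous fault paths (for example do_swap_page) already follow: the page is locked and the pte lock is held around the rmap insertion, which is what lets __page_check_anon_rmap() compare page->mapping and page->index without races. The caller below is purely illustrative (the helper and its name are hypothetical, not from this patch; error handling, pte flag adjustment and accounting are omitted; assumes the usual <linux/mm.h>, <linux/rmap.h>, <linux/pagemap.h>).

/* Illustrative only: the locking pattern page_add_anon_rmap() now asserts. */
static void example_map_anon_page(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		struct page *page, pmd_t *pmd)
{
	spinlock_t *ptl;
	pte_t *pte;

	lock_page(page);				/* page must be locked... */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);	/* ...and pte lock held */
	set_pte_at(mm, address, pte, mk_pte(page, vma->vm_page_prot));
	page_add_anon_rmap(page, vma, address);		/* vma->anon_vma assumed already set up */
	pte_unmap_unlock(pte, ptl);
	unlock_page(page);
}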
@@ -554,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
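page_add_new_anon_rmap() keeps the weaker requirement: a freshly allocated page is not visible to any other CPU until the pte is written, so only the pte lock (and a sane address, hence the new BUG_ON) is needed and the mapcount can be set directly. The sketch below is a hypothetical path loosely modelled on do_anonymous_page; names are illustrative and most details are omitted.

/* Illustrative only: mapping a brand-new anonymous page under the pte lock. */
static int example_map_new_anon_page(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t *pte;

	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, address);
	if (!page)
		return -ENOMEM;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	set_pte_at(mm, address, pte, mk_pte(page, vma->vm_page_prot));
	page_add_new_anon_rmap(page, vma, address);	/* no lock_page() required */
	pte_unmap_unlock(pte, ptl);
	return 0;
}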
@@ -574,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page:	page to remove mapping from
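page_dup_rmap() gains the vma and address arguments so that, under CONFIG_DEBUG_VM, the fork path can run the same anon-rmap sanity check as page_add_anon_rmap(); the non-debug variant presumably stays as a simple atomic_inc() in the header, outside this file. The extract below is illustrative only, showing roughly how its caller in the copy_page_range() path (copy_one_pte() in mm/memory.c) uses it while already holding the pte lock; it is not the real function.

/* Illustrative only: duplicating an existing pte mapping at fork time. */
static void example_dup_one_pte(struct vm_area_struct *vma, pte_t pte,
		unsigned long addr)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page) {
		get_page(page);
		page_dup_rmap(page, vma, addr);	/* anon rmap checked under CONFIG_DEBUG_VM */
	}
}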