Diffstat (limited to 'mm')

 -rw-r--r--  mm/memory.c |  6
 -rw-r--r--  mm/rmap.c   | 49
 2 files changed, 41 insertions, 14 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e249088908c4..d7ca7de10f4d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1498,7 +1498,7 @@ gotten:
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
 		lru_cache_add_active(new_page);
-		page_add_anon_rmap(new_page, vma, address);
+		page_add_new_anon_rmap(new_page, vma, address);
 
 		/* Free the old page.. */
 		new_page = old_page;
@@ -1978,7 +1978,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(page);
 		SetPageReferenced(page);
-		page_add_anon_rmap(page, vma, address);
+		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		/* Map the ZERO_PAGE - vm_page_prot is readonly */
 		page = ZERO_PAGE(address);
@@ -2109,7 +2109,7 @@ retry:
 	if (anon) {
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(new_page);
-		page_add_anon_rmap(new_page, vma, address);
+		page_add_new_anon_rmap(new_page, vma, address);
 	} else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
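All three converted call sites share the same shape: the fault path has just allocated the page, added it to the active LRU, and is installing its very first mapping, so no other task can already hold a mapping to it. A condensed, hypothetical model of that shared pattern follows (stub types and helpers, not the real kernel interfaces):

/*
 * Hypothetical, condensed model of the pattern shared by the three call
 * sites converted above (do_wp_page, do_anonymous_page, do_no_page).
 * The types and helpers below are stubs, not the real kernel interfaces.
 */
struct page { int dummy; };
struct vm_area_struct { int dummy; };

static void lru_cache_add_active(struct page *page) { (void)page; }
static void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	(void)page; (void)vma; (void)address;
}

/*
 * Each site installs a page it has only just allocated, so the page
 * cannot be mapped anywhere else yet: the "new" rmap variant, which
 * skips the atomic inc-and-test, is safe to use.
 */
static void install_new_anon_page(struct page *new_page,
	struct vm_area_struct *vma, unsigned long address)
{
	lru_cache_add_active(new_page);			/* make it reclaimable */
	page_add_new_anon_rmap(new_page, vma, address);	/* first and only mapping */
}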
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -435,6 +435,26 @@ int page_referenced(struct page *page, int is_locked)
 }
 
 /**
+ * page_set_anon_rmap - setup new anonymous rmap
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_set_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+
+	BUG_ON(!anon_vma);
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+
+	page->index = linear_page_index(vma, address);
+
+	inc_page_state(nr_mapped);
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
@@ -445,20 +465,27 @@ int page_referenced(struct page *page, int is_locked)
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		struct anon_vma *anon_vma = vma->anon_vma;
-
-		BUG_ON(!anon_vma);
-		anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-		page->mapping = (struct address_space *) anon_vma;
-
-		page->index = linear_page_index(vma, address);
-
-		inc_page_state(nr_mapped);
-	}
+	if (atomic_inc_and_test(&page->_mapcount))
+		__page_set_anon_rmap(page, vma, address);
 	/* else checking page index and mapping is racy */
 }
 
+/*
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+	__page_set_anon_rmap(page, vma, address);
+}
+
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
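The comment added above page_add_new_anon_rmap() is the heart of the change: a freshly allocated page starts with _mapcount at -1 and cannot be mapped by anyone else, so its first mapping can be recorded with a plain store instead of the atomic inc-and-test. The following userspace sketch (C11 stdatomic; the struct and helper names are invented for illustration, this is not kernel code) models that convention and checks that both paths leave a first-time-faulted page in the same state:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for struct page: only the fields this model needs. */
struct fake_page {
	atomic_int mapcount;	/* models page->_mapcount, starts at -1 */
	bool anon_rmap_set;	/* models page->mapping pointing at an anon_vma */
};

/* Models __page_set_anon_rmap(): the one-time anonymous rmap setup. */
static void set_anon_rmap(struct fake_page *page)
{
	page->anon_rmap_set = true;
}

/* Models page_add_anon_rmap(): valid for new and already-mapped pages. */
static void add_anon_rmap(struct fake_page *page)
{
	/* atomic_inc_and_test() is true only when the new value is zero. */
	if (atomic_fetch_add(&page->mapcount, 1) + 1 == 0)
		set_anon_rmap(page);
}

/* Models page_add_new_anon_rmap(): only legal on a brand-new page. */
static void add_new_anon_rmap(struct fake_page *page)
{
	/* No one else can see the page yet, so a plain store is enough. */
	atomic_store(&page->mapcount, 0);	/* -1 -> 0: one mapping */
	set_anon_rmap(page);
}

int main(void)
{
	struct fake_page old_path = { -1, false };
	struct fake_page new_path = { -1, false };

	add_anon_rmap(&old_path);	/* original code: atomic inc-and-test */
	add_new_anon_rmap(&new_path);	/* optimised code: plain store */

	/* Both paths must leave a first-time-faulted page identical. */
	assert(atomic_load(&old_path.mapcount) == 0);
	assert(atomic_load(&new_path.mapcount) == 0);
	assert(old_path.anon_rmap_set && new_path.anon_rmap_set);
	printf("mapcount after first mapping: %d\n",
	       atomic_load(&old_path.mapcount));
	return 0;
}

Built with e.g. cc -std=c11, both paths end with mapcount 0 and the anon rmap set; the patch's gain is that the page-fault paths touched in mm/memory.c above no longer need the inc-and-test for pages they have just allocated.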