author     David Woodhouse <dwmw2@infradead.org>  2007-01-17 18:34:51 -0500
committer  David Woodhouse <dwmw2@infradead.org>  2007-01-17 18:34:51 -0500
commit     9cdf083f981b8d37b3212400a359368661385099
tree       aa15a6a08ad87e650dea40fb59b3180bef0d345b  /mm/rmap.c
parent     e499e01d234a31d59679b7b1e1cf628d917ba49a
parent     a8b3485287731978899ced11f24628c927890e78

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm/rmap.c')

 mm/rmap.c | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)
@@ -47,6 +47,7 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
 
 #include <asm/tlbflush.h>
 
@@ -432,7 +433,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
-	pte_t *pte, entry;
+	pte_t *pte;
 	spinlock_t *ptl;
 	int ret = 0;
 
@@ -444,17 +445,18 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 	if (!pte)
 		goto out;
 
-	if (!pte_dirty(*pte) && !pte_write(*pte))
-		goto unlock;
+	if (pte_dirty(*pte) || pte_write(*pte)) {
+		pte_t entry;
 
-	entry = ptep_get_and_clear(mm, address, pte);
-	entry = pte_mkclean(entry);
-	entry = pte_wrprotect(entry);
-	ptep_establish(vma, address, pte, entry);
-	lazy_mmu_prot_update(entry);
-	ret = 1;
+		flush_cache_page(vma, address, pte_pfn(*pte));
+		entry = ptep_clear_flush(vma, address, pte);
+		entry = pte_wrprotect(entry);
+		entry = pte_mkclean(entry);
+		set_pte_at(mm, address, pte, entry);
+		lazy_mmu_prot_update(entry);
+		ret = 1;
+	}
 
-unlock:
 	pte_unmap_unlock(pte, ptl);
 out:
 	return ret;
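Taken together, the two page_mkclean_one() hunks replace the old ptep_get_and_clear()/ptep_establish() sequence with flush_cache_page() plus ptep_clear_flush() and a single set_pte_at(). The clear-and-flush step is atomic with respect to the hardware, so a dirty bit set by another CPU cannot be lost between reading and rewriting the PTE, and the explicit cache flush keeps virtually indexed caches coherent before the mapping goes read-only. For reference, the whole function as it reads after the merge; the vma_address()/page_check_address() lookup above the hunk is not part of this diff and is reconstructed from the surrounding 2.6.20 source, so treat those lines as an assumption:

/* page_mkclean_one() after the merge. The address/pte lookup is not
 * shown in the hunks above and is an assumption reconstructed from the
 * surrounding 2.6.20 source. */
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		/* Write back the D-cache before the page goes read-only;
		 * needed on virtually indexed caches. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		/* Atomically clear the PTE and flush the TLB, so a racing
		 * hardware dirty-bit update cannot slip in between the read
		 * and the rewrite below. */
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

Note that pte_wrprotect() and pte_mkclean() only manipulate the local snapshot in entry; the PTE itself is rewritten exactly once by set_pte_at(), so their relative order does not matter.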
@@ -489,6 +491,8 @@ int page_mkclean(struct page *page)
 		if (mapping)
 			ret = page_mkclean_file(mapping, page);
 	}
+	if (page_test_and_clear_dirty(page))
+		ret = 1;
 
 	return ret;
 }
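page_mkclean() now also consults page_test_and_clear_dirty(). On most architectures that helper is a constant 0, but on s390 the dirty state lives in the per-page storage key rather than in the PTEs, so cleaning only the page tables would miss it. A sketch of the resulting function; the lines above the hunk (the PageLocked check and the page_mapped() branch) are an assumption based on the surrounding source:

/* page_mkclean() after the merge; the first few lines are an
 * assumption reconstructed from the surrounding 2.6.20 source. */
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}
	/* Fold in dirty state kept outside the page tables (s390
	 * storage keys); a no-op returning 0 on other architectures. */
	if (page_test_and_clear_dirty(page))
		ret = 1;

	return ret;
}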
@@ -567,14 +571,20 @@ void page_add_file_rmap(struct page *page)
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page)
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
 		if (unlikely(page_mapcount(page) < 0)) {
 			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+			printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page));
 			printk (KERN_EMERG " page->flags = %lx\n", page->flags);
 			printk (KERN_EMERG " page->count = %x\n", page_count(page));
 			printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
+			print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
+			if (vma->vm_ops)
+				print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+			if (vma->vm_file && vma->vm_file->f_op)
+				print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
 			BUG();
 		}
 
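The new vma argument exists only to make this "Eeek" diagnostic more useful: with the vma at hand, the report can name the driver code behind the mapping via print_symbol(), which is why <linux/kallsyms.h> is now included at the top of the file. As a minimal, hedged illustration of print_symbol() itself (module name and message are mine, not from the patch), a throwaway module of this era could do:

/* Sketch of print_symbol(), the kallsyms helper the new diagnostics
 * use. Module name and message are hypothetical; built against a
 * 2.6-era tree. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

static int __init printsym_demo_init(void)
{
	/* Resolve this function's own address back to "name+off/size". */
	print_symbol(KERN_INFO "printsym_demo loaded, init = %s\n",
		     (unsigned long)printsym_demo_init);
	return 0;
}

static void __exit printsym_demo_exit(void)
{
}

module_init(printsym_demo_init);
module_exit(printsym_demo_exit);
MODULE_LICENSE("GPL");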
@@ -679,7 +689,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		dec_mm_counter(mm, file_rss);
 
 
-	page_remove_rmap(page);
+	page_remove_rmap(page, vma);
 	page_cache_release(page);
 
 out_unmap:
@@ -769,7 +779,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 		if (pte_dirty(pteval))
 			set_page_dirty(page);
 
-		page_remove_rmap(page);
+		page_remove_rmap(page, vma);
 		page_cache_release(page);
 		dec_mm_counter(mm, file_rss);
 		(*mapcount)--;
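Both unmap paths already hold the vma, so the signature change is free at these call sites. Any other caller of page_remove_rmap() must be updated the same way; a hypothetical out-of-tree caller (names mine, not from this patch) would change like this:

/* Hypothetical caller sketch: the only change is threading the vma
 * through to page_remove_rmap(). */
static void example_zap_page(struct vm_area_struct *vma, struct page *page)
{
	/* before this merge: page_remove_rmap(page); */
	page_remove_rmap(page, vma);
	page_cache_release(page);
}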