author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2011-05-23 04:24:39 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2011-05-23 04:24:31 -0400
commit     2d42552d1c1659b014851cf449ad2fe458509128 (patch)
tree       b9ef22867ce52e23b5249a7ad38637eec40363b8 /mm
parent     c26001d4e9133fe45e47eee18cfd826219e71fb9 (diff)
[S390] merge page_test_dirty and page_clear_dirty
The page_clear_dirty primitive always sets the default storage key
which resets the access control bits and the fetch protection bit.
That will surprise a KVM guest that sets non-zero access control
bits or the fetch protection bit. Merge page_test_dirty and
page_clear_dirty back to a single function and only clear the
dirty bit from the storage key.
In addition, move the functions page_test_and_clear_dirty and
page_test_and_clear_young to page.h where they belong. This
requires changing the parameter from a struct page * to a page
frame number.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
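For context: as described above, the merged primitive clears only the
change (dirty) bit of the storage key instead of writing the default key,
so a KVM guest's access-control and fetch-protection bits are preserved.
A minimal sketch of what such a pfn-based helper could look like on s390
follows; the helper names page_get_storage_key()/page_set_storage_key()
and the _PAGE_CHANGED bit are assumptions for illustration, not quoted
from this patch.

/*
 * Sketch only (assumed helpers): test and clear the change (dirty) bit
 * in the storage key while leaving the access-control (ACC) and
 * fetch-protection (F) bits untouched.
 */
static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
{
        unsigned char skey = page_get_storage_key(pfn << PAGE_SHIFT);

        if (!(skey & _PAGE_CHANGED))
                return 0;
        /* Write back the same key with only the change bit cleared. */
        page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
        return 1;
}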
Diffstat (limited to 'mm')
-rw-r--r--   mm/rmap.c   11
1 file changed, 4 insertions, 7 deletions
@@ -719,7 +719,7 @@ int page_referenced(struct page *page,
                         unlock_page(page);
         }
 out:
-        if (page_test_and_clear_young(page))
+        if (page_test_and_clear_young(page_to_pfn(page)))
                 referenced++;
 
         return referenced;
@@ -785,10 +785,8 @@ int page_mkclean(struct page *page)
                 struct address_space *mapping = page_mapping(page);
                 if (mapping) {
                         ret = page_mkclean_file(mapping, page);
-                        if (page_test_dirty(page)) {
-                                page_clear_dirty(page, 1);
-                                ret = 1;
-                        }
+                        if (page_test_and_clear_dirty(page_to_pfn(page), 1))
+                                ret = 1;
                 }
         }
 
@@ -981,10 +979,9 @@ void page_remove_rmap(struct page *page)
          * not if it's in swapcache - there might be another pte slot
          * containing the swap entry, but page not yet written to swap.
          */
-        if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
-                page_clear_dirty(page, 1);
-                set_page_dirty(page);
-        }
+        if ((!PageAnon(page) || PageSwapCache(page)) &&
+            page_test_and_clear_dirty(page_to_pfn(page), 1))
+                set_page_dirty(page);
         /*
          * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
          * and not charged by memcg for now.
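Similarly, the page_referenced() hunk above passes page_to_pfn(page) to
page_test_and_clear_young(). Under the same assumptions, a pfn-based
counterpart in page.h could simply defer to a helper that tests and
resets the referenced bit of the storage key (page_reset_referenced()
here is an assumed name, not quoted from this patch):

/* Sketch only: page_reset_referenced() is assumed to test and clear the
 * referenced bit of the storage key for the given physical address. */
static inline int page_test_and_clear_young(unsigned long pfn)
{
        return page_reset_referenced(pfn << PAGE_SHIFT);
}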