author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-08-01 10:39:12 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-08-01 10:39:30 -0400
commit    a4b526b3ba6353cd89a38e41da48ed83b0ead16f (patch)
tree      362842354bdcde59feede51cbeefc9b8833aacf7
parent    934b2857cc576ae53c92a66e63fce7ddcfa74691 (diff)
[S390] Optimize storage key operations for anon pages
For anonymous pages without a swap cache backing, the check in
page_remove_rmap for the physical dirty bit is unnecessary. The
instructions used to check and reset the dirty bit are expensive, and
removing the check noticeably speeds up process exit. In addition, the
clearing of the dirty bit in __SetPageUptodate is pointless as well.

With these two changes there is no storage key operation left for an
anonymous page that does not hit the swap space. A micro benchmark
which repeatedly executes an empty shell script gets about 5% faster.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
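To make the new condition easier to read outside the diff context: only
pages that can still be reached later (file-backed pages, or anonymous
pages sitting in the swap cache) need the hardware dirty bit migrated
into PG_dirty; a purely anonymous page on its final unmap can skip the
test. Below is a minimal, self-contained C sketch of that logic, not
the kernel code itself; the struct layout and the stubbed helpers are
illustrative assumptions, whereas in the kernel page_test_dirty() and
page_clear_dirty() are the s390 storage-key primitives and
set_page_dirty() marks PG_dirty.

	/*
	 * Sketch only: stand-ins for the kernel helpers, under the
	 * assumptions stated above.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct page {
		bool anon;	/* anonymous mapping? */
		bool swapcache;	/* backed by the swap cache? */
		bool key_dirty;	/* dirty bit in the s390 storage key */
		bool pg_dirty;	/* software PG_dirty flag */
	};

	static bool PageAnon(struct page *p)         { return p->anon; }
	static bool PageSwapCache(struct page *p)    { return p->swapcache; }
	static bool page_test_dirty(struct page *p)  { return p->key_dirty; }
	static void page_clear_dirty(struct page *p) { p->key_dirty = false; }
	static void set_page_dirty(struct page *p)   { p->pg_dirty = true; }

	static void last_unmap_dirty_check(struct page *page)
	{
		/*
		 * An anonymous page that never entered the swap cache is
		 * going away for good once the last mapping is removed:
		 * nothing will look at PG_dirty again, so the expensive
		 * storage-key test can be skipped entirely.
		 */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
	}

	int main(void)
	{
		struct page anon = { .anon = true,  .key_dirty = true };
		struct page file = { .anon = false, .key_dirty = true };

		last_unmap_dirty_check(&anon);
		last_unmap_dirty_check(&file);
		/* prints: anon: PG_dirty=0  file: PG_dirty=1 */
		printf("anon: PG_dirty=%d  file: PG_dirty=%d\n",
		       anon.pg_dirty, file.pg_dirty);
		return 0;
	}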
 include/linux/page-flags.h | 3 ---
 mm/rmap.c                  | 3 ++-
 2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 54590a9a103e..25aaccdb2f26 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -239,9 +239,6 @@ static inline void __SetPageUptodate(struct page *page)
 {
 	smp_wmb();
 	__set_bit(PG_uptodate, &(page)->flags);
-#ifdef CONFIG_S390
-	page_clear_dirty(page);
-#endif
 }
 
 static inline void SetPageUptodate(struct page *page)
diff --git a/mm/rmap.c b/mm/rmap.c
index 99bc3f9cd796..94a5246a3f98 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -667,7 +667,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	if (page_test_dirty(page)) {
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_dirty(page)) {
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}