aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKen Chen <kenchen@google.com>2007-02-10 04:43:15 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-02-11 13:51:19 -0500
commit767193253bbac889e176f90b6f17b7015f986551 (patch)
treea81c5c90d14c7892e6d7adb0432f4dff4ca3f906
parentbd8029b66069d29fd02c304599411ca9bb7fa38c (diff)
[PATCH] simplify shmem_aops.set_page_dirty() method
shmem backed file does not have page writeback, nor does it participate in backing device's dirty or writeback accounting. So using generic __set_page_dirty_nobuffers() for its .set_page_dirty aops method is a bit overkill. It unnecessarily prolongs shm unmap latency. For example, on a densely populated large shm segment (several GBs), the unmapping operation becomes painfully long. Because at unmap, kernel transfers dirty bit in PTE into page struct and to the radix tree tag. The operation of tagging the radix tree is particularly expensive because it has to traverse the tree from the root to the leaf node on every dirty page. What's bothering is that radix tree tag is used for page write back. However, shmem is memory backed and there is no page write back for such file system. And in the end, we spend all that time tagging radix tree and none of that fancy tagging will be used. So let's simplify it by introducing a new aops method __set_page_dirty_no_writeback, and this will speed up shm unmap. Signed-off-by: Ken Chen <kenchen@google.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/mm.h1
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/shmem.c2
3 files changed, 12 insertions, 1 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 26adfcc0d61..77a76101dcd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -785,6 +785,7 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
785extern void do_invalidatepage(struct page *page, unsigned long offset); 785extern void do_invalidatepage(struct page *page, unsigned long offset);
786 786
787int __set_page_dirty_nobuffers(struct page *page); 787int __set_page_dirty_nobuffers(struct page *page);
788int __set_page_dirty_no_writeback(struct page *page);
788int redirty_page_for_writepage(struct writeback_control *wbc, 789int redirty_page_for_writepage(struct writeback_control *wbc,
789 struct page *page); 790 struct page *page);
790int FASTCALL(set_page_dirty(struct page *page)); 791int FASTCALL(set_page_dirty(struct page *page));
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index be0efbde499..438833cbbca 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -737,6 +737,16 @@ int write_one_page(struct page *page, int wait)
737EXPORT_SYMBOL(write_one_page); 737EXPORT_SYMBOL(write_one_page);
738 738
739/* 739/*
740 * For address_spaces which do not use buffers nor write back.
741 */
742int __set_page_dirty_no_writeback(struct page *page)
743{
744 if (!PageDirty(page))
745 SetPageDirty(page);
746 return 0;
747}
748
749/*
740 * For address_spaces which do not use buffers. Just tag the page as dirty in 750 * For address_spaces which do not use buffers. Just tag the page as dirty in
741 * its radix tree. 751 * its radix tree.
742 * 752 *
diff --git a/mm/shmem.c b/mm/shmem.c
index 70da7a0981b..b38e1716927 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2316,7 +2316,7 @@ static void destroy_inodecache(void)
2316 2316
2317static const struct address_space_operations shmem_aops = { 2317static const struct address_space_operations shmem_aops = {
2318 .writepage = shmem_writepage, 2318 .writepage = shmem_writepage,
2319 .set_page_dirty = __set_page_dirty_nobuffers, 2319 .set_page_dirty = __set_page_dirty_no_writeback,
2320#ifdef CONFIG_TMPFS 2320#ifdef CONFIG_TMPFS
2321 .prepare_write = shmem_prepare_write, 2321 .prepare_write = shmem_prepare_write,
2322 .commit_write = simple_commit_write, 2322 .commit_write = simple_commit_write,