author		Nick Piggin <npiggin@suse.de>	2008-07-25 22:45:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-26 15:00:06 -0400
commit		e286781d5f2e9c846e012a39653a166e9d31777d (patch)
tree		14958fe6d8f3e0459c96c68b3034ea2433ab85ac /mm/swap_state.c
parent		47feff2c8eefe85099f87c43d3096855f0085ca0 (diff)
mm: speculative page references
If we can be sure that elevating the page_count on a pagecache page will
pin it, we can speculatively run this operation, and subsequently check to
see if we hit the right page rather than relying on holding a lock or
otherwise pinning a reference to the page.
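In outline, the lookup side then works like this (a minimal sketch of the pattern only, not the code this patch adds; lookup_slot() is a hypothetical stand-in for the radix-tree lookup, and locking/RCU details are omitted — get_page_unless_zero() is the kernel's existing helper for the pin attempt, shown further below):

/*
 * Sketch of the speculative-get pattern (illustrative only).
 * lookup_slot() is a hypothetical stand-in for the pagecache
 * radix-tree lookup; the entry point this patch actually adds
 * is page_cache_get_speculative().
 */
static struct page *find_get_page_sketch(struct address_space *mapping,
					 unsigned long index)
{
	struct page *page;

repeat:
	page = lookup_slot(mapping, index);	/* hypothetical helper */
	if (!page)
		return NULL;

	/* Take the reference first; fails if the page is mid-free. */
	if (!get_page_unless_zero(page))
		goto repeat;

	/* Verify we pinned the page we looked up, not a reused one. */
	if (page != lookup_slot(mapping, index)) {
		page_cache_release(page);
		goto repeat;
	}
	return page;
}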
This can be done if get_page/put_page behave consistently throughout the
whole tree (i.e. if we "get" the page after it has been used for something
else, we must still be able to free it with a put_page).
There is, however, a period where the count behaves differently: when the
page is free, or when it is a constituent page of a compound page. We need
an atomic_inc_not_zero operation to ensure we don't try to grab the page
in either case.
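Such an operation exists as get_page_unless_zero(); stripped of its debugging checks it is essentially:

/*
 * Take a reference only if the page still has a non-zero count,
 * so a free (or freeing) page can never be resurrected.  Returns
 * non-zero if the reference was taken.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}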
This patch introduces the core locking protocol to the pagecache (i.e. it
adds page_cache_get_speculative, and tweaks some update-side code to make
it work).
Thanks to Hugh for pointing out an improvement to the algorithm: set
page_count to zero when we have control of all references, in order to
hold off speculative getters.
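That idea can be sketched as a pair of freeze/unfreeze helpers (simplified here; the patch introduces helpers along these lines, with additional debugging checks):

/*
 * With the updater holding what should be the only 'count'
 * references, atomically swap the refcount to zero.  Concurrent
 * atomic_inc_not_zero() callers now fail, so the updater has
 * exclusive control of the page until it unfreezes it.
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	return atomic_cmpxchg(&page->_count, count, 0) == count;
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	atomic_set(&page->_count, count);
}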
[kamezawa.hiroyu@jp.fujitsu.com: fix migration_entry_wait()]
[hugh@veritas.com: fix add_to_page_cache]
[akpm@linux-foundation.org: repair a comment]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--	mm/swap_state.c	17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d8aadaf2a0ba..3e3381d6c7ee 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -64,7 +64,7 @@ void show_swap_cache_info(void)
 }
 
 /*
- * add_to_swap_cache resembles add_to_page_cache on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
@@ -76,19 +76,26 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	BUG_ON(PagePrivate(page));
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
+		page_cache_get(page);
+		SetPageSwapCache(page);
+		set_page_private(page, entry.val);
+
 		write_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
-		if (!error) {
-			page_cache_get(page);
-			SetPageSwapCache(page);
-			set_page_private(page, entry.val);
+		if (likely(!error)) {
 			total_swapcache_pages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
 			INC_CACHE_INFO(add_total);
 		}
 		write_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
+
+		if (unlikely(error)) {
+			set_page_private(page, 0UL);
+			ClearPageSwapCache(page);
+			page_cache_release(page);
+		}
 	}
 	return error;
 }
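The reordering above is this protocol at work in the swap cache: the page is fully set up as a swapcache page (reference taken, PG_swapcache set, page->private pointing at the swap entry) before it becomes visible in the radix tree, so a speculative getter that finds it there never sees a half-initialized page; if the insert fails, that setup is simply undone after the tree lock is dropped.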