author		Huang Ying <ying.huang@intel.com>	2016-10-07 20:00:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:28 -0400
commit		f6ab1f7f6b2d8e48c5fc47746a67363b20d79a1d (patch)
tree		4a4891c1882f4cffb2b78ddcb0069ea069e5c439 /mm/swap_state.c
parent		87744ab3832b83ba71b931f86f9cfdb000d07da5 (diff)
mm, swap: use offset of swap entry as key of swap cache
This patch improves the performance of swap cache operations when the type of
the swap device is not 0.  Originally, the whole swap entry value is used as
the key of the swap cache, even though there is one radix tree per swap
device.  If the type of the swap device is not 0, the height of the radix
tree of the swap cache is increased unnecessarily, especially on 64-bit
architectures.  For example, for a 1GB swap device on x86_64, the height of
the radix tree of the swap cache is 11; if the offset of the swap entry is
used as the key instead, the height is only 4.  The extra height causes
unnecessary radix tree descents and a larger cache footprint.

This patch reduces the height of the radix tree of the swap cache by using
the offset of the swap entry, instead of the whole swap entry value, as the
key of the swap cache.

In a 32-process sequential swap-out test on a Xeon E5 v3 system with a RAM
disk as swap, the lock contention on the swap cache spinlock is reduced from
20.15% to 12.19% when the type of the swap device is 1.

Using the whole swap entry as the key:

perf-profile.calltrace.cycles-pp._raw_spin_lock_irq.__add_to_swap_cache.add_to_swap_cache.add_to_swap.shrink_page_list: 10.37
perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.__remove_mapping.shrink_page_list.shrink_inactive_list.shrink_node_memcg: 9.78

Using the swap offset as the key:

perf-profile.calltrace.cycles-pp._raw_spin_lock_irq.__add_to_swap_cache.add_to_swap_cache.add_to_swap.shrink_page_list: 6.25
perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.__remove_mapping.shrink_page_list.shrink_inactive_list.shrink_node_memcg: 5.94

Link: http://lkml.kernel.org/r/1473270649-27229-1-git-send-email-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Aaron Lu <aaron.lu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
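To make the key-size argument concrete, the following standalone sketch (plain userspace C, not kernel code) models the swp_entry_t encoding used by swp_entry()/swp_offset() in include/linux/swapops.h: the device type sits in the high bits of entry.val, so for any non-zero type the whole value is a very large radix-tree key, while the offset alone stays small. The 5-bit type width and the *_model helper names are assumptions made for this illustration.

/*
 * Illustrative sketch only, not kernel code.  It mimics the swp_entry_t
 * encoding to show why keying the swap cache by swp_offset(entry) instead
 * of entry.val keeps the radix tree shallow.  The 5-bit type field width
 * (cf. MAX_SWAPFILES_SHIFT) is an assumption for this example.
 */
#include <stdio.h>

#define TYPE_BITS	5			/* assumed width of the type field */
#define TYPE_SHIFT	(64 - TYPE_BITS)	/* type lives in the high bits */

typedef struct { unsigned long long val; } swp_entry_model;

/* Pack swap device type and page offset into one entry value. */
static swp_entry_model swp_entry_model_make(unsigned type, unsigned long long offset)
{
	swp_entry_model e = { ((unsigned long long)type << TYPE_SHIFT) | offset };
	return e;
}

/* Recover the offset by stripping the type bits. */
static unsigned long long swp_offset_model(swp_entry_model e)
{
	return e.val & ((1ULL << TYPE_SHIFT) - 1);
}

int main(void)
{
	/* 1GB swap device with 4KB pages: offsets 0..262143, device type 1 */
	swp_entry_model e = swp_entry_model_make(1, 262143);

	/*
	 * The whole-entry key has bit 59 set (~2^59), so a radix tree keyed
	 * by it must span a huge index space; the offset-only key fits in
	 * 18 bits, so the tree stays shallow.
	 */
	printf("whole entry key: %llu\n", e.val);
	printf("offset-only key: %llu\n", swp_offset_model(e));
	return 0;
}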
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--	mm/swap_state.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8679c997eab6..35d7e0ee1c77 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -94,7 +94,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 	address_space = swap_address_space(entry);
 	spin_lock_irq(&address_space->tree_lock);
 	error = radix_tree_insert(&address_space->page_tree,
-					entry.val, page);
+					swp_offset(entry), page);
 	if (likely(!error)) {
 		address_space->nrpages++;
 		__inc_node_page_state(page, NR_FILE_PAGES);
@@ -145,7 +145,7 @@ void __delete_from_swap_cache(struct page *page)
 
 	entry.val = page_private(page);
 	address_space = swap_address_space(entry);
-	radix_tree_delete(&address_space->page_tree, page_private(page));
+	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	address_space->nrpages--;
@@ -283,7 +283,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 {
 	struct page *page;
 
-	page = find_get_page(swap_address_space(entry), entry.val);
+	page = find_get_page(swap_address_space(entry), swp_offset(entry));
 
 	if (page) {
 		INC_CACHE_INFO(find_success);
@@ -310,7 +310,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * called after lookup_swap_cache() failed, re-calling
 		 * that would confuse statistics.
 		 */
-		found_page = find_get_page(swapper_space, entry.val);
+		found_page = find_get_page(swapper_space, swp_offset(entry));
 		if (found_page)
 			break;
 