author      KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>    2009-06-16 18:32:52 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>        2009-06-16 22:47:42 -0400
commit      cb4b86ba47bb0937b71fb825b3ed88adf7a190f0 (patch)
tree        4b8528ba914a315e5857e7fe2a6e7d415f2e6650 /mm
parent      6837765963f1723e80ca97b1fae660f3a60d77df (diff)
mm: add swap cache interface for swap reference
In a following patch, swap cache usage will be recorded in swap_map.  This
patch makes the interface changes needed for that.

Two interfaces:

- swapcache_prepare()
- swapcache_free()

are added for taking/releasing a swap-cache reference on existing swap
entries; the underlying implementation is not changed by this patch.  With
the addition of swapcache_free(), memcg's uncharge hook is moved inside
swapcache_free(), which is better than scattering the hook across call sites.
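To make the intended calling convention concrete, here is a minimal sketch of a
swap-cache insertion path built on the new pair.  It is not part of this patch:
the wrapper name and the -ENOENT return are illustrative assumptions; only
swapcache_prepare(), swapcache_free() and add_to_swap_cache() are existing
kernel interfaces.

/*
 * Illustrative sketch only (not from this patch): pin an existing swap
 * entry for the swap cache before inserting the page, and drop the
 * reference again if the insertion fails.  The wrapper name is made up.
 */
#include <linux/errno.h>
#include <linux/swap.h>

static int example_add_to_swap_cache(struct page *page, swp_entry_t entry,
                                     gfp_t gfp_mask)
{
        int err;

        /* Take a swap-cache reference; fails if the entry was freed meanwhile. */
        if (!swapcache_prepare(entry))
                return -ENOENT;

        err = add_to_swap_cache(page, entry, gfp_mask);
        if (err)
                /* The page never reached the cache, so no memcg uncharge is needed. */
                swapcache_free(entry, NULL);

        return err;
}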
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@in.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/shmem.c      |  2
-rw-r--r--  mm/swap_state.c | 11
-rw-r--r--  mm/swapfile.c   | 19
-rw-r--r--  mm/vmscan.c     |  3
4 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0132fbd45a23..47ab19182287 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1097,7 +1097,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
         shmem_swp_unmap(entry);
 unlock:
         spin_unlock(&info->lock);
-        swap_free(swap);
+        swapcache_free(swap, NULL);
 redirty:
         set_page_dirty(page);
         if (wbc->for_reclaim)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1416e7e9e02d..19bdf3017a9e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -162,11 +162,11 @@ int add_to_swap(struct page *page)
                         return 1;
                 case -EEXIST:
                         /* Raced with "speculative" read_swap_cache_async */
-                        swap_free(entry);
+                        swapcache_free(entry, NULL);
                         continue;
                 default:
                         /* -ENOMEM radix-tree allocation failure */
-                        swap_free(entry);
+                        swapcache_free(entry, NULL);
                         return 0;
                 }
         }
@@ -188,8 +188,7 @@ void delete_from_swap_cache(struct page *page)
         __delete_from_swap_cache(page);
         spin_unlock_irq(&swapper_space.tree_lock);
 
-        mem_cgroup_uncharge_swapcache(page, entry);
-        swap_free(entry);
+        swapcache_free(entry, page);
         page_cache_release(page);
 }
 
@@ -293,7 +292,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 /*
                  * Swap entry may have been freed since our caller observed it.
                  */
-                if (!swap_duplicate(entry))
+                if (!swapcache_prepare(entry))
                         break;
 
                 /*
@@ -317,7 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 }
                 ClearPageSwapBacked(new_page);
                 __clear_page_locked(new_page);
-                swap_free(entry);
+                swapcache_free(entry, NULL);
         } while (err != -ENOMEM);
 
         if (new_page)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 312fafe0ab6e..3187079903fd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -510,6 +510,16 @@ void swap_free(swp_entry_t entry)
 }
 
 /*
+ * Called after dropping swapcache to decrease refcnt to swap entries.
+ */
+void swapcache_free(swp_entry_t entry, struct page *page)
+{
+        if (page)
+                mem_cgroup_uncharge_swapcache(page, entry);
+        return swap_free(entry);
+}
+
+/*
  * How many references to page are currently swapped out?
  */
 static inline int page_swapcount(struct page *page)
@@ -1979,6 +1989,15 @@ bad_file:
         goto out;
 }
 
+/*
+ * Called when allocating swap cache for exising swap entry,
+ */
+int swapcache_prepare(swp_entry_t entry)
+{
+        return swap_duplicate(entry);
+}
+
+
 struct swap_info_struct *
 get_swap_info_struct(unsigned type)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2c4b945b011f..52339dd7bf85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -470,8 +470,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                 swp_entry_t swap = { .val = page_private(page) };
                 __delete_from_swap_cache(page);
                 spin_unlock_irq(&mapping->tree_lock);
-                mem_cgroup_uncharge_swapcache(page, swap);
-                swap_free(swap);
+                swapcache_free(swap, page);
         } else {
                 __remove_from_page_cache(page);
                 spin_unlock_irq(&mapping->tree_lock);
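The diffstat above is limited to 'mm', so the accompanying header change is not
shown here.  Assuming the usual pattern for new swap helpers, the declarations
that callers rely on would live in include/linux/swap.h and look roughly like
this (a sketch of the expected prototypes, not the verbatim header hunk):

/* Sketch of the expected include/linux/swap.h additions (not shown in the
 * 'mm'-only diff above); exact placement and any #ifdef context are assumed. */
extern int swapcache_prepare(swp_entry_t entry);
extern void swapcache_free(swp_entry_t entry, struct page *page);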