diff options
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--   mm/swap_state.c | 35 +++++++++++++++++++----------------
1 file changed, 19 insertions(+), 16 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3353c9029cef..3ecea98ecb45 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/backing-dev.h> | 17 | #include <linux/backing-dev.h> |
18 | #include <linux/pagevec.h> | 18 | #include <linux/pagevec.h> |
19 | #include <linux/migrate.h> | 19 | #include <linux/migrate.h> |
20 | #include <linux/page_cgroup.h> | ||
20 | 21 | ||
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
22 | 23 | ||
@@ -72,10 +73,10 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) | |||
72 | { | 73 | { |
73 | int error; | 74 | int error; |
74 | 75 | ||
75 | BUG_ON(!PageLocked(page)); | 76 | VM_BUG_ON(!PageLocked(page)); |
76 | BUG_ON(PageSwapCache(page)); | 77 | VM_BUG_ON(PageSwapCache(page)); |
77 | BUG_ON(PagePrivate(page)); | 78 | VM_BUG_ON(!PageSwapBacked(page)); |
78 | BUG_ON(!PageSwapBacked(page)); | 79 | |
79 | error = radix_tree_preload(gfp_mask); | 80 | error = radix_tree_preload(gfp_mask); |
80 | if (!error) { | 81 | if (!error) { |
81 | page_cache_get(page); | 82 | page_cache_get(page); |
@@ -108,10 +109,11 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) | |||
108 | */ | 109 | */ |
109 | void __delete_from_swap_cache(struct page *page) | 110 | void __delete_from_swap_cache(struct page *page) |
110 | { | 111 | { |
111 | BUG_ON(!PageLocked(page)); | 112 | swp_entry_t ent = {.val = page_private(page)}; |
112 | BUG_ON(!PageSwapCache(page)); | 113 | |
113 | BUG_ON(PageWriteback(page)); | 114 | VM_BUG_ON(!PageLocked(page)); |
114 | BUG_ON(PagePrivate(page)); | 115 | VM_BUG_ON(!PageSwapCache(page)); |
116 | VM_BUG_ON(PageWriteback(page)); | ||
115 | 117 | ||
116 | radix_tree_delete(&swapper_space.page_tree, page_private(page)); | 118 | radix_tree_delete(&swapper_space.page_tree, page_private(page)); |
117 | set_page_private(page, 0); | 119 | set_page_private(page, 0); |
@@ -119,6 +121,7 @@ void __delete_from_swap_cache(struct page *page) | |||
119 | total_swapcache_pages--; | 121 | total_swapcache_pages--; |
120 | __dec_zone_page_state(page, NR_FILE_PAGES); | 122 | __dec_zone_page_state(page, NR_FILE_PAGES); |
121 | INC_CACHE_INFO(del_total); | 123 | INC_CACHE_INFO(del_total); |
124 | mem_cgroup_uncharge_swapcache(page, ent); | ||
122 | } | 125 | } |
123 | 126 | ||
124 | /** | 127 | /** |
@@ -129,13 +132,13 @@ void __delete_from_swap_cache(struct page *page) | |||
129 | * Allocate swap space for the page and add the page to the | 132 | * Allocate swap space for the page and add the page to the |
130 | * swap cache. Caller needs to hold the page lock. | 133 | * swap cache. Caller needs to hold the page lock. |
131 | */ | 134 | */ |
132 | int add_to_swap(struct page * page, gfp_t gfp_mask) | 135 | int add_to_swap(struct page *page) |
133 | { | 136 | { |
134 | swp_entry_t entry; | 137 | swp_entry_t entry; |
135 | int err; | 138 | int err; |
136 | 139 | ||
137 | BUG_ON(!PageLocked(page)); | 140 | VM_BUG_ON(!PageLocked(page)); |
138 | BUG_ON(!PageUptodate(page)); | 141 | VM_BUG_ON(!PageUptodate(page)); |
139 | 142 | ||
140 | for (;;) { | 143 | for (;;) { |
141 | entry = get_swap_page(); | 144 | entry = get_swap_page(); |
@@ -154,7 +157,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask) | |||
154 | * Add it to the swap cache and mark it dirty | 157 | * Add it to the swap cache and mark it dirty |
155 | */ | 158 | */ |
156 | err = add_to_swap_cache(page, entry, | 159 | err = add_to_swap_cache(page, entry, |
157 | gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN); | 160 | __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN); |
158 | 161 | ||
159 | switch (err) { | 162 | switch (err) { |
160 | case 0: /* Success */ | 163 | case 0: /* Success */ |
@@ -196,14 +199,14 @@ void delete_from_swap_cache(struct page *page) | |||
196 | * If we are the only user, then try to free up the swap cache. | 199 | * If we are the only user, then try to free up the swap cache. |
197 | * | 200 | * |
198 | * Its ok to check for PageSwapCache without the page lock | 201 | * Its ok to check for PageSwapCache without the page lock |
199 | * here because we are going to recheck again inside | 202 | * here because we are going to recheck again inside |
200 | * exclusive_swap_page() _with_ the lock. | 203 | * try_to_free_swap() _with_ the lock. |
201 | * - Marcelo | 204 | * - Marcelo |
202 | */ | 205 | */ |
203 | static inline void free_swap_cache(struct page *page) | 206 | static inline void free_swap_cache(struct page *page) |
204 | { | 207 | { |
205 | if (PageSwapCache(page) && trylock_page(page)) { | 208 | if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { |
206 | remove_exclusive_swap_page(page); | 209 | try_to_free_swap(page); |
207 | unlock_page(page); | 210 | unlock_page(page); |
208 | } | 211 | } |
209 | } | 212 | } |