 mm/shmem.c      |  4 +
 mm/swap_state.c | 73 ++++++++++++++++++++++-------------------
 2 files changed, 40 insertions(+), 37 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index bd20f8bb02aa..376d8f0a8f6b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1097,6 +1097,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
         shmem_swp_unmap(entry);
 unlock:
         spin_unlock(&info->lock);
+        /*
+         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+         * clear the SWAP_HAS_CACHE flag.
+         */
         swapcache_free(swap, NULL);
 redirty:
         set_page_dirty(page);
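
The comment added above encodes an ownership rule rather than a return-value check: shmem_writepage() holds the SWAP_HAS_CACHE reference for the entry it just allocated, so the failure path may release it unconditionally. A minimal userspace sketch of that claim/insert/roll-back shape (all names below are hypothetical stand-ins, not the kernel's):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Hypothetical model of one swap-cache slot: "claimed" stands in for
 * SWAP_HAS_CACHE, "page" for the radix-tree slot. */
struct slot {
        bool claimed;
        const char *page;
};

/* Analogue of swapcache_free() on the error path: drop the claim that
 * was taken when the entry was allocated. */
static void slot_release(struct slot *s)
{
        s->claimed = false;
}

/* The insert can fail only for lack of memory; the claim taken earlier
 * guarantees no other context populated the slot behind our back. */
static int slot_insert(struct slot *s, const char *page, bool simulate_oom)
{
        if (simulate_oom)
                return -ENOMEM;
        s->page = page;
        return 0;
}

int main(void)
{
        struct slot s = { .claimed = true, .page = NULL }; /* we own it */
        int err = slot_insert(&s, "page", true);

        if (err)                        /* only -ENOMEM, never -EEXIST */
                slot_release(&s);       /* unconditional rollback is safe */

        printf("err=%d claimed=%d\n", err, (int)s.claimed);
        return 0;
}
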
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b076a1a5a0aa..6d1daeb1cb4a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -92,6 +92,12 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
         spin_unlock_irq(&swapper_space.tree_lock);
 
         if (unlikely(error)) {
+                /*
+                 * Only the context which has set the SWAP_HAS_CACHE flag
+                 * would call add_to_swap_cache().
+                 * So add_to_swap_cache() doesn't return -EEXIST.
+                 */
+                VM_BUG_ON(error == -EEXIST);
                 set_page_private(page, 0UL);
                 ClearPageSwapCache(page);
                 page_cache_release(page);
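
The new VM_BUG_ON() is worth spelling out: SWAP_HAS_CACHE acts as an exclusive ticket taken before any insertion, so two contexts can never race to populate the same radix-tree slot. A compilable model of that claim step, assuming compare-and-swap semantics (claim() and has_cache are invented names, not kernel APIs):

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

/* Stand-in for the per-entry SWAP_HAS_CACHE bit. */
static atomic_bool has_cache;

/* Hypothetical model of the claim step: atomically take the exclusive
 * right to insert a page at this swap entry.  Exactly one caller wins. */
static bool claim(void)
{
        bool expected = false;
        return atomic_compare_exchange_strong(&has_cache, &expected, true);
}

int main(void)
{
        /* Two would-be inserters contend for the same entry... */
        bool first = claim();
        bool second = claim();

        /* ...but at most one holds the flag, so the winner's radix-tree
         * insert can never collide with another page: there is no
         * -EEXIST case left for __add_to_swap_cache() to report. */
        printf("first=%d second=%d\n", (int)first, (int)second);
        return 0;
}
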
@@ -146,38 +152,34 @@ int add_to_swap(struct page *page)
         VM_BUG_ON(!PageLocked(page));
         VM_BUG_ON(!PageUptodate(page));
 
-        for (;;) {
-                entry = get_swap_page();
-                if (!entry.val)
-                        return 0;
-
-                /*
-                 * Radix-tree node allocations from PF_MEMALLOC contexts could
-                 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
-                 * stops emergency reserves from being allocated.
-                 *
-                 * TODO: this could cause a theoretical memory reclaim
-                 * deadlock in the swap out path.
-                 */
-                /*
-                 * Add it to the swap cache and mark it dirty
-                 */
-                err = add_to_swap_cache(page, entry,
-                                __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-
-                switch (err) {
-                case 0:                         /* Success */
-                        SetPageDirty(page);
-                        return 1;
-                case -EEXIST:
-                        /* Raced with "speculative" read_swap_cache_async */
-                        swapcache_free(entry, NULL);
-                        continue;
-                default:
-                        /* -ENOMEM radix-tree allocation failure */
-                        swapcache_free(entry, NULL);
-                        return 0;
-                }
-        }
+        entry = get_swap_page();
+        if (!entry.val)
+                return 0;
+
+        /*
+         * Radix-tree node allocations from PF_MEMALLOC contexts could
+         * completely exhaust the page allocator.  __GFP_NOMEMALLOC
+         * stops emergency reserves from being allocated.
+         *
+         * TODO: this could cause a theoretical memory reclaim
+         * deadlock in the swap out path.
+         */
+        /*
+         * Add it to the swap cache and mark it dirty
+         */
+        err = add_to_swap_cache(page, entry,
+                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+
+        if (!err) {     /* Success */
+                SetPageDirty(page);
+                return 1;
+        } else {        /* -ENOMEM radix-tree allocation failure */
+                /*
+                 * add_to_swap_cache() doesn't return -EEXIST, so we can
+                 * safely clear the SWAP_HAS_CACHE flag.
+                 */
+                swapcache_free(entry, NULL);
+                return 0;
+        }
 }
 
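
With -EEXIST gone, the old for (;;)/switch retry machinery had only one live path left, which is why the hunk above collapses it to straight-line code. A distilled model of the new two-outcome flow (get_entry() and cache_insert() are hypothetical stand-ins, not kernel APIs; the insert here simulates an allocation failure):

#include <stdio.h>
#include <errno.h>

static int get_entry(void)
{
        return 42;              /* 0 would mean no swap space free */
}

static int cache_insert(int entry)
{
        (void)entry;
        return -ENOMEM;         /* simulated radix-tree alloc failure */
}

static int add_to_swap_model(void)
{
        int entry = get_entry();

        if (!entry)
                return 0;       /* no swap space left */

        int err = cache_insert(entry);
        if (!err)
                return 1;       /* success: page now cached and dirty */

        /* err can only be -ENOMEM: with no -EEXIST there is nothing
         * to retry, so no loop is needed. */
        return 0;
}

int main(void)
{
        printf("add_to_swap_model() = %d\n", add_to_swap_model());
        return 0;
}
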
@@ -318,14 +320,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                         break;
                 }
 
-                /*
-                 * Associate the page with swap entry in the swap cache.
-                 * May fail (-EEXIST) if there is already a page associated
-                 * with this entry in the swap cache: added by a racing
-                 * read_swap_cache_async, or add_to_swap or shmem_writepage
-                 * re-using the just freed swap entry for an existing page.
-                 * May fail (-ENOMEM) if radix-tree node allocation failed.
-                 */
+                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                 __set_page_locked(new_page);
                 SetPageSwapBacked(new_page);
                 err = __add_to_swap_cache(new_page, entry);
@@ -341,6 +336,10 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 radix_tree_preload_end();
                 ClearPageSwapBacked(new_page);
                 __clear_page_locked(new_page);
+                /*
+                 * __add_to_swap_cache() doesn't return -EEXIST, so we can
+                 * safely clear the SWAP_HAS_CACHE flag.
+                 */
                 swapcache_free(entry, NULL);
         } while (err != -ENOMEM);
 
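
The do/while keeps only one exit condition because, presumably, any -EEXIST race is now resolved earlier in the loop, when the entry is claimed, rather than at insertion time; __add_to_swap_cache() itself can only fail with -ENOMEM. A small model of that retry-until-hard-failure shape (try_once() is invented for illustration: it loses the claim race once, then succeeds):

#include <stdio.h>
#include <errno.h>

static int try_once(int attempt)
{
        if (attempt == 0)
                return -EEXIST;         /* lost the race: back off, retry */
        return 0;                       /* claimed and inserted */
}

int main(void)
{
        int err, attempt = 0;

        /* Keep retrying transient outcomes; only a hard allocation
         * failure (-ENOMEM) abandons the read with no page. */
        do {
                err = try_once(attempt++);
        } while (err && err != -ENOMEM);

        printf("attempts=%d err=%d\n", attempt, err);
        return 0;
}
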