author		Hugh Dickins <hugh@veritas.com>	2008-02-05 01:28:42 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:14 -0500
commit		02098feaa42b2e0087fbbe6c6ab9a23e4653b16a (patch)
tree		494eaf13f204c9384d4316202fd76cd1b5d960ad /mm/swap_state.c
parent		46017e954826ac59e91df76341a3f76b45467847 (diff)
swapin needs gfp_mask for loop on tmpfs
Building in a filesystem on a loop device on a tmpfs file can hang when swapping, the loop thread caught in that infamous throttle_vm_writeout.

In theory this is a long standing problem, which I've either never seen in practice, or long ago suppressed the recollection, after discounting my load and my tmpfs size as unrealistically high. But now, with the new aops, it has become easy to hang on one machine.

Loop used to grab_cache_page before the old prepare_write to tmpfs, which seems to have been enough to free up some memory for any swapin needed; but the new write_begin lets tmpfs find or allocate the page (much nicer, since grab_cache_page missed tmpfs pages in swapcache). When allocating a fresh page, tmpfs respects loop's mapping_gfp_mask, which has __GFP_IO|__GFP_FS stripped off, and throttle_vm_writeout is designed to break out when __GFP_IO or __GFP_FS is unset; but when tmpfs swaps in, read_swap_cache_async allocates with GFP_HIGHUSER_MOVABLE regardless of the mapping_gfp_mask - hence the hang.

So, pass gfp_mask down the line from shmem_getpage to shmem_swapin to swapin_readahead to read_swap_cache_async to add_to_swap_cache.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
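The fix is pure plumbing: one gfp_t value travels from shmem_getpage down to the allocation sites that previously hard-coded GFP_HIGHUSER_MOVABLE. Below is a standalone model of that plumbing, compilable on its own; the function names mirror the kernel's, but the bodies are simplified stand-ins and the flag bit values are placeholders rather than the real <linux/gfp.h> definitions. (The gfp_mask & GFP_KERNEL detail inside add_to_swap_cache gets a separate note after the diff.)

#include <stdio.h>

typedef unsigned int gfp_t;

/* Placeholder flag bits; the real ones live in <linux/gfp.h>. */
#define __GFP_IO		((gfp_t)0x1)
#define __GFP_FS		((gfp_t)0x2)
#define __GFP_HIGHMEM		((gfp_t)0x4)
#define __GFP_MOVABLE		((gfp_t)0x8)
#define GFP_HIGHUSER_MOVABLE	(__GFP_IO | __GFP_FS | __GFP_HIGHMEM | __GFP_MOVABLE)

/* Stand-in for read_swap_cache_async(): before the patch it allocated with a
 * hard-coded GFP_HIGHUSER_MOVABLE; now it uses whatever mask it is given. */
static void read_swap_cache_async_model(gfp_t gfp_mask)
{
	printf("swapin page allocated with gfp %#x (IO %s, FS %s)\n",
	       gfp_mask,
	       (gfp_mask & __GFP_IO) ? "allowed" : "forbidden",
	       (gfp_mask & __GFP_FS) ? "allowed" : "forbidden");
}

/* Stand-ins for the intermediate hops named in the changelog. */
static void swapin_readahead_model(gfp_t gfp_mask) { read_swap_cache_async_model(gfp_mask); }
static void shmem_swapin_model(gfp_t gfp_mask)     { swapin_readahead_model(gfp_mask); }
static void shmem_getpage_model(gfp_t mapping_gfp) { shmem_swapin_model(mapping_gfp); }

int main(void)
{
	/* loop strips __GFP_IO|__GFP_FS from the tmpfs file's mapping_gfp_mask;
	 * with the patch, that restriction now reaches the swapin allocation. */
	shmem_getpage_model(GFP_HIGHUSER_MOVABLE & ~(__GFP_IO | __GFP_FS));
	return 0;
}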
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--	mm/swap_state.c	18
1 files changed, 9 insertions, 9 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 668a80422630..e7875642e2cf 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -96,7 +96,8 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 	return error;
 }
 
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
+static int add_to_swap_cache(struct page *page, swp_entry_t entry,
+							gfp_t gfp_mask)
 {
 	int error;
 
@@ -106,7 +107,7 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry)
 		return -ENOENT;
 	}
 	SetPageLocked(page);
-	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
+	error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
 	/*
 	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
 	 */
@@ -318,7 +319,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
  */
-struct page *read_swap_cache_async(swp_entry_t entry,
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *found_page, *new_page = NULL;
@@ -338,8 +339,7 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 		 * Get a new page to read into from swap.
 		 */
 		if (!new_page) {
-			new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-								vma, addr);
+			new_page = alloc_page_vma(gfp_mask, vma, addr);
 			if (!new_page)
 				break;		/* Out of memory */
 		}
@@ -354,7 +354,7 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 		 * the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		err = add_to_swap_cache(new_page, entry);
+		err = add_to_swap_cache(new_page, entry, gfp_mask);
 		if (!err) {
 			/*
 			 * Initiate read into locked page and return.
@@ -388,7 +388,7 @@ struct page *read_swap_cache_async(swp_entry_t entry,
  *
  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
  */
-struct page *swapin_readahead(swp_entry_t entry,
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	int nr_pages;
@@ -407,11 +407,11 @@ struct page *swapin_readahead(swp_entry_t entry,
 	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
 		/* Ok, do the async read-ahead now */
 		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						vma, addr);
+						gfp_mask, vma, addr);
 		if (!page)
 			break;
 		page_cache_release(page);
 	}
 	lru_add_drain();	/* Push any new pages onto the LRU now */
-	return read_swap_cache_async(entry, vma, addr);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
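A note on the gfp_mask & GFP_KERNEL line in add_to_swap_cache() above: the radix-tree node allocation keeps only the GFP_KERNEL bits of the caller's mask, so placement flags such as __GFP_HIGHMEM and __GFP_MOVABLE are dropped, while a restriction the caller imposed (no __GFP_IO/__GFP_FS in the loop-on-tmpfs case) stays in effect. The same kind of placeholder-flag sketch as above, again not the kernel's real definitions:

#include <stdio.h>

typedef unsigned int gfp_t;

/* Placeholder bits; only the set relationships matter here. */
#define __GFP_WAIT		((gfp_t)0x01)
#define __GFP_IO		((gfp_t)0x02)
#define __GFP_FS		((gfp_t)0x04)
#define __GFP_HIGHMEM		((gfp_t)0x08)
#define __GFP_MOVABLE		((gfp_t)0x10)

#define GFP_KERNEL		(__GFP_WAIT | __GFP_IO | __GFP_FS)
/* simplified; the real definition carries a few more bits */
#define GFP_HIGHUSER_MOVABLE	(GFP_KERNEL | __GFP_HIGHMEM | __GFP_MOVABLE)

int main(void)
{
	/* what loop-on-tmpfs hands down: IO/FS already stripped */
	gfp_t gfp_mask = GFP_HIGHUSER_MOVABLE & ~(__GFP_IO | __GFP_FS);

	/* alloc_page_vma() in read_swap_cache_async() sees the mask as-is */
	printf("page allocation gfp : %#x\n", gfp_mask);

	/* the radix-tree allocation sees only the GFP_KERNEL bits of it,
	 * here just __GFP_WAIT: no highmem/movable, and still no IO/FS */
	printf("radix-tree node gfp : %#x\n", gfp_mask & GFP_KERNEL);
	return 0;
}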