diff options
author | Hugh Dickins <hugh@veritas.com> | 2008-02-05 01:28:42 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-05 12:44:14 -0500 |
commit | 02098feaa42b2e0087fbbe6c6ab9a23e4653b16a (patch) | |
tree | 494eaf13f204c9384d4316202fd76cd1b5d960ad /mm | |
parent | 46017e954826ac59e91df76341a3f76b45467847 (diff) |
swapin needs gfp_mask for loop on tmpfs
Building in a filesystem on a loop device on a tmpfs file can hang when
swapping, the loop thread caught in that infamous throttle_vm_writeout.
In theory this is a long standing problem, which I've either never seen in
practice, or long ago suppressed the recollection, after discounting my load
and my tmpfs size as unrealistically high. But now, with the new aops, it has
become easy to hang on one machine.
Loop used to grab_cache_page before the old prepare_write to tmpfs, which
seems to have been enough to free up some memory for any swapin needed; but
the new write_begin lets tmpfs find or allocate the page (much nicer, since
grab_cache_page missed tmpfs pages in swapcache).
When allocating a fresh page, tmpfs respects loop's mapping_gfp_mask, which
has __GFP_IO|__GFP_FS stripped off, and throttle_vm_writeout is designed to
break out when __GFP_IO or __GFP_FS is unset; but when tmpfs swaps in, the
read_swap_cache_async allocates with GFP_HIGHUSER_MOVABLE regardless of the
mapping_gfp_mask - hence the hang.
So, pass gfp_mask down the line from shmem_getpage to shmem_swapin to
swapin_readahead to read_swap_cache_async to add_to_swap_cache.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 3 | ||||
-rw-r--r-- | mm/shmem.c | 28 | ||||
-rw-r--r-- | mm/swap_state.c | 18 | ||||
-rw-r--r-- | mm/swapfile.c | 3 |
4 files changed, 27 insertions, 25 deletions
diff --git a/mm/memory.c b/mm/memory.c index ccc9403d5352..bc137751da7f 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2007,7 +2007,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2007 | page = lookup_swap_cache(entry); | 2007 | page = lookup_swap_cache(entry); |
2008 | if (!page) { | 2008 | if (!page) { |
2009 | grab_swap_token(); /* Contend for token _before_ read-in */ | 2009 | grab_swap_token(); /* Contend for token _before_ read-in */ |
2010 | page = swapin_readahead(entry, vma, address); | 2010 | page = swapin_readahead(entry, |
2011 | GFP_HIGHUSER_MOVABLE, vma, address); | ||
2011 | if (!page) { | 2012 | if (!page) { |
2012 | /* | 2013 | /* |
2013 | * Back out if somebody else faulted in this pte | 2014 | * Back out if somebody else faulted in this pte |
diff --git a/mm/shmem.c b/mm/shmem.c index 3a22a8f79331..55b696aa3ddd 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1025,8 +1025,8 @@ out: | |||
1025 | return err; | 1025 | return err; |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | static struct page *shmem_swapin(struct shmem_inode_info *info, | 1028 | static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, |
1029 | swp_entry_t entry, unsigned long idx) | 1029 | struct shmem_inode_info *info, unsigned long idx) |
1030 | { | 1030 | { |
1031 | struct vm_area_struct pvma; | 1031 | struct vm_area_struct pvma; |
1032 | struct page *page; | 1032 | struct page *page; |
@@ -1036,13 +1036,13 @@ static struct page *shmem_swapin(struct shmem_inode_info *info, | |||
1036 | pvma.vm_pgoff = idx; | 1036 | pvma.vm_pgoff = idx; |
1037 | pvma.vm_ops = NULL; | 1037 | pvma.vm_ops = NULL; |
1038 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); | 1038 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); |
1039 | page = swapin_readahead(entry, &pvma, 0); | 1039 | page = swapin_readahead(entry, gfp, &pvma, 0); |
1040 | mpol_free(pvma.vm_policy); | 1040 | mpol_free(pvma.vm_policy); |
1041 | return page; | 1041 | return page; |
1042 | } | 1042 | } |
1043 | 1043 | ||
1044 | static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, | 1044 | static struct page *shmem_alloc_page(gfp_t gfp, |
1045 | unsigned long idx) | 1045 | struct shmem_inode_info *info, unsigned long idx) |
1046 | { | 1046 | { |
1047 | struct vm_area_struct pvma; | 1047 | struct vm_area_struct pvma; |
1048 | struct page *page; | 1048 | struct page *page; |
@@ -1063,14 +1063,14 @@ static inline int shmem_parse_mpol(char *value, int *policy, | |||
1063 | return 1; | 1063 | return 1; |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | static inline struct page * | 1066 | static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, |
1067 | shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) | 1067 | struct shmem_inode_info *info, unsigned long idx) |
1068 | { | 1068 | { |
1069 | return swapin_readahead(entry, NULL, 0); | 1069 | return swapin_readahead(entry, gfp, NULL, 0); |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | static inline struct page * | 1072 | static inline struct page *shmem_alloc_page(gfp_t gfp, |
1073 | shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) | 1073 | struct shmem_inode_info *info, unsigned long idx) |
1074 | { | 1074 | { |
1075 | return alloc_page(gfp); | 1075 | return alloc_page(gfp); |
1076 | } | 1076 | } |
@@ -1093,6 +1093,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, | |||
1093 | struct page *swappage; | 1093 | struct page *swappage; |
1094 | swp_entry_t *entry; | 1094 | swp_entry_t *entry; |
1095 | swp_entry_t swap; | 1095 | swp_entry_t swap; |
1096 | gfp_t gfp; | ||
1096 | int error; | 1097 | int error; |
1097 | 1098 | ||
1098 | if (idx >= SHMEM_MAX_INDEX) | 1099 | if (idx >= SHMEM_MAX_INDEX) |
@@ -1117,6 +1118,7 @@ repeat: | |||
1117 | error = 0; | 1118 | error = 0; |
1118 | if (sgp == SGP_QUICK) | 1119 | if (sgp == SGP_QUICK) |
1119 | goto failed; | 1120 | goto failed; |
1121 | gfp = mapping_gfp_mask(mapping); | ||
1120 | 1122 | ||
1121 | spin_lock(&info->lock); | 1123 | spin_lock(&info->lock); |
1122 | shmem_recalc_inode(inode); | 1124 | shmem_recalc_inode(inode); |
@@ -1139,7 +1141,7 @@ repeat: | |||
1139 | *type |= VM_FAULT_MAJOR; | 1141 | *type |= VM_FAULT_MAJOR; |
1140 | } | 1142 | } |
1141 | spin_unlock(&info->lock); | 1143 | spin_unlock(&info->lock); |
1142 | swappage = shmem_swapin(info, swap, idx); | 1144 | swappage = shmem_swapin(swap, gfp, info, idx); |
1143 | if (!swappage) { | 1145 | if (!swappage) { |
1144 | spin_lock(&info->lock); | 1146 | spin_lock(&info->lock); |
1145 | entry = shmem_swp_alloc(info, idx, sgp); | 1147 | entry = shmem_swp_alloc(info, idx, sgp); |
@@ -1251,9 +1253,7 @@ repeat: | |||
1251 | 1253 | ||
1252 | if (!filepage) { | 1254 | if (!filepage) { |
1253 | spin_unlock(&info->lock); | 1255 | spin_unlock(&info->lock); |
1254 | filepage = shmem_alloc_page(mapping_gfp_mask(mapping), | 1256 | filepage = shmem_alloc_page(gfp, info, idx); |
1255 | info, | ||
1256 | idx); | ||
1257 | if (!filepage) { | 1257 | if (!filepage) { |
1258 | shmem_unacct_blocks(info->flags, 1); | 1258 | shmem_unacct_blocks(info->flags, 1); |
1259 | shmem_free_blocks(inode, 1); | 1259 | shmem_free_blocks(inode, 1); |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 668a80422630..e7875642e2cf 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -96,7 +96,8 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry, | |||
96 | return error; | 96 | return error; |
97 | } | 97 | } |
98 | 98 | ||
99 | static int add_to_swap_cache(struct page *page, swp_entry_t entry) | 99 | static int add_to_swap_cache(struct page *page, swp_entry_t entry, |
100 | gfp_t gfp_mask) | ||
100 | { | 101 | { |
101 | int error; | 102 | int error; |
102 | 103 | ||
@@ -106,7 +107,7 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry) | |||
106 | return -ENOENT; | 107 | return -ENOENT; |
107 | } | 108 | } |
108 | SetPageLocked(page); | 109 | SetPageLocked(page); |
109 | error = __add_to_swap_cache(page, entry, GFP_KERNEL); | 110 | error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL); |
110 | /* | 111 | /* |
111 | * Anon pages are already on the LRU, we don't run lru_cache_add here. | 112 | * Anon pages are already on the LRU, we don't run lru_cache_add here. |
112 | */ | 113 | */ |
@@ -318,7 +319,7 @@ struct page * lookup_swap_cache(swp_entry_t entry) | |||
318 | * A failure return means that either the page allocation failed or that | 319 | * A failure return means that either the page allocation failed or that |
319 | * the swap entry is no longer in use. | 320 | * the swap entry is no longer in use. |
320 | */ | 321 | */ |
321 | struct page *read_swap_cache_async(swp_entry_t entry, | 322 | struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, |
322 | struct vm_area_struct *vma, unsigned long addr) | 323 | struct vm_area_struct *vma, unsigned long addr) |
323 | { | 324 | { |
324 | struct page *found_page, *new_page = NULL; | 325 | struct page *found_page, *new_page = NULL; |
@@ -338,8 +339,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, | |||
338 | * Get a new page to read into from swap. | 339 | * Get a new page to read into from swap. |
339 | */ | 340 | */ |
340 | if (!new_page) { | 341 | if (!new_page) { |
341 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, | 342 | new_page = alloc_page_vma(gfp_mask, vma, addr); |
342 | vma, addr); | ||
343 | if (!new_page) | 343 | if (!new_page) |
344 | break; /* Out of memory */ | 344 | break; /* Out of memory */ |
345 | } | 345 | } |
@@ -354,7 +354,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, | |||
354 | * the just freed swap entry for an existing page. | 354 | * the just freed swap entry for an existing page. |
355 | * May fail (-ENOMEM) if radix-tree node allocation failed. | 355 | * May fail (-ENOMEM) if radix-tree node allocation failed. |
356 | */ | 356 | */ |
357 | err = add_to_swap_cache(new_page, entry); | 357 | err = add_to_swap_cache(new_page, entry, gfp_mask); |
358 | if (!err) { | 358 | if (!err) { |
359 | /* | 359 | /* |
360 | * Initiate read into locked page and return. | 360 | * Initiate read into locked page and return. |
@@ -388,7 +388,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, | |||
388 | * | 388 | * |
389 | * Caller must hold down_read on the vma->vm_mm if vma is not NULL. | 389 | * Caller must hold down_read on the vma->vm_mm if vma is not NULL. |
390 | */ | 390 | */ |
391 | struct page *swapin_readahead(swp_entry_t entry, | 391 | struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, |
392 | struct vm_area_struct *vma, unsigned long addr) | 392 | struct vm_area_struct *vma, unsigned long addr) |
393 | { | 393 | { |
394 | int nr_pages; | 394 | int nr_pages; |
@@ -407,11 +407,11 @@ struct page *swapin_readahead(swp_entry_t entry, | |||
407 | for (end_offset = offset + nr_pages; offset < end_offset; offset++) { | 407 | for (end_offset = offset + nr_pages; offset < end_offset; offset++) { |
408 | /* Ok, do the async read-ahead now */ | 408 | /* Ok, do the async read-ahead now */ |
409 | page = read_swap_cache_async(swp_entry(swp_type(entry), offset), | 409 | page = read_swap_cache_async(swp_entry(swp_type(entry), offset), |
410 | vma, addr); | 410 | gfp_mask, vma, addr); |
411 | if (!page) | 411 | if (!page) |
412 | break; | 412 | break; |
413 | page_cache_release(page); | 413 | page_cache_release(page); |
414 | } | 414 | } |
415 | lru_add_drain(); /* Push any new pages onto the LRU now */ | 415 | lru_add_drain(); /* Push any new pages onto the LRU now */ |
416 | return read_swap_cache_async(entry, vma, addr); | 416 | return read_swap_cache_async(entry, gfp_mask, vma, addr); |
417 | } | 417 | } |
diff --git a/mm/swapfile.c b/mm/swapfile.c index f071648e1360..ab93505dfbf4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -730,7 +730,8 @@ static int try_to_unuse(unsigned int type) | |||
730 | */ | 730 | */ |
731 | swap_map = &si->swap_map[i]; | 731 | swap_map = &si->swap_map[i]; |
732 | entry = swp_entry(type, i); | 732 | entry = swp_entry(type, i); |
733 | page = read_swap_cache_async(entry, NULL, 0); | 733 | page = read_swap_cache_async(entry, |
734 | GFP_HIGHUSER_MOVABLE, NULL, 0); | ||
734 | if (!page) { | 735 | if (!page) { |
735 | /* | 736 | /* |
736 | * Either swap_duplicate() failed because entry | 737 | * Either swap_duplicate() failed because entry |