diff options
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r-- | mm/swap_state.c | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 668a80422630..e7875642e2cf 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -96,7 +96,8 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry, | |||
96 | return error; | 96 | return error; |
97 | } | 97 | } |
98 | 98 | ||
99 | static int add_to_swap_cache(struct page *page, swp_entry_t entry) | 99 | static int add_to_swap_cache(struct page *page, swp_entry_t entry, |
100 | gfp_t gfp_mask) | ||
100 | { | 101 | { |
101 | int error; | 102 | int error; |
102 | 103 | ||
@@ -106,7 +107,7 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry) | |||
106 | return -ENOENT; | 107 | return -ENOENT; |
107 | } | 108 | } |
108 | SetPageLocked(page); | 109 | SetPageLocked(page); |
109 | error = __add_to_swap_cache(page, entry, GFP_KERNEL); | 110 | error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL); |
110 | /* | 111 | /* |
111 | * Anon pages are already on the LRU, we don't run lru_cache_add here. | 112 | * Anon pages are already on the LRU, we don't run lru_cache_add here. |
112 | */ | 113 | */ |
@@ -318,7 +319,7 @@ struct page * lookup_swap_cache(swp_entry_t entry) | |||
318 | * A failure return means that either the page allocation failed or that | 319 | * A failure return means that either the page allocation failed or that |
319 | * the swap entry is no longer in use. | 320 | * the swap entry is no longer in use. |
320 | */ | 321 | */ |
321 | struct page *read_swap_cache_async(swp_entry_t entry, | 322 | struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, |
322 | struct vm_area_struct *vma, unsigned long addr) | 323 | struct vm_area_struct *vma, unsigned long addr) |
323 | { | 324 | { |
324 | struct page *found_page, *new_page = NULL; | 325 | struct page *found_page, *new_page = NULL; |
@@ -338,8 +339,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, | |||
338 | * Get a new page to read into from swap. | 339 | * Get a new page to read into from swap. |
339 | */ | 340 | */ |
340 | if (!new_page) { | 341 | if (!new_page) { |
341 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, | 342 | new_page = alloc_page_vma(gfp_mask, vma, addr); |
342 | vma, addr); | ||
343 | if (!new_page) | 343 | if (!new_page) |
344 | break; /* Out of memory */ | 344 | break; /* Out of memory */ |
345 | } | 345 | } |
@@ -354,7 +354,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, | |||
354 | * the just freed swap entry for an existing page. | 354 | * the just freed swap entry for an existing page. |
355 | * May fail (-ENOMEM) if radix-tree node allocation failed. | 355 | * May fail (-ENOMEM) if radix-tree node allocation failed. |
356 | */ | 356 | */ |
357 | err = add_to_swap_cache(new_page, entry); | 357 | err = add_to_swap_cache(new_page, entry, gfp_mask); |
358 | if (!err) { | 358 | if (!err) { |
359 | /* | 359 | /* |
360 | * Initiate read into locked page and return. | 360 | * Initiate read into locked page and return. |
@@ -388,7 +388,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, | |||
388 | * | 388 | * |
389 | * Caller must hold down_read on the vma->vm_mm if vma is not NULL. | 389 | * Caller must hold down_read on the vma->vm_mm if vma is not NULL. |
390 | */ | 390 | */ |
391 | struct page *swapin_readahead(swp_entry_t entry, | 391 | struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, |
392 | struct vm_area_struct *vma, unsigned long addr) | 392 | struct vm_area_struct *vma, unsigned long addr) |
393 | { | 393 | { |
394 | int nr_pages; | 394 | int nr_pages; |
@@ -407,11 +407,11 @@ struct page *swapin_readahead(swp_entry_t entry, | |||
407 | for (end_offset = offset + nr_pages; offset < end_offset; offset++) { | 407 | for (end_offset = offset + nr_pages; offset < end_offset; offset++) { |
408 | /* Ok, do the async read-ahead now */ | 408 | /* Ok, do the async read-ahead now */ |
409 | page = read_swap_cache_async(swp_entry(swp_type(entry), offset), | 409 | page = read_swap_cache_async(swp_entry(swp_type(entry), offset), |
410 | vma, addr); | 410 | gfp_mask, vma, addr); |
411 | if (!page) | 411 | if (!page) |
412 | break; | 412 | break; |
413 | page_cache_release(page); | 413 | page_cache_release(page); |
414 | } | 414 | } |
415 | lru_add_drain(); /* Push any new pages onto the LRU now */ | 415 | lru_add_drain(); /* Push any new pages onto the LRU now */ |
416 | return read_swap_cache_async(entry, vma, addr); | 416 | return read_swap_cache_async(entry, gfp_mask, vma, addr); |
417 | } | 417 | } |