about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/md/dm-crypt.c19
-rw-r--r--mm/swap_state.c27
2 files changed, 13 insertions, 33 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 77619a56e2bf..0dd6c2b5391b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	struct bio *bio;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned long flags = current->flags;
 	unsigned int i;
 
 	/*
-	 * Tell VM to act less aggressively and fail earlier.
-	 * This is not necessary but increases throughput.
+	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
+	 * to fail earlier. This is not necessary but increases throughput.
 	 * FIXME: Is this really intelligent?
 	 */
-	current->flags &= ~PF_MEMALLOC;
-
 	if (base_bio)
-		bio = bio_clone(base_bio, GFP_NOIO);
+		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
 	else
-		bio = bio_alloc(GFP_NOIO, nr_iovecs);
-	if (!bio) {
-		if (flags & PF_MEMALLOC)
-			current->flags |= PF_MEMALLOC;
+		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+	if (!bio)
 		return NULL;
-	}
 
 	/* if the last bio was not complete, continue where that one ended */
 	bio->bi_idx = *bio_vec_idx;
@@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		size -= bv->bv_len;
 	}
 
-	if (flags & PF_MEMALLOC)
-		current->flags |= PF_MEMALLOC;
-
 	if (!bio->bi_size) {
 		bio_put(bio);
 		return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index a063a902ed03..4f251775ef90 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -143,7 +143,6 @@ void __delete_from_swap_cache(struct page *page)
 int add_to_swap(struct page * page)
 {
 	swp_entry_t entry;
-	int pf_flags;
 	int err;
 
 	if (!PageLocked(page))
@@ -154,29 +153,19 @@ int add_to_swap(struct page * page)
 	if (!entry.val)
 		return 0;
 
-	/* Radix-tree node allocations are performing
-	 * GFP_ATOMIC allocations under PF_MEMALLOC.
-	 * They can completely exhaust the page allocator.
-	 *
-	 * So PF_MEMALLOC is dropped here. This causes the slab
-	 * allocations to fail earlier, so radix-tree nodes will
-	 * then be allocated from the mempool reserves.
+	/*
+	 * Radix-tree node allocations from PF_MEMALLOC contexts could
+	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
+	 * stops emergency reserves from being allocated.
 	 *
-	 * We're still using __GFP_HIGH for radix-tree node
-	 * allocations, so some of the emergency pools are available,
-	 * just not all of them.
+	 * TODO: this could cause a theoretical memory reclaim
+	 * deadlock in the swap out path.
 	 */
-
-	pf_flags = current->flags;
-	current->flags &= ~PF_MEMALLOC;
-
 	/*
 	 * Add it to the swap cache and mark it dirty
 	 */
-	err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
-
-	if (pf_flags & PF_MEMALLOC)
-		current->flags |= PF_MEMALLOC;
+	err = __add_to_swap_cache(page, entry,
+			GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
 	switch (err) {
 	case 0:		/* Success */