about summary refs log tree commit diff stats
path: root/fs/jbd2
diff options
context:
space:
mode:
author:    Michal Hocko <mhocko@suse.cz>    2015-06-08 10:53:10 -0400
committer: Theodore Ts'o <tytso@mit.edu>    2015-06-08 10:53:10 -0400
commit: 6ccaf3e2f302b6af8d9e17ce4e7f0af26b6baa0e (patch)
tree:   cc3a37a205d0fc980ff9e9788daf9526a0976532 /fs/jbd2
parent: 3dbb5eb9a3aa04f40e551338eee5e8d06f352fe8 (diff)
jbd2: revert must-not-fail allocation loops back to GFP_NOFAIL
This basically reverts 47def82672b3 (jbd2: Remove __GFP_NOFAIL from jbd2
layer). The deprecation of __GFP_NOFAIL was a bad choice because it led to
open-coding the endless loop around the allocator rather than removing the
dependency on the non-failing allocation. So the deprecation was a clear
failure, and reality tells us that __GFP_NOFAIL is not even close to going
away. It is still true that __GFP_NOFAIL allocations are generally
discouraged and new uses should be evaluated, and an alternative
(pre-allocations or reservations) should be considered, but it doesn't make
any sense to lie to the allocator about the requirements. The allocator can
take steps to help make progress if it knows the requirements.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Acked-by: David Rientjes <rientjes@google.com>
Diffstat (limited to 'fs/jbd2')
-rw-r--r--  fs/jbd2/journal.c      | 11
-rw-r--r--  fs/jbd2/transaction.c  | 20
2 files changed, 8 insertions(+), 23 deletions(-)
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b96bd8076b70..0bc333b4a594 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -371,16 +371,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 	 */
 	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
-retry_alloc:
-	new_bh = alloc_buffer_head(GFP_NOFS);
-	if (!new_bh) {
-		/*
-		 * Failure is not an option, but __GFP_NOFAIL is going
-		 * away; so we retry ourselves here.
-		 */
-		congestion_wait(BLK_RW_ASYNC, HZ/50);
-		goto retry_alloc;
-	}
+	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
 
 	/* keep subsequent assertions sane */
 	atomic_set(&new_bh->b_count, 1);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ff2f2e6ad311..799242cecffb 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -278,22 +278,16 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
 
 alloc_transaction:
 	if (!journal->j_running_transaction) {
+		/*
+		 * If __GFP_FS is not present, then we may be being called from
+		 * inside the fs writeback layer, so we MUST NOT fail.
+		 */
+		if ((gfp_mask & __GFP_FS) == 0)
+			gfp_mask |= __GFP_NOFAIL;
 		new_transaction = kmem_cache_zalloc(transaction_cache,
 						    gfp_mask);
-		if (!new_transaction) {
-			/*
-			 * If __GFP_FS is not present, then we may be
-			 * being called from inside the fs writeback
-			 * layer, so we MUST NOT fail. Since
-			 * __GFP_NOFAIL is going away, we will arrange
-			 * to retry the allocation ourselves.
-			 */
-			if ((gfp_mask & __GFP_FS) == 0) {
-				congestion_wait(BLK_RW_ASYNC, HZ/50);
-				goto alloc_transaction;
-			}
+		if (!new_transaction)
 			return -ENOMEM;
-		}
 	}
 
 	jbd_debug(3, "New handle %p going live.\n", handle);