author    Mel Gorman <mgorman@techsingularity.net>  2015-11-06 19:28:28 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-06 20:50:42 -0500
commit    71baba4b92dc1fa1bc461742c6ab1942ec6034e9 (patch)
tree      48c361ba0cc06890703bee1464a9349519118330
parent    40113370836e8e79befa585277296ed42781ef31 (diff)
mm, page_alloc: rename __GFP_WAIT to __GFP_RECLAIM
__GFP_WAIT was used to signal that the caller was in atomic context and
could not sleep.  Now it is possible to distinguish between true atomic
context and callers that are not willing to sleep.  The latter should
clear __GFP_DIRECT_RECLAIM so kswapd will still wake.  As clearing
__GFP_WAIT behaves differently, there is a risk that people will clear
the wrong flags.  This patch renames __GFP_WAIT to __GFP_RECLAIM to
clearly indicate what it does -- setting it allows all reclaim activity,
clearing it prevents it.

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
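As a rough sketch of the intended usage (not part of this patch; the helper
and its cannot_sleep parameter are hypothetical), a caller that must not
block clears only __GFP_DIRECT_RECLAIM, leaving __GFP_KSWAPD_RECLAIM set so
kswapd is still woken:

#include <linux/gfp.h>

/* Hypothetical helper illustrating the new flag semantics. */
static struct page *grab_page(bool cannot_sleep)
{
	gfp_t gfp = GFP_KERNEL;	/* __GFP_RECLAIM | __GFP_IO | __GFP_FS */

	if (cannot_sleep)
		/* Drop only direct reclaim; __GFP_KSWAPD_RECLAIM stays
		 * set, so kswapd is still woken even though this
		 * allocation never blocks. */
		gfp &= ~__GFP_DIRECT_RECLAIM;

	return alloc_page(gfp);
}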
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/scsi_ioctl.c6
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-cd_ioctl.c2
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/ide/ide-tape.c4
-rw-r--r--drivers/ide/ide-taskfile.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/staging/rdma/hfi1/init.c2
-rw-r--r--drivers/staging/rdma/ipath/ipath_file_ops.c2
-rw-r--r--fs/cachefiles/internal.h2
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/nilfs2/mdt.h2
-rw-r--r--include/linux/gfp.h16
-rw-r--r--kernel/power/swap.c16
-rw-r--r--lib/percpu_ida.c2
-rw-r--r--mm/failslab.c8
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/page_alloc.c9
-rw-r--r--security/integrity/ima/ima_crypto.c2
38 files changed, 71 insertions, 68 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 9e32f0868e36..590cca21c24a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -638,7 +638,7 @@ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
 	if (percpu_ref_tryget_live(&q->q_usage_counter))
 		return 0;
 
-	if (!(gfp & __GFP_WAIT))
+	if (!gfpflags_allow_blocking(gfp))
 		return -EBUSY;
 
 	ret = wait_event_interruptible(q->mq_freeze_wq,
@@ -2038,7 +2038,7 @@ void generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+		if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
 
 			q->make_request_fn(q, bio);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68c0a3416b34..694f8703f83c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1186,7 +1186,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q,
-			__GFP_WAIT|__GFP_HIGH, false, ctx, hctx);
+			__GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	ctx = alloc_data.ctx;
 	hctx = alloc_data.hctx;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index dda653ce7b24..0774799942e0 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -444,7 +444,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
 	}
 
-	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto error_free_buffer;
@@ -495,7 +495,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		break;
 	}
 
-	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) {
 		err = DRIVER_ERROR << 24;
 		goto error;
 	}
@@ -536,7 +536,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	struct request *rq;
 	int err;
 
-	rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	blk_rq_set_block_pc(rq);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index e5e0f19ceda0..3dc53a16ed3a 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1007,7 +1007,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
 	bm_set_page_unchanged(b->bm_pages[page_nr]);
 
 	if (ctx->flags & BM_AIO_COPY_PAGES) {
-		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
+		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
 		copy_highpage(page, b->bm_pages[page_nr]);
 		bm_store_page_idx(page, page_nr);
 	} else
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f504232c1ee7..a28a562f7b7f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
 	struct request *rq;
 
-	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
 	return blk_mq_rq_to_pdu(rq);
 }
 
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index b9242d78283d..562b5a4ca7b7 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk,
 	struct request *rq;
 	int err = 0;
 
-	rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7be2375db7f2..5959c2981cc7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,14 +704,14 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	int ret = 0;
 
 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-			     WRITE : READ, __GFP_WAIT);
+			     WRITE : READ, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	blk_rq_set_block_pc(rq);
 
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
-				      __GFP_WAIT);
+				      __GFP_RECLAIM);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58cb9e034fe..7e505d4be7c0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2216,7 +2216,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	mapping = file_inode(obj->base.filp)->i_mapping;
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
-	gfp &= ~(__GFP_IO | __GFP_WAIT);
+	gfp &= ~(__GFP_IO | __GFP_RECLAIM);
 	sg = st->sgl;
 	st->nents = 0;
 	for (i = 0; i < page_count; i++) {
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 1362ad80a76c..05352f490d60 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -92,7 +92,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
 	struct request *rq;
 	int error;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->special = (char *)pc;
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 64a6b827b3dd..ef907fd5ba98 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -441,7 +441,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 	struct request *rq;
 	int error;
 
-	rq = blk_get_request(drive->queue, write, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
 
 	memcpy(rq->cmd, cmd, BLK_MAX_CDB);
 	rq->cmd_type = REQ_TYPE_ATA_PC;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 066e39036518..474173eb31bb 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -303,7 +303,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 	struct request *rq;
 	int ret;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_flags = REQ_QUIET;
 	ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index b05a74d78ef5..0dd43b4fcec6 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -165,7 +165,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
 	if (!(setting->flags & DS_SYNC))
 		return setting->set(drive, arg);
 
-	rq = blk_get_request(q, READ, __GFP_WAIT);
+	rq = blk_get_request(q, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_len = 5;
 	rq->cmd[0] = REQ_DEVSET_EXEC;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 56b9708894a5..37a8a907febe 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -477,7 +477,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
 	if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
 		return -EBUSY;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
 	drive->mult_req = arg;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index aa2e9b77b20d..d05db2469209 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -125,7 +125,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 	if (NULL == (void *) arg) {
 		struct request *rq;
 
-		rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+		rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 		rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 		err = blk_execute_rq(drive->queue, NULL, rq, 0);
 		blk_put_request(rq);
@@ -221,7 +221,7 @@ static int generic_drive_reset(ide_drive_t *drive)
 	struct request *rq;
 	int ret = 0;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_len = 1;
 	rq->cmd[0] = REQ_DRIVE_RESET;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index c80868520488..2d7dca56dd24 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -31,7 +31,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	}
 	spin_unlock_irq(&hwif->lock);
 
-	rq = blk_get_request(q, READ, __GFP_WAIT);
+	rq = blk_get_request(q, READ, __GFP_RECLAIM);
 	rq->cmd[0] = REQ_PARK_HEADS;
 	rq->cmd_len = 1;
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 081e43458d50..e34af488693a 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -18,7 +18,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	}
 
 	memset(&rqpm, 0, sizeof(rqpm));
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -88,7 +88,7 @@ int generic_ide_resume(struct device *dev)
 	}
 
 	memset(&rqpm, 0, sizeof(rqpm));
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
 	rq->cmd_flags |= REQ_PREEMPT;
 	rq->special = &rqpm;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f5d51d1d09ee..12fa04997dcc 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -852,7 +852,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 	BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
 	BUG_ON(size < 0 || size % tape->blk_size);
 
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd[13] = cmd;
 	rq->rq_disk = tape->disk;
@@ -860,7 +860,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 
 	if (size) {
 		ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
-				      __GFP_WAIT);
+				      __GFP_RECLAIM);
 		if (ret)
 			goto out_put;
 	}
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 0979e126fff1..a716693417a3 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -430,7 +430,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 	int error;
 	int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
 
-	rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
 	/*
@@ -441,7 +441,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 	 */
 	if (nsect) {
 		error = blk_rq_map_kern(drive->queue, rq, buf,
-					nsect * SECTOR_SIZE, __GFP_WAIT);
+					nsect * SECTOR_SIZE, __GFP_RECLAIM);
 		if (error)
 			goto put_req;
 	}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 7e00470adc30..4ff340fe904f 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1680,7 +1680,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
 	 * heavy filesystem activity makes these fail, and we can
 	 * use compound pages.
 	 */
-	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
 	egrcnt = rcd->rcvegrcnt;
 	egroff = rcd->rcvegr_tid_base;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 89300870fefb..1e688bfec567 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -75,7 +75,7 @@ MODULE_LICENSE("GPL");
 
 /*
  * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
  * __GFP_NOWARN, to suppress page allocation failure warnings.
  */
 #define VMW_PAGE_ALLOC_NOSLEEP	(__GFP_HIGHMEM|__GFP_NOWARN)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e878590e71b6..6c195554d94a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1025,11 +1025,13 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	req->special = (void *)0;
 
 	if (buffer && bufflen) {
-		ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT);
+		ret = blk_rq_map_kern(q, req, buffer, bufflen,
+				      __GFP_DIRECT_RECLAIM);
 		if (ret)
 			goto out;
 	} else if (ubuffer && bufflen) {
-		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT);
+		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+				      __GFP_DIRECT_RECLAIM);
 		if (ret)
 			goto out;
 		bio = req->bio;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 66a96cd98b97..984ddcb4786d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1970,7 +1970,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	struct request *req;
 
 	/*
-	 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+	 * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
 	 * request becomes available
 	 */
 	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 126a48c6431e..dd8ad2a44510 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -222,13 +222,13 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	int write = (data_direction == DMA_TO_DEVICE);
 	int ret = DRIVER_ERROR << 24;
 
-	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+	req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
 	if (IS_ERR(req))
 		return ret;
 	blk_rq_set_block_pc(req);
 
 	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
-				       buffer, bufflen, __GFP_WAIT))
+				       buffer, bufflen, __GFP_RECLAIM))
 		goto out;
 
 	req->cmd_len = COMMAND_SIZE(cmd[0]);
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
index 47a1202fcbdf..8666f3ad24e9 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/staging/rdma/hfi1/init.c
@@ -1560,7 +1560,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 	 * heavy filesystem activity makes these fail, and we can
 	 * use compound pages.
 	 */
-	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
 	/*
 	 * The minimum size of the eager buffers is a groups of MTU-sized
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
index 5d9b9dbd8fc4..13c3cd11ab92 100644
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ b/drivers/staging/rdma/ipath/ipath_file_ops.c
@@ -905,7 +905,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
 	 * heavy filesystem activity makes these fail, and we can
 	 * use compound pages.
 	 */
-	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
 	egrcnt = dd->ipath_rcvegrcnt;
 	/* TID number offset for this port */
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index aecd0859eacb..9c4b737a54df 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -30,7 +30,7 @@ extern unsigned cachefiles_debug;
 #define CACHEFILES_DEBUG_KLEAVE	2
 #define CACHEFILES_DEBUG_KDEBUG	4
 
-#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /*
  * node records
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3ae0e0427191..18e7554cf94c 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -361,7 +361,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 
 	/*
 	 * bio_alloc() is guaranteed to return a bio when called with
-	 * __GFP_WAIT and we request a valid number of vectors.
+	 * __GFP_RECLAIM and we request a valid number of vectors.
 	 */
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index fe529a87a208..03246cac3338 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -72,7 +72,7 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
 }
 
 /* Default GFP flags using highmem */
-#define NILFS_MDT_GFP (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
+#define NILFS_MDT_GFP (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
 
 int nilfs_mdt_get_block(struct inode *, unsigned long, int,
 			void (*init_block)(struct inode *,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 86f9f7da86ea..369227202ac2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -107,7 +107,7 @@ struct vm_area_struct;
  * can be cleared when the reclaiming of pages would cause unnecessary
  * disruption.
  */
-#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
 #define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
 #define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
 
@@ -126,12 +126,12 @@ struct vm_area_struct;
  */
 #define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
 #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO	(__GFP_WAIT)
-#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+#define GFP_NOIO	(__GFP_RECLAIM)
+#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
+#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
 			 __GFP_RECLAIMABLE)
-#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
 #define GFP_TRANSHUGE	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
@@ -143,12 +143,12 @@ struct vm_area_struct;
 #define GFP_MOVABLE_SHIFT 3
 
 /* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b2066fb5b10f..12cd989dadf6 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -257,7 +257,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
 	struct bio *bio;
 	int error = 0;
 
-	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
 	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = hib_resume_bdev;
 
@@ -356,7 +356,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 		return -ENOSPC;
 
 	if (hb) {
-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+		src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
 					      __GFP_NORETRY);
 		if (src) {
 			copy_page(src, buf);
@@ -364,7 +364,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 			ret = hib_wait_io(hb); /* Free pages */
 			if (ret)
 				return ret;
-			src = (void *)__get_free_page(__GFP_WAIT |
+			src = (void *)__get_free_page(__GFP_RECLAIM |
 						      __GFP_NOWARN |
 						      __GFP_NORETRY);
 			if (src) {
@@ -672,7 +672,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 	if (!page) {
 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 		ret = -ENOMEM;
@@ -975,7 +975,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
 		last = tmp;
 
 		tmp->map = (struct swap_map_page *)
-			__get_free_page(__GFP_WAIT | __GFP_HIGH);
+			__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 		if (!tmp->map) {
 			release_swap_reader(handle);
 			return -ENOMEM;
@@ -1242,9 +1242,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
 	for (i = 0; i < read_pages; i++) {
 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
-						  __GFP_WAIT | __GFP_HIGH :
-						  __GFP_WAIT | __GFP_NOWARN |
+						  __GFP_RECLAIM | __GFP_HIGH :
+						  __GFP_RECLAIM | __GFP_NOWARN |
 						  __GFP_NORETRY);
 
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index f75715131f20..6d40944960de 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -135,7 +135,7 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
  * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
diff --git a/mm/failslab.c b/mm/failslab.c
index 98fb490311eb..79171b4a5826 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -3,11 +3,11 @@
 
 static struct {
 	struct fault_attr attr;
-	bool ignore_gfp_wait;
+	bool ignore_gfp_reclaim;
 	bool cache_filter;
 } failslab = {
 	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = true,
+	.ignore_gfp_reclaim = true,
 	.cache_filter = false,
 };
 
@@ -16,7 +16,7 @@ bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 	if (gfpflags & __GFP_NOFAIL)
 		return false;
 
-	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+	if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
 		return false;
 
 	if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
@@ -42,7 +42,7 @@ static int __init failslab_debugfs_init(void)
 		return PTR_ERR(dir);
 
 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				&failslab.ignore_gfp_wait))
+				&failslab.ignore_gfp_reclaim))
 		goto fail;
 	if (!debugfs_create_bool("cache-filter", mode, dir,
 				&failslab.cache_filter))
diff --git a/mm/filemap.c b/mm/filemap.c
index 58e04e26f996..6ef3674c0763 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2713,7 +2713,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * page is known to the local caching routines.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
+ * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
  *
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f5c08b46fef8..9812d4618651 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -786,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
 }
 
 /* Caller must hold page table lock. */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 05374f09339c..a5470674a477 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2120,7 +2120,7 @@ done_restock:
 	/*
 	 * If the hierarchy is above the normal consumption range, schedule
 	 * reclaim on returning to userland.  We can perform reclaim here
-	 * if __GFP_WAIT but let's always punt for simplicity and so that
+	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
 	 * not recorded as it most likely matches current's and won't
 	 * change in the meantime.  As high limit is checked again before
diff --git a/mm/migrate.c b/mm/migrate.c
index e60379eb23f8..7890d0bb5e23 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1752,7 +1752,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_dropref;
 
 	new_page = alloc_pages_node(node,
-		(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+		(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
 		HPAGE_PMD_ORDER);
 	if (!new_page)
 		goto out_fail;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 70461f3e3378..1b373096b990 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2160,11 +2160,11 @@ static struct {
 	struct fault_attr attr;
 
 	bool ignore_gfp_highmem;
-	bool ignore_gfp_wait;
+	bool ignore_gfp_reclaim;
 	u32 min_order;
 } fail_page_alloc = {
 	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = true,
+	.ignore_gfp_reclaim = true,
 	.ignore_gfp_highmem = true,
 	.min_order = 1,
 };
@@ -2183,7 +2183,8 @@ static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 		return false;
 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
 		return false;
-	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_DIRECT_RECLAIM))
+	if (fail_page_alloc.ignore_gfp_reclaim &&
+			(gfp_mask & __GFP_DIRECT_RECLAIM))
 		return false;
 
 	return should_fail(&fail_page_alloc.attr, 1 << order);
@@ -2202,7 +2203,7 @@ static int __init fail_page_alloc_debugfs(void)
 		return PTR_ERR(dir);
 
 	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				&fail_page_alloc.ignore_gfp_wait))
+				&fail_page_alloc.ignore_gfp_reclaim))
 		goto fail;
 	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
 				&fail_page_alloc.ignore_gfp_highmem))
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index e24121afb2f2..6eb62936c672 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -126,7 +126,7 @@ static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
 {
 	void *ptr;
 	int order = ima_maxorder;
-	gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY;
+	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
 
 	if (order)
 		order = min(get_order(max_size), order);