author     Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>    2019-08-26 15:06:22 -0400
committer  Darrick J. Wong <darrick.wong@oracle.com>            2019-08-26 15:06:22 -0400
commit     707e0ddaf67e8942448ebdd16b523e409ebe40ce (patch)
tree       081fe5a9335bd0856fc979e62ec18803623bdf6f
parent     a55aa89aab90fae7c815b0551b07be37db359d76 (diff)
fs: xfs: Remove KM_NOSLEEP and KM_SLEEP.
Since no caller is using KM_NOSLEEP and no callee branches on KM_SLEEP,
we can remove KM_NOSLEEP and replace KM_SLEEP with 0.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
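At every call site the conversion is mechanical: KM_SLEEP is dropped, either
becoming 0 when it was the only flag or simply disappearing from an OR'd mask.
A minimal before/after sketch (the allocation helpers are the real kmem
wrappers touched by this patch; the surrounding call sites are illustrative
only, not taken verbatim from a single file):

	/* before: blocking behaviour had to be spelled out explicitly */
	args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);

	/* after: sleeping is the default, only the real modifiers remain */
	args = kmem_zalloc(sizeof(*args), KM_NOFS);
	new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);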
-rw-r--r--  fs/xfs/kmem.c                    |  6
-rw-r--r--  fs/xfs/kmem.h                    | 14
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c        |  2
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c    |  8
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c         |  6
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c     |  6
-rw-r--r--  fs/xfs/libxfs/xfs_defer.c        |  2
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.c         | 14
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_block.c   |  2
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_sf.c      |  8
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.c   | 16
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.c     |  4
-rw-r--r--  fs/xfs/libxfs/xfs_rmap.c         |  2
-rw-r--r--  fs/xfs/scrub/attr.c              |  2
-rw-r--r--  fs/xfs/scrub/fscounters.c        |  2
-rw-r--r--  fs/xfs/scrub/symlink.c           |  2
-rw-r--r--  fs/xfs/xfs_acl.c                 |  4
-rw-r--r--  fs/xfs/xfs_attr_inactive.c       |  2
-rw-r--r--  fs/xfs/xfs_attr_list.c           |  2
-rw-r--r--  fs/xfs/xfs_bmap_item.c           |  4
-rw-r--r--  fs/xfs/xfs_buf.c                 |  2
-rw-r--r--  fs/xfs/xfs_buf_item.c            |  4
-rw-r--r--  fs/xfs/xfs_dquot.c               |  2
-rw-r--r--  fs/xfs/xfs_dquot_item.c          |  2
-rw-r--r--  fs/xfs/xfs_error.c               |  2
-rw-r--r--  fs/xfs/xfs_extent_busy.c         |  2
-rw-r--r--  fs/xfs/xfs_extfree_item.c        |  8
-rw-r--r--  fs/xfs/xfs_icache.c              |  2
-rw-r--r--  fs/xfs/xfs_icreate_item.c        |  2
-rw-r--r--  fs/xfs/xfs_inode.c               |  2
-rw-r--r--  fs/xfs/xfs_inode_item.c          |  2
-rw-r--r--  fs/xfs/xfs_ioctl.c               |  4
-rw-r--r--  fs/xfs/xfs_ioctl32.c             |  2
-rw-r--r--  fs/xfs/xfs_itable.c              |  4
-rw-r--r--  fs/xfs/xfs_iwalk.c               |  2
-rw-r--r--  fs/xfs/xfs_log.c                 |  3
-rw-r--r--  fs/xfs/xfs_log_cil.c             | 10
-rw-r--r--  fs/xfs/xfs_log_recover.c         | 16
-rw-r--r--  fs/xfs/xfs_mount.c               |  2
-rw-r--r--  fs/xfs/xfs_mru_cache.c           |  4
-rw-r--r--  fs/xfs/xfs_qm.c                  |  4
-rw-r--r--  fs/xfs/xfs_refcount_item.c       |  6
-rw-r--r--  fs/xfs/xfs_rmap_item.c           |  6
-rw-r--r--  fs/xfs/xfs_rtalloc.c             |  4
-rw-r--r--  fs/xfs/xfs_trans.c               |  4
-rw-r--r--  fs/xfs/xfs_trans_dquot.c         |  2
46 files changed, 102 insertions(+), 109 deletions(-)
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 16bb9a328678..7cd315ad937e 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -17,7 +17,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 
         do {
                 ptr = kmalloc(size, lflags);
-                if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+                if (ptr || (flags & KM_MAYFAIL))
                         return ptr;
                 if (!(++retries % 100))
                         xfs_err(NULL,
@@ -67,7 +67,7 @@ kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
 
         do {
                 ptr = krealloc(old, newsize, lflags);
-                if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+                if (ptr || (flags & KM_MAYFAIL))
                         return ptr;
                 if (!(++retries % 100))
                         xfs_err(NULL,
@@ -87,7 +87,7 @@ kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 
         do {
                 ptr = kmem_cache_alloc(zone, lflags);
-                if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+                if (ptr || (flags & KM_MAYFAIL))
                         return ptr;
                 if (!(++retries % 100))
                         xfs_err(NULL,
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 267655acd426..cb6fa7984ffa 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -16,8 +16,6 @@
  */
 
 typedef unsigned __bitwise xfs_km_flags_t;
-#define KM_SLEEP        ((__force xfs_km_flags_t)0x0001u)
-#define KM_NOSLEEP      ((__force xfs_km_flags_t)0x0002u)
 #define KM_NOFS         ((__force xfs_km_flags_t)0x0004u)
 #define KM_MAYFAIL      ((__force xfs_km_flags_t)0x0008u)
 #define KM_ZERO         ((__force xfs_km_flags_t)0x0010u)
@@ -32,15 +30,11 @@ kmem_flags_convert(xfs_km_flags_t flags)
 {
         gfp_t lflags;
 
-        BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
+        BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));
 
-        if (flags & KM_NOSLEEP) {
-                lflags = GFP_ATOMIC | __GFP_NOWARN;
-        } else {
-                lflags = GFP_KERNEL | __GFP_NOWARN;
-                if (flags & KM_NOFS)
-                        lflags &= ~__GFP_FS;
-        }
+        lflags = GFP_KERNEL | __GFP_NOWARN;
+        if (flags & KM_NOFS)
+                lflags &= ~__GFP_FS;
 
         /*
          * Default page/slab allocator behavior is to retry for ever
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 372ad55631fc..533b04aaf6f6 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2205,7 +2205,7 @@ xfs_defer_agfl_block(
         ASSERT(xfs_bmap_free_item_zone != NULL);
         ASSERT(oinfo != NULL);
 
-        new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+        new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
         new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
         new->xefi_blockcount = 1;
         new->xefi_oinfo = *oinfo;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 70eb941d02e4..1408638c21c5 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -782,7 +782,7 @@ xfs_attr_shortform_to_leaf(
         ifp = dp->i_afp;
         sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
         size = be16_to_cpu(sf->hdr.totsize);
-        tmpbuffer = kmem_alloc(size, KM_SLEEP);
+        tmpbuffer = kmem_alloc(size, 0);
         ASSERT(tmpbuffer != NULL);
         memcpy(tmpbuffer, ifp->if_u1.if_data, size);
         sf = (xfs_attr_shortform_t *)tmpbuffer;
@@ -985,7 +985,7 @@ xfs_attr3_leaf_to_shortform(
 
         trace_xfs_attr_leaf_to_sf(args);
 
-        tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+        tmpbuffer = kmem_alloc(args->geo->blksize, 0);
         if (!tmpbuffer)
                 return -ENOMEM;
 
@@ -1448,7 +1448,7 @@ xfs_attr3_leaf_compact(
 
         trace_xfs_attr_leaf_compact(args);
 
-        tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+        tmpbuffer = kmem_alloc(args->geo->blksize, 0);
         memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
         memset(bp->b_addr, 0, args->geo->blksize);
         leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -2167,7 +2167,7 @@ xfs_attr3_leaf_unbalance(
                 struct xfs_attr_leafblock *tmp_leaf;
                 struct xfs_attr3_icleaf_hdr tmphdr;
 
-                tmp_leaf = kmem_zalloc(state->args->geo->blksize, KM_SLEEP);
+                tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0);
 
                 /*
                  * Copy the header into the temp leaf so that all the stuff
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 07aad70f3931..65f4348af9ae 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -553,7 +553,7 @@ __xfs_bmap_add_free(
 #endif
         ASSERT(xfs_bmap_free_item_zone != NULL);
 
-        new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+        new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
         new->xefi_startblock = bno;
         new->xefi_blockcount = (xfs_extlen_t)len;
         if (oinfo)
@@ -1099,7 +1099,7 @@ xfs_bmap_add_attrfork(
         if (error)
                 goto trans_cancel;
         ASSERT(ip->i_afp == NULL);
-        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
         ip->i_afp->if_flags = XFS_IFEXTENTS;
         logflags = 0;
         switch (ip->i_d.di_format) {
@@ -6094,7 +6094,7 @@ __xfs_bmap_add(
                         bmap->br_blockcount,
                         bmap->br_state);
 
-        bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
+        bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
         INIT_LIST_HEAD(&bi->bi_list);
         bi->bi_type = type;
         bi->bi_owner = ip;
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 0bf56e94bfe9..4fd1223c1bd5 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2098,7 +2098,7 @@ xfs_da_grow_inode_int(
          * If we didn't get it and the block might work if fragmented,
          * try without the CONTIG flag. Loop until we get it all.
          */
-        mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+        mapp = kmem_alloc(sizeof(*mapp) * count, 0);
         for (b = *bno, mapi = 0; b < *bno + count; ) {
                 nmap = min(XFS_BMAP_MAX_NMAP, count);
                 c = (int)(*bno + count - b);
@@ -2480,7 +2480,7 @@ xfs_buf_map_from_irec(
 
         if (nirecs > 1) {
                 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
-                                  KM_SLEEP | KM_NOFS);
+                                  KM_NOFS);
                 if (!map)
                         return -ENOMEM;
                 *mapp = map;
@@ -2539,7 +2539,7 @@ xfs_dabuf_map(
          */
         if (nfsb != 1)
                 irecs = kmem_zalloc(sizeof(irec) * nfsb,
-                                    KM_SLEEP | KM_NOFS);
+                                    KM_NOFS);
 
         nirecs = nfsb;
         error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index eb2be2a6a25a..22557527cfdb 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -517,7 +517,7 @@ xfs_defer_add(
         }
         if (!dfp) {
                 dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
-                                KM_SLEEP | KM_NOFS);
+                                KM_NOFS);
                 dfp->dfp_type = type;
                 dfp->dfp_intent = NULL;
                 dfp->dfp_done = NULL;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 67840723edbb..867c5dee0751 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -110,9 +110,9 @@ xfs_da_mount(
 
         nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
         mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
-                                    KM_SLEEP | KM_MAYFAIL);
+                                    KM_MAYFAIL);
         mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
-                                     KM_SLEEP | KM_MAYFAIL);
+                                     KM_MAYFAIL);
         if (!mp->m_dir_geo || !mp->m_attr_geo) {
                 kmem_free(mp->m_dir_geo);
                 kmem_free(mp->m_attr_geo);
@@ -217,7 +217,7 @@ xfs_dir_init(
         if (error)
                 return error;
 
-        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+        args = kmem_zalloc(sizeof(*args), KM_NOFS);
         if (!args)
                 return -ENOMEM;
 
@@ -254,7 +254,7 @@ xfs_dir_createname(
                 XFS_STATS_INC(dp->i_mount, xs_dir_create);
         }
 
-        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+        args = kmem_zalloc(sizeof(*args), KM_NOFS);
         if (!args)
                 return -ENOMEM;
 
@@ -353,7 +353,7 @@ xfs_dir_lookup(
          * lockdep Doing this avoids having to add a bunch of lockdep class
          * annotations into the reclaim path for the ilock.
          */
-        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+        args = kmem_zalloc(sizeof(*args), KM_NOFS);
         args->geo = dp->i_mount->m_dir_geo;
         args->name = name->name;
         args->namelen = name->len;
@@ -422,7 +422,7 @@ xfs_dir_removename(
         ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
         XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 
-        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+        args = kmem_zalloc(sizeof(*args), KM_NOFS);
         if (!args)
                 return -ENOMEM;
 
@@ -483,7 +483,7 @@ xfs_dir_replace(
         if (rval)
                 return rval;
 
-        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
+        args = kmem_zalloc(sizeof(*args), KM_NOFS);
         if (!args)
                 return -ENOMEM;
 
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index a6fb0cc2085e..9595ced393dc 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -1092,7 +1092,7 @@ xfs_dir2_sf_to_block(
          * Copy the directory into a temporary buffer.
          * Then pitch the incore inode data so we can make extents.
          */
-        sfp = kmem_alloc(ifp->if_bytes, KM_SLEEP);
+        sfp = kmem_alloc(ifp->if_bytes, 0);
         memcpy(sfp, oldsfp, ifp->if_bytes);
 
         xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 033589257f54..85f14fc2a8da 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -164,7 +164,7 @@ xfs_dir2_block_to_sf(
          * can free the block and copy the formatted data into the inode literal
          * area.
          */
-        dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP);
+        dst = kmem_alloc(mp->m_sb.sb_inodesize, 0);
         hdr = bp->b_addr;
 
         /*
@@ -436,7 +436,7 @@ xfs_dir2_sf_addname_hard(
 
         sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
         old_isize = (int)dp->i_d.di_size;
-        buf = kmem_alloc(old_isize, KM_SLEEP);
+        buf = kmem_alloc(old_isize, 0);
         oldsfp = (xfs_dir2_sf_hdr_t *)buf;
         memcpy(oldsfp, sfp, old_isize);
         /*
@@ -1096,7 +1096,7 @@ xfs_dir2_sf_toino4(
          * Don't want xfs_idata_realloc copying the data here.
          */
         oldsize = dp->i_df.if_bytes;
-        buf = kmem_alloc(oldsize, KM_SLEEP);
+        buf = kmem_alloc(oldsize, 0);
         oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
         ASSERT(oldsfp->i8count == 1);
         memcpy(buf, oldsfp, oldsize);
@@ -1169,7 +1169,7 @@ xfs_dir2_sf_toino8(
          * Don't want xfs_idata_realloc copying the data here.
          */
         oldsize = dp->i_df.if_bytes;
-        buf = kmem_alloc(oldsize, KM_SLEEP);
+        buf = kmem_alloc(oldsize, 0);
         oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
         ASSERT(oldsfp->i8count == 0);
         memcpy(buf, oldsfp, oldsize);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index bf3e04018246..c643beeb5a24 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -94,7 +94,7 @@ xfs_iformat_fork(
                 return 0;
 
         ASSERT(ip->i_afp == NULL);
-        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
+        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS);
 
         switch (dip->di_aformat) {
         case XFS_DINODE_FMT_LOCAL:
@@ -147,7 +147,7 @@ xfs_init_local_fork(
 
         if (size) {
                 real_size = roundup(mem_size, 4);
-                ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
+                ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS);
                 memcpy(ifp->if_u1.if_data, data, size);
                 if (zero_terminate)
                         ifp->if_u1.if_data[size] = '\0';
@@ -302,7 +302,7 @@ xfs_iformat_btree(
         }
 
         ifp->if_broot_bytes = size;
-        ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
+        ifp->if_broot = kmem_alloc(size, KM_NOFS);
         ASSERT(ifp->if_broot != NULL);
         /*
          * Copy and convert from the on-disk structure
@@ -367,7 +367,7 @@ xfs_iroot_realloc(
          */
         if (ifp->if_broot_bytes == 0) {
                 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
-                ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+                ifp->if_broot = kmem_alloc(new_size, KM_NOFS);
                 ifp->if_broot_bytes = (int)new_size;
                 return;
         }
@@ -382,7 +382,7 @@ xfs_iroot_realloc(
                 new_max = cur_max + rec_diff;
                 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
                 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
-                                KM_SLEEP | KM_NOFS);
+                                KM_NOFS);
                 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                                                      ifp->if_broot_bytes);
                 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -408,7 +408,7 @@ xfs_iroot_realloc(
         else
                 new_size = 0;
         if (new_size > 0) {
-                new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+                new_broot = kmem_alloc(new_size, KM_NOFS);
                 /*
                  * First copy over the btree block header.
                  */
@@ -492,7 +492,7 @@ xfs_idata_realloc(
          * We enforce that here.
          */
         ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data,
-                        roundup(new_size, 4), KM_SLEEP | KM_NOFS);
+                        roundup(new_size, 4), KM_NOFS);
         ifp->if_bytes = new_size;
 }
 
@@ -683,7 +683,7 @@ xfs_ifork_init_cow(
                 return;
 
         ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone,
-                                       KM_SLEEP | KM_NOFS);
+                                       KM_NOFS);
         ip->i_cowfp->if_flags = XFS_IFEXTENTS;
         ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
         ip->i_cnextents = 0;
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 51bb9bdb0e84..14b9e3e056cc 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1189,7 +1189,7 @@ __xfs_refcount_add(
                         blockcount);
 
         ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
-                        KM_SLEEP | KM_NOFS);
+                        KM_NOFS);
         INIT_LIST_HEAD(&ri->ri_list);
         ri->ri_type = type;
         ri->ri_startblock = startblock;
@@ -1602,7 +1602,7 @@ xfs_refcount_recover_extent(
         if (be32_to_cpu(rec->refc.rc_refcount) != 1)
                 return -EFSCORRUPTED;
 
-        rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
+        rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
         xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
         list_add_tail(&rr->rr_list, debris);
 
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index e6aeb390b2fb..12a61f0c1e6a 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2287,7 +2287,7 @@ __xfs_rmap_add(
                         bmap->br_blockcount,
                         bmap->br_state);
 
-        ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
+        ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS);
         INIT_LIST_HEAD(&ri->ri_list);
         ri->ri_type = type;
         ri->ri_owner = owner;
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index 1afc58bf71dd..922a5154e2b8 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -80,7 +80,7 @@ xchk_setup_xattr(
          * without the inode lock held, which means we can sleep.
          */
         if (sc->flags & XCHK_TRY_HARDER) {
-                error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, KM_SLEEP);
+                error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
                 if (error)
                         return error;
         }
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index fc3f510c9034..98f82d7c8b40 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -125,7 +125,7 @@ xchk_setup_fscounters(
         struct xchk_fscounters  *fsc;
         int                     error;
 
-        sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), KM_SLEEP);
+        sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
         if (!sc->buf)
                 return -ENOMEM;
         fsc = sc->buf;
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 99c0b1234c3c..5641ae512c9e 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -22,7 +22,7 @@ xchk_setup_symlink(
         struct xfs_inode        *ip)
 {
         /* Allocate the buffer without the inode lock held. */
-        sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP);
+        sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, 0);
         if (!sc->buf)
                 return -ENOMEM;
 
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index cbda40d40326..86c0697870a5 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -135,7 +135,7 @@ xfs_get_acl(struct inode *inode, int type)
          * go out to the disk.
          */
         len = XFS_ACL_MAX_SIZE(ip->i_mount);
-        xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+        xfs_acl = kmem_zalloc_large(len, 0);
         if (!xfs_acl)
                 return ERR_PTR(-ENOMEM);
 
@@ -180,7 +180,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
                 struct xfs_acl *xfs_acl;
                 int len = XFS_ACL_MAX_SIZE(ip->i_mount);
 
-                xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+                xfs_acl = kmem_zalloc_large(len, 0);
                 if (!xfs_acl)
                         return -ENOMEM;
 
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index dc93c51c17de..a640a285cc52 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -147,7 +147,7 @@ xfs_attr3_leaf_inactive(
          * Allocate storage for a list of all the "remote" value extents.
          */
         size = count * sizeof(xfs_attr_inactive_list_t);
-        list = kmem_alloc(size, KM_SLEEP);
+        list = kmem_alloc(size, 0);
 
         /*
          * Identify each of the "remote" value extents.
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 58fc820a70c6..00758fdc2fec 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -109,7 +109,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
          * It didn't all fit, so we have to sort everything on hashval.
          */
         sbsize = sf->hdr.count * sizeof(*sbuf);
-        sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
+        sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);
 
         /*
          * Scan the attribute list for the rest of the entries, storing
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9fa4a7ee8cfc..989163e1f900 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -141,7 +141,7 @@ xfs_bui_init(
 {
         struct xfs_bui_log_item         *buip;
 
-        buip = kmem_zone_zalloc(xfs_bui_zone, KM_SLEEP);
+        buip = kmem_zone_zalloc(xfs_bui_zone, 0);
 
         xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
         buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
@@ -218,7 +218,7 @@ xfs_trans_get_bud(
 {
         struct xfs_bud_log_item         *budp;
 
-        budp = kmem_zone_zalloc(xfs_bud_zone, KM_SLEEP);
+        budp = kmem_zone_zalloc(xfs_bud_zone, 0);
         xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
                           &xfs_bud_item_ops);
         budp->bud_buip = buip;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ca0849043f54..d3be9ab0359b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1741,7 +1741,7 @@ xfs_alloc_buftarg(
 {
         xfs_buftarg_t           *btp;
 
-        btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
+        btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
 
         btp->bt_mount = mp;
         btp->bt_dev = bdev->bd_dev;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 7dcaec54a20b..d74fbd1e9d3e 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -702,7 +702,7 @@ xfs_buf_item_get_format(
         }
 
         bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
-                                KM_SLEEP);
+                                0);
         if (!bip->bli_formats)
                 return -ENOMEM;
         return 0;
@@ -747,7 +747,7 @@ xfs_buf_item_init(
                 return 0;
         }
 
-        bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
+        bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
         xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
         bip->bli_buf = bp;
 
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index fb1ad4483081..7ce770e779b4 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -440,7 +440,7 @@ xfs_dquot_alloc(
 {
         struct xfs_dquot        *dqp;
 
-        dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
+        dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);
 
         dqp->dq_flags = type;
         dqp->q_core.d_id = cpu_to_be32(id);
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 282ec5af293e..d60647d7197b 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -347,7 +347,7 @@ xfs_qm_qoff_logitem_init(
 {
         struct xfs_qoff_logitem *qf;
 
-        qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
+        qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), 0);
 
         xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
                         &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 544c9482a0ef..849fd4476950 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -213,7 +213,7 @@ xfs_errortag_init(
         struct xfs_mount        *mp)
 {
         mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX,
-                        KM_SLEEP | KM_MAYFAIL);
+                        KM_MAYFAIL);
         if (!mp->m_errortag)
                 return -ENOMEM;
 
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 0ed68379e551..2183d87be4cf 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -33,7 +33,7 @@ xfs_extent_busy_insert(
         struct rb_node          **rbp;
         struct rb_node          *parent = NULL;
 
-        new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
+        new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
         new->agno = agno;
         new->bno = bno;
         new->length = len;
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 86f6512d6864..e44efc41a041 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -163,9 +163,9 @@ xfs_efi_init(
         if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
                 size = (uint)(sizeof(xfs_efi_log_item_t) +
                         ((nextents - 1) * sizeof(xfs_extent_t)));
-                efip = kmem_zalloc(size, KM_SLEEP);
+                efip = kmem_zalloc(size, 0);
         } else {
-                efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP);
+                efip = kmem_zone_zalloc(xfs_efi_zone, 0);
         }
 
         xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
@@ -333,9 +333,9 @@ xfs_trans_get_efd(
         if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
                 efdp = kmem_zalloc(sizeof(struct xfs_efd_log_item) +
                                 (nextents - 1) * sizeof(struct xfs_extent),
-                                KM_SLEEP);
+                                0);
         } else {
-                efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP);
+                efdp = kmem_zone_zalloc(xfs_efd_zone, 0);
         }
 
         xfs_log_item_init(tp->t_mountp, &efdp->efd_item, XFS_LI_EFD,
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 0b0fd10a36d4..944add5ff8e0 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -40,7 +40,7 @@ xfs_inode_alloc(
          * KM_MAYFAIL and return NULL here on ENOMEM. Set the
          * code up to do this anyway.
          */
-        ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+        ip = kmem_zone_alloc(xfs_inode_zone, 0);
         if (!ip)
                 return NULL;
         if (inode_init_always(mp->m_super, VFS_I(ip))) {
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index d99a0a3e5f40..3ebd1b7f49d8 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -89,7 +89,7 @@ xfs_icreate_log(
 {
         struct xfs_icreate_item *icp;
 
-        icp = kmem_zone_zalloc(xfs_icreate_zone, KM_SLEEP);
+        icp = kmem_zone_zalloc(xfs_icreate_zone, 0);
 
         xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE,
                           &xfs_icreate_item_ops);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6467d5e1df2d..cdb97fa027fa 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2018,7 +2018,7 @@ xfs_iunlink_add_backref(
         if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
                 return 0;
 
-        iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS);
+        iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
         iu->iu_agino = prev_agino;
         iu->iu_next_unlinked = this_agino;
 
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c9a502eed204..bb8f076805b9 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -651,7 +651,7 @@ xfs_inode_item_init(
         struct xfs_inode_log_item *iip;
 
         ASSERT(ip->i_itemp == NULL);
-        iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
+        iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0);
 
         iip->ili_inode = ip;
         xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6f7848cd5527..9ea51664932e 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -396,7 +396,7 @@ xfs_attrlist_by_handle(
         if (IS_ERR(dentry))
                 return PTR_ERR(dentry);
 
-        kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
+        kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
         if (!kbuf)
                 goto out_dput;
 
@@ -434,7 +434,7 @@ xfs_attrmulti_attr_get(
 
         if (*len > XFS_XATTR_SIZE_MAX)
                 return -EINVAL;
-        kbuf = kmem_zalloc_large(*len, KM_SLEEP);
+        kbuf = kmem_zalloc_large(*len, 0);
         if (!kbuf)
                 return -ENOMEM;
 
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7bd7534f5051..1e08bf79b478 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -381,7 +381,7 @@ xfs_compat_attrlist_by_handle(
                 return PTR_ERR(dentry);
 
         error = -ENOMEM;
-        kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
+        kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
         if (!kbuf)
                 goto out_dput;
 
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f5c955d35be4..b049e7369a66 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -169,7 +169,7 @@ xfs_bulkstat_one(
         ASSERT(breq->icount == 1);
 
         bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
-                        KM_SLEEP | KM_MAYFAIL);
+                        KM_MAYFAIL);
         if (!bc.buf)
                 return -ENOMEM;
 
@@ -243,7 +243,7 @@ xfs_bulkstat(
                 return 0;
 
         bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
-                        KM_SLEEP | KM_MAYFAIL);
+                        KM_MAYFAIL);
         if (!bc.buf)
                 return -ENOMEM;
 
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 8c7d727149ea..86ce52c1871f 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -616,7 +616,7 @@ xfs_iwalk_threaded(
                 if (xfs_pwork_ctl_want_abort(&pctl))
                         break;
 
-                iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), KM_SLEEP);
+                iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
                 iwag->mp = mp;
                 iwag->iwalk_fn = iwalk_fn;
                 iwag->data = data;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7fc3c1ad36bc..50d854bfc45c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -428,8 +428,7 @@ xfs_log_reserve(
         XFS_STATS_INC(mp, xs_try_logspace);
 
         ASSERT(*ticp == NULL);
-        tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
-                                KM_SLEEP);
+        tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0);
         *ticp = tic;
 
         xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index fa5602d0fd7f..ef652abd112c 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -38,7 +38,7 @@ xlog_cil_ticket_alloc(
         struct xlog_ticket *tic;
 
         tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
-                                KM_SLEEP|KM_NOFS);
+                                KM_NOFS);
 
         /*
          * set the current reservation to zero so we know to steal the basic
@@ -186,7 +186,7 @@ xlog_cil_alloc_shadow_bufs(
                  */
                 kmem_free(lip->li_lv_shadow);
 
-                lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS);
+                lv = kmem_alloc_large(buf_size, KM_NOFS);
                 memset(lv, 0, xlog_cil_iovec_space(niovecs));
 
                 lv->lv_item = lip;
@@ -660,7 +660,7 @@ xlog_cil_push(
         if (!cil)
                 return 0;
 
-        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
+        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
         new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
         down_write(&cil->xc_ctx_lock);
@@ -1179,11 +1179,11 @@ xlog_cil_init(
         struct xfs_cil  *cil;
         struct xfs_cil_ctx *ctx;
 
-        cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
+        cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
         if (!cil)
                 return -ENOMEM;
 
-        ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
+        ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
         if (!ctx) {
                 kmem_free(cil);
                 return -ENOMEM;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 13d1d3e95b88..eafb36cb4c66 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1960,7 +1960,7 @@ xlog_recover_buffer_pass1(
                 }
         }
 
-        bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+        bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
         bcp->bc_blkno = buf_f->blf_blkno;
         bcp->bc_len = buf_f->blf_len;
         bcp->bc_refcount = 1;
@@ -2930,7 +2930,7 @@ xlog_recover_inode_pass2(
         if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
                 in_f = item->ri_buf[0].i_addr;
         } else {
-                in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
+                in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
                 need_free = 1;
                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
                 if (error)
@@ -4161,7 +4161,7 @@ xlog_recover_add_item(
 {
         xlog_recover_item_t *item;
 
-        item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
+        item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
         INIT_LIST_HEAD(&item->ri_list);
         list_add_tail(&item->ri_list, head);
 }
@@ -4201,7 +4201,7 @@ xlog_recover_add_to_cont_trans(
         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
         old_len = item->ri_buf[item->ri_cnt-1].i_len;
 
-        ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
+        ptr = kmem_realloc(old_ptr, len + old_len, 0);
         memcpy(&ptr[old_len], dp, len);
         item->ri_buf[item->ri_cnt-1].i_len += len;
         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
@@ -4261,7 +4261,7 @@ xlog_recover_add_to_trans(
                 return 0;
         }
 
-        ptr = kmem_alloc(len, KM_SLEEP);
+        ptr = kmem_alloc(len, 0);
         memcpy(ptr, dp, len);
         in_f = (struct xfs_inode_log_format *)ptr;
 
@@ -4289,7 +4289,7 @@ xlog_recover_add_to_trans(
                 item->ri_total = in_f->ilf_size;
                 item->ri_buf =
                         kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
-                                    KM_SLEEP);
+                                    0);
         }
         ASSERT(item->ri_total > item->ri_cnt);
         /* Description region is ri_buf[0] */
@@ -4423,7 +4423,7 @@ xlog_recover_ophdr_to_trans(
          * This is a new transaction so allocate a new recovery container to
          * hold the recovery ops that will follow.
          */
-        trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
+        trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
         trans->r_log_tid = tid;
         trans->r_lsn = be64_to_cpu(rhead->h_lsn);
         INIT_LIST_HEAD(&trans->r_itemq);
@@ -5527,7 +5527,7 @@ xlog_do_log_recovery(
          */
         log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
                                                  sizeof(struct list_head),
-                                                 KM_SLEEP);
+                                                 0);
         for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
                 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 322da6909290..da50b12ef634 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -82,7 +82,7 @@ xfs_uuid_mount(
         if (hole < 0) {
                 xfs_uuid_table = kmem_realloc(xfs_uuid_table,
                         (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
-                        KM_SLEEP);
+                        0);
                 hole = xfs_uuid_table_size++;
         }
         xfs_uuid_table[hole] = *uuid;
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 74738813f60d..a06661dac5be 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -333,12 +333,12 @@ xfs_mru_cache_create(
         if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
                 return -EINVAL;
 
-        if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP)))
+        if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
                 return -ENOMEM;
 
         /* An extra list is needed to avoid reaping up to a grp_time early. */
         mru->grp_count = grp_count + 1;
-        mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+        mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);
 
         if (!mru->lists) {
                 err = -ENOMEM;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5e7a37f0cf84..ecd8ce152ab1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -642,7 +642,7 @@ xfs_qm_init_quotainfo(
 
         ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
-        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
+        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), 0);
 
         error = list_lru_init(&qinf->qi_lru);
         if (error)
@@ -978,7 +978,7 @@ xfs_qm_reset_dqcounts_buf(
         if (qip->i_d.di_nblocks == 0)
                 return 0;
 
-        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
+        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
 
         lblkno = 0;
         maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index d8288aa0670a..db0e0d7cffb7 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -144,9 +144,9 @@ xfs_cui_init(
         ASSERT(nextents > 0);
         if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
                 cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
-                                KM_SLEEP);
+                                0);
         else
-                cuip = kmem_zone_zalloc(xfs_cui_zone, KM_SLEEP);
+                cuip = kmem_zone_zalloc(xfs_cui_zone, 0);
 
         xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
         cuip->cui_format.cui_nextents = nextents;
@@ -223,7 +223,7 @@ xfs_trans_get_cud(
 {
         struct xfs_cud_log_item         *cudp;
 
-        cudp = kmem_zone_zalloc(xfs_cud_zone, KM_SLEEP);
+        cudp = kmem_zone_zalloc(xfs_cud_zone, 0);
         xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
                           &xfs_cud_item_ops);
         cudp->cud_cuip = cuip;
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 77ed557b6127..8939e0ea09cd 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -142,9 +142,9 @@ xfs_rui_init(
 
         ASSERT(nextents > 0);
         if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
-                ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
+                ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
         else
-                ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);
+                ruip = kmem_zone_zalloc(xfs_rui_zone, 0);
 
         xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
         ruip->rui_format.rui_nextents = nextents;
@@ -244,7 +244,7 @@ xfs_trans_get_rud(
 {
         struct xfs_rud_log_item         *rudp;
 
-        rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
+        rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
         xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
                           &xfs_rud_item_ops);
         rudp->rud_ruip = ruip;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 5fa4db3c3e32..4a48a8c75b4f 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -865,7 +865,7 @@ xfs_alloc_rsum_cache(
          * lower bound on the minimum level with any free extents. We can
          * continue without the cache if it couldn't be allocated.
          */
-        mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, KM_SLEEP);
+        mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, 0);
         if (!mp->m_rsum_cache)
                 xfs_warn(mp, "could not allocate realtime summary cache");
 }
@@ -963,7 +963,7 @@ xfs_growfs_rt(
         /*
          * Allocate a new (fake) mount/sb.
          */
-        nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
+        nmp = kmem_alloc(sizeof(*nmp), 0);
         /*
          * Loop over the bitmap blocks.
          * We will do everything one bitmap block at a time.
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d42a68d8313b..f4795fdb7389 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -90,7 +90,7 @@ xfs_trans_dup(
 
         trace_xfs_trans_dup(tp, _RET_IP_);
 
-        ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+        ntp = kmem_zone_zalloc(xfs_trans_zone, 0);
 
         /*
          * Initialize the new transaction structure.
@@ -263,7 +263,7 @@ xfs_trans_alloc(
          * GFP_NOFS allocation context so that we avoid lockdep false positives
          * by doing GFP_KERNEL allocations inside sb_start_intwrite().
          */
-        tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+        tp = kmem_zone_zalloc(xfs_trans_zone, 0);
         if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                 sb_start_intwrite(mp->m_super);
 
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 1027c9ca6eb8..16457465833b 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -863,7 +863,7 @@ STATIC void
 xfs_trans_alloc_dqinfo(
         xfs_trans_t     *tp)
 {
-        tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
+        tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
 }
 
 void