author		Gu Zheng <guz.fnst@cn.fujitsu.com>	2013-11-04 05:21:05 -0500
committer	Ben Myers <bpm@sgi.com>			2013-11-06 17:31:27 -0500
commit		359d992bcd398273637cd9edde10afca953783c4 (patch)
tree		bfe6012116dc8b8d59d16bc517d0c94fa24abae9 /fs/xfs
parent		d123031a5673cd38a85ce66cc07243dfe5f424c9 (diff)
xfs: simplify kmem_{zone_}zalloc
Introduce a new flag, KM_ZERO, for allocating zeroed memory, and convert kmem_zalloc() and kmem_zone_zalloc() to call kmem_alloc() and kmem_zone_alloc() with KM_ZERO directly, avoiding the separate zeroing step. Following Dave's suggestion, make kmem_{zone_}zalloc() static inlines in kmem.h, since they are now just trivial wrappers.

V2: Make kmem_{zone_}zalloc() static inlines in kmem.h, as Dave suggested.

Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
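For illustration only, here is a minimal userspace sketch of the pattern the patch introduces (the demo_* names, the DEMO_KM_ZERO value, and the malloc()/memset() stand-in for the kernel allocator are assumptions for this sketch, not part of the patch): the zero flag is folded into the flag word, the core allocator honours it, and the zalloc variant collapses into a one-line static inline wrapper.

#include <stdlib.h>
#include <string.h>

#define DEMO_KM_ZERO	0x0010u		/* mirrors the new KM_ZERO bit */

/* core allocator: zeroing is driven by the flag, as __GFP_ZERO does in the kernel */
static void *demo_kmem_alloc(size_t size, unsigned int flags)
{
	void	*ptr = malloc(size);

	if (ptr && (flags & DEMO_KM_ZERO))
		memset(ptr, 0, size);
	return ptr;
}

/* the zalloc variant is now just a trivial wrapper that ORs in the flag */
static inline void *demo_kmem_zalloc(size_t size, unsigned int flags)
{
	return demo_kmem_alloc(size, flags | DEMO_KM_ZERO);
}

int main(void)
{
	char	*buf = demo_kmem_zalloc(64, 0);	/* comes back zero-filled */

	free(buf);
	return 0;
}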
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/kmem.c	22
-rw-r--r--	fs/xfs/kmem.h	21
2 files changed, 18 insertions, 25 deletions
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index a02cfb9e3bce..66a36befc5c0 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -63,17 +63,6 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 }
 
 void *
-kmem_zalloc(size_t size, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	ptr = kmem_alloc(size, flags);
-	if (ptr)
-		memset((char *)ptr, 0, (int)size);
-	return ptr;
-}
-
-void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
 	void	*ptr;
@@ -128,14 +117,3 @@ kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (1);
 }
-
-void *
-kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	ptr = kmem_zone_alloc(zone, flags);
-	if (ptr)
-		memset((char *)ptr, 0, kmem_cache_size(zone));
-	return ptr;
-}
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 3a7371cab508..64db0e53edea 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -32,6 +32,7 @@ typedef unsigned __bitwise xfs_km_flags_t;
 #define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
 #define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
 #define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
+#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -43,7 +44,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -52,11 +53,14 @@ kmem_flags_convert(xfs_km_flags_t flags)
 		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
+
+	if (flags & KM_ZERO)
+		lflags |= __GFP_ZERO;
+
 	return lflags;
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_zalloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
@@ -64,6 +68,12 @@ extern void  kmem_free(const void *);
 
 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
 
+static inline void *
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
+{
+	return kmem_alloc(size, flags | KM_ZERO);
+}
+
 /*
  * Zone interfaces
  */
@@ -102,6 +112,11 @@ kmem_zone_destroy(kmem_zone_t *zone)
 }
 
 extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
-extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
+
+static inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
+{
+	return kmem_zone_alloc(zone, flags | KM_ZERO);
+}
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
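Because the new static inlines keep the old signatures, existing callers should not need any changes; a hypothetical call site (the variable and zone names below are illustrative, not taken from the patch) would still read:

	bp = kmem_zalloc(sizeof(*bp), KM_SLEEP);
	obj = kmem_zone_zalloc(some_zone, KM_NOFS);

The only behavioural difference is that zeroing now happens inside the allocator via __GFP_ZERO rather than in a trailing memset().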