author	Nathan Scott <nathans@sgi.com>	2006-03-13 21:18:19 -0500
committer	Nathan Scott <nathans@sgi.com>	2006-03-13 21:18:19 -0500
commit	8758280fcc6129be89503efe93bb59eaf2f85d28 (patch)
tree	395246120b571385c1f3efad773b83a932d008da /fs/xfs
parent	8d280b98cfe3c0b69c37d355218975c1c0279bb0 (diff)

[XFS] Cleanup the use of zones/slabs, more consistent and allows flags to
be passed.

SGI-PV: 949073
SGI-Modid: xfs-linux-melb:xfs-kern:25122a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/kmem.h		91
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	7
-rw-r--r--	fs/xfs/linux-2.6/xfs_super.c	38
-rw-r--r--	fs/xfs/xfs_trans.h		2
-rw-r--r--	fs/xfs/xfs_vfsops.c		34

5 files changed, 100 insertions(+), 72 deletions(-)
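The heart of the cleanup: callers stop invoking kmem_cache_create() directly and instead pass XFS-private KM_ZONE_* hints (plus an optional constructor) through a new kmem_zone_init_flags() wrapper, so slab-layer flag names appear in exactly one place. A before/after fragment, taken from the xfs_super.c hunk further down:

/* Before: raw slab API, SLAB_* flags hard-coded at the call site. */
xfs_vnode_zone = kmem_cache_create("xfs_vnode",
		sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
		linvfs_inode_init_once, NULL);

/* After: XFS-private wrapper; KM_ZONE_* hints map onto SLAB_* flags
 * in kmem.h, so new hints can be added without touching every caller. */
xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
				KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
				linvfs_inode_init_once);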
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index c64a29cdfff3..f0268a84e6fd 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -23,17 +23,8 @@
 #include <linux/mm.h>
 
 /*
- * memory management routines
+ * Process flags handling
  */
-#define KM_SLEEP	0x0001u
-#define KM_NOSLEEP	0x0002u
-#define KM_NOFS		0x0004u
-#define KM_MAYFAIL	0x0008u
-
-#define kmem_zone	kmem_cache
-#define kmem_zone_t	struct kmem_cache
-
-typedef unsigned long xfs_pflags_t;
 
 #define PFLAGS_TEST_NOIO()		(current->flags & PF_NOIO)
 #define PFLAGS_TEST_FSTRANS()		(current->flags & PF_FSTRANS)
@@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t;
 	*(NSTATEP) = *(OSTATEP);	\
 } while (0)
 
-static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
+/*
+ * General memory allocation interfaces
+ */
+
+#define KM_SLEEP	0x0001u
+#define KM_NOSLEEP	0x0002u
+#define KM_NOFS		0x0004u
+#define KM_MAYFAIL	0x0008u
+
+/*
+ * We use a special process flag to avoid recursive callbacks into
+ * the filesystem during transactions.  We will also issue our own
+ * warnings, so we explicitly skip any generic ones (silly of us).
+ */
+static inline gfp_t
+kmem_flags_convert(unsigned int __nocast flags)
 {
-	gfp_t lflags = __GFP_NOWARN;	/* we'll report problems, if need be */
+	gfp_t lflags;
 
-#ifdef DEBUG
-	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
-		printk(KERN_WARNING
-		    "XFS: memory allocation with wrong flags (%x)\n", flags);
-		BUG();
-	}
-#endif
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
 
 	if (flags & KM_NOSLEEP) {
-		lflags |= GFP_ATOMIC;
+		lflags = GFP_ATOMIC | __GFP_NOWARN;
 	} else {
-		lflags |= GFP_KERNEL;
-
-		/* avoid recusive callbacks to filesystem during transactions */
+		lflags = GFP_KERNEL | __GFP_NOWARN;
 		if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
-
 	return lflags;
 }
 
-static __inline kmem_zone_t *
+extern void *kmem_alloc(size_t, unsigned int __nocast);
+extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_zalloc(size_t, unsigned int __nocast);
+extern void kmem_free(void *, size_t);
+
+/*
+ * Zone interfaces
+ */
+
+#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
+#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
+#define KM_ZONE_SPREAD	0
+
+#define kmem_zone	kmem_cache
+#define kmem_zone_t	struct kmem_cache
+
+static inline kmem_zone_t *
 kmem_zone_init(int size, char *zone_name)
 {
 	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
 }
 
-static __inline void
+static inline kmem_zone_t *
+kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+		     void (*construct)(void *, kmem_zone_t *, unsigned long))
+{
+	return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
+}
+
+static inline void
 kmem_zone_free(kmem_zone_t *zone, void *ptr)
 {
 	kmem_cache_free(zone, ptr);
 }
 
-static __inline void
+static inline void
 kmem_zone_destroy(kmem_zone_t *zone)
 {
 	if (zone && kmem_cache_destroy(zone))
 		BUG();
 }
 
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void kmem_free(void *, size_t);
+/*
+ * Low memory cache shrinkers
+ */
 
 typedef struct shrinker *kmem_shaker_t;
 typedef int (*kmem_shake_func_t)(int, gfp_t);
 
-static __inline kmem_shaker_t
+static inline kmem_shaker_t
 kmem_shake_register(kmem_shake_func_t sfunc)
 {
 	return set_shrinker(DEFAULT_SEEKS, sfunc);
 }
 
-static __inline void
+static inline void
 kmem_shake_deregister(kmem_shaker_t shrinker)
 {
 	remove_shrinker(shrinker);
 }
 
-static __inline int
+static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
 	return (gfp_mask & __GFP_WAIT);
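For readers tracing the flag conversion: kmem_flags_convert() now builds the gfp mask from scratch instead of OR-ing into a preset, and the DEBUG-only sanity check becomes an unconditional BUG_ON(). A minimal userspace model of the new logic; the GFP_* and KM_* values below are stand-ins for illustration (the real masks live in <linux/gfp.h>, and PFLAGS_TEST_FSTRANS() reads current->flags):

#include <stdio.h>
#include <stdbool.h>

/* Stand-in values, not the kernel's. */
#define __GFP_WAIT	0x10u
#define __GFP_IO	0x40u
#define __GFP_FS	0x80u
#define __GFP_NOWARN	0x200u
#define GFP_ATOMIC	0x20u	/* __GFP_HIGH in the 2.6 kernels */
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

#define KM_SLEEP	0x0001u
#define KM_NOSLEEP	0x0002u
#define KM_NOFS		0x0004u
#define KM_MAYFAIL	0x0008u

static bool in_fstrans;	/* models PFLAGS_TEST_FSTRANS() */

static unsigned int kmem_flags_convert(unsigned int flags)
{
	unsigned int lflags;

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (in_fstrans || (flags & KM_NOFS))
			lflags &= ~__GFP_FS;	/* no fs recursion */
	}
	return lflags;
}

int main(void)
{
	printf("KM_NOSLEEP        -> %#x\n", kmem_flags_convert(KM_NOSLEEP));
	printf("KM_SLEEP          -> %#x\n", kmem_flags_convert(KM_SLEEP));
	printf("KM_SLEEP|KM_NOFS  -> %#x\n", kmem_flags_convert(KM_SLEEP | KM_NOFS));
	in_fstrans = true;
	printf("KM_SLEEP in trans -> %#x\n", kmem_flags_convert(KM_SLEEP));
	return 0;
}

Note that KM_SLEEP inside a transaction and KM_SLEEP|KM_NOFS produce the same mask: the process flag makes the NOFS downgrade automatic.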
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index bfb4f2917bb6..cdb905ab4dba 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1805,13 +1805,12 @@ xfs_flush_buftarg(
 int __init
 xfs_buf_init(void)
 {
-	int		error = -ENOMEM;
-
 #ifdef XFS_BUF_TRACE
 	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
 #endif
 
-	xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
+						KM_ZONE_HWALIGN, NULL);
 	if (!xfs_buf_zone)
 		goto out_free_trace_buf;
 
@@ -1839,7 +1838,7 @@ xfs_buf_init(void)
 #ifdef XFS_BUF_TRACE
 	ktrace_free(xfs_buf_trace_buf);
 #endif
-	return error;
+	return -ENOMEM;
 }
 
 void
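The xfs_buf_init() change also drops a write-once error local: since -ENOMEM is the only failure value, it is returned directly at the unwind label. A generic sketch of the same goto-unwind shape (the names here are hypothetical, not from the patch):

#include <errno.h>
#include <stddef.h>

struct res;	/* hypothetical resource type */
extern struct res *create_first(void);
extern struct res *create_second(void);
extern void destroy_first(struct res *);

static struct res *first, *second;

int example_init(void)
{
	first = create_first();
	if (!first)
		goto out;

	second = create_second();
	if (!second)
		goto out_free_first;

	return 0;

 out_free_first:
	destroy_first(first);	/* unwind in reverse order */
 out:
	return -ENOMEM;		/* single failure value, no local needed */
}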
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 59989f6f83ef..0c7ed4b29c54 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -337,8 +337,8 @@ linvfs_alloc_inode(
 {
 	vnode_t			*vp;
 
-	vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
-	if (!vp)
+	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
+	if (unlikely(!vp))
 		return NULL;
 	return LINVFS_GET_IP(vp);
 }
@@ -352,23 +352,21 @@ linvfs_destroy_inode(
 
 STATIC void
 linvfs_inode_init_once(
-	void			*data,
-	kmem_cache_t		*cachep,
+	void			*vnode,
+	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	vnode_t			*vp = (vnode_t *)data;
-
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 	    SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(LINVFS_GET_IP(vp));
+		inode_init_once(LINVFS_GET_IP((vnode_t *)vnode));
 }
 
 STATIC int
-linvfs_init_zones(void)
+xfs_init_zones(void)
 {
-	xfs_vnode_zone = kmem_cache_create("xfs_vnode",
-			sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
-			linvfs_inode_init_once, NULL);
+	xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
+					linvfs_inode_init_once);
 	if (!xfs_vnode_zone)
 		goto out;
 
@@ -377,14 +375,12 @@ linvfs_init_zones(void)
 		goto out_destroy_vnode_zone;
 
 	xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
-			mempool_alloc_slab, mempool_free_slab,
-			xfs_ioend_zone);
+				mempool_alloc_slab, mempool_free_slab,
+				xfs_ioend_zone);
 	if (!xfs_ioend_pool)
 		goto out_free_ioend_zone;
-
 	return 0;
 
-
  out_free_ioend_zone:
 	kmem_zone_destroy(xfs_ioend_zone);
  out_destroy_vnode_zone:
@@ -394,7 +390,7 @@ linvfs_init_zones(void)
 }
 
 STATIC void
-linvfs_destroy_zones(void)
+xfs_destroy_zones(void)
 {
 	mempool_destroy(xfs_ioend_pool);
 	kmem_zone_destroy(xfs_vnode_zone);
@@ -405,7 +401,7 @@ linvfs_destroy_zones(void)
  * Attempt to flush the inode, this will actually fail
  * if the inode is pinned, but we dirty the inode again
  * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable. 
+ * since this is when the inode itself becomes flushable.
  */
 STATIC int
 linvfs_write_inode(
@@ -963,7 +959,7 @@ init_xfs_fs( void )
 
 	ktrace_init(64);
 
-	error = linvfs_init_zones();
+	error = xfs_init_zones();
 	if (error < 0)
 		goto undo_zones;
 
@@ -986,7 +982,7 @@ undo_register:
 	xfs_buf_terminate();
 
 undo_buffers:
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 
 undo_zones:
 	return error;
@@ -1000,7 +996,7 @@ exit_xfs_fs( void )
 	unregister_filesystem(&xfs_fs_type);
 	xfs_cleanup();
 	xfs_buf_terminate();
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 	ktrace_uninit();
 }
 
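Note that linvfs_inode_init_once() now matches the constructor prototype kmem_zone_init_flags() expects, void (*)(void *, kmem_zone_t *, unsigned long), so the cast-from-void* local disappears. Any future zone constructor would follow the same shape; a sketch, where example_zone_init_once and struct example_item are hypothetical:

struct example_item { int initialized; };	/* hypothetical item type */

STATIC void
example_zone_init_once(
	void		*item,
	kmem_zone_t	*zonep,
	unsigned long	flags)
{
	/* 2.6-era slab calls constructors for debug verification passes
	 * too; only initialize on a real construction pass. */
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		memset(item, 0, sizeof(struct example_item));
}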
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index d77901c07f63..e48befa4e337 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -380,7 +380,7 @@ typedef struct xfs_trans {
 	xfs_trans_header_t	t_header;	/* header for in-log trans */
 	unsigned int		t_busy_free;	/* busy descs free */
 	xfs_log_busy_chunk_t	t_busy;		/* busy/async free blocks */
-	xfs_pflags_t		t_pflags;	/* saved pflags state */
+	unsigned long		t_pflags;	/* saved process flags state */
 } xfs_trans_t;
 
 #endif	/* __KERNEL__ */
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 2e1045837881..5dd84fe609cc 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -77,11 +77,12 @@ xfs_init(void)
 						"xfs_bmap_free_item");
 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
 						"xfs_btree_cur");
-	xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
 	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
 	xfs_da_state_zone =
 		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
 	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
+	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
 
 	/*
 	 * The size of the zone allocated buf log item is the maximum
@@ -93,17 +94,30 @@ xfs_init(void)
 			(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
 			  NBWORD) * sizeof(int))),
 			 "xfs_buf_item");
-	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
-	    ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efd_zone =
+		kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+			       ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
 			      "xfs_efd_item");
-	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
-	    ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efi_zone =
+		kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+			       ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
 			      "xfs_efi_item");
-	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
-	xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili");
-	xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t),
-						"xfs_chashlist");
-	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
+
+	/*
+	 * These zones warrant special memory allocator hints
+	 */
+	xfs_inode_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
+					KM_ZONE_SPREAD, NULL);
+	xfs_ili_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
+					KM_ZONE_SPREAD, NULL);
+	xfs_chashlist_zone =
+		kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist",
+					KM_ZONE_SPREAD, NULL);
 
 	/*
 	 * Allocate global trace buffers.
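Finally, KM_ZONE_SPREAD deliberately expands to 0 in this patch: the inode, inode-log-item, and cluster-hash zones are tagged now so that once the slab allocator grows a NUMA cache-spreading flag, only the macro in kmem.h needs to change. A sketch of that eventual switch (the flag name SLAB_MEM_SPREAD is an assumption here, not part of this patch):

/* Today (this patch): a no-op placeholder, callers already tagged. */
#define KM_ZONE_SPREAD	0

/* Later, once the slab layer offers the capability, for example: */
/* #define KM_ZONE_SPREAD	SLAB_MEM_SPREAD */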