author     Nathan Scott <nathans@sgi.com>    2006-09-27 21:03:27 -0400
committer  Tim Shimmin <tes@sgi.com>         2006-09-27 21:03:27 -0400
commit     77e4635ae191774526ed695482a151ac986f3806 (patch)
tree       42cfa03f913883cd7f3d53be19b7e8c25258ee2c /fs/xfs
parent     572d95f49f3652fffe8242c4498b85f4083e52ab (diff)
[XFS] Add a greedy allocation interface, allocating within a min/max size
range.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26803a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
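
The helper introduced here zeroes and returns the largest buffer it can obtain within a caller-supplied [minsize, maxsize] byte range, halving the request on failure and falling back to a blocking KM_SLEEP attempt at the minimum size; the size actually obtained is reported back through the first argument. A minimal caller might look like the sketch below (the local names and the one-to-four-page bounds are illustrative only, borrowed from the xfs_bulkstat() conversion further down in this patch):

	size_t	bufsize;	/* receives the number of bytes actually allocated */
	char	*buf;

	buf = kmem_zalloc_greedy(&bufsize, NBPC, NBPC * 4,
				 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	/* ... bufsize may be anywhere from NBPC up to NBPC * 4 ... */
	kmem_free(buf, bufsize);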
Diffstat (limited to 'fs/xfs')
 -rw-r--r--   fs/xfs/linux-2.6/kmem.c    16
 -rw-r--r--   fs/xfs/linux-2.6/kmem.h     3
 -rw-r--r--   fs/xfs/quota/xfs_qm.c      13
 -rw-r--r--   fs/xfs/xfs_iget.c          17
 -rw-r--r--   fs/xfs/xfs_itable.c        16
 5 files changed, 34 insertions, 31 deletions
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index f77fe5c8fcc1..80b9340488e5 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -68,6 +68,22 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
 	return ptr;
 }
 
+void *
+kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
+		   unsigned int __nocast flags)
+{
+	void		*ptr;
+
+	while (!(ptr = kmem_zalloc(maxsize, flags))) {
+		if ((maxsize >>= 1) <= minsize) {
+			maxsize = minsize;
+			flags = KM_SLEEP;
+		}
+	}
+	*size = maxsize;
+	return ptr;
+}
+
 void
 kmem_free(void *ptr, size_t size)
 {
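
To make the fallback sequence above concrete, here is a small stand-alone user-space model of the loop (illustration only, not kernel code); it simply prints the sizes that would be attempted in the worst case, assuming every allocation except the final minimum-size, KM_SLEEP-backed one fails:

	#include <stdio.h>
	#include <stddef.h>

	/* Toy model of kmem_zalloc_greedy()'s size fallback. */
	static void show_attempts(size_t minsize, size_t maxsize)
	{
		for (;;) {
			printf("try %zu bytes\n", maxsize);
			/* Pretend every attempt except the minimum-size one fails. */
			if (maxsize == minsize)
				break;
			if ((maxsize >>= 1) <= minsize)
				maxsize = minsize;	/* clamp; the real code also switches to KM_SLEEP here */
		}
	}

	int main(void)
	{
		show_attempts(4096, 4096 * 4);	/* e.g. one to four 4 KB pages */
		return 0;
	}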
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 6d24274fb3cb..9ebabdf7829c 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -55,8 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
 }
 
 extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
 extern void *kmem_zalloc(size_t, unsigned int __nocast);
+extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
 extern void  kmem_free(void *, size_t);
 
 /*
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 3f86c7c04648..8b2c6cf2c849 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -112,17 +112,16 @@ xfs_Gqm_init(void)
 {
 	xfs_dqhash_t	*udqhash, *gdqhash;
 	xfs_qm_t	*xqm;
-	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
+	uint		i, hsize;
 
 	/*
 	 * Initialize the dquot hash tables.
 	 */
-	hsize = XFS_QM_HASHSIZE_HIGH;
-	while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
-		if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
-			flags = KM_SLEEP;
-	}
-	gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
+	udqhash = kmem_zalloc_greedy(&hsize,
+				     XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
+				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
+	hsize /= sizeof(xfs_dqhash_t);
 	ndquot = hsize << 8;
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 30eebc2fd90e..e304ab078273 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -50,7 +50,7 @@ void
 xfs_ihash_init(xfs_mount_t *mp)
 {
 	__uint64_t	icount;
-	uint		i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
+	uint		i;
 
 	if (!mp->m_ihsize) {
 		icount = mp->m_maxicount ? mp->m_maxicount :
@@ -61,14 +61,13 @@ xfs_ihash_init(xfs_mount_t *mp)
 			 (64 * NBPP) / sizeof(xfs_ihash_t));
 	}
 
-	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
-						sizeof(xfs_ihash_t), flags))) {
-		if ((mp->m_ihsize >>= 1) <= NBPP)
-			flags = KM_SLEEP;
-	}
-	for (i = 0; i < mp->m_ihsize; i++) {
+	mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
+					 NBPC * sizeof(xfs_ihash_t),
+					 mp->m_ihsize * sizeof(xfs_ihash_t),
+					 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	mp->m_ihsize /= sizeof(xfs_ihash_t);
+	for (i = 0; i < mp->m_ihsize; i++)
 		rwlock_init(&(mp->m_ihash[i].ih_lock));
-	}
 }
 
 /*
@@ -77,7 +76,7 @@ xfs_ihash_init(xfs_mount_t *mp)
 void
 xfs_ihash_free(xfs_mount_t *mp)
 {
-	kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
+	kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
 	mp->m_ihash = NULL;
 }
 
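
One detail worth noting in the xfs_ihash_init() hunk above: mp->m_ihsize changes units across the call. It goes in as a desired bucket count (used only to form the byte-sized min/max bounds), comes back holding the number of bytes actually allocated, and the final division restores a bucket count. The same lines, annotated (the comments are added here and are not part of the patch):

	mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
			NBPC * sizeof(xfs_ihash_t),		/* minimum: NBPC buckets, expressed in bytes */
			mp->m_ihsize * sizeof(xfs_ihash_t),	/* maximum: the desired bucket count, in bytes */
			KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	mp->m_ihsize /= sizeof(xfs_ihash_t);			/* convert bytes back to a bucket count */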
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 612689940659..0fbbd7b9c696 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -326,7 +326,6 @@ xfs_bulkstat(
 	int			i;	/* loop index */
 	int			icount;	/* count of inodes good in irbuf */
 	int			irbsize; /* size of irec buffer in bytes */
-	unsigned int		kmflags; /* flags for allocating irec buffer */
 	xfs_ino_t		ino;	/* inode number (filesystem) */
 	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
@@ -371,19 +370,8 @@ xfs_bulkstat(
 		  (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
 	nimask = ~(nicluster - 1);
 	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
-	/*
-	 * Allocate a local buffer for inode cluster btree records.
-	 * This caps our maximum readahead window (so don't be stingy)
-	 * but we must handle the case where we can't get a contiguous
-	 * multi-page buffer, so we drop back toward pagesize; the end
-	 * case we ensure succeeds, via appropriate allocation flags.
-	 */
-	irbsize = NBPP * 4;
-	kmflags = KM_SLEEP | KM_MAYFAIL;
-	while (!(irbuf = kmem_alloc(irbsize, kmflags))) {
-		if ((irbsize >>= 1) <= NBPP)
-			kmflags = KM_SLEEP;
-	}
+	irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
+				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	nirbuf = irbsize / sizeof(*irbuf);
 
 	/*