Diffstat (limited to 'fs/xfs/xfs_ialloc.c')

 -rw-r--r--  fs/xfs/xfs_ialloc.c | 123
 1 file changed, 73 insertions(+), 50 deletions(-)
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 8f3fae1aa98a..4eeb856183b1 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -136,10 +136,8 @@ xfs_ialloc_ag_alloc(
 	int		ninodes;	/* num inodes per buf */
 	xfs_agino_t	thisino;	/* current inode number, for loop */
 	int		version;	/* inode version number to use */
-	int		isaligned;	/* inode allocation at stripe unit */
+	int		isaligned = 0;	/* inode allocation at stripe unit */
 					/* boundary */
-	xfs_dinode_core_t dic;		/* a dinode_core to copy to new */
-					/* inodes */
 
 	args.tp = tp;
 	args.mp = tp->t_mountp;
@@ -154,47 +152,75 @@ xfs_ialloc_ag_alloc(
 		return XFS_ERROR(ENOSPC);
 	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
 	/*
-	 * Set the alignment for the allocation.
-	 * If stripe alignment is turned on then align at stripe unit
-	 * boundary.
-	 * If the cluster size is smaller than a filesystem block
-	 * then we're doing I/O for inodes in filesystem block size pieces,
-	 * so don't need alignment anyway.
-	 */
-	isaligned = 0;
-	if (args.mp->m_sinoalign) {
-		ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
-		args.alignment = args.mp->m_dalign;
-		isaligned = 1;
-	} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
-		   args.mp->m_sb.sb_inoalignmt >=
-		   XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
-			args.alignment = args.mp->m_sb.sb_inoalignmt;
-	else
-		args.alignment = 1;
+	 * First try to allocate inodes contiguous with the last-allocated
+	 * chunk of inodes. If the filesystem is striped, this will fill
+	 * an entire stripe unit with inodes.
+	 */
 	agi = XFS_BUF_TO_AGI(agbp);
-	/*
-	 * Need to figure out where to allocate the inode blocks.
-	 * Ideally they should be spaced out through the a.g.
-	 * For now, just allocate blocks up front.
-	 */
-	args.agbno = be32_to_cpu(agi->agi_root);
-	args.fsbno = XFS_AGB_TO_FSB(args.mp, be32_to_cpu(agi->agi_seqno),
-			args.agbno);
-	/*
-	 * Allocate a fixed-size extent of inodes.
-	 */
-	args.type = XFS_ALLOCTYPE_NEAR_BNO;
-	args.mod = args.total = args.wasdel = args.isfl = args.userdata =
-		args.minalignslop = 0;
-	args.prod = 1;
-	/*
-	 * Allow space for the inode btree to split.
-	 */
-	args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
-	if ((error = xfs_alloc_vextent(&args)))
-		return error;
+	newino = be32_to_cpu(agi->agi_newino);
+	if(likely(newino != NULLAGINO)) {
+		args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
+				XFS_IALLOC_BLOCKS(args.mp);
+		args.fsbno = XFS_AGB_TO_FSB(args.mp,
+				be32_to_cpu(agi->agi_seqno), args.agbno);
+		args.type = XFS_ALLOCTYPE_THIS_BNO;
+		args.mod = args.total = args.wasdel = args.isfl =
+			args.userdata = args.minalignslop = 0;
+		args.prod = 1;
+		args.alignment = 1;
+		/*
+		 * Allow space for the inode btree to split.
+		 */
+		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
+		if ((error = xfs_alloc_vextent(&args)))
+			return error;
+	} else
+		args.fsbno = NULLFSBLOCK;
 
+	if (unlikely(args.fsbno == NULLFSBLOCK)) {
+		/*
+		 * Set the alignment for the allocation.
+		 * If stripe alignment is turned on then align at stripe unit
+		 * boundary.
+		 * If the cluster size is smaller than a filesystem block
+		 * then we're doing I/O for inodes in filesystem block size
+		 * pieces, so don't need alignment anyway.
+		 */
+		isaligned = 0;
+		if (args.mp->m_sinoalign) {
+			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
+			args.alignment = args.mp->m_dalign;
+			isaligned = 1;
+		} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
+			   args.mp->m_sb.sb_inoalignmt >=
+			   XFS_B_TO_FSBT(args.mp,
+					XFS_INODE_CLUSTER_SIZE(args.mp)))
+				args.alignment = args.mp->m_sb.sb_inoalignmt;
+		else
+			args.alignment = 1;
+		/*
+		 * Need to figure out where to allocate the inode blocks.
+		 * Ideally they should be spaced out through the a.g.
+		 * For now, just allocate blocks up front.
+		 */
+		args.agbno = be32_to_cpu(agi->agi_root);
+		args.fsbno = XFS_AGB_TO_FSB(args.mp,
+				be32_to_cpu(agi->agi_seqno), args.agbno);
+		/*
+		 * Allocate a fixed-size extent of inodes.
+		 */
+		args.type = XFS_ALLOCTYPE_NEAR_BNO;
+		args.mod = args.total = args.wasdel = args.isfl =
+			args.userdata = args.minalignslop = 0;
+		args.prod = 1;
+		/*
+		 * Allow space for the inode btree to split.
+		 */
+		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
+		if ((error = xfs_alloc_vextent(&args)))
+			return error;
+	}
+
 	/*
 	 * If stripe alignment is turned on, then try again with cluster
 	 * alignment.
@@ -250,10 +276,6 @@ xfs_ialloc_ag_alloc(
 	else
 		version = XFS_DINODE_VERSION_1;
 
-	memset(&dic, 0, sizeof(xfs_dinode_core_t));
-	INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
-	INT_SET(dic.di_version, ARCH_CONVERT, version);
-
 	for (j = 0; j < nbufs; j++) {
 		/*
 		 * Get the block.
@@ -266,12 +288,13 @@ xfs_ialloc_ag_alloc(
 		ASSERT(fbuf);
 		ASSERT(!XFS_BUF_GETERROR(fbuf));
 		/*
-		 * Loop over the inodes in this buffer.
+		 * Set initial values for the inodes in this buffer.
 		 */
-
+		xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
 		for (i = 0; i < ninodes; i++) {
 			free = XFS_MAKE_IPTR(args.mp, fbuf, i);
-			memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t));
+			INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
+			INT_SET(free->di_core.di_version, ARCH_CONVERT, version);
 			INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
 			xfs_ialloc_log_di(tp, fbuf, i,
 				XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
@@ -1028,7 +1051,7 @@ xfs_difree(
 	rec.ir_freecount++;
 
 	/*
-	 * When an inode cluster is free, it becomes elgible for removal
+	 * When an inode cluster is free, it becomes eligible for removal
 	 */
 	if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
 	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
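
For orientation, here is a simplified, self-contained sketch of the placement strategy the second hunk introduces: first attempt an exact-block (XFS_ALLOCTYPE_THIS_BNO) allocation immediately after the last-allocated inode chunk recorded in agi_newino, and fall back to the older near-block (XFS_ALLOCTYPE_NEAR_BNO) search by the inode btree root only if that fails. All names in the sketch (alloc_arg, alloc_extent, NULL_BLOCK, CHUNK_BLOCKS, alloc_inode_chunk) are invented stand-ins for illustration, not the kernel's API, and the allocator stub is a placeholder for xfs_alloc_vextent().

/*
 * Simplified sketch of the two-step inode chunk placement above.
 * All types and helpers here are hypothetical stand-ins, not XFS code.
 */
#include <stdint.h>
#include <stdio.h>

#define NULL_BLOCK	((uint64_t)-1)	/* stand-in for NULLFSBLOCK */
#define CHUNK_BLOCKS	4		/* stand-in for XFS_IALLOC_BLOCKS() */

enum alloc_type { ALLOC_THIS_BNO, ALLOC_NEAR_BNO };

struct alloc_arg {
	enum alloc_type	type;		/* placement policy */
	uint64_t	target;		/* requested block */
	uint64_t	result;		/* NULL_BLOCK if nothing was found */
};

/*
 * Placeholder extent allocator.  The real xfs_alloc_vextent() searches the
 * free-space btrees; here the exact-block request "fails" when the target
 * block number is odd, so the fallback path gets exercised.
 */
static int alloc_extent(struct alloc_arg *arg)
{
	if (arg->type == ALLOC_THIS_BNO && (arg->target & 1))
		arg->result = NULL_BLOCK;	/* exact placement unavailable */
	else
		arg->result = arg->target;
	return 0;				/* 0 means success, as in the kernel */
}

/*
 * First try to place the new chunk directly after the last-allocated one
 * (derived from agi_newino in the real code); only if that exact placement
 * fails, fall back to a search near the inode btree root.
 */
static int alloc_inode_chunk(uint64_t last_chunk_block, uint64_t btree_root,
			     uint64_t *out)
{
	struct alloc_arg arg = { .result = NULL_BLOCK };
	int error;

	if (last_chunk_block != NULL_BLOCK) {
		arg.type = ALLOC_THIS_BNO;
		arg.target = last_chunk_block + CHUNK_BLOCKS;
		if ((error = alloc_extent(&arg)))
			return error;
	}
	if (arg.result == NULL_BLOCK) {
		arg.type = ALLOC_NEAR_BNO;
		arg.target = btree_root;
		if ((error = alloc_extent(&arg)))
			return error;
	}
	*out = arg.result;
	return 0;
}

int main(void)
{
	uint64_t blk;

	/* Last chunk at block 13: 13 + 4 = 17 is "busy", so we fall back. */
	if (alloc_inode_chunk(13, 100, &blk) == 0)
		printf("chunk placed at block %llu\n", (unsigned long long)blk);
	return 0;
}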