Diffstat (limited to 'fs/xfs/libxfs/xfs_ialloc.c')
-rw-r--r-- | fs/xfs/libxfs/xfs_ialloc.c | 2189 |
1 file changed, 2189 insertions, 0 deletions
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
new file mode 100644
index 000000000000..16fb63a9bc5e
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -0,0 +1,2189 @@
1 | /* | ||
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_shared.h" | ||
21 | #include "xfs_format.h" | ||
22 | #include "xfs_log_format.h" | ||
23 | #include "xfs_trans_resv.h" | ||
24 | #include "xfs_bit.h" | ||
25 | #include "xfs_inum.h" | ||
26 | #include "xfs_sb.h" | ||
27 | #include "xfs_ag.h" | ||
28 | #include "xfs_mount.h" | ||
29 | #include "xfs_inode.h" | ||
30 | #include "xfs_btree.h" | ||
31 | #include "xfs_ialloc.h" | ||
32 | #include "xfs_ialloc_btree.h" | ||
33 | #include "xfs_alloc.h" | ||
34 | #include "xfs_rtalloc.h" | ||
35 | #include "xfs_error.h" | ||
36 | #include "xfs_bmap.h" | ||
37 | #include "xfs_cksum.h" | ||
38 | #include "xfs_trans.h" | ||
39 | #include "xfs_buf_item.h" | ||
40 | #include "xfs_icreate_item.h" | ||
41 | #include "xfs_icache.h" | ||
42 | #include "xfs_dinode.h" | ||
43 | #include "xfs_trace.h" | ||
44 | |||
45 | |||
46 | /* | ||
47 | * Allocation group level functions. | ||
48 | */ | ||
49 | static inline int | ||
50 | xfs_ialloc_cluster_alignment( | ||
51 | xfs_alloc_arg_t *args) | ||
52 | { | ||
53 | if (xfs_sb_version_hasalign(&args->mp->m_sb) && | ||
54 | args->mp->m_sb.sb_inoalignmt >= | ||
55 | XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size)) | ||
56 | return args->mp->m_sb.sb_inoalignmt; | ||
57 | return 1; | ||
58 | } | ||
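A minimal stand-alone sketch (not kernel code) of the cluster-alignment decision made by xfs_ialloc_cluster_alignment() above, using hypothetical geometry and assuming the xfs_sb_version_hasalign() feature bit is set; XFS_B_TO_FSBT() is modelled as a right shift by the block size log.

#include <stdio.h>

int main(void)
{
	unsigned int blocklog = 12;		/* hypothetical 4096-byte blocks */
	unsigned int cluster_bytes = 8192;	/* hypothetical m_inode_cluster_size */
	unsigned int inoalignmt = 4;		/* hypothetical sb_inoalignmt, in blocks */

	/* XFS_B_TO_FSBT(): bytes to filesystem blocks, rounded down */
	unsigned int cluster_blocks = cluster_bytes >> blocklog;

	/* only honour sb_inoalignmt when it covers at least one full cluster */
	unsigned int align = (inoalignmt >= cluster_blocks) ? inoalignmt : 1;

	printf("cluster spans %u blocks, allocation alignment %u\n",
	       cluster_blocks, align);
	return 0;
}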
59 | |||
60 | /* | ||
61 | * Lookup a record by ino in the btree given by cur. | ||
62 | */ | ||
63 | int /* error */ | ||
64 | xfs_inobt_lookup( | ||
65 | struct xfs_btree_cur *cur, /* btree cursor */ | ||
66 | xfs_agino_t ino, /* starting inode of chunk */ | ||
67 | xfs_lookup_t dir, /* <=, >=, == */ | ||
68 | int *stat) /* success/failure */ | ||
69 | { | ||
70 | cur->bc_rec.i.ir_startino = ino; | ||
71 | cur->bc_rec.i.ir_freecount = 0; | ||
72 | cur->bc_rec.i.ir_free = 0; | ||
73 | return xfs_btree_lookup(cur, dir, stat); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Update the record referred to by cur to the value given. | ||
78 | * This either works (return 0) or gets an EFSCORRUPTED error. | ||
79 | */ | ||
80 | STATIC int /* error */ | ||
81 | xfs_inobt_update( | ||
82 | struct xfs_btree_cur *cur, /* btree cursor */ | ||
83 | xfs_inobt_rec_incore_t *irec) /* btree record */ | ||
84 | { | ||
85 | union xfs_btree_rec rec; | ||
86 | |||
87 | rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); | ||
88 | rec.inobt.ir_freecount = cpu_to_be32(irec->ir_freecount); | ||
89 | rec.inobt.ir_free = cpu_to_be64(irec->ir_free); | ||
90 | return xfs_btree_update(cur, &rec); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Get the data from the pointed-to record. | ||
95 | */ | ||
96 | int /* error */ | ||
97 | xfs_inobt_get_rec( | ||
98 | struct xfs_btree_cur *cur, /* btree cursor */ | ||
99 | xfs_inobt_rec_incore_t *irec, /* btree record */ | ||
100 | int *stat) /* output: success/failure */ | ||
101 | { | ||
102 | union xfs_btree_rec *rec; | ||
103 | int error; | ||
104 | |||
105 | error = xfs_btree_get_rec(cur, &rec, stat); | ||
106 | if (!error && *stat == 1) { | ||
107 | irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); | ||
108 | irec->ir_freecount = be32_to_cpu(rec->inobt.ir_freecount); | ||
109 | irec->ir_free = be64_to_cpu(rec->inobt.ir_free); | ||
110 | } | ||
111 | return error; | ||
112 | } | ||
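The record fields above live on disk in big-endian form and are converted to host order on the way in (and back with cpu_to_be32()/cpu_to_be64() in xfs_inobt_update()). A minimal stand-alone sketch, not kernel code, with hypothetical values; glibc's be32toh()/be64toh() stand in for be32_to_cpu()/be64_to_cpu().

#include <stdio.h>
#include <stdint.h>
#include <endian.h>	/* htobe32/be32toh etc. stand in for the kernel helpers */

int main(void)
{
	/* hypothetical on-disk inobt record fields, stored big-endian */
	uint32_t disk_startino = htobe32(128);
	uint32_t disk_freecount = htobe32(60);
	uint64_t disk_free = htobe64(~0ULL << 4);	/* offsets 4..63 free */

	/* in-core copy in host byte order, as xfs_inobt_get_rec() produces */
	uint32_t ir_startino = be32toh(disk_startino);
	uint32_t ir_freecount = be32toh(disk_freecount);
	uint64_t ir_free = be64toh(disk_free);

	printf("chunk at agino %u: %u free, mask %#llx\n",
	       ir_startino, ir_freecount, (unsigned long long)ir_free);
	return 0;
}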
113 | |||
114 | /* | ||
115 | * Insert a single inobt record. Cursor must already point to desired location. | ||
116 | */ | ||
117 | STATIC int | ||
118 | xfs_inobt_insert_rec( | ||
119 | struct xfs_btree_cur *cur, | ||
120 | __int32_t freecount, | ||
121 | xfs_inofree_t free, | ||
122 | int *stat) | ||
123 | { | ||
124 | cur->bc_rec.i.ir_freecount = freecount; | ||
125 | cur->bc_rec.i.ir_free = free; | ||
126 | return xfs_btree_insert(cur, stat); | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Insert records describing a newly allocated inode chunk into the inobt. | ||
131 | */ | ||
132 | STATIC int | ||
133 | xfs_inobt_insert( | ||
134 | struct xfs_mount *mp, | ||
135 | struct xfs_trans *tp, | ||
136 | struct xfs_buf *agbp, | ||
137 | xfs_agino_t newino, | ||
138 | xfs_agino_t newlen, | ||
139 | xfs_btnum_t btnum) | ||
140 | { | ||
141 | struct xfs_btree_cur *cur; | ||
142 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); | ||
143 | xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); | ||
144 | xfs_agino_t thisino; | ||
145 | int i; | ||
146 | int error; | ||
147 | |||
148 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum); | ||
149 | |||
150 | for (thisino = newino; | ||
151 | thisino < newino + newlen; | ||
152 | thisino += XFS_INODES_PER_CHUNK) { | ||
153 | error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); | ||
154 | if (error) { | ||
155 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
156 | return error; | ||
157 | } | ||
158 | ASSERT(i == 0); | ||
159 | |||
160 | error = xfs_inobt_insert_rec(cur, XFS_INODES_PER_CHUNK, | ||
161 | XFS_INOBT_ALL_FREE, &i); | ||
162 | if (error) { | ||
163 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
164 | return error; | ||
165 | } | ||
166 | ASSERT(i == 1); | ||
167 | } | ||
168 | |||
169 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
170 | |||
171 | return 0; | ||
172 | } | ||
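A stand-alone sketch, not kernel code, of the insertion loop above: a freshly allocated run of inodes is described by one btree record per 64-inode chunk (XFS_INODES_PER_CHUNK), each record starting out with every inode free. The inode numbers below are hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int inodes_per_chunk = 64;	/* XFS_INODES_PER_CHUNK */
	unsigned int newino = 128;		/* hypothetical first new agino */
	unsigned int newlen = 256;		/* hypothetical: four chunks' worth */
	unsigned int thisino;

	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += inodes_per_chunk)
		printf("insert record: startino %u, freecount 64, mask all free\n",
		       thisino);
	return 0;
}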
173 | |||
174 | /* | ||
175 | * Verify that the number of free inodes in the AGI is correct. | ||
176 | */ | ||
177 | #ifdef DEBUG | ||
178 | STATIC int | ||
179 | xfs_check_agi_freecount( | ||
180 | struct xfs_btree_cur *cur, | ||
181 | struct xfs_agi *agi) | ||
182 | { | ||
183 | if (cur->bc_nlevels == 1) { | ||
184 | xfs_inobt_rec_incore_t rec; | ||
185 | int freecount = 0; | ||
186 | int error; | ||
187 | int i; | ||
188 | |||
189 | error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); | ||
190 | if (error) | ||
191 | return error; | ||
192 | |||
193 | do { | ||
194 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
195 | if (error) | ||
196 | return error; | ||
197 | |||
198 | if (i) { | ||
199 | freecount += rec.ir_freecount; | ||
200 | error = xfs_btree_increment(cur, 0, &i); | ||
201 | if (error) | ||
202 | return error; | ||
203 | } | ||
204 | } while (i == 1); | ||
205 | |||
206 | if (!XFS_FORCED_SHUTDOWN(cur->bc_mp)) | ||
207 | ASSERT(freecount == be32_to_cpu(agi->agi_freecount)); | ||
208 | } | ||
209 | return 0; | ||
210 | } | ||
211 | #else | ||
212 | #define xfs_check_agi_freecount(cur, agi) 0 | ||
213 | #endif | ||
214 | |||
215 | /* | ||
216 | * Initialise a new set of inodes. When called without a transaction context | ||
217 | * (e.g. from recovery) we initiate a delayed write of the inode buffers rather | ||
218 | * than logging them (which in a transaction context puts them into the AIL | ||
219 | * for writeback rather than the xfsbufd queue). | ||
220 | */ | ||
221 | int | ||
222 | xfs_ialloc_inode_init( | ||
223 | struct xfs_mount *mp, | ||
224 | struct xfs_trans *tp, | ||
225 | struct list_head *buffer_list, | ||
226 | xfs_agnumber_t agno, | ||
227 | xfs_agblock_t agbno, | ||
228 | xfs_agblock_t length, | ||
229 | unsigned int gen) | ||
230 | { | ||
231 | struct xfs_buf *fbuf; | ||
232 | struct xfs_dinode *free; | ||
233 | int nbufs, blks_per_cluster, inodes_per_cluster; | ||
234 | int version; | ||
235 | int i, j; | ||
236 | xfs_daddr_t d; | ||
237 | xfs_ino_t ino = 0; | ||
238 | |||
239 | /* | ||
240 | * Loop over the new block(s), filling in the inodes. For small block | ||
241 | * sizes, manipulate the inodes in buffers which are multiples of the | ||
242 | * block size. | ||
243 | */ | ||
244 | blks_per_cluster = xfs_icluster_size_fsb(mp); | ||
245 | inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; | ||
246 | nbufs = length / blks_per_cluster; | ||
247 | |||
248 | /* | ||
249 | * Figure out what version number to use in the inodes we create. If | ||
250 | * the superblock version has caught up to the one that supports the new | ||
251 | * inode format, then use the new inode version. Otherwise use the old | ||
252 | * version so that old kernels will continue to be able to use the file | ||
253 | * system. | ||
254 | * | ||
255 | * For v3 inodes, we also need to write the inode number into the inode, | ||
256 | * so calculate the first inode number of the chunk here as | ||
257 | * XFS_OFFBNO_TO_AGINO() only works within a filesystem block, not | ||
258 | * across multiple filesystem blocks (such as a cluster) and so cannot | ||
259 | * be used in the cluster buffer loop below. | ||
260 | * | ||
261 | * Further, because we are writing the inode directly into the buffer | ||
262 | * and calculating a CRC on the entire inode, we have to log the entire | ||
263 | * inode so that the entire range the CRC covers is present in the log. | ||
264 | * That means for v3 inodes we log the entire buffer rather than just the | ||
265 | * inode cores. | ||
266 | */ | ||
267 | if (xfs_sb_version_hascrc(&mp->m_sb)) { | ||
268 | version = 3; | ||
269 | ino = XFS_AGINO_TO_INO(mp, agno, | ||
270 | XFS_OFFBNO_TO_AGINO(mp, agbno, 0)); | ||
271 | |||
272 | /* | ||
273 | * log the initialisation that is about to take place as a | ||
274 | * logical operation. This means the transaction does not | ||
275 | * need to log the physical changes to the inode buffers as log | ||
276 | * recovery will know what initialisation is actually needed. | ||
277 | * Hence we only need to log the buffers as "ordered" buffers so | ||
278 | * they track in the AIL as if they were physically logged. | ||
279 | */ | ||
280 | if (tp) | ||
281 | xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos, | ||
282 | mp->m_sb.sb_inodesize, length, gen); | ||
283 | } else | ||
284 | version = 2; | ||
285 | |||
286 | for (j = 0; j < nbufs; j++) { | ||
287 | /* | ||
288 | * Get the block. | ||
289 | */ | ||
290 | d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster)); | ||
291 | fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, | ||
292 | mp->m_bsize * blks_per_cluster, | ||
293 | XBF_UNMAPPED); | ||
294 | if (!fbuf) | ||
295 | return ENOMEM; | ||
296 | |||
297 | /* Initialize the inode buffers and log them appropriately. */ | ||
298 | fbuf->b_ops = &xfs_inode_buf_ops; | ||
299 | xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); | ||
300 | for (i = 0; i < inodes_per_cluster; i++) { | ||
301 | int ioffset = i << mp->m_sb.sb_inodelog; | ||
302 | uint isize = xfs_dinode_size(version); | ||
303 | |||
304 | free = xfs_make_iptr(mp, fbuf, i); | ||
305 | free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); | ||
306 | free->di_version = version; | ||
307 | free->di_gen = cpu_to_be32(gen); | ||
308 | free->di_next_unlinked = cpu_to_be32(NULLAGINO); | ||
309 | |||
310 | if (version == 3) { | ||
311 | free->di_ino = cpu_to_be64(ino); | ||
312 | ino++; | ||
313 | uuid_copy(&free->di_uuid, &mp->m_sb.sb_uuid); | ||
314 | xfs_dinode_calc_crc(mp, free); | ||
315 | } else if (tp) { | ||
316 | /* just log the inode core */ | ||
317 | xfs_trans_log_buf(tp, fbuf, ioffset, | ||
318 | ioffset + isize - 1); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | if (tp) { | ||
323 | /* | ||
324 | * Mark the buffer as an inode allocation buffer so it | ||
325 | * sticks in the AIL at the point of this allocation | ||
326 | * transaction. This ensures that they are on disk before | ||
327 | * the tail of the log can be moved past this | ||
328 | * transaction (i.e. by preventing relogging from moving | ||
329 | * it forward in the log). | ||
330 | */ | ||
331 | xfs_trans_inode_alloc_buf(tp, fbuf); | ||
332 | if (version == 3) { | ||
333 | /* | ||
334 | * Mark the buffer as ordered so that it is | ||
335 | * not physically logged in the transaction but | ||
336 | * is still tracked in the AIL as part of the | ||
337 | * transaction and pins the log appropriately. | ||
338 | */ | ||
339 | xfs_trans_ordered_buf(tp, fbuf); | ||
340 | xfs_trans_log_buf(tp, fbuf, 0, | ||
341 | BBTOB(fbuf->b_length) - 1); | ||
342 | } | ||
343 | } else { | ||
344 | fbuf->b_flags |= XBF_DONE; | ||
345 | xfs_buf_delwri_queue(fbuf, buffer_list); | ||
346 | xfs_buf_relse(fbuf); | ||
347 | } | ||
348 | } | ||
349 | return 0; | ||
350 | } | ||
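A stand-alone sketch, not kernel code, of the buffer-sizing arithmetic at the top of xfs_ialloc_inode_init(): inodes_per_cluster is blks_per_cluster shifted by sb_inopblog (log2 of inodes per block), and the chunk is carved into nbufs cluster buffers. All numbers below are hypothetical.

#include <stdio.h>

int main(void)
{
	int blks_per_cluster = 2;	/* hypothetical: 8k inode cluster / 4k blocks */
	int inopblog = 3;		/* hypothetical: 8 x 512-byte inodes per block */
	int length = 8;			/* hypothetical chunk length in blocks (64 inodes) */

	int inodes_per_cluster = blks_per_cluster << inopblog;	/* 16 */
	int nbufs = length / blks_per_cluster;			/* 4 cluster buffers */

	printf("%d buffers of %d inodes each to initialise\n",
	       nbufs, inodes_per_cluster);
	return 0;
}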
351 | |||
352 | /* | ||
353 | * Allocate new inodes in the allocation group specified by agbp. | ||
354 | * Return 0 for success, else error code. | ||
355 | */ | ||
356 | STATIC int /* error code or 0 */ | ||
357 | xfs_ialloc_ag_alloc( | ||
358 | xfs_trans_t *tp, /* transaction pointer */ | ||
359 | xfs_buf_t *agbp, /* alloc group buffer */ | ||
360 | int *alloc) | ||
361 | { | ||
362 | xfs_agi_t *agi; /* allocation group header */ | ||
363 | xfs_alloc_arg_t args; /* allocation argument structure */ | ||
364 | xfs_agnumber_t agno; | ||
365 | int error; | ||
366 | xfs_agino_t newino; /* new first inode's number */ | ||
367 | xfs_agino_t newlen; /* new number of inodes */ | ||
368 | int isaligned = 0; /* inode allocation at stripe unit */ | ||
369 | /* boundary */ | ||
370 | struct xfs_perag *pag; | ||
371 | |||
372 | memset(&args, 0, sizeof(args)); | ||
373 | args.tp = tp; | ||
374 | args.mp = tp->t_mountp; | ||
375 | |||
376 | /* | ||
377 | * Locking will ensure that we don't have two callers in here | ||
378 | * at one time. | ||
379 | */ | ||
380 | newlen = args.mp->m_ialloc_inos; | ||
381 | if (args.mp->m_maxicount && | ||
382 | args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) | ||
383 | return ENOSPC; | ||
384 | args.minlen = args.maxlen = args.mp->m_ialloc_blks; | ||
385 | /* | ||
386 | * First try to allocate inodes contiguous with the last-allocated | ||
387 | * chunk of inodes. If the filesystem is striped, this will fill | ||
388 | * an entire stripe unit with inodes. | ||
389 | */ | ||
390 | agi = XFS_BUF_TO_AGI(agbp); | ||
391 | newino = be32_to_cpu(agi->agi_newino); | ||
392 | agno = be32_to_cpu(agi->agi_seqno); | ||
393 | args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + | ||
394 | args.mp->m_ialloc_blks; | ||
395 | if (likely(newino != NULLAGINO && | ||
396 | (args.agbno < be32_to_cpu(agi->agi_length)))) { | ||
397 | args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); | ||
398 | args.type = XFS_ALLOCTYPE_THIS_BNO; | ||
399 | args.prod = 1; | ||
400 | |||
401 | /* | ||
402 | * We need to take into account alignment here to ensure that | ||
403 | * we don't modify the free list if we fail to have an exact | ||
404 | * block. If we don't have an exact match, and every other | ||
405 | * allocation attempt fails, we'll end up cancelling | ||
406 | * a dirty transaction and shutting down. | ||
407 | * | ||
408 | * For an exact allocation, alignment must be 1, | ||
409 | * however we need to take cluster alignment into account when | ||
410 | * fixing up the freelist. Use the minalignslop field to | ||
411 | * indicate that extra blocks might be required for alignment, | ||
412 | * but not to use them in the actual exact allocation. | ||
413 | */ | ||
414 | args.alignment = 1; | ||
415 | args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1; | ||
416 | |||
417 | /* Allow space for the inode btree to split. */ | ||
418 | args.minleft = args.mp->m_in_maxlevels - 1; | ||
419 | if ((error = xfs_alloc_vextent(&args))) | ||
420 | return error; | ||
421 | |||
422 | /* | ||
423 | * This request might have dirtied the transaction if the AG can | ||
424 | * satisfy the request, but the exact block was not available. | ||
425 | * If the allocation did fail, subsequent requests will relax | ||
426 | * the exact agbno requirement and increase the alignment | ||
427 | * instead. It is critical that the total size of the request | ||
428 | * (len + alignment + slop) does not increase from this point | ||
429 | * on, so reset minalignslop to ensure it is not included in | ||
430 | * subsequent requests. | ||
431 | */ | ||
432 | args.minalignslop = 0; | ||
433 | } else | ||
434 | args.fsbno = NULLFSBLOCK; | ||
435 | |||
436 | if (unlikely(args.fsbno == NULLFSBLOCK)) { | ||
437 | /* | ||
438 | * Set the alignment for the allocation. | ||
439 | * If stripe alignment is turned on then align at stripe unit | ||
440 | * boundary. | ||
441 | * If the cluster size is smaller than a filesystem block | ||
442 | * then we're doing I/O for inodes in filesystem block size | ||
443 | * pieces, so don't need alignment anyway. | ||
444 | */ | ||
445 | isaligned = 0; | ||
446 | if (args.mp->m_sinoalign) { | ||
447 | ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); | ||
448 | args.alignment = args.mp->m_dalign; | ||
449 | isaligned = 1; | ||
450 | } else | ||
451 | args.alignment = xfs_ialloc_cluster_alignment(&args); | ||
452 | /* | ||
453 | * Need to figure out where to allocate the inode blocks. | ||
454 | * Ideally they should be spaced out through the a.g. | ||
455 | * For now, just allocate blocks up front. | ||
456 | */ | ||
457 | args.agbno = be32_to_cpu(agi->agi_root); | ||
458 | args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); | ||
459 | /* | ||
460 | * Allocate a fixed-size extent of inodes. | ||
461 | */ | ||
462 | args.type = XFS_ALLOCTYPE_NEAR_BNO; | ||
463 | args.prod = 1; | ||
464 | /* | ||
465 | * Allow space for the inode btree to split. | ||
466 | */ | ||
467 | args.minleft = args.mp->m_in_maxlevels - 1; | ||
468 | if ((error = xfs_alloc_vextent(&args))) | ||
469 | return error; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * If stripe alignment is turned on, then try again with cluster | ||
474 | * alignment. | ||
475 | */ | ||
476 | if (isaligned && args.fsbno == NULLFSBLOCK) { | ||
477 | args.type = XFS_ALLOCTYPE_NEAR_BNO; | ||
478 | args.agbno = be32_to_cpu(agi->agi_root); | ||
479 | args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); | ||
480 | args.alignment = xfs_ialloc_cluster_alignment(&args); | ||
481 | if ((error = xfs_alloc_vextent(&args))) | ||
482 | return error; | ||
483 | } | ||
484 | |||
485 | if (args.fsbno == NULLFSBLOCK) { | ||
486 | *alloc = 0; | ||
487 | return 0; | ||
488 | } | ||
489 | ASSERT(args.len == args.minlen); | ||
490 | |||
491 | /* | ||
492 | * Stamp and write the inode buffers. | ||
493 | * | ||
494 | * Seed the new inode cluster with a random generation number. This | ||
495 | * prevents short-term reuse of generation numbers if a chunk is | ||
496 | * freed and then immediately reallocated. We use random numbers | ||
497 | * rather than a linear progression to prevent the next generation | ||
498 | * number from being easily guessable. | ||
499 | */ | ||
500 | error = xfs_ialloc_inode_init(args.mp, tp, NULL, agno, args.agbno, | ||
501 | args.len, prandom_u32()); | ||
502 | |||
503 | if (error) | ||
504 | return error; | ||
505 | /* | ||
506 | * Convert the results. | ||
507 | */ | ||
508 | newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); | ||
509 | be32_add_cpu(&agi->agi_count, newlen); | ||
510 | be32_add_cpu(&agi->agi_freecount, newlen); | ||
511 | pag = xfs_perag_get(args.mp, agno); | ||
512 | pag->pagi_freecount += newlen; | ||
513 | xfs_perag_put(pag); | ||
514 | agi->agi_newino = cpu_to_be32(newino); | ||
515 | |||
516 | /* | ||
517 | * Insert records describing the new inode chunk into the btrees. | ||
518 | */ | ||
519 | error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen, | ||
520 | XFS_BTNUM_INO); | ||
521 | if (error) | ||
522 | return error; | ||
523 | |||
524 | if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) { | ||
525 | error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen, | ||
526 | XFS_BTNUM_FINO); | ||
527 | if (error) | ||
528 | return error; | ||
529 | } | ||
530 | /* | ||
531 | * Log allocation group header fields | ||
532 | */ | ||
533 | xfs_ialloc_log_agi(tp, agbp, | ||
534 | XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); | ||
535 | /* | ||
536 | * Modify/log superblock values for inode count and inode free count. | ||
537 | */ | ||
538 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); | ||
539 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); | ||
540 | *alloc = 1; | ||
541 | return 0; | ||
542 | } | ||
543 | |||
544 | STATIC xfs_agnumber_t | ||
545 | xfs_ialloc_next_ag( | ||
546 | xfs_mount_t *mp) | ||
547 | { | ||
548 | xfs_agnumber_t agno; | ||
549 | |||
550 | spin_lock(&mp->m_agirotor_lock); | ||
551 | agno = mp->m_agirotor; | ||
552 | if (++mp->m_agirotor >= mp->m_maxagi) | ||
553 | mp->m_agirotor = 0; | ||
554 | spin_unlock(&mp->m_agirotor_lock); | ||
555 | |||
556 | return agno; | ||
557 | } | ||
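A stand-alone sketch, not kernel code, of the rotor above: under m_agirotor_lock it hands out AG numbers round-robin so new directories get spread across the filesystem. The AG count below is hypothetical and the locking is omitted.

#include <stdio.h>

static unsigned int agirotor;			/* models mp->m_agirotor */
static const unsigned int maxagi = 4;		/* hypothetical AG count */

static unsigned int next_ag(void)
{
	unsigned int agno = agirotor;

	if (++agirotor >= maxagi)
		agirotor = 0;
	return agno;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("%u ", next_ag());	/* prints 0 1 2 3 0 1 */
	printf("\n");
	return 0;
}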
558 | |||
559 | /* | ||
560 | * Select an allocation group to look for a free inode in, based on the parent | ||
561 | * inode and the mode. Return the allocation group buffer. | ||
562 | */ | ||
563 | STATIC xfs_agnumber_t | ||
564 | xfs_ialloc_ag_select( | ||
565 | xfs_trans_t *tp, /* transaction pointer */ | ||
566 | xfs_ino_t parent, /* parent directory inode number */ | ||
567 | umode_t mode, /* bits set to indicate file type */ | ||
568 | int okalloc) /* ok to allocate more space */ | ||
569 | { | ||
570 | xfs_agnumber_t agcount; /* number of ag's in the filesystem */ | ||
571 | xfs_agnumber_t agno; /* current ag number */ | ||
572 | int flags; /* alloc buffer locking flags */ | ||
573 | xfs_extlen_t ineed; /* blocks needed for inode allocation */ | ||
574 | xfs_extlen_t longest = 0; /* longest extent available */ | ||
575 | xfs_mount_t *mp; /* mount point structure */ | ||
576 | int needspace; /* file mode implies space allocated */ | ||
577 | xfs_perag_t *pag; /* per allocation group data */ | ||
578 | xfs_agnumber_t pagno; /* parent (starting) ag number */ | ||
579 | int error; | ||
580 | |||
581 | /* | ||
582 | * Files of these types need at least one block if length > 0 | ||
583 | * (and they won't fit in the inode, but that's hard to figure out). | ||
584 | */ | ||
585 | needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); | ||
586 | mp = tp->t_mountp; | ||
587 | agcount = mp->m_maxagi; | ||
588 | if (S_ISDIR(mode)) | ||
589 | pagno = xfs_ialloc_next_ag(mp); | ||
590 | else { | ||
591 | pagno = XFS_INO_TO_AGNO(mp, parent); | ||
592 | if (pagno >= agcount) | ||
593 | pagno = 0; | ||
594 | } | ||
595 | |||
596 | ASSERT(pagno < agcount); | ||
597 | |||
598 | /* | ||
599 | * Loop through allocation groups, looking for one with a little | ||
600 | * free space in it. Note we don't look for free inodes, exactly. | ||
601 | * Instead, we also account for the need to allocate new inodes, | ||
602 | * which means that blocks must be available for them | ||
603 | * if none are currently free. | ||
604 | */ | ||
605 | agno = pagno; | ||
606 | flags = XFS_ALLOC_FLAG_TRYLOCK; | ||
607 | for (;;) { | ||
608 | pag = xfs_perag_get(mp, agno); | ||
609 | if (!pag->pagi_inodeok) { | ||
610 | xfs_ialloc_next_ag(mp); | ||
611 | goto nextag; | ||
612 | } | ||
613 | |||
614 | if (!pag->pagi_init) { | ||
615 | error = xfs_ialloc_pagi_init(mp, tp, agno); | ||
616 | if (error) | ||
617 | goto nextag; | ||
618 | } | ||
619 | |||
620 | if (pag->pagi_freecount) { | ||
621 | xfs_perag_put(pag); | ||
622 | return agno; | ||
623 | } | ||
624 | |||
625 | if (!okalloc) | ||
626 | goto nextag; | ||
627 | |||
628 | if (!pag->pagf_init) { | ||
629 | error = xfs_alloc_pagf_init(mp, tp, agno, flags); | ||
630 | if (error) | ||
631 | goto nextag; | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Is there enough free space for the file plus a block of | ||
636 | * inodes (if we need to allocate some)? | ||
637 | */ | ||
638 | ineed = mp->m_ialloc_blks; | ||
639 | longest = pag->pagf_longest; | ||
640 | if (!longest) | ||
641 | longest = pag->pagf_flcount > 0; | ||
642 | |||
643 | if (pag->pagf_freeblks >= needspace + ineed && | ||
644 | longest >= ineed) { | ||
645 | xfs_perag_put(pag); | ||
646 | return agno; | ||
647 | } | ||
648 | nextag: | ||
649 | xfs_perag_put(pag); | ||
650 | /* | ||
651 | * No point in iterating over the rest, if we're shutting | ||
652 | * down. | ||
653 | */ | ||
654 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
655 | return NULLAGNUMBER; | ||
656 | agno++; | ||
657 | if (agno >= agcount) | ||
658 | agno = 0; | ||
659 | if (agno == pagno) { | ||
660 | if (flags == 0) | ||
661 | return NULLAGNUMBER; | ||
662 | flags = 0; | ||
663 | } | ||
664 | } | ||
665 | } | ||
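A stand-alone sketch, not kernel code, of the walk order in xfs_ialloc_ag_select(): start at the parent's AG, wrap at the end, and once the whole filesystem has been scanned with trylock semantics, clear the flag and make one more (blocking) pass before giving up. The AG count and the usability predicate below are hypothetical.

#include <stdio.h>

static int ag_is_usable(unsigned int agno, int trylock)
{
	/* hypothetical predicate: only AG 2 qualifies, and only when blocking */
	return agno == 2 && !trylock;
}

int main(void)
{
	unsigned int agcount = 4, pagno = 3, agno = pagno;
	int trylock = 1;			/* models XFS_ALLOC_FLAG_TRYLOCK */

	for (;;) {
		if (ag_is_usable(agno, trylock)) {
			printf("selected AG %u\n", agno);
			return 0;
		}
		if (++agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (!trylock)
				break;		/* second pass failed too */
			trylock = 0;		/* retry without trylock */
		}
	}
	printf("no AG available\n");
	return 0;
}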
666 | |||
667 | /* | ||
668 | * Try to retrieve the next record to the left/right from the current one. | ||
669 | */ | ||
670 | STATIC int | ||
671 | xfs_ialloc_next_rec( | ||
672 | struct xfs_btree_cur *cur, | ||
673 | xfs_inobt_rec_incore_t *rec, | ||
674 | int *done, | ||
675 | int left) | ||
676 | { | ||
677 | int error; | ||
678 | int i; | ||
679 | |||
680 | if (left) | ||
681 | error = xfs_btree_decrement(cur, 0, &i); | ||
682 | else | ||
683 | error = xfs_btree_increment(cur, 0, &i); | ||
684 | |||
685 | if (error) | ||
686 | return error; | ||
687 | *done = !i; | ||
688 | if (i) { | ||
689 | error = xfs_inobt_get_rec(cur, rec, &i); | ||
690 | if (error) | ||
691 | return error; | ||
692 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
693 | } | ||
694 | |||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | STATIC int | ||
699 | xfs_ialloc_get_rec( | ||
700 | struct xfs_btree_cur *cur, | ||
701 | xfs_agino_t agino, | ||
702 | xfs_inobt_rec_incore_t *rec, | ||
703 | int *done) | ||
704 | { | ||
705 | int error; | ||
706 | int i; | ||
707 | |||
708 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); | ||
709 | if (error) | ||
710 | return error; | ||
711 | *done = !i; | ||
712 | if (i) { | ||
713 | error = xfs_inobt_get_rec(cur, rec, &i); | ||
714 | if (error) | ||
715 | return error; | ||
716 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
717 | } | ||
718 | |||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | /* | ||
723 | * Allocate an inode using the inobt-only algorithm. | ||
724 | */ | ||
725 | STATIC int | ||
726 | xfs_dialloc_ag_inobt( | ||
727 | struct xfs_trans *tp, | ||
728 | struct xfs_buf *agbp, | ||
729 | xfs_ino_t parent, | ||
730 | xfs_ino_t *inop) | ||
731 | { | ||
732 | struct xfs_mount *mp = tp->t_mountp; | ||
733 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); | ||
734 | xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); | ||
735 | xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); | ||
736 | xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); | ||
737 | struct xfs_perag *pag; | ||
738 | struct xfs_btree_cur *cur, *tcur; | ||
739 | struct xfs_inobt_rec_incore rec, trec; | ||
740 | xfs_ino_t ino; | ||
741 | int error; | ||
742 | int offset; | ||
743 | int i, j; | ||
744 | |||
745 | pag = xfs_perag_get(mp, agno); | ||
746 | |||
747 | ASSERT(pag->pagi_init); | ||
748 | ASSERT(pag->pagi_inodeok); | ||
749 | ASSERT(pag->pagi_freecount > 0); | ||
750 | |||
751 | restart_pagno: | ||
752 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); | ||
753 | /* | ||
754 | * If pagino is 0 (this is the root inode allocation) use newino. | ||
755 | * This must work because we've just allocated some. | ||
756 | */ | ||
757 | if (!pagino) | ||
758 | pagino = be32_to_cpu(agi->agi_newino); | ||
759 | |||
760 | error = xfs_check_agi_freecount(cur, agi); | ||
761 | if (error) | ||
762 | goto error0; | ||
763 | |||
764 | /* | ||
765 | * If in the same AG as the parent, try to get near the parent. | ||
766 | */ | ||
767 | if (pagno == agno) { | ||
768 | int doneleft; /* done, to the left */ | ||
769 | int doneright; /* done, to the right */ | ||
770 | int searchdistance = 10; | ||
771 | |||
772 | error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); | ||
773 | if (error) | ||
774 | goto error0; | ||
775 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | ||
776 | |||
777 | error = xfs_inobt_get_rec(cur, &rec, &j); | ||
778 | if (error) | ||
779 | goto error0; | ||
780 | XFS_WANT_CORRUPTED_GOTO(j == 1, error0); | ||
781 | |||
782 | if (rec.ir_freecount > 0) { | ||
783 | /* | ||
784 | * Found a free inode in the same chunk | ||
785 | * as the parent, done. | ||
786 | */ | ||
787 | goto alloc_inode; | ||
788 | } | ||
789 | |||
790 | |||
791 | /* | ||
792 | * In the same AG as parent, but parent's chunk is full. | ||
793 | */ | ||
794 | |||
795 | /* duplicate the cursor, search left & right simultaneously */ | ||
796 | error = xfs_btree_dup_cursor(cur, &tcur); | ||
797 | if (error) | ||
798 | goto error0; | ||
799 | |||
800 | /* | ||
801 | * Skip to last blocks looked up if same parent inode. | ||
802 | */ | ||
803 | if (pagino != NULLAGINO && | ||
804 | pag->pagl_pagino == pagino && | ||
805 | pag->pagl_leftrec != NULLAGINO && | ||
806 | pag->pagl_rightrec != NULLAGINO) { | ||
807 | error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, | ||
808 | &trec, &doneleft); | ||
809 | if (error) | ||
810 | goto error1; | ||
811 | |||
812 | error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, | ||
813 | &rec, &doneright); | ||
814 | if (error) | ||
815 | goto error1; | ||
816 | } else { | ||
817 | /* search left with tcur, back up 1 record */ | ||
818 | error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); | ||
819 | if (error) | ||
820 | goto error1; | ||
821 | |||
822 | /* search right with cur, go forward 1 record. */ | ||
823 | error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); | ||
824 | if (error) | ||
825 | goto error1; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * Loop until we find an inode chunk with a free inode. | ||
830 | */ | ||
831 | while (!doneleft || !doneright) { | ||
832 | int useleft; /* using left inode chunk this time */ | ||
833 | |||
834 | if (!--searchdistance) { | ||
835 | /* | ||
836 | * Not in range - save last search | ||
837 | * location and allocate a new inode | ||
838 | */ | ||
839 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | ||
840 | pag->pagl_leftrec = trec.ir_startino; | ||
841 | pag->pagl_rightrec = rec.ir_startino; | ||
842 | pag->pagl_pagino = pagino; | ||
843 | goto newino; | ||
844 | } | ||
845 | |||
846 | /* figure out the closer block if both are valid. */ | ||
847 | if (!doneleft && !doneright) { | ||
848 | useleft = pagino - | ||
849 | (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < | ||
850 | rec.ir_startino - pagino; | ||
851 | } else { | ||
852 | useleft = !doneleft; | ||
853 | } | ||
854 | |||
855 | /* free inodes to the left? */ | ||
856 | if (useleft && trec.ir_freecount) { | ||
857 | rec = trec; | ||
858 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
859 | cur = tcur; | ||
860 | |||
861 | pag->pagl_leftrec = trec.ir_startino; | ||
862 | pag->pagl_rightrec = rec.ir_startino; | ||
863 | pag->pagl_pagino = pagino; | ||
864 | goto alloc_inode; | ||
865 | } | ||
866 | |||
867 | /* free inodes to the right? */ | ||
868 | if (!useleft && rec.ir_freecount) { | ||
869 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | ||
870 | |||
871 | pag->pagl_leftrec = trec.ir_startino; | ||
872 | pag->pagl_rightrec = rec.ir_startino; | ||
873 | pag->pagl_pagino = pagino; | ||
874 | goto alloc_inode; | ||
875 | } | ||
876 | |||
877 | /* get next record to check */ | ||
878 | if (useleft) { | ||
879 | error = xfs_ialloc_next_rec(tcur, &trec, | ||
880 | &doneleft, 1); | ||
881 | } else { | ||
882 | error = xfs_ialloc_next_rec(cur, &rec, | ||
883 | &doneright, 0); | ||
884 | } | ||
885 | if (error) | ||
886 | goto error1; | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * We've reached the end of the btree. Because | ||
891 | * we are only searching a small chunk of the | ||
892 | * btree each search, there are obviously free | ||
893 | * inodes closer to the parent inode than we | ||
894 | * are now. Restart the search. | ||
895 | */ | ||
896 | pag->pagl_pagino = NULLAGINO; | ||
897 | pag->pagl_leftrec = NULLAGINO; | ||
898 | pag->pagl_rightrec = NULLAGINO; | ||
899 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | ||
900 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
901 | goto restart_pagno; | ||
902 | } | ||
903 | |||
904 | /* | ||
905 | * In a different AG from the parent. | ||
906 | * See if the most recently allocated block has any free. | ||
907 | */ | ||
908 | newino: | ||
909 | if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { | ||
910 | error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), | ||
911 | XFS_LOOKUP_EQ, &i); | ||
912 | if (error) | ||
913 | goto error0; | ||
914 | |||
915 | if (i == 1) { | ||
916 | error = xfs_inobt_get_rec(cur, &rec, &j); | ||
917 | if (error) | ||
918 | goto error0; | ||
919 | |||
920 | if (j == 1 && rec.ir_freecount > 0) { | ||
921 | /* | ||
922 | * The last chunk allocated in the group | ||
923 | * still has a free inode. | ||
924 | */ | ||
925 | goto alloc_inode; | ||
926 | } | ||
927 | } | ||
928 | } | ||
929 | |||
930 | /* | ||
931 | * None left in the last group, search the whole AG | ||
932 | */ | ||
933 | error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); | ||
934 | if (error) | ||
935 | goto error0; | ||
936 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | ||
937 | |||
938 | for (;;) { | ||
939 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
940 | if (error) | ||
941 | goto error0; | ||
942 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | ||
943 | if (rec.ir_freecount > 0) | ||
944 | break; | ||
945 | error = xfs_btree_increment(cur, 0, &i); | ||
946 | if (error) | ||
947 | goto error0; | ||
948 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | ||
949 | } | ||
950 | |||
951 | alloc_inode: | ||
952 | offset = xfs_lowbit64(rec.ir_free); | ||
953 | ASSERT(offset >= 0); | ||
954 | ASSERT(offset < XFS_INODES_PER_CHUNK); | ||
955 | ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % | ||
956 | XFS_INODES_PER_CHUNK) == 0); | ||
957 | ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); | ||
958 | rec.ir_free &= ~XFS_INOBT_MASK(offset); | ||
959 | rec.ir_freecount--; | ||
960 | error = xfs_inobt_update(cur, &rec); | ||
961 | if (error) | ||
962 | goto error0; | ||
963 | be32_add_cpu(&agi->agi_freecount, -1); | ||
964 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | ||
965 | pag->pagi_freecount--; | ||
966 | |||
967 | error = xfs_check_agi_freecount(cur, agi); | ||
968 | if (error) | ||
969 | goto error0; | ||
970 | |||
971 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
972 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); | ||
973 | xfs_perag_put(pag); | ||
974 | *inop = ino; | ||
975 | return 0; | ||
976 | error1: | ||
977 | xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); | ||
978 | error0: | ||
979 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
980 | xfs_perag_put(pag); | ||
981 | return error; | ||
982 | } | ||
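A stand-alone sketch, not kernel code, of the alloc_inode step above: the lowest set bit of ir_free names the first free inode in the chunk; it is cleared and ir_freecount dropped. __builtin_ctzll() plays the role of xfs_lowbit64() and the mask value is hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ir_free = 0xf0;	/* hypothetical: offsets 4-7 of the chunk are free */
	int ir_freecount = 4;

	int offset = __builtin_ctzll(ir_free);	/* lowest free inode: 4 */
	ir_free &= ~(1ULL << offset);		/* mark it allocated */
	ir_freecount--;

	printf("allocated offset %d, %d free left, mask now %#llx\n",
	       offset, ir_freecount, (unsigned long long)ir_free);
	return 0;
}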
983 | |||
984 | /* | ||
985 | * Use the free inode btree to allocate an inode based on distance from the | ||
986 | * parent. Note that the provided cursor may be deleted and replaced. | ||
987 | */ | ||
988 | STATIC int | ||
989 | xfs_dialloc_ag_finobt_near( | ||
990 | xfs_agino_t pagino, | ||
991 | struct xfs_btree_cur **ocur, | ||
992 | struct xfs_inobt_rec_incore *rec) | ||
993 | { | ||
994 | struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ | ||
995 | struct xfs_btree_cur *rcur; /* right search cursor */ | ||
996 | struct xfs_inobt_rec_incore rrec; | ||
997 | int error; | ||
998 | int i, j; | ||
999 | |||
1000 | error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); | ||
1001 | if (error) | ||
1002 | return error; | ||
1003 | |||
1004 | if (i == 1) { | ||
1005 | error = xfs_inobt_get_rec(lcur, rec, &i); | ||
1006 | if (error) | ||
1007 | return error; | ||
1008 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
1009 | |||
1010 | /* | ||
1011 | * See if we've landed in the parent inode record. The finobt | ||
1012 | * only tracks chunks with at least one free inode, so record | ||
1013 | * existence is enough. | ||
1014 | */ | ||
1015 | if (pagino >= rec->ir_startino && | ||
1016 | pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) | ||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | error = xfs_btree_dup_cursor(lcur, &rcur); | ||
1021 | if (error) | ||
1022 | return error; | ||
1023 | |||
1024 | error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); | ||
1025 | if (error) | ||
1026 | goto error_rcur; | ||
1027 | if (j == 1) { | ||
1028 | error = xfs_inobt_get_rec(rcur, &rrec, &j); | ||
1029 | if (error) | ||
1030 | goto error_rcur; | ||
1031 | XFS_WANT_CORRUPTED_GOTO(j == 1, error_rcur); | ||
1032 | } | ||
1033 | |||
1034 | XFS_WANT_CORRUPTED_GOTO(i == 1 || j == 1, error_rcur); | ||
1035 | if (i == 1 && j == 1) { | ||
1036 | /* | ||
1037 | * Both the left and right records are valid. Choose the closer | ||
1038 | * inode chunk to the target. | ||
1039 | */ | ||
1040 | if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > | ||
1041 | (rrec.ir_startino - pagino)) { | ||
1042 | *rec = rrec; | ||
1043 | xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); | ||
1044 | *ocur = rcur; | ||
1045 | } else { | ||
1046 | xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); | ||
1047 | } | ||
1048 | } else if (j == 1) { | ||
1049 | /* only the right record is valid */ | ||
1050 | *rec = rrec; | ||
1051 | xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); | ||
1052 | *ocur = rcur; | ||
1053 | } else if (i == 1) { | ||
1054 | /* only the left record is valid */ | ||
1055 | xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); | ||
1056 | } | ||
1057 | |||
1058 | return 0; | ||
1059 | |||
1060 | error_rcur: | ||
1061 | xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); | ||
1062 | return error; | ||
1063 | } | ||
1064 | |||
1065 | /* | ||
1066 | * Use the free inode btree to find a free inode based on a newino hint. If | ||
1067 | * the hint is NULLAGINO, find the first free inode in the AG. | ||
1068 | */ | ||
1069 | STATIC int | ||
1070 | xfs_dialloc_ag_finobt_newino( | ||
1071 | struct xfs_agi *agi, | ||
1072 | struct xfs_btree_cur *cur, | ||
1073 | struct xfs_inobt_rec_incore *rec) | ||
1074 | { | ||
1075 | int error; | ||
1076 | int i; | ||
1077 | |||
1078 | if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { | ||
1079 | error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), | ||
1080 | XFS_LOOKUP_EQ, &i); | ||
1081 | if (error) | ||
1082 | return error; | ||
1083 | if (i == 1) { | ||
1084 | error = xfs_inobt_get_rec(cur, rec, &i); | ||
1085 | if (error) | ||
1086 | return error; | ||
1087 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
1088 | |||
1089 | return 0; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | /* | ||
1094 | * Find the first inode available in the AG. | ||
1095 | */ | ||
1096 | error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); | ||
1097 | if (error) | ||
1098 | return error; | ||
1099 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
1100 | |||
1101 | error = xfs_inobt_get_rec(cur, rec, &i); | ||
1102 | if (error) | ||
1103 | return error; | ||
1104 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
1105 | |||
1106 | return 0; | ||
1107 | } | ||
1108 | |||
1109 | /* | ||
1110 | * Update the inobt based on a modification made to the finobt. Also ensure that | ||
1111 | * the records from both trees are equivalent post-modification. | ||
1112 | */ | ||
1113 | STATIC int | ||
1114 | xfs_dialloc_ag_update_inobt( | ||
1115 | struct xfs_btree_cur *cur, /* inobt cursor */ | ||
1116 | struct xfs_inobt_rec_incore *frec, /* finobt record */ | ||
1117 | int offset) /* inode offset */ | ||
1118 | { | ||
1119 | struct xfs_inobt_rec_incore rec; | ||
1120 | int error; | ||
1121 | int i; | ||
1122 | |||
1123 | error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); | ||
1124 | if (error) | ||
1125 | return error; | ||
1126 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
1127 | |||
1128 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
1129 | if (error) | ||
1130 | return error; | ||
1131 | XFS_WANT_CORRUPTED_RETURN(i == 1); | ||
1132 | ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % | ||
1133 | XFS_INODES_PER_CHUNK) == 0); | ||
1134 | |||
1135 | rec.ir_free &= ~XFS_INOBT_MASK(offset); | ||
1136 | rec.ir_freecount--; | ||
1137 | |||
1138 | XFS_WANT_CORRUPTED_RETURN((rec.ir_free == frec->ir_free) && | ||
1139 | (rec.ir_freecount == frec->ir_freecount)); | ||
1140 | |||
1141 | error = xfs_inobt_update(cur, &rec); | ||
1142 | if (error) | ||
1143 | return error; | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
1147 | |||
1148 | /* | ||
1149 | * Allocate an inode using the free inode btree, if available. Otherwise, fall | ||
1150 | * back to the inobt search algorithm. | ||
1151 | * | ||
1152 | * The caller selected an AG for us, and made sure that free inodes are | ||
1153 | * available. | ||
1154 | */ | ||
1155 | STATIC int | ||
1156 | xfs_dialloc_ag( | ||
1157 | struct xfs_trans *tp, | ||
1158 | struct xfs_buf *agbp, | ||
1159 | xfs_ino_t parent, | ||
1160 | xfs_ino_t *inop) | ||
1161 | { | ||
1162 | struct xfs_mount *mp = tp->t_mountp; | ||
1163 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); | ||
1164 | xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); | ||
1165 | xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); | ||
1166 | xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); | ||
1167 | struct xfs_perag *pag; | ||
1168 | struct xfs_btree_cur *cur; /* finobt cursor */ | ||
1169 | struct xfs_btree_cur *icur; /* inobt cursor */ | ||
1170 | struct xfs_inobt_rec_incore rec; | ||
1171 | xfs_ino_t ino; | ||
1172 | int error; | ||
1173 | int offset; | ||
1174 | int i; | ||
1175 | |||
1176 | if (!xfs_sb_version_hasfinobt(&mp->m_sb)) | ||
1177 | return xfs_dialloc_ag_inobt(tp, agbp, parent, inop); | ||
1178 | |||
1179 | pag = xfs_perag_get(mp, agno); | ||
1180 | |||
1181 | /* | ||
1182 | * If pagino is 0 (this is the root inode allocation) use newino. | ||
1183 | * This must work because we've just allocated some. | ||
1184 | */ | ||
1185 | if (!pagino) | ||
1186 | pagino = be32_to_cpu(agi->agi_newino); | ||
1187 | |||
1188 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO); | ||
1189 | |||
1190 | error = xfs_check_agi_freecount(cur, agi); | ||
1191 | if (error) | ||
1192 | goto error_cur; | ||
1193 | |||
1194 | /* | ||
1195 | * The search algorithm depends on whether we're in the same AG as the | ||
1196 | * parent. If so, find the closest available inode to the parent. If | ||
1197 | * not, consider the agi hint or find the first free inode in the AG. | ||
1198 | */ | ||
1199 | if (agno == pagno) | ||
1200 | error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); | ||
1201 | else | ||
1202 | error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); | ||
1203 | if (error) | ||
1204 | goto error_cur; | ||
1205 | |||
1206 | offset = xfs_lowbit64(rec.ir_free); | ||
1207 | ASSERT(offset >= 0); | ||
1208 | ASSERT(offset < XFS_INODES_PER_CHUNK); | ||
1209 | ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % | ||
1210 | XFS_INODES_PER_CHUNK) == 0); | ||
1211 | ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); | ||
1212 | |||
1213 | /* | ||
1214 | * Modify or remove the finobt record. | ||
1215 | */ | ||
1216 | rec.ir_free &= ~XFS_INOBT_MASK(offset); | ||
1217 | rec.ir_freecount--; | ||
1218 | if (rec.ir_freecount) | ||
1219 | error = xfs_inobt_update(cur, &rec); | ||
1220 | else | ||
1221 | error = xfs_btree_delete(cur, &i); | ||
1222 | if (error) | ||
1223 | goto error_cur; | ||
1224 | |||
1225 | /* | ||
1226 | * The finobt has now been updated appropriately. We haven't updated the | ||
1227 | * agi and superblock yet, so we can create an inobt cursor and validate | ||
1228 | * the original freecount. If all is well, make the equivalent update to | ||
1229 | * the inobt using the finobt record and offset information. | ||
1230 | */ | ||
1231 | icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); | ||
1232 | |||
1233 | error = xfs_check_agi_freecount(icur, agi); | ||
1234 | if (error) | ||
1235 | goto error_icur; | ||
1236 | |||
1237 | error = xfs_dialloc_ag_update_inobt(icur, &rec, offset); | ||
1238 | if (error) | ||
1239 | goto error_icur; | ||
1240 | |||
1241 | /* | ||
1242 | * Both trees have now been updated. We must update the perag and | ||
1243 | * superblock before we can check the freecount for each btree. | ||
1244 | */ | ||
1245 | be32_add_cpu(&agi->agi_freecount, -1); | ||
1246 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | ||
1247 | pag->pagi_freecount--; | ||
1248 | |||
1249 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); | ||
1250 | |||
1251 | error = xfs_check_agi_freecount(icur, agi); | ||
1252 | if (error) | ||
1253 | goto error_icur; | ||
1254 | error = xfs_check_agi_freecount(cur, agi); | ||
1255 | if (error) | ||
1256 | goto error_icur; | ||
1257 | |||
1258 | xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR); | ||
1259 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1260 | xfs_perag_put(pag); | ||
1261 | *inop = ino; | ||
1262 | return 0; | ||
1263 | |||
1264 | error_icur: | ||
1265 | xfs_btree_del_cursor(icur, XFS_BTREE_ERROR); | ||
1266 | error_cur: | ||
1267 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
1268 | xfs_perag_put(pag); | ||
1269 | return error; | ||
1270 | } | ||
1271 | |||
1272 | /* | ||
1273 | * Allocate an inode on disk. | ||
1274 | * | ||
1275 | * Mode is used to tell whether the new inode will need space, and whether it | ||
1276 | * is a directory. | ||
1277 | * | ||
1278 | * This function is designed to be called twice if it has to do an allocation | ||
1279 | * to make more free inodes. On the first call, *IO_agbp should be set to NULL. | ||
1280 | * If an inode is available without having to perform an allocation, an inode | ||
1281 | * number is returned. In this case, *IO_agbp is set to NULL. If an allocation | ||
1282 | * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp. | ||
1283 | * The caller should then commit the current transaction, allocate a | ||
1284 | * new transaction, and call xfs_dialloc() again, passing in the previous value | ||
1285 | * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI | ||
1286 | * buffer is locked across the two calls, the second call is guaranteed to have | ||
1287 | * a free inode available. | ||
1288 | * | ||
1289 | * Once we successfully pick an inode its number is returned and the on-disk | ||
1290 | * data structures are updated. The inode itself is not read in, since doing so | ||
1291 | * would break ordering constraints with xfs_reclaim. | ||
1292 | */ | ||
1293 | int | ||
1294 | xfs_dialloc( | ||
1295 | struct xfs_trans *tp, | ||
1296 | xfs_ino_t parent, | ||
1297 | umode_t mode, | ||
1298 | int okalloc, | ||
1299 | struct xfs_buf **IO_agbp, | ||
1300 | xfs_ino_t *inop) | ||
1301 | { | ||
1302 | struct xfs_mount *mp = tp->t_mountp; | ||
1303 | struct xfs_buf *agbp; | ||
1304 | xfs_agnumber_t agno; | ||
1305 | int error; | ||
1306 | int ialloced; | ||
1307 | int noroom = 0; | ||
1308 | xfs_agnumber_t start_agno; | ||
1309 | struct xfs_perag *pag; | ||
1310 | |||
1311 | if (*IO_agbp) { | ||
1312 | /* | ||
1313 | * If the caller passes in a pointer to the AGI buffer, | ||
1314 | * continue where we left off before. In this case, we | ||
1315 | * know that the allocation group has free inodes. | ||
1316 | */ | ||
1317 | agbp = *IO_agbp; | ||
1318 | goto out_alloc; | ||
1319 | } | ||
1320 | |||
1321 | /* | ||
1322 | * We do not have an agbp, so select an initial allocation | ||
1323 | * group for inode allocation. | ||
1324 | */ | ||
1325 | start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc); | ||
1326 | if (start_agno == NULLAGNUMBER) { | ||
1327 | *inop = NULLFSINO; | ||
1328 | return 0; | ||
1329 | } | ||
1330 | |||
1331 | /* | ||
1332 | * If we have already hit the ceiling of inode blocks then clear | ||
1333 | * okalloc so we scan all available agi structures for a free | ||
1334 | * inode. | ||
1335 | */ | ||
1336 | if (mp->m_maxicount && | ||
1337 | mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) { | ||
1338 | noroom = 1; | ||
1339 | okalloc = 0; | ||
1340 | } | ||
1341 | |||
1342 | /* | ||
1343 | * Loop until we find an allocation group that either has free inodes | ||
1344 | * or in which we can allocate some inodes. Iterate through the | ||
1345 | * allocation groups upward, wrapping at the end. | ||
1346 | */ | ||
1347 | agno = start_agno; | ||
1348 | for (;;) { | ||
1349 | pag = xfs_perag_get(mp, agno); | ||
1350 | if (!pag->pagi_inodeok) { | ||
1351 | xfs_ialloc_next_ag(mp); | ||
1352 | goto nextag; | ||
1353 | } | ||
1354 | |||
1355 | if (!pag->pagi_init) { | ||
1356 | error = xfs_ialloc_pagi_init(mp, tp, agno); | ||
1357 | if (error) | ||
1358 | goto out_error; | ||
1359 | } | ||
1360 | |||
1361 | /* | ||
1362 | * Do a first racy fast path check if this AG is usable. | ||
1363 | */ | ||
1364 | if (!pag->pagi_freecount && !okalloc) | ||
1365 | goto nextag; | ||
1366 | |||
1367 | /* | ||
1368 | * Then read in the AGI buffer and recheck with the AGI buffer | ||
1369 | * lock held. | ||
1370 | */ | ||
1371 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1372 | if (error) | ||
1373 | goto out_error; | ||
1374 | |||
1375 | if (pag->pagi_freecount) { | ||
1376 | xfs_perag_put(pag); | ||
1377 | goto out_alloc; | ||
1378 | } | ||
1379 | |||
1380 | if (!okalloc) | ||
1381 | goto nextag_relse_buffer; | ||
1382 | |||
1383 | |||
1384 | error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced); | ||
1385 | if (error) { | ||
1386 | xfs_trans_brelse(tp, agbp); | ||
1387 | |||
1388 | if (error != ENOSPC) | ||
1389 | goto out_error; | ||
1390 | |||
1391 | xfs_perag_put(pag); | ||
1392 | *inop = NULLFSINO; | ||
1393 | return 0; | ||
1394 | } | ||
1395 | |||
1396 | if (ialloced) { | ||
1397 | /* | ||
1398 | * We successfully allocated some inodes, return | ||
1399 | * the current context to the caller so that it | ||
1400 | * can commit the current transaction and call | ||
1401 | * us again where we left off. | ||
1402 | */ | ||
1403 | ASSERT(pag->pagi_freecount > 0); | ||
1404 | xfs_perag_put(pag); | ||
1405 | |||
1406 | *IO_agbp = agbp; | ||
1407 | *inop = NULLFSINO; | ||
1408 | return 0; | ||
1409 | } | ||
1410 | |||
1411 | nextag_relse_buffer: | ||
1412 | xfs_trans_brelse(tp, agbp); | ||
1413 | nextag: | ||
1414 | xfs_perag_put(pag); | ||
1415 | if (++agno == mp->m_sb.sb_agcount) | ||
1416 | agno = 0; | ||
1417 | if (agno == start_agno) { | ||
1418 | *inop = NULLFSINO; | ||
1419 | return noroom ? ENOSPC : 0; | ||
1420 | } | ||
1421 | } | ||
1422 | |||
1423 | out_alloc: | ||
1424 | *IO_agbp = NULL; | ||
1425 | return xfs_dialloc_ag(tp, agbp, parent, inop); | ||
1426 | out_error: | ||
1427 | xfs_perag_put(pag); | ||
1428 | return error; | ||
1429 | } | ||
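A stand-alone sketch, not kernel code, of the two-call protocol described in the comment above xfs_dialloc(): the first call may hand back the locked AGI buffer instead of an inode number, in which case the caller commits its transaction, starts a new one and calls again with that buffer. The dialloc() helper and the transaction steps below are purely hypothetical stand-ins, not real kernel APIs.

#include <stdio.h>
#include <stddef.h>

struct agi_buf { int dummy; };		/* stand-in for the locked AGI xfs_buf */

/* hypothetical stand-in for xfs_dialloc(): first pass "grows" a chunk */
static int dialloc(struct agi_buf **io_agbp, long *inop)
{
	static struct agi_buf agi;

	if (*io_agbp == NULL) {		/* pass 1: allocated a new chunk */
		*io_agbp = &agi;	/* caller must commit and call again */
		*inop = -1;
		return 0;
	}
	*io_agbp = NULL;		/* pass 2: pick an inode from the chunk */
	*inop = 128;			/* hypothetical inode number */
	return 0;
}

int main(void)
{
	struct agi_buf *agbp = NULL;
	long ino = -1;

	dialloc(&agbp, &ino);
	if (agbp) {
		/* commit the current transaction, start a new one, then retry */
		dialloc(&agbp, &ino);
	}
	printf("allocated inode %ld\n", ino);
	return 0;
}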
1430 | |||
1431 | STATIC int | ||
1432 | xfs_difree_inobt( | ||
1433 | struct xfs_mount *mp, | ||
1434 | struct xfs_trans *tp, | ||
1435 | struct xfs_buf *agbp, | ||
1436 | xfs_agino_t agino, | ||
1437 | struct xfs_bmap_free *flist, | ||
1438 | int *deleted, | ||
1439 | xfs_ino_t *first_ino, | ||
1440 | struct xfs_inobt_rec_incore *orec) | ||
1441 | { | ||
1442 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); | ||
1443 | xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); | ||
1444 | struct xfs_perag *pag; | ||
1445 | struct xfs_btree_cur *cur; | ||
1446 | struct xfs_inobt_rec_incore rec; | ||
1447 | int ilen; | ||
1448 | int error; | ||
1449 | int i; | ||
1450 | int off; | ||
1451 | |||
1452 | ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); | ||
1453 | ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); | ||
1454 | |||
1455 | /* | ||
1456 | * Initialize the cursor. | ||
1457 | */ | ||
1458 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); | ||
1459 | |||
1460 | error = xfs_check_agi_freecount(cur, agi); | ||
1461 | if (error) | ||
1462 | goto error0; | ||
1463 | |||
1464 | /* | ||
1465 | * Look for the entry describing this inode. | ||
1466 | */ | ||
1467 | if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { | ||
1468 | xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", | ||
1469 | __func__, error); | ||
1470 | goto error0; | ||
1471 | } | ||
1472 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | ||
1473 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
1474 | if (error) { | ||
1475 | xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", | ||
1476 | __func__, error); | ||
1477 | goto error0; | ||
1478 | } | ||
1479 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | ||
1480 | /* | ||
1481 | * Get the offset in the inode chunk. | ||
1482 | */ | ||
1483 | off = agino - rec.ir_startino; | ||
1484 | ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); | ||
1485 | ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off))); | ||
1486 | /* | ||
1487 | * Mark the inode free & increment the count. | ||
1488 | */ | ||
1489 | rec.ir_free |= XFS_INOBT_MASK(off); | ||
1490 | rec.ir_freecount++; | ||
1491 | |||
1492 | /* | ||
1493 | * When an inode cluster is free, it becomes eligible for removal | ||
1494 | */ | ||
1495 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && | ||
1496 | (rec.ir_freecount == mp->m_ialloc_inos)) { | ||
1497 | |||
1498 | *deleted = 1; | ||
1499 | *first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); | ||
1500 | |||
1501 | /* | ||
1502 | * Remove the inode cluster from the AGI B+Tree, adjust the | ||
1503 | * AGI and Superblock inode counts, and mark the disk space | ||
1504 | * to be freed when the transaction is committed. | ||
1505 | */ | ||
1506 | ilen = mp->m_ialloc_inos; | ||
1507 | be32_add_cpu(&agi->agi_count, -ilen); | ||
1508 | be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); | ||
1509 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); | ||
1510 | pag = xfs_perag_get(mp, agno); | ||
1511 | pag->pagi_freecount -= ilen - 1; | ||
1512 | xfs_perag_put(pag); | ||
1513 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); | ||
1514 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); | ||
1515 | |||
1516 | if ((error = xfs_btree_delete(cur, &i))) { | ||
1517 | xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", | ||
1518 | __func__, error); | ||
1519 | goto error0; | ||
1520 | } | ||
1521 | |||
1522 | xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno, | ||
1523 | XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)), | ||
1524 | mp->m_ialloc_blks, flist, mp); | ||
1525 | } else { | ||
1526 | *deleted = 0; | ||
1527 | |||
1528 | error = xfs_inobt_update(cur, &rec); | ||
1529 | if (error) { | ||
1530 | xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", | ||
1531 | __func__, error); | ||
1532 | goto error0; | ||
1533 | } | ||
1534 | |||
1535 | /* | ||
1536 | * Change the inode free counts and log the ag/sb changes. | ||
1537 | */ | ||
1538 | be32_add_cpu(&agi->agi_freecount, 1); | ||
1539 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | ||
1540 | pag = xfs_perag_get(mp, agno); | ||
1541 | pag->pagi_freecount++; | ||
1542 | xfs_perag_put(pag); | ||
1543 | xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); | ||
1544 | } | ||
1545 | |||
1546 | error = xfs_check_agi_freecount(cur, agi); | ||
1547 | if (error) | ||
1548 | goto error0; | ||
1549 | |||
1550 | *orec = rec; | ||
1551 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1552 | return 0; | ||
1553 | |||
1554 | error0: | ||
1555 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
1556 | return error; | ||
1557 | } | ||
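A stand-alone sketch, not kernel code, of the freeing path above: set the bit for the chunk offset, bump ir_freecount, and once every inode in the chunk is free the whole cluster becomes eligible for removal (unless XFS_MOUNT_IKEEP is set). The starting mask and counts below are hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ir_free = ~1ULL;	/* hypothetical: only offset 0 still in use */
	int ir_freecount = 63;
	int inodes_per_chunk = 64;	/* models mp->m_ialloc_inos for one chunk */
	int off = 0;			/* inode being freed, relative to ir_startino */

	ir_free |= 1ULL << off;		/* mark the inode free */
	ir_freecount++;

	printf("mask %#llx, %d free\n", (unsigned long long)ir_free, ir_freecount);
	if (ir_freecount == inodes_per_chunk)
		printf("chunk fully free: cluster eligible for removal\n");
	return 0;
}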
1558 | |||
1559 | /* | ||
1560 | * Free an inode in the free inode btree. | ||
1561 | */ | ||
1562 | STATIC int | ||
1563 | xfs_difree_finobt( | ||
1564 | struct xfs_mount *mp, | ||
1565 | struct xfs_trans *tp, | ||
1566 | struct xfs_buf *agbp, | ||
1567 | xfs_agino_t agino, | ||
1568 | struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ | ||
1569 | { | ||
1570 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); | ||
1571 | xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); | ||
1572 | struct xfs_btree_cur *cur; | ||
1573 | struct xfs_inobt_rec_incore rec; | ||
1574 | int offset = agino - ibtrec->ir_startino; | ||
1575 | int error; | ||
1576 | int i; | ||
1577 | |||
1578 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO); | ||
1579 | |||
1580 | error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); | ||
1581 | if (error) | ||
1582 | goto error; | ||
1583 | if (i == 0) { | ||
1584 | /* | ||
1585 | * If the record does not exist in the finobt, we must have just | ||
1586 | * freed an inode in a previously fully allocated chunk. If not, | ||
1587 | * something is out of sync. | ||
1588 | */ | ||
1589 | XFS_WANT_CORRUPTED_GOTO(ibtrec->ir_freecount == 1, error); | ||
1590 | |||
1591 | error = xfs_inobt_insert_rec(cur, ibtrec->ir_freecount, | ||
1592 | ibtrec->ir_free, &i); | ||
1593 | if (error) | ||
1594 | goto error; | ||
1595 | ASSERT(i == 1); | ||
1596 | |||
1597 | goto out; | ||
1598 | } | ||
1599 | |||
1600 | /* | ||
1601 | * Read and update the existing record. We could just copy the ibtrec | ||
1602 | * across here, but that would defeat the purpose of having redundant | ||
1603 | * metadata. By making the modifications independently, we can catch | ||
1604 | * corruptions that we wouldn't see if we just copied from one record | ||
1605 | * to another. | ||
1606 | */ | ||
1607 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
1608 | if (error) | ||
1609 | goto error; | ||
1610 | XFS_WANT_CORRUPTED_GOTO(i == 1, error); | ||
1611 | |||
1612 | rec.ir_free |= XFS_INOBT_MASK(offset); | ||
1613 | rec.ir_freecount++; | ||
1614 | |||
1615 | XFS_WANT_CORRUPTED_GOTO((rec.ir_free == ibtrec->ir_free) && | ||
1616 | (rec.ir_freecount == ibtrec->ir_freecount), | ||
1617 | error); | ||
1618 | |||
1619 | /* | ||
1620 | * The content of inobt records should always match between the inobt | ||
1621 | * and finobt. The lifecycle of records in the finobt is different from | ||
1622 | * the inobt in that the finobt only tracks records with at least one | ||
1623 | * free inode. Hence, if all of the inodes are free and we aren't | ||
1624 | * keeping inode chunks permanently on disk, remove the record. | ||
1625 | * Otherwise, update the record with the new information. | ||
1626 | */ | ||
1627 | if (rec.ir_freecount == mp->m_ialloc_inos && | ||
1628 | !(mp->m_flags & XFS_MOUNT_IKEEP)) { | ||
1629 | error = xfs_btree_delete(cur, &i); | ||
1630 | if (error) | ||
1631 | goto error; | ||
1632 | ASSERT(i == 1); | ||
1633 | } else { | ||
1634 | error = xfs_inobt_update(cur, &rec); | ||
1635 | if (error) | ||
1636 | goto error; | ||
1637 | } | ||
1638 | |||
1639 | out: | ||
1640 | error = xfs_check_agi_freecount(cur, agi); | ||
1641 | if (error) | ||
1642 | goto error; | ||
1643 | |||
1644 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1645 | return 0; | ||
1646 | |||
1647 | error: | ||
1648 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
1649 | return error; | ||
1650 | } | ||
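A stand-alone sketch of the free-mask update and the delete-versus-update decision above, assuming a 64-inode chunk, a free mask of 1ULL << offset, and a made-up "ikeep" flag standing in for XFS_MOUNT_IKEEP.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_INODES_PER_CHUNK	64	/* assumed chunk size */

struct toy_rec {
	uint64_t ir_free;	/* one bit per inode, 1 == free */
	unsigned ir_freecount;
};

/*
 * Mark one inode free in the record and report whether the finobt record
 * should now be deleted (chunk fully free and "ikeep" not set) rather than
 * updated in place.
 */
static bool toy_difree_finobt(struct toy_rec *rec, unsigned offset, bool ikeep)
{
	rec->ir_free |= 1ULL << offset;	/* mark this inode free */
	rec->ir_freecount++;

	return rec->ir_freecount == TOY_INODES_PER_CHUNK && !ikeep;
}

int main(void)
{
	struct toy_rec rec = { .ir_free = ~1ULL, .ir_freecount = 63 };

	/* Freeing inode 0 of the chunk makes it fully free. */
	printf("delete record: %d\n", toy_difree_finobt(&rec, 0, false));
	return 0;
}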
1651 | |||
1652 | /* | ||
1653 | * Free disk inode. Carefully avoids touching the incore inode; all | ||
1654 | * incore manipulations are the caller's responsibility. | ||
1655 | * The on-disk inode is not changed by this operation, only the | ||
1656 | * btree (free inode mask) is changed. | ||
1657 | */ | ||
1658 | int | ||
1659 | xfs_difree( | ||
1660 | struct xfs_trans *tp, /* transaction pointer */ | ||
1661 | xfs_ino_t inode, /* inode to be freed */ | ||
1662 | struct xfs_bmap_free *flist, /* extents to free */ | ||
1663 | int *deleted,/* set if inode cluster was deleted */ | ||
1664 | xfs_ino_t *first_ino)/* first inode in deleted cluster */ | ||
1665 | { | ||
1666 | /* REFERENCED */ | ||
1667 | xfs_agblock_t agbno; /* block number containing inode */ | ||
1668 | struct xfs_buf *agbp; /* buffer for allocation group header */ | ||
1669 | xfs_agino_t agino; /* allocation group inode number */ | ||
1670 | xfs_agnumber_t agno; /* allocation group number */ | ||
1671 | int error; /* error return value */ | ||
1672 | struct xfs_mount *mp; /* mount structure for filesystem */ | ||
1673 | struct xfs_inobt_rec_incore rec;/* btree record */ | ||
1674 | |||
1675 | mp = tp->t_mountp; | ||
1676 | |||
1677 | /* | ||
1678 | * Break up inode number into its components. | ||
1679 | */ | ||
1680 | agno = XFS_INO_TO_AGNO(mp, inode); | ||
1681 | if (agno >= mp->m_sb.sb_agcount) { | ||
1682 | xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).", | ||
1683 | __func__, agno, mp->m_sb.sb_agcount); | ||
1684 | ASSERT(0); | ||
1685 | return EINVAL; | ||
1686 | } | ||
1687 | agino = XFS_INO_TO_AGINO(mp, inode); | ||
1688 | if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { | ||
1689 | xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", | ||
1690 | __func__, (unsigned long long)inode, | ||
1691 | (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino)); | ||
1692 | ASSERT(0); | ||
1693 | return EINVAL; | ||
1694 | } | ||
1695 | agbno = XFS_AGINO_TO_AGBNO(mp, agino); | ||
1696 | if (agbno >= mp->m_sb.sb_agblocks) { | ||
1697 | xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", | ||
1698 | __func__, agbno, mp->m_sb.sb_agblocks); | ||
1699 | ASSERT(0); | ||
1700 | return EINVAL; | ||
1701 | } | ||
1702 | /* | ||
1703 | * Get the allocation group header. | ||
1704 | */ | ||
1705 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1706 | if (error) { | ||
1707 | xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", | ||
1708 | __func__, error); | ||
1709 | return error; | ||
1710 | } | ||
1711 | |||
1712 | /* | ||
1713 | * Fix up the inode allocation btree. | ||
1714 | */ | ||
1715 | error = xfs_difree_inobt(mp, tp, agbp, agino, flist, deleted, first_ino, | ||
1716 | &rec); | ||
1717 | if (error) | ||
1718 | goto error0; | ||
1719 | |||
1720 | /* | ||
1721 | * Fix up the free inode btree. | ||
1722 | */ | ||
1723 | if (xfs_sb_version_hasfinobt(&mp->m_sb)) { | ||
1724 | error = xfs_difree_finobt(mp, tp, agbp, agino, &rec); | ||
1725 | if (error) | ||
1726 | goto error0; | ||
1727 | } | ||
1728 | |||
1729 | return 0; | ||
1730 | |||
1731 | error0: | ||
1732 | return error; | ||
1733 | } | ||
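A rough user-space sketch of how an absolute inode number splits into an AG number and an AG-relative inode number, which is what the validation above relies on: the AG-relative part occupies the low bits and the AG number sits above it. The bit widths below are assumptions, not values read from a real superblock.

#include <stdint.h>
#include <stdio.h>

static const unsigned toy_agblklog = 16;	/* assumed: 2^16 blocks per AG */
static const unsigned toy_inopblog = 5;		/* assumed: 32 inodes per block */

static unsigned toy_agino_bits(void)
{
	return toy_agblklog + toy_inopblog;
}

int main(void)
{
	uint64_t ino = 0x123456789ULL;		/* arbitrary example */
	uint64_t agino = ino & ((1ULL << toy_agino_bits()) - 1);
	uint64_t agno = ino >> toy_agino_bits();

	/* Reassembling the parts must give back the original number. */
	printf("agno=%llu agino=%llu roundtrip ok=%d\n",
	       (unsigned long long)agno, (unsigned long long)agino,
	       ((agno << toy_agino_bits() | agino) == ino));
	return 0;
}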
1734 | |||
1735 | STATIC int | ||
1736 | xfs_imap_lookup( | ||
1737 | struct xfs_mount *mp, | ||
1738 | struct xfs_trans *tp, | ||
1739 | xfs_agnumber_t agno, | ||
1740 | xfs_agino_t agino, | ||
1741 | xfs_agblock_t agbno, | ||
1742 | xfs_agblock_t *chunk_agbno, | ||
1743 | xfs_agblock_t *offset_agbno, | ||
1744 | int flags) | ||
1745 | { | ||
1746 | struct xfs_inobt_rec_incore rec; | ||
1747 | struct xfs_btree_cur *cur; | ||
1748 | struct xfs_buf *agbp; | ||
1749 | int error; | ||
1750 | int i; | ||
1751 | |||
1752 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1753 | if (error) { | ||
1754 | xfs_alert(mp, | ||
1755 | "%s: xfs_ialloc_read_agi() returned error %d, agno %d", | ||
1756 | __func__, error, agno); | ||
1757 | return error; | ||
1758 | } | ||
1759 | |||
1760 | /* | ||
1761 | * Lookup the inode record for the given agino. If the record cannot be | ||
1762 | * found, then it's an invalid inode number and we should abort. Once | ||
1763 | * we have a record, we need to ensure it contains the inode number | ||
1764 | * we are looking up. | ||
1765 | */ | ||
1766 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); | ||
1767 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); | ||
1768 | if (!error) { | ||
1769 | if (i) | ||
1770 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
1771 | if (!error && i == 0) | ||
1772 | error = EINVAL; | ||
1773 | } | ||
1774 | |||
1775 | xfs_trans_brelse(tp, agbp); | ||
1776 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1777 | if (error) | ||
1778 | return error; | ||
1779 | |||
1780 | /* check that the returned record contains the required inode */ | ||
1781 | if (rec.ir_startino > agino || | ||
1782 | rec.ir_startino + mp->m_ialloc_inos <= agino) | ||
1783 | return EINVAL; | ||
1784 | |||
1785 | /* for untrusted inodes check it is allocated first */ | ||
1786 | if ((flags & XFS_IGET_UNTRUSTED) && | ||
1787 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) | ||
1788 | return EINVAL; | ||
1789 | |||
1790 | *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); | ||
1791 | *offset_agbno = agbno - *chunk_agbno; | ||
1792 | return 0; | ||
1793 | } | ||
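A small sketch of the two checks above: that the record returned by the lookup actually covers the inode being mapped, and, for untrusted lookups, that the inode's free bit is clear (i.e. it is allocated). The chunk size and names are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>

#define TOY_INODES_PER_CHUNK	64	/* assumed */

static bool toy_record_maps_inode(uint32_t ir_startino, uint64_t ir_free,
				  uint32_t agino, bool untrusted)
{
	if (agino < ir_startino ||
	    agino >= ir_startino + TOY_INODES_PER_CHUNK)
		return false;		/* record does not cover this inode */
	if (untrusted && (ir_free & (1ULL << (agino - ir_startino))))
		return false;		/* inode exists but is not allocated */
	return true;
}

int main(void)
{
	/* chunk starting at agino 128, inode 130 allocated (its bit clear) */
	uint64_t free_mask = ~(1ULL << 2);

	return toy_record_maps_inode(128, free_mask, 130, true) ? 0 : 1;
}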
1794 | |||
1795 | /* | ||
1796 | * Return the location of the inode in imap, for mapping it into a buffer. | ||
1797 | */ | ||
1798 | int | ||
1799 | xfs_imap( | ||
1800 | xfs_mount_t *mp, /* file system mount structure */ | ||
1801 | xfs_trans_t *tp, /* transaction pointer */ | ||
1802 | xfs_ino_t ino, /* inode to locate */ | ||
1803 | struct xfs_imap *imap, /* location map structure */ | ||
1804 | uint flags) /* flags for inode btree lookup */ | ||
1805 | { | ||
1806 | xfs_agblock_t agbno; /* block number of inode in the alloc group */ | ||
1807 | xfs_agino_t agino; /* inode number within alloc group */ | ||
1808 | xfs_agnumber_t agno; /* allocation group number */ | ||
1809 | int blks_per_cluster; /* num blocks per inode cluster */ | ||
1810 | xfs_agblock_t chunk_agbno; /* first block in inode chunk */ | ||
1811 | xfs_agblock_t cluster_agbno; /* first block in inode cluster */ | ||
1812 | int error; /* error code */ | ||
1813 | int offset; /* index of inode in its buffer */ | ||
1814 | xfs_agblock_t offset_agbno; /* blks from chunk start to inode */ | ||
1815 | |||
1816 | ASSERT(ino != NULLFSINO); | ||
1817 | |||
1818 | /* | ||
1819 | * Split up the inode number into its parts. | ||
1820 | */ | ||
1821 | agno = XFS_INO_TO_AGNO(mp, ino); | ||
1822 | agino = XFS_INO_TO_AGINO(mp, ino); | ||
1823 | agbno = XFS_AGINO_TO_AGBNO(mp, agino); | ||
1824 | if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || | ||
1825 | ino != XFS_AGINO_TO_INO(mp, agno, agino)) { | ||
1826 | #ifdef DEBUG | ||
1827 | /* | ||
1828 | * Don't output diagnostic information for untrusted inodes | ||
1829 | * as they can be invalid without implying corruption. | ||
1830 | */ | ||
1831 | if (flags & XFS_IGET_UNTRUSTED) | ||
1832 | return EINVAL; | ||
1833 | if (agno >= mp->m_sb.sb_agcount) { | ||
1834 | xfs_alert(mp, | ||
1835 | "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)", | ||
1836 | __func__, agno, mp->m_sb.sb_agcount); | ||
1837 | } | ||
1838 | if (agbno >= mp->m_sb.sb_agblocks) { | ||
1839 | xfs_alert(mp, | ||
1840 | "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", | ||
1841 | __func__, (unsigned long long)agbno, | ||
1842 | (unsigned long)mp->m_sb.sb_agblocks); | ||
1843 | } | ||
1844 | if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) { | ||
1845 | xfs_alert(mp, | ||
1846 | "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", | ||
1847 | __func__, ino, | ||
1848 | XFS_AGINO_TO_INO(mp, agno, agino)); | ||
1849 | } | ||
1850 | xfs_stack_trace(); | ||
1851 | #endif /* DEBUG */ | ||
1852 | return EINVAL; | ||
1853 | } | ||
1854 | |||
1855 | blks_per_cluster = xfs_icluster_size_fsb(mp); | ||
1856 | |||
1857 | /* | ||
1858 | * For bulkstat and handle lookups, we have an untrusted inode number | ||
1859 | * that we have to verify is valid. We cannot do this just by reading | ||
1860 | * the inode buffer as it may have been unlinked and removed leaving | ||
1861 | * inodes in stale state on disk. Hence we have to do a btree lookup | ||
1862 | * in all cases where an untrusted inode number is passed. | ||
1863 | */ | ||
1864 | if (flags & XFS_IGET_UNTRUSTED) { | ||
1865 | error = xfs_imap_lookup(mp, tp, agno, agino, agbno, | ||
1866 | &chunk_agbno, &offset_agbno, flags); | ||
1867 | if (error) | ||
1868 | return error; | ||
1869 | goto out_map; | ||
1870 | } | ||
1871 | |||
1872 | /* | ||
1873 | * If the inode cluster size is the same as the blocksize or | ||
1874 | * smaller, we can get to the buffer with simple arithmetic. | ||
1875 | */ | ||
1876 | if (blks_per_cluster == 1) { | ||
1877 | offset = XFS_INO_TO_OFFSET(mp, ino); | ||
1878 | ASSERT(offset < mp->m_sb.sb_inopblock); | ||
1879 | |||
1880 | imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno); | ||
1881 | imap->im_len = XFS_FSB_TO_BB(mp, 1); | ||
1882 | imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); | ||
1883 | return 0; | ||
1884 | } | ||
1885 | |||
1886 | /* | ||
1887 | * If the inode chunks are aligned then use simple maths to | ||
1888 | * find the location. Otherwise we have to do a btree | ||
1889 | * lookup to find the location. | ||
1890 | */ | ||
1891 | if (mp->m_inoalign_mask) { | ||
1892 | offset_agbno = agbno & mp->m_inoalign_mask; | ||
1893 | chunk_agbno = agbno - offset_agbno; | ||
1894 | } else { | ||
1895 | error = xfs_imap_lookup(mp, tp, agno, agino, agbno, | ||
1896 | &chunk_agbno, &offset_agbno, flags); | ||
1897 | if (error) | ||
1898 | return error; | ||
1899 | } | ||
1900 | |||
1901 | out_map: | ||
1902 | ASSERT(agbno >= chunk_agbno); | ||
1903 | cluster_agbno = chunk_agbno + | ||
1904 | ((offset_agbno / blks_per_cluster) * blks_per_cluster); | ||
1905 | offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + | ||
1906 | XFS_INO_TO_OFFSET(mp, ino); | ||
1907 | |||
1908 | imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno); | ||
1909 | imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); | ||
1910 | imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); | ||
1911 | |||
1912 | /* | ||
1913 | * If the inode number maps to a block outside the bounds | ||
1914 | * of the file system then return an error rather than calling | ||
1915 | * read_buf and panicking when we get an error from the | ||
1916 | * driver. | ||
1917 | */ | ||
1918 | if ((imap->im_blkno + imap->im_len) > | ||
1919 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { | ||
1920 | xfs_alert(mp, | ||
1921 | "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", | ||
1922 | __func__, (unsigned long long) imap->im_blkno, | ||
1923 | (unsigned long long) imap->im_len, | ||
1924 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); | ||
1925 | return EINVAL; | ||
1926 | } | ||
1927 | return 0; | ||
1928 | } | ||
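A stand-alone sketch of the out_map arithmetic: round the chunk-relative block down to a cluster boundary, then convert the remaining blocks plus the in-block index into a byte offset within the cluster buffer. All geometry values here are assumed, purely to show the calculation.

#include <stdio.h>

int main(void)
{
	const unsigned blks_per_cluster = 4;	/* assumed blocks per cluster */
	const unsigned inopblock = 32;		/* assumed inodes per block */
	const unsigned inodelog = 9;		/* assumed 512-byte inodes */

	unsigned chunk_agbno = 1000;	/* first block of the inode chunk */
	unsigned agbno = 1007;		/* block holding the wanted inode */
	unsigned ino_in_block = 13;	/* index of the inode in its block */

	unsigned offset_agbno = agbno - chunk_agbno;
	unsigned cluster_agbno = chunk_agbno +
		(offset_agbno / blks_per_cluster) * blks_per_cluster;
	unsigned offset = (agbno - cluster_agbno) * inopblock + ino_in_block;

	printf("cluster starts at agbno %u, inode at byte offset %u\n",
	       cluster_agbno, offset << inodelog);
	return 0;
}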
1929 | |||
1930 | /* | ||
1931 | * Compute and fill in value of m_in_maxlevels. | ||
1932 | */ | ||
1933 | void | ||
1934 | xfs_ialloc_compute_maxlevels( | ||
1935 | xfs_mount_t *mp) /* file system mount structure */ | ||
1936 | { | ||
1937 | int level; | ||
1938 | uint maxblocks; | ||
1939 | uint maxleafents; | ||
1940 | int minleafrecs; | ||
1941 | int minnoderecs; | ||
1942 | |||
1943 | maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >> | ||
1944 | XFS_INODES_PER_CHUNK_LOG; | ||
1945 | minleafrecs = mp->m_alloc_mnr[0]; | ||
1946 | minnoderecs = mp->m_alloc_mnr[1]; | ||
1947 | maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; | ||
1948 | for (level = 1; maxblocks > 1; level++) | ||
1949 | maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; | ||
1950 | mp->m_in_maxlevels = level; | ||
1951 | } | ||
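A worked example of the level computation above with assumed record counts: divide the maximum number of leaf entries by the minimum records per leaf, then keep dividing by the minimum records per node until a single block remains. With the numbers below the loop settles on five levels.

#include <stdio.h>

int main(void)
{
	unsigned long long maxleafents = 1ULL << 26;	/* assumed */
	unsigned minleafrecs = 64;			/* assumed */
	unsigned minnoderecs = 64;			/* assumed */
	unsigned long long maxblocks;
	int level;

	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;

	printf("max levels = %d\n", level);	/* 5 with these numbers */
	return 0;
}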
1952 | |||
1953 | /* | ||
1954 | * Log specified fields for the ag hdr (inode section). The growth of the agi | ||
1955 | * structure over time requires that we interpret the buffer as two logical | ||
1956 | * regions delineated by the end of the unlinked list. This is due to the size | ||
1957 | * of the hash table and its location in the middle of the agi. | ||
1958 | * | ||
1959 | * For example, a request to log a field before agi_unlinked and a field after | ||
1960 | * agi_unlinked could cause us to log the entire hash table and use an excessive | ||
1961 | * amount of log space. To avoid this behavior, log the region up through | ||
1962 | * agi_unlinked in one call and the region after agi_unlinked through the end of | ||
1963 | * the structure in another. | ||
1964 | */ | ||
1965 | void | ||
1966 | xfs_ialloc_log_agi( | ||
1967 | xfs_trans_t *tp, /* transaction pointer */ | ||
1968 | xfs_buf_t *bp, /* allocation group header buffer */ | ||
1969 | int fields) /* bitmask of fields to log */ | ||
1970 | { | ||
1971 | int first; /* first byte number */ | ||
1972 | int last; /* last byte number */ | ||
1973 | static const short offsets[] = { /* field starting offsets */ | ||
1974 | /* keep in sync with bit definitions */ | ||
1975 | offsetof(xfs_agi_t, agi_magicnum), | ||
1976 | offsetof(xfs_agi_t, agi_versionnum), | ||
1977 | offsetof(xfs_agi_t, agi_seqno), | ||
1978 | offsetof(xfs_agi_t, agi_length), | ||
1979 | offsetof(xfs_agi_t, agi_count), | ||
1980 | offsetof(xfs_agi_t, agi_root), | ||
1981 | offsetof(xfs_agi_t, agi_level), | ||
1982 | offsetof(xfs_agi_t, agi_freecount), | ||
1983 | offsetof(xfs_agi_t, agi_newino), | ||
1984 | offsetof(xfs_agi_t, agi_dirino), | ||
1985 | offsetof(xfs_agi_t, agi_unlinked), | ||
1986 | offsetof(xfs_agi_t, agi_free_root), | ||
1987 | offsetof(xfs_agi_t, agi_free_level), | ||
1988 | sizeof(xfs_agi_t) | ||
1989 | }; | ||
1990 | #ifdef DEBUG | ||
1991 | xfs_agi_t *agi; /* allocation group header */ | ||
1992 | |||
1993 | agi = XFS_BUF_TO_AGI(bp); | ||
1994 | ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); | ||
1995 | #endif | ||
1996 | |||
1997 | xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF); | ||
1998 | |||
1999 | /* | ||
2000 | * Compute byte offsets for the first and last fields in the first | ||
2001 | * region and log the agi buffer. This only logs up through | ||
2002 | * agi_unlinked. | ||
2003 | */ | ||
2004 | if (fields & XFS_AGI_ALL_BITS_R1) { | ||
2005 | xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1, | ||
2006 | &first, &last); | ||
2007 | xfs_trans_log_buf(tp, bp, first, last); | ||
2008 | } | ||
2009 | |||
2010 | /* | ||
2011 | * Mask off the bits in the first region and calculate the first and | ||
2012 | * last field offsets for any bits in the second region. | ||
2013 | */ | ||
2014 | fields &= ~XFS_AGI_ALL_BITS_R1; | ||
2015 | if (fields) { | ||
2016 | xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2, | ||
2017 | &first, &last); | ||
2018 | xfs_trans_log_buf(tp, bp, first, last); | ||
2019 | } | ||
2020 | } | ||
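A sketch of why the AGI is logged as two regions: a single byte range spanning a field before and a field after the unlinked-bucket array would drag the whole array into the log, while two separate ranges stay small. The toy struct below only loosely mirrors the real agi layout; its field names and sizes are made up.

#include <stddef.h>
#include <stdio.h>

struct toy_agi {
	unsigned int	count;		/* region 1 */
	unsigned int	freecount;	/* region 1 */
	unsigned int	unlinked[64];	/* hash buckets in the middle */
	unsigned int	free_root;	/* region 2 */
	unsigned int	free_level;	/* region 2 */
};

int main(void)
{
	/* one range from freecount through free_level spans the buckets */
	size_t one_range = offsetof(struct toy_agi, free_level) +
			   sizeof(unsigned int) -
			   offsetof(struct toy_agi, freecount);
	/* two ranges: freecount alone, then free_root..free_level */
	size_t two_ranges = sizeof(unsigned int) + 2 * sizeof(unsigned int);

	printf("one range logs %zu bytes, two ranges log %zu bytes\n",
	       one_range, two_ranges);
	return 0;
}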
2021 | |||
2022 | #ifdef DEBUG | ||
2023 | STATIC void | ||
2024 | xfs_check_agi_unlinked( | ||
2025 | struct xfs_agi *agi) | ||
2026 | { | ||
2027 | int i; | ||
2028 | |||
2029 | for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) | ||
2030 | ASSERT(agi->agi_unlinked[i]); | ||
2031 | } | ||
2032 | #else | ||
2033 | #define xfs_check_agi_unlinked(agi) | ||
2034 | #endif | ||
2035 | |||
2036 | static bool | ||
2037 | xfs_agi_verify( | ||
2038 | struct xfs_buf *bp) | ||
2039 | { | ||
2040 | struct xfs_mount *mp = bp->b_target->bt_mount; | ||
2041 | struct xfs_agi *agi = XFS_BUF_TO_AGI(bp); | ||
2042 | |||
2043 | if (xfs_sb_version_hascrc(&mp->m_sb) && | ||
2044 | !uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_uuid)) | ||
2045 | return false; | ||
2046 | /* | ||
2047 | * Validate the magic number of the agi block. | ||
2048 | */ | ||
2049 | if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC)) | ||
2050 | return false; | ||
2051 | if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) | ||
2052 | return false; | ||
2053 | |||
2054 | /* | ||
2055 | * during growfs operations, the perag is not fully initialised, | ||
2056 | * so we can't use it for any useful checking. growfs ensures we don't | ||
2057 | * accidentally use it by using uncached buffers that have no perag | ||
2058 | * attached, which lets us detect and avoid this problem. | ||
2059 | */ | ||
2060 | if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) | ||
2061 | return false; | ||
2062 | |||
2063 | xfs_check_agi_unlinked(agi); | ||
2064 | return true; | ||
2065 | } | ||
2066 | |||
2067 | static void | ||
2068 | xfs_agi_read_verify( | ||
2069 | struct xfs_buf *bp) | ||
2070 | { | ||
2071 | struct xfs_mount *mp = bp->b_target->bt_mount; | ||
2072 | |||
2073 | if (xfs_sb_version_hascrc(&mp->m_sb) && | ||
2074 | !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) | ||
2075 | xfs_buf_ioerror(bp, EFSBADCRC); | ||
2076 | else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp, | ||
2077 | XFS_ERRTAG_IALLOC_READ_AGI, | ||
2078 | XFS_RANDOM_IALLOC_READ_AGI)) | ||
2079 | xfs_buf_ioerror(bp, EFSCORRUPTED); | ||
2080 | |||
2081 | if (bp->b_error) | ||
2082 | xfs_verifier_error(bp); | ||
2083 | } | ||
2084 | |||
2085 | static void | ||
2086 | xfs_agi_write_verify( | ||
2087 | struct xfs_buf *bp) | ||
2088 | { | ||
2089 | struct xfs_mount *mp = bp->b_target->bt_mount; | ||
2090 | struct xfs_buf_log_item *bip = bp->b_fspriv; | ||
2091 | |||
2092 | if (!xfs_agi_verify(bp)) { | ||
2093 | xfs_buf_ioerror(bp, EFSCORRUPTED); | ||
2094 | xfs_verifier_error(bp); | ||
2095 | return; | ||
2096 | } | ||
2097 | |||
2098 | if (!xfs_sb_version_hascrc(&mp->m_sb)) | ||
2099 | return; | ||
2100 | |||
2101 | if (bip) | ||
2102 | XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); | ||
2103 | xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); | ||
2104 | } | ||
2105 | |||
2106 | const struct xfs_buf_ops xfs_agi_buf_ops = { | ||
2107 | .verify_read = xfs_agi_read_verify, | ||
2108 | .verify_write = xfs_agi_write_verify, | ||
2109 | }; | ||
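A sketch of the read-verifier ordering used above: a checksum failure is reported as a CRC error before any structural checks run, and structural corruption is reported only when the checksum (if present) is good. The error codes below are stand-ins, not the real XFS values.

#include <stdbool.h>
#include <stdio.h>

enum toy_error { TOY_OK, TOY_BADCRC, TOY_CORRUPTED };

static enum toy_error toy_read_verify(bool has_crc, bool crc_ok,
				      bool structure_ok)
{
	if (has_crc && !crc_ok)
		return TOY_BADCRC;	/* untrustworthy buffer: stop here */
	if (!structure_ok)
		return TOY_CORRUPTED;	/* checksum fine, contents are not */
	return TOY_OK;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_read_verify(true, false, true),	/* bad CRC wins */
	       toy_read_verify(true, true, false),	/* corrupted */
	       toy_read_verify(false, true, true));	/* ok */
	return 0;
}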
2110 | |||
2111 | /* | ||
2112 | * Read in the allocation group header (inode allocation section) | ||
2113 | */ | ||
2114 | int | ||
2115 | xfs_read_agi( | ||
2116 | struct xfs_mount *mp, /* file system mount structure */ | ||
2117 | struct xfs_trans *tp, /* transaction pointer */ | ||
2118 | xfs_agnumber_t agno, /* allocation group number */ | ||
2119 | struct xfs_buf **bpp) /* allocation group hdr buf */ | ||
2120 | { | ||
2121 | int error; | ||
2122 | |||
2123 | trace_xfs_read_agi(mp, agno); | ||
2124 | |||
2125 | ASSERT(agno != NULLAGNUMBER); | ||
2126 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, | ||
2127 | XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), | ||
2128 | XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops); | ||
2129 | if (error) | ||
2130 | return error; | ||
2131 | |||
2132 | xfs_buf_set_ref(*bpp, XFS_AGI_REF); | ||
2133 | return 0; | ||
2134 | } | ||
2135 | |||
2136 | int | ||
2137 | xfs_ialloc_read_agi( | ||
2138 | struct xfs_mount *mp, /* file system mount structure */ | ||
2139 | struct xfs_trans *tp, /* transaction pointer */ | ||
2140 | xfs_agnumber_t agno, /* allocation group number */ | ||
2141 | struct xfs_buf **bpp) /* allocation group hdr buf */ | ||
2142 | { | ||
2143 | struct xfs_agi *agi; /* allocation group header */ | ||
2144 | struct xfs_perag *pag; /* per allocation group data */ | ||
2145 | int error; | ||
2146 | |||
2147 | trace_xfs_ialloc_read_agi(mp, agno); | ||
2148 | |||
2149 | error = xfs_read_agi(mp, tp, agno, bpp); | ||
2150 | if (error) | ||
2151 | return error; | ||
2152 | |||
2153 | agi = XFS_BUF_TO_AGI(*bpp); | ||
2154 | pag = xfs_perag_get(mp, agno); | ||
2155 | if (!pag->pagi_init) { | ||
2156 | pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); | ||
2157 | pag->pagi_count = be32_to_cpu(agi->agi_count); | ||
2158 | pag->pagi_init = 1; | ||
2159 | } | ||
2160 | |||
2161 | /* | ||
2162 | * It's possible for these to be out of sync if | ||
2163 | * we are in the middle of a forced shutdown. | ||
2164 | */ | ||
2165 | ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || | ||
2166 | XFS_FORCED_SHUTDOWN(mp)); | ||
2167 | xfs_perag_put(pag); | ||
2168 | return 0; | ||
2169 | } | ||
2170 | |||
2171 | /* | ||
2172 | * Read in the agi to initialise the per-ag data in the mount structure | ||
2173 | */ | ||
2174 | int | ||
2175 | xfs_ialloc_pagi_init( | ||
2176 | xfs_mount_t *mp, /* file system mount structure */ | ||
2177 | xfs_trans_t *tp, /* transaction pointer */ | ||
2178 | xfs_agnumber_t agno) /* allocation group number */ | ||
2179 | { | ||
2180 | xfs_buf_t *bp = NULL; | ||
2181 | int error; | ||
2182 | |||
2183 | error = xfs_ialloc_read_agi(mp, tp, agno, &bp); | ||
2184 | if (error) | ||
2185 | return error; | ||
2186 | if (bp) | ||
2187 | xfs_trans_brelse(tp, bp); | ||
2188 | return 0; | ||
2189 | } | ||