Diffstat (limited to 'fs/ext4/indirect.c')
-rw-r--r--  fs/ext4/indirect.c  1482
1 file changed, 1482 insertions, 0 deletions
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
new file mode 100644
index 000000000000..b8602cde5b5a
--- /dev/null
+++ b/fs/ext4/indirect.c
@@ -0,0 +1,1482 @@
/*
 * linux/fs/ext4/indirect.c
 *
 * from
 *
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 */

#include <linux/module.h>
#include "ext4_jbd2.h"
#include "truncate.h"

#include <trace/events/ext4.h>

typedef struct {
	__le32 *p;
	__le32 key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes. This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the
 * offset of the pointer to the (n+1)th node in the nth one. If @i_block
 * is out of range (negative or too large), a warning is printed and zero
 * is returned.
 *
 * Note: the function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
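
/*
 * Worked example (a sketch, assuming 4KB blocks, so ptrs = 1024 and
 * EXT4_NDIR_BLOCKS = 12): for i_block = 5000 we skip the 12 direct
 * slots (5000 - 12 = 4988) and the 1024 single-indirect slots
 * (4988 - 1024 = 3964), landing in the double-indirect range. The
 * resulting path is offsets[] = { EXT4_DIND_BLOCK, 3964 >> 10 = 3,
 * 3964 & 1023 = 892 }, depth 3, and *boundary = 1024 - 1 - 892 = 131
 * more pointers before the end of that indirect block.
 */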

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, the addresses they were taken from (where we can
 * verify that the chain did not change) and the buffer_heads hosting
 * these numbers.
 *
 * Function stops when it stumbles upon a zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *@err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
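
/*
 * Illustrative chain contents (a sketch, not part of the patch): after
 * a successful depth-3 walk for the example block above, chain[0].p
 * points at EXT4_I(inode)->i_data[EXT4_DIND_BLOCK] with
 * chain[0].bh == NULL, chain[1].p points at slot 3 inside the
 * double-indirect block's bh->b_data, and chain[2].p at slot 892 inside
 * the indirect block; chain[2].key then holds the little-endian number
 * of the data block itself.
 */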

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *	+ if there is a block to the left of our position - allocate near it.
 *	+ if pointer will live in indirect block - allocate near that block.
 *	+ if pointer will live in inode - allocate in the same
 *	  cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: if the [td]indirect block(s) have not been allocated
	 * yet, then clearly no blocks on that path have been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
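
/*
 * Worked example (a sketch): with blks = 200 data blocks requested,
 * blocks_to_boundary = 131 and a missing indirect block (k = 1), we
 * cannot cross the indirect-block boundary, so the function returns
 * min(200, 131 + 1) = 132 direct blocks to allocate. With k = 0 it
 * instead scans branch[0].p forward and stops at the first slot that
 * is already mapped (non-zero) or at the boundary.
 */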

/**
 * ext4_alloc_blocks - allocate multiple blocks needed for a branch
 * @handle: handle for this transaction
 * @inode: inode which needs allocated blocks
 * @iblock: the logical block to start allocation at
 * @goal: preferred physical block of allocation
 * @indirect_blks: the number of blocks that need to be allocated for
 *	indirect blocks
 * @blks: number of desired blocks
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block.
 * @err: on return it will store the error code
 *
 * This function will return the number of blocks allocated as
 * requested by the passed-in parameters.
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch. That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode, goal,
						     0, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
	return ret;
}
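
/*
 * Layout of new_blocks[] on success (a sketch): for a branch that is
 * missing two indirect levels, new_blocks[0..1] hold the physical
 * numbers of the new [d]indirect blocks and new_blocks[2] holds the
 * first of the newly allocated data blocks; the return value says how
 * many direct blocks were actually obtained, which may be fewer than
 * @blks.
 */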

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @inode: owner
 * @iblock: logical block to start the allocation at
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -EIO;
			goto failed;
		}

		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);

	return err;
}
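
/*
 * State after a successful return (a sketch): every new indirect block
 * already points at its child, and the new data block numbers are
 * filled into the deepest indirect block, but *branch[0].p - the slot
 * in the parent that ext4_get_branch() found empty - is still zero.
 * Only ext4_splice_branch() below writes branch[0].key into that slot,
 * which is what makes the whole branch visible atomically.
 */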

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link (with a missing link - see
 *	ext4_alloc_branch)
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just
	 * allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode. Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
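
/*
 * Note on the num == 0 case above (a sketch): when no indirect blocks
 * had to be allocated, @where points directly at the existing indirect
 * block (or i_data), so after *where->p = where->key the loop simply
 * fills the following blks - 1 slots with consecutive block numbers,
 * since the direct blocks came back from a single contiguous
 * allocation in ext4_alloc_blocks().
 */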

/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
				       map->m_pblk, map->m_len, err);
	return err;
}

/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode))
		ret = __blockdev_direct_IO(rw, iocb, inode,
					   inode->i_sb->s_bdev, iov,
					   offset, nr_segs,
					   ext4_get_block, NULL, NULL, 0);
	else {
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
					 offset, nr_segs, ext4_get_block);

		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace. So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a new block at @lblock for a non-extent-based file.
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
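
/*
 * Worked example (a sketch, again assuming 4KB blocks): for
 * lblock = 5000 we get lblock - 12 = 4988, order_base_2(4988) = 13
 * bits, and 13 / 10 + 1 = 2 metadata blocks reserved (an indirect
 * plus a double-indirect block). Subsequent blocks in the same
 * 1024-block window (same lblock & dind_mask) return 0, so the
 * reservation is only charged once per window.
 */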

int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
		 * 2 dindirect blocks, and 1 tindirect block
		 */
		return DIV_ROUND_UP(nrblocks,
				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
	}
	/*
	 * if nrblocks are not contiguous, worst case: each block touches
	 * an indirect block, and each indirect block touches a double
	 * indirect block, plus a triple indirect block
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}
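
/*
 * Quick arithmetic check (a sketch, 4KB blocks): a contiguous chunk of
 * nrblocks = 3000 gives DIV_ROUND_UP(3000, 1024) + 4 = 3 + 4 = 7
 * metadata blocks that a transaction might dirty, while 3000
 * discontiguous blocks are charged the pessimistic 3000 * 2 + 1 = 6001.
 */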

/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit. If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room. If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to the (detached) top of branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but the top of the branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. Return value is the pointer to last filled element
 * of @chain.
 *
 * The actual freeing of subtrees is left to the caller:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].
 *		(no partially truncated stuff there).
 */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
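
/*
 * Example (a sketch): suppose truncation leaves the last surviving
 * block at offsets[] = { EXT4_DIND_BLOCK, 3, 892 }. Everything
 * strictly to the right of that path at each level is on the detached
 * side and will be freed by ext4_ind_truncate() below via
 * ext4_free_branches(); blocks on or to the left of the path survive,
 * except for the top of the branch handled through *@top.
 */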

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
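
/*
 * Run accumulation, illustrated (a sketch): for pointer contents
 * { 100, 101, 102, 0, 200 } the loop batches 100..102 into a single
 * ext4_clear_blocks(..., block_to_free = 100, count = 3, ...) call,
 * skips the hole, then starts a new run at 200, which the tail call
 * after the loop flushes.
 */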

/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block. This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size & (blocksize - 1))
		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
			goto out_stop;

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			goto out_stop;	/* error */
	}

	/*
	 * OK. This truncate is going to happen. We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers. It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		goto out_unlock;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}

out_unlock:
	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
	trace_ext4_truncate_exit(inode);
}