Diffstat (limited to 'fs/minix/itree_common.c')
-rw-r--r--  fs/minix/itree_common.c  362
1 file changed, 362 insertions, 0 deletions
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
new file mode 100644
index 000000000000..429baf8de105
--- /dev/null
+++ b/fs/minix/itree_common.c
@@ -0,0 +1,362 @@
/* Generic part */

typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);

static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

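/*
 * Recheck that no pointer on the path changed (e.g. under a racing
 * truncate) since it was read; called with pointers_lock held.
 */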
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char *)bh->b_data + BLOCK_SIZE);
}

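/*
 * Read the chain of indirect blocks leading to the wanted block.
 * Returns NULL when the full depth was resolved (the result is then
 * chain[depth-1].key), otherwise the last valid element, with *err
 * set to 0 for a hole, -EIO for a failed read, or -EAGAIN when a
 * racing truncate invalidated the chain.
 */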
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

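/*
 * Allocate num blocks and wire them into a detached branch: every
 * new indirect block is zeroed and pointed at its child before it is
 * dirtied.  On partial failure all new blocks are freed and -ENOSPC
 * is returned; nothing is visible to readers yet either way.
 */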
static int alloc_branch(struct inode *inode,
			int num,
			int *offsets,
			Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, BLOCK_SIZE);
		branch[n].bh = bh;
		branch[n].p = (block_t *)bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}

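/*
 * Make the new branch visible by filling in the single missing
 * pointer under pointers_lock.  If the splice point was truncated
 * away or filled in the meantime, the new blocks are freed again and
 * -EAGAIN tells the caller to retry.
 */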
static inline int splice_branch(struct inode *inode,
				Indirect chain[DEPTH],
				Indirect *where,
				int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

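/*
 * Map a file block to a disk block, allocating the missing tail of
 * the branch when create is set; this is the common worker behind
 * the V1/V2 minix get_block wrappers.
 */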
static inline int get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

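/* True if every pointer in [p, q) is zero. */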
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

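/*
 * Find the indirect block, if any, straddling the truncation point:
 * the piece of the tree shared between the data that survives and
 * the data being cut off.  Under pointers_lock the top of the doomed
 * subtree is detached into *top so new readers cannot reach it.
 */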
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((block_t *)p->bh->b_data, p->p); p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

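/* Free every block in [p, q), zeroing each nonzero pointer slot. */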
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

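/*
 * Free a range of branches: while depth remains, each pointer names
 * an indirect block whose children are freed recursively first.
 */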
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t *)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

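/*
 * Shrink the tree to i_size: zero the tail of the last partial
 * block, free the data past EOF, prune the shared branch located by
 * find_shared(), then drop the wholly unused indirect subtrees.
 */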
static inline void truncate(struct inode *inode)
{
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + BLOCK_SIZE-1) >> BLOCK_SIZE_BITS;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}

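/*
 * Total number of blocks, data plus indirect, that a file of the
 * given size occupies in the tree.
 */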
static inline unsigned nblocks(loff_t size)
{
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += BLOCK_SIZE/sizeof(block_t) - 1;
		blocks /= BLOCK_SIZE/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}