Diffstat (limited to 'fs/ufs/inode.c')
-rw-r--r--	fs/ufs/inode.c	816
1 file changed, 816 insertions, 0 deletions
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
new file mode 100644
index 000000000000..718627ca8b5c
--- /dev/null
+++ b/fs/ufs/inode.c
@@ -0,0 +1,816 @@
1 | /* | ||
2 | * linux/fs/ufs/inode.c | ||
3 | * | ||
4 | * Copyright (C) 1998 | ||
5 | * Daniel Pirkl <daniel.pirkl@email.cz> | ||
6 | * Charles University, Faculty of Mathematics and Physics | ||
7 | * | ||
8 | * from | ||
9 | * | ||
10 | * linux/fs/ext2/inode.c | ||
11 | * | ||
12 | * Copyright (C) 1992, 1993, 1994, 1995 | ||
13 | * Remy Card (card@masi.ibp.fr) | ||
14 | * Laboratoire MASI - Institut Blaise Pascal | ||
15 | * Universite Pierre et Marie Curie (Paris VI) | ||
16 | * | ||
17 | * from | ||
18 | * | ||
19 | * linux/fs/minix/inode.c | ||
20 | * | ||
21 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
22 | * | ||
23 | * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993 | ||
24 | * Big-endian to little-endian byte-swapping/bitmaps by | ||
25 | * David S. Miller (davem@caip.rutgers.edu), 1995 | ||
26 | */ | ||
27 | |||
28 | #include <asm/uaccess.h> | ||
29 | #include <asm/system.h> | ||
30 | |||
31 | #include <linux/errno.h> | ||
32 | #include <linux/fs.h> | ||
33 | #include <linux/ufs_fs.h> | ||
34 | #include <linux/time.h> | ||
35 | #include <linux/stat.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/mm.h> | ||
38 | #include <linux/smp_lock.h> | ||
39 | #include <linux/buffer_head.h> | ||
40 | |||
41 | #include "swab.h" | ||
42 | #include "util.h" | ||
43 | |||
44 | #undef UFS_INODE_DEBUG | ||
45 | #undef UFS_INODE_DEBUG_MORE | ||
46 | |||
47 | #ifdef UFS_INODE_DEBUG | ||
48 | #define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x; | ||
49 | #else | ||
50 | #define UFSD(x) | ||
51 | #endif | ||
52 | |||
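/*
 * Note: ufs_block_to_path() translates a file block number into a chain of
 * up to four array offsets: a single direct index, or
 * [UFS_IND_BLOCK, i], [UFS_DIND_BLOCK, hi, lo] or [UFS_TIND_BLOCK, hi, mid, lo]
 * for the one-, two- and three-level indirect cases.  It mirrors
 * ext2_block_to_path() and returns the depth of the chain (0 if the block
 * number is out of range).
 */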
53 | static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4]) | ||
54 | { | ||
55 | struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; | ||
56 | int ptrs = uspi->s_apb; | ||
57 | int ptrs_bits = uspi->s_apbshift; | ||
58 | const long direct_blocks = UFS_NDADDR, | ||
59 | indirect_blocks = ptrs, | ||
60 | double_blocks = (1 << (ptrs_bits * 2)); | ||
61 | int n = 0; | ||
62 | |||
63 | |||
64 | UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%d \n",ptrs,double_blocks)); | ||
65 | if (i_block < 0) { | ||
66 | ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0"); | ||
67 | } else if (i_block < direct_blocks) { | ||
68 | offsets[n++] = i_block; | ||
69 | } else if ((i_block -= direct_blocks) < indirect_blocks) { | ||
70 | offsets[n++] = UFS_IND_BLOCK; | ||
71 | offsets[n++] = i_block; | ||
72 | } else if ((i_block -= indirect_blocks) < double_blocks) { | ||
73 | offsets[n++] = UFS_DIND_BLOCK; | ||
74 | offsets[n++] = i_block >> ptrs_bits; | ||
75 | offsets[n++] = i_block & (ptrs - 1); | ||
76 | } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { | ||
77 | offsets[n++] = UFS_TIND_BLOCK; | ||
78 | offsets[n++] = i_block >> (ptrs_bits * 2); | ||
79 | offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); | ||
80 | offsets[n++] = i_block & (ptrs - 1); | ||
81 | } else { | ||
82 | ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big"); | ||
83 | } | ||
84 | return n; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Returns the location of the fragment from | ||
89 | * the beginning of the filesystem. | ||
90 | */ | ||
91 | |||
92 | u64 ufs_frag_map(struct inode *inode, sector_t frag) | ||
93 | { | ||
94 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
95 | struct super_block *sb = inode->i_sb; | ||
96 | struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; | ||
97 | u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; | ||
98 | int shift = uspi->s_apbshift-uspi->s_fpbshift; | ||
99 | sector_t offsets[4], *p; | ||
100 | int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets); | ||
101 | u64 ret = 0L; | ||
102 | __fs32 block; | ||
103 | __fs64 u2_block = 0L; | ||
104 | unsigned flags = UFS_SB(sb)->s_flags; | ||
105 | u64 temp = 0L; | ||
106 | |||
107 | UFSD((": frag = %lu depth = %d\n",frag,depth)); | ||
108 | UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask)); | ||
109 | |||
110 | if (depth == 0) | ||
111 | return 0; | ||
112 | |||
113 | p = offsets; | ||
114 | |||
115 | lock_kernel(); | ||
116 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) | ||
117 | goto ufs2; | ||
118 | |||
119 | block = ufsi->i_u1.i_data[*p++]; | ||
120 | if (!block) | ||
121 | goto out; | ||
122 | while (--depth) { | ||
123 | struct buffer_head *bh; | ||
124 | sector_t n = *p++; | ||
125 | |||
126 | bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift)); | ||
127 | if (!bh) | ||
128 | goto out; | ||
129 | block = ((__fs32 *) bh->b_data)[n & mask]; | ||
130 | brelse (bh); | ||
131 | if (!block) | ||
132 | goto out; | ||
133 | } | ||
134 | ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask)); | ||
135 | goto out; | ||
136 | ufs2: | ||
137 | u2_block = ufsi->i_u1.u2_i_data[*p++]; | ||
138 | if (!u2_block) | ||
139 | goto out; | ||
140 | |||
141 | |||
142 | while (--depth) { | ||
143 | struct buffer_head *bh; | ||
144 | sector_t n = *p++; | ||
145 | |||
146 | |||
147 | temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block); | ||
148 | bh = sb_bread(sb, temp +(u64) (n>>shift)); | ||
149 | if (!bh) | ||
150 | goto out; | ||
151 | u2_block = ((__fs64 *)bh->b_data)[n & mask]; | ||
152 | brelse(bh); | ||
153 | if (!u2_block) | ||
154 | goto out; | ||
155 | } | ||
156 | temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block); | ||
157 | ret = temp + (u64) (frag & uspi->s_fpbmask); | ||
158 | |||
159 | out: | ||
160 | unlock_kernel(); | ||
161 | return ret; | ||
162 | } | ||
163 | |||
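/*
 * Note: ufs_inode_getfrag() allocates (or extends) fragments that are
 * addressed directly from the inode's i_data[] array.  With metadata != 0
 * it returns a buffer_head for the block (via sb_getblk); otherwise it
 * reports the physical fragment number through *phys and sets *new when a
 * fresh fragment had to be allocated.  UFS2 write support is still a TODO
 * here, as the commented-out ufs2 path below indicates.
 */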
164 | static struct buffer_head * ufs_inode_getfrag (struct inode *inode, | ||
165 | unsigned int fragment, unsigned int new_fragment, | ||
166 | unsigned int required, int *err, int metadata, long *phys, int *new) | ||
167 | { | ||
168 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
169 | struct super_block * sb; | ||
170 | struct ufs_sb_private_info * uspi; | ||
171 | struct buffer_head * result; | ||
172 | unsigned block, blockoff, lastfrag, lastblock, lastblockoff; | ||
173 | unsigned tmp, goal; | ||
174 | __fs32 * p, * p2; | ||
175 | unsigned flags = 0; | ||
176 | |||
177 | UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n", | ||
178 | inode->i_ino, fragment, new_fragment, required)) | ||
179 | |||
180 | sb = inode->i_sb; | ||
181 | uspi = UFS_SB(sb)->s_uspi; | ||
182 | |||
183 | flags = UFS_SB(sb)->s_flags; | ||
184 | /* TODO : to be done for write support | ||
185 | if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) | ||
186 | goto ufs2; | ||
187 | */ | ||
188 | |||
189 | block = ufs_fragstoblks (fragment); | ||
190 | blockoff = ufs_fragnum (fragment); | ||
191 | p = ufsi->i_u1.i_data + block; | ||
192 | goal = 0; | ||
193 | |||
194 | repeat: | ||
195 | tmp = fs32_to_cpu(sb, *p); | ||
196 | lastfrag = ufsi->i_lastfrag; | ||
197 | if (tmp && fragment < lastfrag) { | ||
198 | if (metadata) { | ||
199 | result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); | ||
200 | if (tmp == fs32_to_cpu(sb, *p)) { | ||
201 | UFSD(("EXIT, result %u\n", tmp + blockoff)) | ||
202 | return result; | ||
203 | } | ||
204 | brelse (result); | ||
205 | goto repeat; | ||
206 | } else { | ||
207 | *phys = tmp; | ||
208 | return NULL; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | lastblock = ufs_fragstoblks (lastfrag); | ||
213 | lastblockoff = ufs_fragnum (lastfrag); | ||
214 | /* | ||
215 | * We will extend the file into a new block beyond the last allocated block | ||
216 | */ | ||
217 | if (lastblock < block) { | ||
218 | /* | ||
219 | * We must reallocate the last allocated block | ||
220 | */ | ||
221 | if (lastblockoff) { | ||
222 | p2 = ufsi->i_u1.i_data + lastblock; | ||
223 | tmp = ufs_new_fragments (inode, p2, lastfrag, | ||
224 | fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err); | ||
225 | if (!tmp) { | ||
226 | if (lastfrag != ufsi->i_lastfrag) | ||
227 | goto repeat; | ||
228 | else | ||
229 | return NULL; | ||
230 | } | ||
231 | lastfrag = ufsi->i_lastfrag; | ||
232 | |||
233 | } | ||
234 | goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; | ||
235 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, | ||
236 | goal, required + blockoff, err); | ||
237 | } | ||
238 | /* | ||
239 | * We will extend the last allocated block | ||
240 | */ | ||
241 | else if (lastblock == block) { | ||
242 | tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff), | ||
243 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err); | ||
244 | } | ||
245 | /* | ||
246 | * We will allocate a new block before the last allocated block | ||
247 | */ | ||
248 | else /* (lastblock > block) */ { | ||
249 | if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) | ||
250 | goal = tmp + uspi->s_fpb; | ||
251 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, | ||
252 | goal, uspi->s_fpb, err); | ||
253 | } | ||
254 | if (!tmp) { | ||
255 | if ((!blockoff && *p) || | ||
256 | (blockoff && lastfrag != ufsi->i_lastfrag)) | ||
257 | goto repeat; | ||
258 | *err = -ENOSPC; | ||
259 | return NULL; | ||
260 | } | ||
261 | |||
262 | /* The nullification of fragments done in ufs/balloc.c is | ||
263 | * something I don't have the stomach to move into here right | ||
264 | * now. -DaveM | ||
265 | */ | ||
266 | if (metadata) { | ||
267 | result = sb_getblk(inode->i_sb, tmp + blockoff); | ||
268 | } else { | ||
269 | *phys = tmp; | ||
270 | result = NULL; | ||
271 | *err = 0; | ||
272 | *new = 1; | ||
273 | } | ||
274 | |||
275 | inode->i_ctime = CURRENT_TIME_SEC; | ||
276 | if (IS_SYNC(inode)) | ||
277 | ufs_sync_inode (inode); | ||
278 | mark_inode_dirty(inode); | ||
279 | UFSD(("EXIT, result %u\n", tmp + blockoff)) | ||
280 | return result; | ||
281 | |||
282 | /* This part is still to be implemented .... | ||
283 | It is required only for writing, not for read-only access. | ||
284 | ufs2: | ||
285 | |||
286 | u2_block = ufs_fragstoblks(fragment); | ||
287 | u2_blockoff = ufs_fragnum(fragment); | ||
288 | p = ufsi->i_u1.u2_i_data + block; | ||
289 | goal = 0; | ||
290 | |||
291 | repeat2: | ||
292 | tmp = fs32_to_cpu(sb, *p); | ||
293 | lastfrag = ufsi->i_lastfrag; | ||
294 | |||
295 | */ | ||
296 | } | ||
297 | |||
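/*
 * Note: ufs_block_getfrag() is the indirect-block counterpart of
 * ufs_inode_getfrag(): the block pointer being filled in lives inside the
 * buffer bh (an indirect block) rather than in the inode itself.  The same
 * metadata, *phys and *new conventions apply, and bh is released before
 * returning.
 */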
298 | static struct buffer_head * ufs_block_getfrag (struct inode *inode, | ||
299 | struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment, | ||
300 | unsigned int blocksize, int * err, int metadata, long *phys, int *new) | ||
301 | { | ||
302 | struct super_block * sb; | ||
303 | struct ufs_sb_private_info * uspi; | ||
304 | struct buffer_head * result; | ||
305 | unsigned tmp, goal, block, blockoff; | ||
306 | __fs32 * p; | ||
307 | |||
308 | sb = inode->i_sb; | ||
309 | uspi = UFS_SB(sb)->s_uspi; | ||
310 | block = ufs_fragstoblks (fragment); | ||
311 | blockoff = ufs_fragnum (fragment); | ||
312 | |||
313 | UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment)) | ||
314 | |||
315 | result = NULL; | ||
316 | if (!bh) | ||
317 | goto out; | ||
318 | if (!buffer_uptodate(bh)) { | ||
319 | ll_rw_block (READ, 1, &bh); | ||
320 | wait_on_buffer (bh); | ||
321 | if (!buffer_uptodate(bh)) | ||
322 | goto out; | ||
323 | } | ||
324 | |||
325 | p = (__fs32 *) bh->b_data + block; | ||
326 | repeat: | ||
327 | tmp = fs32_to_cpu(sb, *p); | ||
328 | if (tmp) { | ||
329 | if (metadata) { | ||
330 | result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); | ||
331 | if (tmp == fs32_to_cpu(sb, *p)) | ||
332 | goto out; | ||
333 | brelse (result); | ||
334 | goto repeat; | ||
335 | } else { | ||
336 | *phys = tmp; | ||
337 | goto out; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) | ||
342 | goal = tmp + uspi->s_fpb; | ||
343 | else | ||
344 | goal = bh->b_blocknr + uspi->s_fpb; | ||
345 | tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err); | ||
346 | if (!tmp) { | ||
347 | if (fs32_to_cpu(sb, *p)) | ||
348 | goto repeat; | ||
349 | goto out; | ||
350 | } | ||
351 | |||
352 | /* The nullification of fragments done in ufs/balloc.c is | ||
353 | * something I don't have the stomach to move into here right | ||
354 | * now. -DaveM | ||
355 | */ | ||
356 | if (metadata) { | ||
357 | result = sb_getblk(sb, tmp + blockoff); | ||
358 | } else { | ||
359 | *phys = tmp; | ||
360 | *new = 1; | ||
361 | } | ||
362 | |||
363 | mark_buffer_dirty(bh); | ||
364 | if (IS_SYNC(inode)) | ||
365 | sync_dirty_buffer(bh); | ||
366 | inode->i_ctime = CURRENT_TIME_SEC; | ||
367 | mark_inode_dirty(inode); | ||
368 | out: | ||
369 | brelse (bh); | ||
370 | UFSD(("EXIT, result %u\n", tmp + blockoff)) | ||
371 | return result; | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * This function gets the block which contains the fragment. | ||
376 | */ | ||
377 | |||
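/*
 * Note: this is the get_block-style callback used by the address space
 * operations further down.  Reads are resolved with ufs_frag_map(); for
 * create the code walks the direct, single, double and triple indirect
 * levels via the GET_INODE_xxx and GET_INDIRECT_xxx macros, allocating
 * missing blocks on the way down, and finally maps bh_result to the
 * physical fragment.
 */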
378 | static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) | ||
379 | { | ||
380 | struct super_block * sb = inode->i_sb; | ||
381 | struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi; | ||
382 | struct buffer_head * bh; | ||
383 | int ret, err, new; | ||
384 | unsigned long ptr,phys; | ||
385 | u64 phys64 = 0; | ||
386 | |||
387 | if (!create) { | ||
388 | phys64 = ufs_frag_map(inode, fragment); | ||
389 | UFSD(("phys64 = %lu \n",phys64)); | ||
390 | if (phys64) | ||
391 | map_bh(bh_result, sb, phys64); | ||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | /* This code is entered only when writing ....? */ | ||
396 | |||
397 | err = -EIO; | ||
398 | new = 0; | ||
399 | ret = 0; | ||
400 | bh = NULL; | ||
401 | |||
402 | lock_kernel(); | ||
403 | |||
404 | UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment)) | ||
405 | if (fragment < 0) | ||
406 | goto abort_negative; | ||
407 | if (fragment > | ||
408 | ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) | ||
409 | << uspi->s_fpbshift)) | ||
410 | goto abort_too_big; | ||
411 | |||
412 | err = 0; | ||
413 | ptr = fragment; | ||
414 | |||
415 | /* | ||
416 | * ok, these macros clean the logic up a bit and make | ||
417 | * it much more readable: | ||
418 | */ | ||
419 | #define GET_INODE_DATABLOCK(x) \ | ||
420 | ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new) | ||
421 | #define GET_INODE_PTR(x) \ | ||
422 | ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL) | ||
423 | #define GET_INDIRECT_DATABLOCK(x) \ | ||
424 | ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ | ||
425 | &err, 0, &phys, &new); | ||
426 | #define GET_INDIRECT_PTR(x) \ | ||
427 | ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ | ||
428 | &err, 1, NULL, NULL); | ||
429 | |||
430 | if (ptr < UFS_NDIR_FRAGMENT) { | ||
431 | bh = GET_INODE_DATABLOCK(ptr); | ||
432 | goto out; | ||
433 | } | ||
434 | ptr -= UFS_NDIR_FRAGMENT; | ||
435 | if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) { | ||
436 | bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift)); | ||
437 | goto get_indirect; | ||
438 | } | ||
439 | ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift); | ||
440 | if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) { | ||
441 | bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift)); | ||
442 | goto get_double; | ||
443 | } | ||
444 | ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift); | ||
445 | bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift)); | ||
446 | bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask); | ||
447 | get_double: | ||
448 | bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask); | ||
449 | get_indirect: | ||
450 | bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask); | ||
451 | |||
452 | #undef GET_INODE_DATABLOCK | ||
453 | #undef GET_INODE_PTR | ||
454 | #undef GET_INDIRECT_DATABLOCK | ||
455 | #undef GET_INDIRECT_PTR | ||
456 | |||
457 | out: | ||
458 | if (err) | ||
459 | goto abort; | ||
460 | if (new) | ||
461 | set_buffer_new(bh_result); | ||
462 | map_bh(bh_result, sb, phys); | ||
463 | abort: | ||
464 | unlock_kernel(); | ||
465 | return err; | ||
466 | |||
467 | abort_negative: | ||
468 | ufs_warning(sb, "ufs_get_block", "block < 0"); | ||
469 | goto abort; | ||
470 | |||
471 | abort_too_big: | ||
472 | ufs_warning(sb, "ufs_get_block", "block > big"); | ||
473 | goto abort; | ||
474 | } | ||
475 | |||
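/*
 * Note: ufs_getfrag() drives ufs_getfrag_block() with a dummy on-stack
 * buffer_head just to learn the physical block number (and whether it was
 * newly allocated); the real buffer is then obtained with sb_getblk(), and
 * freshly allocated blocks are zeroed, marked uptodate and dirtied.
 */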
476 | struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment, | ||
477 | int create, int *err) | ||
478 | { | ||
479 | struct buffer_head dummy; | ||
480 | int error; | ||
481 | |||
482 | dummy.b_state = 0; | ||
483 | dummy.b_blocknr = -1000; | ||
484 | error = ufs_getfrag_block(inode, fragment, &dummy, create); | ||
485 | *err = error; | ||
486 | if (!error && buffer_mapped(&dummy)) { | ||
487 | struct buffer_head *bh; | ||
488 | bh = sb_getblk(inode->i_sb, dummy.b_blocknr); | ||
489 | if (buffer_new(&dummy)) { | ||
490 | memset(bh->b_data, 0, inode->i_sb->s_blocksize); | ||
491 | set_buffer_uptodate(bh); | ||
492 | mark_buffer_dirty(bh); | ||
493 | } | ||
494 | return bh; | ||
495 | } | ||
496 | return NULL; | ||
497 | } | ||
498 | |||
499 | struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment, | ||
500 | int create, int * err) | ||
501 | { | ||
502 | struct buffer_head * bh; | ||
503 | |||
504 | UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment)) | ||
505 | bh = ufs_getfrag (inode, fragment, create, err); | ||
506 | if (!bh || buffer_uptodate(bh)) | ||
507 | return bh; | ||
508 | ll_rw_block (READ, 1, &bh); | ||
509 | wait_on_buffer (bh); | ||
510 | if (buffer_uptodate(bh)) | ||
511 | return bh; | ||
512 | brelse (bh); | ||
513 | *err = -EIO; | ||
514 | return NULL; | ||
515 | } | ||
516 | |||
517 | static int ufs_writepage(struct page *page, struct writeback_control *wbc) | ||
518 | { | ||
519 | return block_write_full_page(page,ufs_getfrag_block,wbc); | ||
520 | } | ||
521 | static int ufs_readpage(struct file *file, struct page *page) | ||
522 | { | ||
523 | return block_read_full_page(page,ufs_getfrag_block); | ||
524 | } | ||
525 | static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) | ||
526 | { | ||
527 | return block_prepare_write(page,from,to,ufs_getfrag_block); | ||
528 | } | ||
529 | static sector_t ufs_bmap(struct address_space *mapping, sector_t block) | ||
530 | { | ||
531 | return generic_block_bmap(mapping,block,ufs_getfrag_block); | ||
532 | } | ||
533 | struct address_space_operations ufs_aops = { | ||
534 | .readpage = ufs_readpage, | ||
535 | .writepage = ufs_writepage, | ||
536 | .sync_page = block_sync_page, | ||
537 | .prepare_write = ufs_prepare_write, | ||
538 | .commit_write = generic_commit_write, | ||
539 | .bmap = ufs_bmap | ||
540 | }; | ||
541 | |||
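/*
 * Note: ufs_read_inode() fills the in-core inode from the on-disk inode.
 * UFS1 and UFS2 use different on-disk inode layouts, so after reading the
 * inode block the UFS2 case branches off to the ufs2_inode label below;
 * both paths then install the same inode/file/address-space operations
 * according to the file type.
 */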
542 | void ufs_read_inode (struct inode * inode) | ||
543 | { | ||
544 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
545 | struct super_block * sb; | ||
546 | struct ufs_sb_private_info * uspi; | ||
547 | struct ufs_inode * ufs_inode; | ||
548 | struct ufs2_inode *ufs2_inode; | ||
549 | struct buffer_head * bh; | ||
550 | mode_t mode; | ||
551 | unsigned i; | ||
552 | unsigned flags; | ||
553 | |||
554 | UFSD(("ENTER, ino %lu\n", inode->i_ino)) | ||
555 | |||
556 | sb = inode->i_sb; | ||
557 | uspi = UFS_SB(sb)->s_uspi; | ||
558 | flags = UFS_SB(sb)->s_flags; | ||
559 | |||
560 | if (inode->i_ino < UFS_ROOTINO || | ||
561 | inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { | ||
562 | ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); | ||
563 | goto bad_inode; | ||
564 | } | ||
565 | |||
566 | bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); | ||
567 | if (!bh) { | ||
568 | ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); | ||
569 | goto bad_inode; | ||
570 | } | ||
571 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) | ||
572 | goto ufs2_inode; | ||
573 | |||
574 | ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino)); | ||
575 | |||
576 | /* | ||
577 | * Copy data to the in-core inode. | ||
578 | */ | ||
579 | inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); | ||
580 | inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink); | ||
581 | if (inode->i_nlink == 0) | ||
582 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); | ||
583 | |||
584 | /* | ||
585 | * Linux now has 32-bit uid and gid, so we can support EFT. | ||
586 | */ | ||
587 | inode->i_uid = ufs_get_inode_uid(sb, ufs_inode); | ||
588 | inode->i_gid = ufs_get_inode_gid(sb, ufs_inode); | ||
589 | |||
590 | inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); | ||
591 | inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); | ||
592 | inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); | ||
593 | inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); | ||
594 | inode->i_mtime.tv_nsec = 0; | ||
595 | inode->i_atime.tv_nsec = 0; | ||
596 | inode->i_ctime.tv_nsec = 0; | ||
597 | inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); | ||
598 | inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */ | ||
599 | inode->i_version++; | ||
600 | ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); | ||
601 | ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen); | ||
602 | ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); | ||
603 | ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); | ||
604 | ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; | ||
605 | |||
606 | if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { | ||
607 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) | ||
608 | ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i]; | ||
609 | } | ||
610 | else { | ||
611 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | ||
612 | ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; | ||
613 | } | ||
614 | ufsi->i_osync = 0; | ||
615 | |||
616 | if (S_ISREG(inode->i_mode)) { | ||
617 | inode->i_op = &ufs_file_inode_operations; | ||
618 | inode->i_fop = &ufs_file_operations; | ||
619 | inode->i_mapping->a_ops = &ufs_aops; | ||
620 | } else if (S_ISDIR(inode->i_mode)) { | ||
621 | inode->i_op = &ufs_dir_inode_operations; | ||
622 | inode->i_fop = &ufs_dir_operations; | ||
623 | } else if (S_ISLNK(inode->i_mode)) { | ||
624 | if (!inode->i_blocks) | ||
625 | inode->i_op = &ufs_fast_symlink_inode_operations; | ||
626 | else { | ||
627 | inode->i_op = &page_symlink_inode_operations; | ||
628 | inode->i_mapping->a_ops = &ufs_aops; | ||
629 | } | ||
630 | } else | ||
631 | init_special_inode(inode, inode->i_mode, | ||
632 | ufs_get_inode_dev(sb, ufsi)); | ||
633 | |||
634 | brelse (bh); | ||
635 | |||
636 | UFSD(("EXIT\n")) | ||
637 | return; | ||
638 | |||
639 | bad_inode: | ||
640 | make_bad_inode(inode); | ||
641 | return; | ||
642 | |||
643 | ufs2_inode : | ||
644 | UFSD(("Reading ufs2 inode, ino %lu\n", inode->i_ino)) | ||
645 | |||
646 | ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino)); | ||
647 | |||
648 | /* | ||
649 | * Copy data to the in-core inode. | ||
650 | */ | ||
651 | inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); | ||
652 | inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink); | ||
653 | if (inode->i_nlink == 0) | ||
654 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); | ||
655 | |||
656 | /* | ||
657 | * Linux now has 32-bit uid and gid, so we can support EFT. | ||
658 | */ | ||
659 | inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid); | ||
660 | inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid); | ||
661 | |||
662 | inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); | ||
663 | inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec); | ||
664 | inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec); | ||
665 | inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec); | ||
666 | inode->i_mtime.tv_nsec = 0; | ||
667 | inode->i_atime.tv_nsec = 0; | ||
668 | inode->i_ctime.tv_nsec = 0; | ||
669 | inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); | ||
670 | inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */ | ||
671 | |||
672 | inode->i_version++; | ||
673 | ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); | ||
674 | ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen); | ||
675 | /* | ||
676 | ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); | ||
677 | ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); | ||
678 | */ | ||
679 | ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift; | ||
680 | |||
681 | if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { | ||
682 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) | ||
683 | ufsi->i_u1.u2_i_data[i] = | ||
684 | ufs2_inode->ui_u2.ui_addr.ui_db[i]; | ||
685 | } | ||
686 | else { | ||
687 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | ||
688 | ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; | ||
689 | } | ||
690 | ufsi->i_osync = 0; | ||
691 | |||
692 | if (S_ISREG(inode->i_mode)) { | ||
693 | inode->i_op = &ufs_file_inode_operations; | ||
694 | inode->i_fop = &ufs_file_operations; | ||
695 | inode->i_mapping->a_ops = &ufs_aops; | ||
696 | } else if (S_ISDIR(inode->i_mode)) { | ||
697 | inode->i_op = &ufs_dir_inode_operations; | ||
698 | inode->i_fop = &ufs_dir_operations; | ||
699 | } else if (S_ISLNK(inode->i_mode)) { | ||
700 | if (!inode->i_blocks) | ||
701 | inode->i_op = &ufs_fast_symlink_inode_operations; | ||
702 | else { | ||
703 | inode->i_op = &page_symlink_inode_operations; | ||
704 | inode->i_mapping->a_ops = &ufs_aops; | ||
705 | } | ||
706 | } else /* TODO : here ...*/ | ||
707 | init_special_inode(inode, inode->i_mode, | ||
708 | ufs_get_inode_dev(sb, ufsi)); | ||
709 | |||
710 | brelse(bh); | ||
711 | |||
712 | UFSD(("EXIT\n")) | ||
713 | return; | ||
714 | } | ||
715 | |||
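/*
 * Note: ufs_update_inode() copies the in-core inode back into the on-disk
 * UFS1 inode, marks the buffer dirty and, if do_sync is set, writes it out
 * synchronously.  It returns 0 on success and -1 on a bad inode number or
 * read error.  There is no UFS2 variant here yet, which matches the
 * read-only UFS2 support elsewhere in this file.
 */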
716 | static int ufs_update_inode(struct inode * inode, int do_sync) | ||
717 | { | ||
718 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
719 | struct super_block * sb; | ||
720 | struct ufs_sb_private_info * uspi; | ||
721 | struct buffer_head * bh; | ||
722 | struct ufs_inode * ufs_inode; | ||
723 | unsigned i; | ||
724 | unsigned flags; | ||
725 | |||
726 | UFSD(("ENTER, ino %lu\n", inode->i_ino)) | ||
727 | |||
728 | sb = inode->i_sb; | ||
729 | uspi = UFS_SB(sb)->s_uspi; | ||
730 | flags = UFS_SB(sb)->s_flags; | ||
731 | |||
732 | if (inode->i_ino < UFS_ROOTINO || | ||
733 | inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { | ||
734 | ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino); | ||
735 | return -1; | ||
736 | } | ||
737 | |||
738 | bh = sb_bread(sb, ufs_inotofsba(inode->i_ino)); | ||
739 | if (!bh) { | ||
740 | ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino); | ||
741 | return -1; | ||
742 | } | ||
743 | ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode)); | ||
744 | |||
745 | ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); | ||
746 | ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); | ||
747 | |||
748 | ufs_set_inode_uid(sb, ufs_inode, inode->i_uid); | ||
749 | ufs_set_inode_gid(sb, ufs_inode, inode->i_gid); | ||
750 | |||
751 | ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); | ||
752 | ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); | ||
753 | ufs_inode->ui_atime.tv_usec = 0; | ||
754 | ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec); | ||
755 | ufs_inode->ui_ctime.tv_usec = 0; | ||
756 | ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec); | ||
757 | ufs_inode->ui_mtime.tv_usec = 0; | ||
758 | ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks); | ||
759 | ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); | ||
760 | ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen); | ||
761 | |||
762 | if ((flags & UFS_UID_MASK) == UFS_UID_EFT) { | ||
763 | ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow); | ||
764 | ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag); | ||
765 | } | ||
766 | |||
767 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { | ||
768 | /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ | ||
769 | ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; | ||
770 | } else if (inode->i_blocks) { | ||
771 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) | ||
772 | ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i]; | ||
773 | } | ||
774 | else { | ||
775 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | ||
776 | ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; | ||
777 | } | ||
778 | |||
779 | if (!inode->i_nlink) | ||
780 | memset (ufs_inode, 0, sizeof(struct ufs_inode)); | ||
781 | |||
782 | mark_buffer_dirty(bh); | ||
783 | if (do_sync) | ||
784 | sync_dirty_buffer(bh); | ||
785 | brelse (bh); | ||
786 | |||
787 | UFSD(("EXIT\n")) | ||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | int ufs_write_inode (struct inode * inode, int wait) | ||
792 | { | ||
793 | int ret; | ||
794 | lock_kernel(); | ||
795 | ret = ufs_update_inode (inode, wait); | ||
796 | unlock_kernel(); | ||
797 | return ret; | ||
798 | } | ||
799 | |||
800 | int ufs_sync_inode (struct inode *inode) | ||
801 | { | ||
802 | return ufs_update_inode (inode, 1); | ||
803 | } | ||
804 | |||
805 | void ufs_delete_inode (struct inode * inode) | ||
806 | { | ||
807 | /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ | ||
808 | lock_kernel(); | ||
809 | mark_inode_dirty(inode); | ||
810 | ufs_update_inode(inode, IS_SYNC(inode)); | ||
811 | inode->i_size = 0; | ||
812 | if (inode->i_blocks) | ||
813 | ufs_truncate (inode); | ||
814 | ufs_free_inode (inode); | ||
815 | unlock_kernel(); | ||
816 | } | ||