author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/ufs
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'fs/ufs')
-rw-r--r--  fs/ufs/Makefile       8
-rw-r--r--  fs/ufs/balloc.c     818
-rw-r--r--  fs/ufs/cylinder.c   209
-rw-r--r--  fs/ufs/dir.c        627
-rw-r--r--  fs/ufs/file.c        55
-rw-r--r--  fs/ufs/ialloc.c     302
-rw-r--r--  fs/ufs/inode.c      816
-rw-r--r--  fs/ufs/namei.c      375
-rw-r--r--  fs/ufs/super.c     1347
-rw-r--r--  fs/ufs/swab.h       133
-rw-r--r--  fs/ufs/symlink.c     42
-rw-r--r--  fs/ufs/truncate.c   477
-rw-r--r--  fs/ufs/util.c       257
-rw-r--r--  fs/ufs/util.h       526
14 files changed, 5992 insertions(+), 0 deletions(-)
diff --git a/fs/ufs/Makefile b/fs/ufs/Makefile
new file mode 100644
index 000000000000..dd39980437fc
--- /dev/null
+++ b/fs/ufs/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Linux ufs filesystem routines.
3#
4
5obj-$(CONFIG_UFS_FS) += ufs.o
6
7ufs-objs := balloc.o cylinder.o dir.o file.o ialloc.o inode.o \
8 namei.o super.o symlink.o truncate.o util.o
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
new file mode 100644
index 000000000000..997640c99c7d
--- /dev/null
+++ b/fs/ufs/balloc.c
@@ -0,0 +1,818 @@
1/*
2 * linux/fs/ufs/balloc.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 */
8
9#include <linux/fs.h>
10#include <linux/ufs_fs.h>
11#include <linux/stat.h>
12#include <linux/time.h>
13#include <linux/string.h>
14#include <linux/quotaops.h>
15#include <linux/buffer_head.h>
16#include <linux/sched.h>
17#include <linux/bitops.h>
18#include <asm/byteorder.h>
19
20#include "swab.h"
21#include "util.h"
22
23#undef UFS_BALLOC_DEBUG
24
25#ifdef UFS_BALLOC_DEBUG
26#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
27#else
28#define UFSD(x)
29#endif
30
31static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
32static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
34static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
35static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
36static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
37
38/*
39 * Free 'count' fragments from fragment number 'fragment'
40 */
41void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
42 struct super_block * sb;
43 struct ufs_sb_private_info * uspi;
44 struct ufs_super_block_first * usb1;
45 struct ufs_cg_private_info * ucpi;
46 struct ufs_cylinder_group * ucg;
47 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
48
49 sb = inode->i_sb;
50 uspi = UFS_SB(sb)->s_uspi;
51 usb1 = ubh_get_usb_first(USPI_UBH);
52
53 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
54
55 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 ufs_error (sb, "ufs_free_fragments", "internal error");
57
58 lock_super(sb);
59
60 cgno = ufs_dtog(fragment);
61 bit = ufs_dtogd(fragment);
62 if (cgno >= uspi->s_ncg) {
63 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
64 goto failed;
65 }
66
67 ucpi = ufs_load_cylinder (sb, cgno);
68 if (!ucpi)
69 goto failed;
70 ucg = ubh_get_ucg (UCPI_UBH);
71 if (!ufs_cg_chkmagic(sb, ucg)) {
72 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
73 goto failed;
74 }
75
76 end_bit = bit + count;
77 bbase = ufs_blknum (bit);
78 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
79 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
80 for (i = bit; i < end_bit; i++) {
81 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
82 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
83 else ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
85 }
86
87 DQUOT_FREE_BLOCK (inode, count);
88
89
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
93 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
95
96 /*
97 * Trying to reassemble free fragments into block
98 */
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
109 cylno = ufs_cbtocylno (bbase);
110 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
111 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
112 }
113
114 ubh_mark_buffer_dirty (USPI_UBH);
115 ubh_mark_buffer_dirty (UCPI_UBH);
116 if (sb->s_flags & MS_SYNCHRONOUS) {
117 ubh_wait_on_buffer (UCPI_UBH);
118 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
119 ubh_wait_on_buffer (UCPI_UBH);
120 }
121 sb->s_dirt = 1;
122
123 unlock_super (sb);
124 UFSD(("EXIT\n"))
125 return;
126
127failed:
128 unlock_super (sb);
129 UFSD(("EXIT (FAILED)\n"))
130 return;
131}
132
133/*
134 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
135 */
136void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
137 struct super_block * sb;
138 struct ufs_sb_private_info * uspi;
139 struct ufs_super_block_first * usb1;
140 struct ufs_cg_private_info * ucpi;
141 struct ufs_cylinder_group * ucg;
142 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
143
144 sb = inode->i_sb;
145 uspi = UFS_SB(sb)->s_uspi;
146 usb1 = ubh_get_usb_first(USPI_UBH);
147
148 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
149
150 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
151 ufs_error (sb, "ufs_free_blocks", "internal error, "
152 "fragment %u, count %u\n", fragment, count);
153 goto failed;
154 }
155
156 lock_super(sb);
157
158do_more:
159 overflow = 0;
160 cgno = ufs_dtog (fragment);
161 bit = ufs_dtogd (fragment);
162 if (cgno >= uspi->s_ncg) {
163 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
164 goto failed;
165 }
166 end_bit = bit + count;
167 if (end_bit > uspi->s_fpg) {
168 overflow = bit + count - uspi->s_fpg;
169 count -= overflow;
170 end_bit -= overflow;
171 }
172
173 ucpi = ufs_load_cylinder (sb, cgno);
174 if (!ucpi)
175 goto failed;
176 ucg = ubh_get_ucg (UCPI_UBH);
177 if (!ufs_cg_chkmagic(sb, ucg)) {
178 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
179 goto failed;
180 }
181
182 for (i = bit; i < end_bit; i += uspi->s_fpb) {
183 blkno = ufs_fragstoblks(i);
184 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
186 }
187 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
189 ufs_clusteracct (sb, ucpi, blkno, 1);
190 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
191
192 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
193 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
194 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
195 cylno = ufs_cbtocylno(i);
196 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
197 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
198 }
199
200 ubh_mark_buffer_dirty (USPI_UBH);
201 ubh_mark_buffer_dirty (UCPI_UBH);
202 if (sb->s_flags & MS_SYNCHRONOUS) {
203 ubh_wait_on_buffer (UCPI_UBH);
204 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
205 ubh_wait_on_buffer (UCPI_UBH);
206 }
207
208 if (overflow) {
209 fragment += count;
210 count = overflow;
211 goto do_more;
212 }
213
214 sb->s_dirt = 1;
215 unlock_super (sb);
216 UFSD(("EXIT\n"))
217 return;
218
219failed:
220 unlock_super (sb);
221 UFSD(("EXIT (FAILED)\n"))
222 return;
223}
224
225
226
227#define NULLIFY_FRAGMENTS \
228 for (i = oldcount; i < newcount; i++) { \
229 bh = sb_getblk(sb, result + i); \
230 memset (bh->b_data, 0, sb->s_blocksize); \
231 set_buffer_uptodate(bh); \
232 mark_buffer_dirty (bh); \
233 if (IS_SYNC(inode)) \
234 sync_dirty_buffer(bh); \
235 brelse (bh); \
236 }
237
238unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
239 unsigned goal, unsigned count, int * err )
240{
241 struct super_block * sb;
242 struct ufs_sb_private_info * uspi;
243 struct ufs_super_block_first * usb1;
244 struct buffer_head * bh;
245 unsigned cgno, oldcount, newcount, tmp, request, i, result;
246
247 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
248
249 sb = inode->i_sb;
250 uspi = UFS_SB(sb)->s_uspi;
251 usb1 = ubh_get_usb_first(USPI_UBH);
252 *err = -ENOSPC;
253
254 lock_super (sb);
255
256 tmp = fs32_to_cpu(sb, *p);
257 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
258 ufs_warning (sb, "ufs_new_fragments", "internal warning"
259 " fragment %u, count %u", fragment, count);
260 count = uspi->s_fpb - ufs_fragnum(fragment);
261 }
262 oldcount = ufs_fragnum (fragment);
263 newcount = oldcount + count;
264
265 /*
266 * Somebody else has just allocated our fragments
267 */
268 if (oldcount) {
269 if (!tmp) {
270 ufs_error (sb, "ufs_new_fragments", "internal error, "
271 "fragment %u, tmp %u\n", fragment, tmp);
272 unlock_super (sb);
273 return (unsigned)-1;
274 }
275 if (fragment < UFS_I(inode)->i_lastfrag) {
276 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
277 unlock_super (sb);
278 return 0;
279 }
280 }
281 else {
282 if (tmp) {
283 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
284 unlock_super(sb);
285 return 0;
286 }
287 }
288
289 /*
290 * There is not enough space for user on the device
291 */
292 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
293 unlock_super (sb);
294 UFSD(("EXIT (FAILED)\n"))
295 return 0;
296 }
297
298 if (goal >= uspi->s_size)
299 goal = 0;
300 if (goal == 0)
301 cgno = ufs_inotocg (inode->i_ino);
302 else
303 cgno = ufs_dtog (goal);
304
305 /*
306 * allocate new fragment
307 */
308 if (oldcount == 0) {
309 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
310 if (result) {
311 *p = cpu_to_fs32(sb, result);
312 *err = 0;
313 inode->i_blocks += count << uspi->s_nspfshift;
314 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
315 NULLIFY_FRAGMENTS
316 }
317 unlock_super(sb);
318 UFSD(("EXIT, result %u\n", result))
319 return result;
320 }
321
322 /*
323 * resize block
324 */
325 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
326 if (result) {
327 *err = 0;
328 inode->i_blocks += count << uspi->s_nspfshift;
329 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
330 NULLIFY_FRAGMENTS
331 unlock_super(sb);
332 UFSD(("EXIT, result %u\n", result))
333 return result;
334 }
335
336 /*
337 * allocate new block and move data
338 */
339 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
340 case UFS_OPTSPACE:
341 request = newcount;
342 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
343 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
344 break;
345 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
346 break;
347 default:
348 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
349
350 case UFS_OPTTIME:
351 request = uspi->s_fpb;
352 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
353 (uspi->s_minfree - 2) / 100)
354 break;
355 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
356 break;
357 }
358 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
359 if (result) {
360 for (i = 0; i < oldcount; i++) {
361 bh = sb_bread(sb, tmp + i);
362 if(bh)
363 {
364 clear_buffer_dirty(bh);
365 bh->b_blocknr = result + i;
366 mark_buffer_dirty (bh);
367 if (IS_SYNC(inode))
368 sync_dirty_buffer(bh);
369 brelse (bh);
370 }
371 else
372 {
373 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
374 unlock_super(sb);
375 return 0;
376 }
377 }
378 *p = cpu_to_fs32(sb, result);
379 *err = 0;
380 inode->i_blocks += count << uspi->s_nspfshift;
381 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
382 NULLIFY_FRAGMENTS
383 unlock_super(sb);
384 if (newcount < request)
385 ufs_free_fragments (inode, result + newcount, request - newcount);
386 ufs_free_fragments (inode, tmp, oldcount);
387 UFSD(("EXIT, result %u\n", result))
388 return result;
389 }
390
391 unlock_super(sb);
392 UFSD(("EXIT (FAILED)\n"))
393 return 0;
394}
395
396static unsigned
397ufs_add_fragments (struct inode * inode, unsigned fragment,
398 unsigned oldcount, unsigned newcount, int * err)
399{
400 struct super_block * sb;
401 struct ufs_sb_private_info * uspi;
402 struct ufs_super_block_first * usb1;
403 struct ufs_cg_private_info * ucpi;
404 struct ufs_cylinder_group * ucg;
405 unsigned cgno, fragno, fragoff, count, fragsize, i;
406
407 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
408
409 sb = inode->i_sb;
410 uspi = UFS_SB(sb)->s_uspi;
411 usb1 = ubh_get_usb_first (USPI_UBH);
412 count = newcount - oldcount;
413
414 cgno = ufs_dtog(fragment);
415 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
416 return 0;
417 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
418 return 0;
419 ucpi = ufs_load_cylinder (sb, cgno);
420 if (!ucpi)
421 return 0;
422 ucg = ubh_get_ucg (UCPI_UBH);
423 if (!ufs_cg_chkmagic(sb, ucg)) {
424 ufs_panic (sb, "ufs_add_fragments",
425 "internal error, bad magic number on cg %u", cgno);
426 return 0;
427 }
428
429 fragno = ufs_dtogd (fragment);
430 fragoff = ufs_fragnum (fragno);
431 for (i = oldcount; i < newcount; i++)
432 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
433 return 0;
434 /*
435 * Block can be extended
436 */
437 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
438 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
439 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
440 break;
441 fragsize = i - oldcount;
442 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
443 ufs_panic (sb, "ufs_add_fragments",
444 "internal error or corrupted bitmap on cg %u", cgno);
445 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
446 if (fragsize != count)
447 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
448 for (i = oldcount; i < newcount; i++)
449 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
450 if(DQUOT_ALLOC_BLOCK(inode, count)) {
451 *err = -EDQUOT;
452 return 0;
453 }
454
455 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
456 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
457 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
458
459 ubh_mark_buffer_dirty (USPI_UBH);
460 ubh_mark_buffer_dirty (UCPI_UBH);
461 if (sb->s_flags & MS_SYNCHRONOUS) {
462 ubh_wait_on_buffer (UCPI_UBH);
463 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
464 ubh_wait_on_buffer (UCPI_UBH);
465 }
466 sb->s_dirt = 1;
467
468 UFSD(("EXIT, fragment %u\n", fragment))
469
470 return fragment;
471}
472
473#define UFS_TEST_FREE_SPACE_CG \
474 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
475 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
476 goto cg_found; \
477 for (k = count; k < uspi->s_fpb; k++) \
478 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
479 goto cg_found;
480
481static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
482 unsigned goal, unsigned count, int * err)
483{
484 struct super_block * sb;
485 struct ufs_sb_private_info * uspi;
486 struct ufs_super_block_first * usb1;
487 struct ufs_cg_private_info * ucpi;
488 struct ufs_cylinder_group * ucg;
489 unsigned oldcg, i, j, k, result, allocsize;
490
491 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
492
493 sb = inode->i_sb;
494 uspi = UFS_SB(sb)->s_uspi;
495 usb1 = ubh_get_usb_first(USPI_UBH);
496 oldcg = cgno;
497
498 /*
499 * 1. searching on preferred cylinder group
500 */
501 UFS_TEST_FREE_SPACE_CG
502
503 /*
504 * 2. quadratic rehash
505 */
506 for (j = 1; j < uspi->s_ncg; j *= 2) {
507 cgno += j;
508 if (cgno >= uspi->s_ncg)
509 cgno -= uspi->s_ncg;
510 UFS_TEST_FREE_SPACE_CG
511 }
512
513 /*
514 * 3. brute force search
515 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
516 */
517 cgno = (oldcg + 1) % uspi->s_ncg;
518 for (j = 2; j < uspi->s_ncg; j++) {
519 cgno++;
520 if (cgno >= uspi->s_ncg)
521 cgno = 0;
522 UFS_TEST_FREE_SPACE_CG
523 }
524
525 UFSD(("EXIT (FAILED)\n"))
526 return 0;
527
528cg_found:
529 ucpi = ufs_load_cylinder (sb, cgno);
530 if (!ucpi)
531 return 0;
532 ucg = ubh_get_ucg (UCPI_UBH);
533 if (!ufs_cg_chkmagic(sb, ucg))
534 ufs_panic (sb, "ufs_alloc_fragments",
535 "internal error, bad magic number on cg %u", cgno);
536 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
537
538 if (count == uspi->s_fpb) {
539 result = ufs_alloccg_block (inode, ucpi, goal, err);
540 if (result == (unsigned)-1)
541 return 0;
542 goto succed;
543 }
544
545 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
546 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
547 break;
548
549 if (allocsize == uspi->s_fpb) {
550 result = ufs_alloccg_block (inode, ucpi, goal, err);
551 if (result == (unsigned)-1)
552 return 0;
553 goal = ufs_dtogd (result);
554 for (i = count; i < uspi->s_fpb; i++)
555 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
556 i = uspi->s_fpb - count;
557 DQUOT_FREE_BLOCK(inode, i);
558
559 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
560 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
561 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
562 fs32_add(sb, &ucg->cg_frsum[i], 1);
563 goto succed;
564 }
565
566 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
567 if (result == (unsigned)-1)
568 return 0;
569 if(DQUOT_ALLOC_BLOCK(inode, count)) {
570 *err = -EDQUOT;
571 return 0;
572 }
573 for (i = 0; i < count; i++)
574 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
575
576 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
577 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
578 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
579 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
580
581 if (count != allocsize)
582 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
583
584succed:
585 ubh_mark_buffer_dirty (USPI_UBH);
586 ubh_mark_buffer_dirty (UCPI_UBH);
587 if (sb->s_flags & MS_SYNCHRONOUS) {
588 ubh_wait_on_buffer (UCPI_UBH);
589 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
590 ubh_wait_on_buffer (UCPI_UBH);
591 }
592 sb->s_dirt = 1;
593
594 result += cgno * uspi->s_fpg;
595 UFSD(("EXIT3, result %u\n", result))
596 return result;
597}
598
599static unsigned ufs_alloccg_block (struct inode * inode,
600 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
601{
602 struct super_block * sb;
603 struct ufs_sb_private_info * uspi;
604 struct ufs_super_block_first * usb1;
605 struct ufs_cylinder_group * ucg;
606 unsigned result, cylno, blkno;
607
608 UFSD(("ENTER, goal %u\n", goal))
609
610 sb = inode->i_sb;
611 uspi = UFS_SB(sb)->s_uspi;
612 usb1 = ubh_get_usb_first(USPI_UBH);
613 ucg = ubh_get_ucg(UCPI_UBH);
614
615 if (goal == 0) {
616 goal = ucpi->c_rotor;
617 goto norot;
618 }
619 goal = ufs_blknum (goal);
620 goal = ufs_dtogd (goal);
621
622 /*
623 * If the requested block is available, use it.
624 */
625 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
626 result = goal;
627 goto gotit;
628 }
629
630norot:
631 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
632 if (result == (unsigned)-1)
633 return (unsigned)-1;
634 ucpi->c_rotor = result;
635gotit:
636 blkno = ufs_fragstoblks(result);
637 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
638 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
639 ufs_clusteracct (sb, ucpi, blkno, -1);
640 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
641 *err = -EDQUOT;
642 return (unsigned)-1;
643 }
644
645 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
646 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
647 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
648 cylno = ufs_cbtocylno(result);
649 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
650 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
651
652 UFSD(("EXIT, result %u\n", result))
653
654 return result;
655}
656
657static unsigned ufs_bitmap_search (struct super_block * sb,
658 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
659{
660 struct ufs_sb_private_info * uspi;
661 struct ufs_super_block_first * usb1;
662 struct ufs_cylinder_group * ucg;
663 unsigned start, length, location, result;
664 unsigned possition, fragsize, blockmap, mask;
665
666 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
667
668 uspi = UFS_SB(sb)->s_uspi;
669 usb1 = ubh_get_usb_first (USPI_UBH);
670 ucg = ubh_get_ucg(UCPI_UBH);
671
672 if (goal)
673 start = ufs_dtogd(goal) >> 3;
674 else
675 start = ucpi->c_frotor >> 3;
676
677 length = ((uspi->s_fpg + 7) >> 3) - start;
678 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
679 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
680 1 << (count - 1 + (uspi->s_fpb & 7)));
681 if (location == 0) {
682 length = start + 1;
683 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
684 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
685 1 << (count - 1 + (uspi->s_fpb & 7)));
686 if (location == 0) {
687 ufs_error (sb, "ufs_bitmap_search",
688 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
689 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
690 return (unsigned)-1;
691 }
692 start = 0;
693 }
694 result = (start + length - location) << 3;
695 ucpi->c_frotor = result;
696
697 /*
698 * found the byte in the map
699 */
700 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
701 fragsize = 0;
702 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
703 if (blockmap & mask) {
704 if (!(possition & uspi->s_fpbmask))
705 fragsize = 1;
706 else
707 fragsize++;
708 }
709 else {
710 if (fragsize == count) {
711 result += possition - count;
712 UFSD(("EXIT, result %u\n", result))
713 return result;
714 }
715 fragsize = 0;
716 }
717 }
718 if (fragsize == count) {
719 result += possition - count;
720 UFSD(("EXIT, result %u\n", result))
721 return result;
722 }
723 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
724 UFSD(("EXIT (FAILED)\n"))
725 return (unsigned)-1;
726}
727
728static void ufs_clusteracct(struct super_block * sb,
729 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
730{
731 struct ufs_sb_private_info * uspi;
732 int i, start, end, forw, back;
733
734 uspi = UFS_SB(sb)->s_uspi;
735 if (uspi->s_contigsumsize <= 0)
736 return;
737
738 if (cnt > 0)
739 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
740 else
741 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
742
743 /*
744 * Find the size of the cluster going forward.
745 */
746 start = blkno + 1;
747 end = start + uspi->s_contigsumsize;
748 if ( end >= ucpi->c_nclusterblks)
749 end = ucpi->c_nclusterblks;
750 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
751 if (i > end)
752 i = end;
753 forw = i - start;
754
755 /*
756 * Find the size of the cluster going backward.
757 */
758 start = blkno - 1;
759 end = start - uspi->s_contigsumsize;
760 if (end < 0 )
761 end = -1;
762 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
763 if ( i < end)
764 i = end;
765 back = start - i;
766
767 /*
768 * Account for old cluster and the possibly new forward and
769 * back clusters.
770 */
771 i = back + forw + 1;
772 if (i > uspi->s_contigsumsize)
773 i = uspi->s_contigsumsize;
774 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
775 if (back > 0)
776 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
777 if (forw > 0)
778 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
779}
780
781
782static unsigned char ufs_fragtable_8fpb[] = {
783 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
784 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
785 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
786 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
787 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
788 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
789 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
790 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
791 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
792 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
793 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
794 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
795 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
796 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
797 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
798 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
799};
800
801static unsigned char ufs_fragtable_other[] = {
802 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
803 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
804 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
805 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
806 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
807 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
808 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
809 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
810 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
811 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
812 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
813 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
814 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
815 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
816 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
817 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
818};
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
new file mode 100644
index 000000000000..14abb8b835f7
--- /dev/null
+++ b/fs/ufs/cylinder.c
@@ -0,0 +1,209 @@
1/*
2 * linux/fs/ufs/cylinder.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * ext2 - inode (block) bitmap caching inspired
9 */
10
11#include <linux/fs.h>
12#include <linux/ufs_fs.h>
13#include <linux/time.h>
14#include <linux/stat.h>
15#include <linux/string.h>
16#include <linux/bitops.h>
17
18#include <asm/byteorder.h>
19
20#include "swab.h"
21#include "util.h"
22
23#undef UFS_CYLINDER_DEBUG
24
25#ifdef UFS_CYLINDER_DEBUG
26#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
27#else
28#define UFSD(x)
29#endif
30
31
32/*
33 * Read cylinder group into cache. The memory space for ufs_cg_private_info
34 * structure is already allocated during ufs_read_super.
35 */
36static void ufs_read_cylinder (struct super_block * sb,
37 unsigned cgno, unsigned bitmap_nr)
38{
39 struct ufs_sb_info * sbi = UFS_SB(sb);
40 struct ufs_sb_private_info * uspi;
41 struct ufs_cg_private_info * ucpi;
42 struct ufs_cylinder_group * ucg;
43 unsigned i, j;
44
45 UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
46 uspi = sbi->s_uspi;
47 ucpi = sbi->s_ucpi[bitmap_nr];
48 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
49
50 UCPI_UBH->fragment = ufs_cgcmin(cgno);
51 UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits;
52 /*
53 * We have already the first fragment of cylinder group block in buffer
54 */
55 UCPI_UBH->bh[0] = sbi->s_ucg[cgno];
56 for (i = 1; i < UCPI_UBH->count; i++)
57 if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
58 goto failed;
59 sbi->s_cgno[bitmap_nr] = cgno;
60
61 ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx);
62 ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl);
63 ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk);
64 ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk);
65 ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor);
66 ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor);
67 ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor);
68 ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff);
69 ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff);
70 ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
71 ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff);
72 ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
73 ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
74 ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
75 ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
76 UFSD(("EXIT\n"))
77 return;
78
79failed:
80 for (j = 1; j < i; j++)
81 brelse (sbi->s_ucg[j]);
82 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
83 ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
84}
85
86/*
87 * Remove cylinder group from cache, doesn't release memory
88 * allocated for cylinder group (this is done at ufs_put_super only).
89 */
90void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
91{
92 struct ufs_sb_info * sbi = UFS_SB(sb);
93 struct ufs_sb_private_info * uspi;
94 struct ufs_cg_private_info * ucpi;
95 struct ufs_cylinder_group * ucg;
96 unsigned i;
97
98 UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))
99
100 uspi = sbi->s_uspi;
101 if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
102 UFSD(("EXIT\n"))
103 return;
104 }
105 ucpi = sbi->s_ucpi[bitmap_nr];
106 ucg = ubh_get_ucg(UCPI_UBH);
107
108 if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
109 ufs_panic (sb, "ufs_put_cylinder", "internal error");
110 return;
111 }
112 /*
113 * rotor is not so important data, so we put it to disk
114 * at the end of working with cylinder
115 */
116 ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
117 ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
118 ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
119 ubh_mark_buffer_dirty (UCPI_UBH);
120 for (i = 1; i < UCPI_UBH->count; i++) {
121 brelse (UCPI_UBH->bh[i]);
122 }
123
124 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
125 UFSD(("EXIT\n"))
126}
127
128/*
129 * Find cylinder group in cache and return it as pointer.
130 * If cylinder group is not in cache, we will load it from disk.
131 *
132 * The cache is managed by LRU algorithm.
133 */
134struct ufs_cg_private_info * ufs_load_cylinder (
135 struct super_block * sb, unsigned cgno)
136{
137 struct ufs_sb_info * sbi = UFS_SB(sb);
138 struct ufs_sb_private_info * uspi;
139 struct ufs_cg_private_info * ucpi;
140 unsigned cg, i, j;
141
142 UFSD(("ENTER, cgno %u\n", cgno))
143
144 uspi = sbi->s_uspi;
145 if (cgno >= uspi->s_ncg) {
146 ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
147 return NULL;
148 }
149 /*
150 * Cylinder group number cg is in cache and it was last used
151 */
152 if (sbi->s_cgno[0] == cgno) {
153 UFSD(("EXIT\n"))
154 return sbi->s_ucpi[0];
155 }
156 /*
157 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
158 */
159 if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
160 if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
161 if (sbi->s_cgno[cgno] != cgno) {
162 ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
163 UFSD(("EXIT (FAILED)\n"))
164 return NULL;
165 }
166 else {
167 UFSD(("EXIT\n"))
168 return sbi->s_ucpi[cgno];
169 }
170 } else {
171 ufs_read_cylinder (sb, cgno, cgno);
172 UFSD(("EXIT\n"))
173 return sbi->s_ucpi[cgno];
174 }
175 }
176 /*
177 * Cylinder group number cg is in cache but it was not last used,
178 * we will move to the first position
179 */
180 for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
181 if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
182 cg = sbi->s_cgno[i];
183 ucpi = sbi->s_ucpi[i];
184 for (j = i; j > 0; j--) {
185 sbi->s_cgno[j] = sbi->s_cgno[j-1];
186 sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
187 }
188 sbi->s_cgno[0] = cg;
189 sbi->s_ucpi[0] = ucpi;
190 /*
191 * Cylinder group number cg is not in cache, we will read it from disk
192 * and put it to the first position
193 */
194 } else {
195 if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
196 sbi->s_cg_loaded++;
197 else
198 ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
199 ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
200 for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
201 sbi->s_cgno[j] = sbi->s_cgno[j-1];
202 sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
203 }
204 sbi->s_ucpi[0] = ucpi;
205 ufs_read_cylinder (sb, cgno, 0);
206 }
207 UFSD(("EXIT\n"))
208 return sbi->s_ucpi[0];
209}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
new file mode 100644
index 000000000000..d0915fba155a
--- /dev/null
+++ b/fs/ufs/dir.c
@@ -0,0 +1,627 @@
1/*
2 * linux/fs/ufs/ufs_dir.c
3 *
4 * Copyright (C) 1996
5 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
6 * Laboratory for Computer Science Research Computing Facility
7 * Rutgers, The State University of New Jersey
8 *
9 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
10 *
11 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
12 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
13 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
14 */
15
16#include <linux/time.h>
17#include <linux/fs.h>
18#include <linux/ufs_fs.h>
19#include <linux/smp_lock.h>
20#include <linux/buffer_head.h>
21#include <linux/sched.h>
22
23#include "swab.h"
24#include "util.h"
25
26#undef UFS_DIR_DEBUG
27
28#ifdef UFS_DIR_DEBUG
29#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
30#else
31#define UFSD(x)
32#endif
33
34static int
35ufs_check_dir_entry (const char *, struct inode *, struct ufs_dir_entry *,
36 struct buffer_head *, unsigned long);
37
38
39/*
40 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
41 *
42 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
43 */
44static inline int ufs_match(struct super_block *sb, int len,
45 const char * const name, struct ufs_dir_entry * de)
46{
47 if (len != ufs_get_de_namlen(sb, de))
48 return 0;
49 if (!de->d_ino)
50 return 0;
51 return !memcmp(name, de->d_name, len);
52}
53
54/*
55 * This is blatantly stolen from ext2fs
56 */
57static int
58ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
59{
60 struct inode *inode = filp->f_dentry->d_inode;
61 int error = 0;
62 unsigned long offset, lblk;
63 int i, stored;
64 struct buffer_head * bh;
65 struct ufs_dir_entry * de;
66 struct super_block * sb;
67 int de_reclen;
68 unsigned flags;
69 u64 blk= 0L;
70
71 lock_kernel();
72
73 sb = inode->i_sb;
74 flags = UFS_SB(sb)->s_flags;
75
76 UFSD(("ENTER, ino %lu f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))
77
78 stored = 0;
79 bh = NULL;
80 offset = filp->f_pos & (sb->s_blocksize - 1);
81
82 while (!error && !stored && filp->f_pos < inode->i_size) {
83 lblk = (filp->f_pos) >> sb->s_blocksize_bits;
84 blk = ufs_frag_map(inode, lblk);
85 if (!blk || !(bh = sb_bread(sb, blk))) {
86 /* XXX - error - skip to the next block */
87 printk("ufs_readdir: "
88 "dir inode %lu has a hole at offset %lu\n",
89 inode->i_ino, (unsigned long int)filp->f_pos);
90 filp->f_pos += sb->s_blocksize - offset;
91 continue;
92 }
93
94revalidate:
95 /* If the dir block has changed since the last call to
96 * readdir(2), then we might be pointing to an invalid
97 * dirent right now. Scan from the start of the block
98 * to make sure. */
99 if (filp->f_version != inode->i_version) {
100 for (i = 0; i < sb->s_blocksize && i < offset; ) {
101 de = (struct ufs_dir_entry *)(bh->b_data + i);
102 /* It's too expensive to do a full
103 * dirent test each time round this
104 * loop, but we do have to test at
105 * least that it is non-zero. A
106 * failure will be detected in the
107 * dirent test below. */
108 de_reclen = fs16_to_cpu(sb, de->d_reclen);
109 if (de_reclen < 1)
110 break;
111 i += de_reclen;
112 }
113 offset = i;
114 filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
115 | offset;
116 filp->f_version = inode->i_version;
117 }
118
119 while (!error && filp->f_pos < inode->i_size
120 && offset < sb->s_blocksize) {
121 de = (struct ufs_dir_entry *) (bh->b_data + offset);
122 /* XXX - put in a real ufs_check_dir_entry() */
123 if ((de->d_reclen == 0) || (ufs_get_de_namlen(sb, de) == 0)) {
124 filp->f_pos = (filp->f_pos &
125 (sb->s_blocksize - 1)) +
126 sb->s_blocksize;
127 brelse(bh);
128 unlock_kernel();
129 return stored;
130 }
131 if (!ufs_check_dir_entry ("ufs_readdir", inode, de,
132 bh, offset)) {
133 /* On error, skip the f_pos to the
134 next block. */
135 filp->f_pos = (filp->f_pos |
136 (sb->s_blocksize - 1)) +
137 1;
138 brelse (bh);
139 unlock_kernel();
140 return stored;
141 }
142 offset += fs16_to_cpu(sb, de->d_reclen);
143 if (de->d_ino) {
144 /* We might block in the next section
145 * if the data destination is
146 * currently swapped out. So, use a
147 * version stamp to detect whether or
148 * not the directory has been modified
149 * during the copy operation. */
150 unsigned long version = filp->f_version;
151 unsigned char d_type = DT_UNKNOWN;
152
153 UFSD(("filldir(%s,%u)\n", de->d_name,
154 fs32_to_cpu(sb, de->d_ino)))
155 UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de)))
156
157 if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
158 d_type = de->d_u.d_44.d_type;
159 error = filldir(dirent, de->d_name,
160 ufs_get_de_namlen(sb, de), filp->f_pos,
161 fs32_to_cpu(sb, de->d_ino), d_type);
162 if (error)
163 break;
164 if (version != filp->f_version)
165 goto revalidate;
166 stored ++;
167 }
168 filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
169 }
170 offset = 0;
171 brelse (bh);
172 }
173 unlock_kernel();
174 return 0;
175}
176
177/*
178 * define how far ahead to read directories while searching them.
179 */
180#define NAMEI_RA_CHUNKS 2
181#define NAMEI_RA_BLOCKS 4
182#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
183#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
184
185/*
186 * ufs_find_entry()
187 *
188 * finds an entry in the specified directory with the wanted name. It
189 * returns the cache buffer in which the entry was found, and the entry
190 * itself (as a parameter - res_bh). It does NOT read the inode of the
191 * entry - you'll have to do that yourself if you want to.
192 */
193struct ufs_dir_entry * ufs_find_entry (struct dentry *dentry,
194 struct buffer_head ** res_bh)
195{
196 struct super_block * sb;
197 struct buffer_head * bh_use[NAMEI_RA_SIZE];
198 struct buffer_head * bh_read[NAMEI_RA_SIZE];
199 unsigned long offset;
200 int block, toread, i, err;
201 struct inode *dir = dentry->d_parent->d_inode;
202 const char *name = dentry->d_name.name;
203 int namelen = dentry->d_name.len;
204
205 UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen))
206
207 *res_bh = NULL;
208
209 sb = dir->i_sb;
210
211 if (namelen > UFS_MAXNAMLEN)
212 return NULL;
213
214 memset (bh_use, 0, sizeof (bh_use));
215 toread = 0;
216 for (block = 0; block < NAMEI_RA_SIZE; ++block) {
217 struct buffer_head * bh;
218
219 if ((block << sb->s_blocksize_bits) >= dir->i_size)
220 break;
221 bh = ufs_getfrag (dir, block, 0, &err);
222 bh_use[block] = bh;
223 if (bh && !buffer_uptodate(bh))
224 bh_read[toread++] = bh;
225 }
226
227 for (block = 0, offset = 0; offset < dir->i_size; block++) {
228 struct buffer_head * bh;
229 struct ufs_dir_entry * de;
230 char * dlimit;
231
232 if ((block % NAMEI_RA_BLOCKS) == 0 && toread) {
233 ll_rw_block (READ, toread, bh_read);
234 toread = 0;
235 }
236 bh = bh_use[block % NAMEI_RA_SIZE];
237 if (!bh) {
238 ufs_error (sb, "ufs_find_entry",
239 "directory #%lu contains a hole at offset %lu",
240 dir->i_ino, offset);
241 offset += sb->s_blocksize;
242 continue;
243 }
244 wait_on_buffer (bh);
245 if (!buffer_uptodate(bh)) {
246 /*
247 * read error: all bets are off
248 */
249 break;
250 }
251
252 de = (struct ufs_dir_entry *) bh->b_data;
253 dlimit = bh->b_data + sb->s_blocksize;
254 while ((char *) de < dlimit && offset < dir->i_size) {
255 /* this code is executed quadratically often */
256 /* do minimal checking by hand */
257 int de_len;
258
259 if ((char *) de + namelen <= dlimit &&
260 ufs_match(sb, namelen, name, de)) {
261 /* found a match -
262 just to be sure, do a full check */
263 if (!ufs_check_dir_entry("ufs_find_entry",
264 dir, de, bh, offset))
265 goto failed;
266 for (i = 0; i < NAMEI_RA_SIZE; ++i) {
267 if (bh_use[i] != bh)
268 brelse (bh_use[i]);
269 }
270 *res_bh = bh;
271 return de;
272 }
273 /* prevent looping on a bad block */
274 de_len = fs16_to_cpu(sb, de->d_reclen);
275 if (de_len <= 0)
276 goto failed;
277 offset += de_len;
278 de = (struct ufs_dir_entry *) ((char *) de + de_len);
279 }
280
281 brelse (bh);
282 if (((block + NAMEI_RA_SIZE) << sb->s_blocksize_bits ) >=
283 dir->i_size)
284 bh = NULL;
285 else
286 bh = ufs_getfrag (dir, block + NAMEI_RA_SIZE, 0, &err);
287 bh_use[block % NAMEI_RA_SIZE] = bh;
288 if (bh && !buffer_uptodate(bh))
289 bh_read[toread++] = bh;
290 }
291
292failed:
293 for (i = 0; i < NAMEI_RA_SIZE; ++i) brelse (bh_use[i]);
294 UFSD(("EXIT\n"))
295 return NULL;
296}
297
298static int
299ufs_check_dir_entry (const char *function, struct inode *dir,
300 struct ufs_dir_entry *de, struct buffer_head *bh,
301 unsigned long offset)
302{
303 struct super_block *sb = dir->i_sb;
304 const char *error_msg = NULL;
305 int rlen = fs16_to_cpu(sb, de->d_reclen);
306
307 if (rlen < UFS_DIR_REC_LEN(1))
308 error_msg = "reclen is smaller than minimal";
309 else if (rlen % 4 != 0)
310 error_msg = "reclen % 4 != 0";
311 else if (rlen < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)))
312 error_msg = "reclen is too small for namlen";
313 else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
314 error_msg = "directory entry across blocks";
315 else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
316 UFS_SB(sb)->s_uspi->s_ncg))
317 error_msg = "inode out of bounds";
318
319 if (error_msg != NULL)
320 ufs_error (sb, function, "bad entry in directory #%lu, size %Lu: %s - "
321 "offset=%lu, inode=%lu, reclen=%d, namlen=%d",
322 dir->i_ino, dir->i_size, error_msg, offset,
323 (unsigned long)fs32_to_cpu(sb, de->d_ino),
324 rlen, ufs_get_de_namlen(sb, de));
325
326 return (error_msg == NULL ? 1 : 0);
327}
328
329struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct buffer_head **p)
330{
331 int err;
332 struct buffer_head *bh = ufs_bread (dir, 0, 0, &err);
333 struct ufs_dir_entry *res = NULL;
334
335 if (bh) {
336 res = (struct ufs_dir_entry *) bh->b_data;
337 res = (struct ufs_dir_entry *)((char *)res +
338 fs16_to_cpu(dir->i_sb, res->d_reclen));
339 }
340 *p = bh;
341 return res;
342}
343ino_t ufs_inode_by_name(struct inode * dir, struct dentry *dentry)
344{
345 ino_t res = 0;
346 struct ufs_dir_entry * de;
347 struct buffer_head *bh;
348
349 de = ufs_find_entry (dentry, &bh);
350 if (de) {
351 res = fs32_to_cpu(dir->i_sb, de->d_ino);
352 brelse(bh);
353 }
354 return res;
355}
356
357void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
358 struct buffer_head *bh, struct inode *inode)
359{
360 dir->i_version++;
361 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
362 mark_buffer_dirty(bh);
363 if (IS_DIRSYNC(dir))
364 sync_dirty_buffer(bh);
365 brelse (bh);
366}
367
368/*
369 * ufs_add_entry()
370 *
371 * adds a file entry to the specified directory, using the same
372 * semantics as ufs_find_entry(). It returns NULL if it failed.
373 */
374int ufs_add_link(struct dentry *dentry, struct inode *inode)
375{
376 struct super_block * sb;
377 struct ufs_sb_private_info * uspi;
378 unsigned long offset;
379 unsigned fragoff;
380 unsigned short rec_len;
381 struct buffer_head * bh;
382 struct ufs_dir_entry * de, * de1;
383 struct inode *dir = dentry->d_parent->d_inode;
384 const char *name = dentry->d_name.name;
385 int namelen = dentry->d_name.len;
386 int err;
387
388 UFSD(("ENTER, name %s, namelen %u\n", name, namelen))
389
390 sb = dir->i_sb;
391 uspi = UFS_SB(sb)->s_uspi;
392
393 if (!namelen)
394 return -EINVAL;
395 bh = ufs_bread (dir, 0, 0, &err);
396 if (!bh)
397 return err;
398 rec_len = UFS_DIR_REC_LEN(namelen);
399 offset = 0;
400 de = (struct ufs_dir_entry *) bh->b_data;
401 while (1) {
402 if ((char *)de >= UFS_SECTOR_SIZE + bh->b_data) {
403 fragoff = offset & ~uspi->s_fmask;
404 if (fragoff != 0 && fragoff != UFS_SECTOR_SIZE)
405 ufs_error (sb, "ufs_add_entry", "internal error"
406 " fragoff %u", fragoff);
407 if (!fragoff) {
408 brelse (bh);
409 bh = ufs_bread (dir, offset >> sb->s_blocksize_bits, 1, &err);
410 if (!bh)
411 return err;
412 }
413 if (dir->i_size <= offset) {
414 if (dir->i_size == 0) {
415 brelse(bh);
416 return -ENOENT;
417 }
418 de = (struct ufs_dir_entry *) (bh->b_data + fragoff);
419 de->d_ino = 0;
420 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
421 ufs_set_de_namlen(sb, de, 0);
422 dir->i_size = offset + UFS_SECTOR_SIZE;
423 mark_inode_dirty(dir);
424 } else {
425 de = (struct ufs_dir_entry *) bh->b_data;
426 }
427 }
428 if (!ufs_check_dir_entry ("ufs_add_entry", dir, de, bh, offset)) {
429 brelse (bh);
430 return -ENOENT;
431 }
432 if (ufs_match(sb, namelen, name, de)) {
433 brelse (bh);
434 return -EEXIST;
435 }
436 if (de->d_ino == 0 && fs16_to_cpu(sb, de->d_reclen) >= rec_len)
437 break;
438
439 if (fs16_to_cpu(sb, de->d_reclen) >=
440 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)) + rec_len)
441 break;
442 offset += fs16_to_cpu(sb, de->d_reclen);
443 de = (struct ufs_dir_entry *) ((char *) de + fs16_to_cpu(sb, de->d_reclen));
444 }
445
446 if (de->d_ino) {
447 de1 = (struct ufs_dir_entry *) ((char *) de +
448 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
449 de1->d_reclen =
450 cpu_to_fs16(sb, fs16_to_cpu(sb, de->d_reclen) -
451 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
452 de->d_reclen =
453 cpu_to_fs16(sb, UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
454 de = de1;
455 }
456 de->d_ino = 0;
457 ufs_set_de_namlen(sb, de, namelen);
458 memcpy (de->d_name, name, namelen + 1);
459 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
460 ufs_set_de_type(sb, de, inode->i_mode);
461 mark_buffer_dirty(bh);
462 if (IS_DIRSYNC(dir))
463 sync_dirty_buffer(bh);
464 brelse (bh);
465 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
466 dir->i_version++;
467 mark_inode_dirty(dir);
468
469 UFSD(("EXIT\n"))
470 return 0;
471}
472
473/*
474 * ufs_delete_entry deletes a directory entry by merging it with the
475 * previous entry.
476 */
477int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir,
478 struct buffer_head * bh )
479
480{
481 struct super_block * sb;
482 struct ufs_dir_entry * de, * pde;
483 unsigned i;
484
485 UFSD(("ENTER\n"))
486
487 sb = inode->i_sb;
488 i = 0;
489 pde = NULL;
490 de = (struct ufs_dir_entry *) bh->b_data;
491
492 UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
493 fs32_to_cpu(sb, de->d_ino),
494 fs16_to_cpu(sb, de->d_reclen),
495 ufs_get_de_namlen(sb, de), de->d_name))
496
497 while (i < bh->b_size) {
498 if (!ufs_check_dir_entry ("ufs_delete_entry", inode, de, bh, i)) {
499 brelse(bh);
500 return -EIO;
501 }
502 if (de == dir) {
503 if (pde)
504 fs16_add(sb, &pde->d_reclen,
505 fs16_to_cpu(sb, dir->d_reclen));
506 dir->d_ino = 0;
507 inode->i_version++;
508 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
509 mark_inode_dirty(inode);
510 mark_buffer_dirty(bh);
511 if (IS_DIRSYNC(inode))
512 sync_dirty_buffer(bh);
513 brelse(bh);
514 UFSD(("EXIT\n"))
515 return 0;
516 }
517 i += fs16_to_cpu(sb, de->d_reclen);
518 if (i == UFS_SECTOR_SIZE) pde = NULL;
519 else pde = de;
520 de = (struct ufs_dir_entry *)
521 ((char *) de + fs16_to_cpu(sb, de->d_reclen));
522 if (i == UFS_SECTOR_SIZE && de->d_reclen == 0)
523 break;
524 }
525 UFSD(("EXIT\n"))
526 brelse(bh);
527 return -ENOENT;
528}
529
530int ufs_make_empty(struct inode * inode, struct inode *dir)
531{
532 struct super_block * sb = dir->i_sb;
533 struct buffer_head * dir_block;
534 struct ufs_dir_entry * de;
535 int err;
536
537 dir_block = ufs_bread (inode, 0, 1, &err);
538 if (!dir_block)
539 return err;
540
541 inode->i_blocks = sb->s_blocksize / UFS_SECTOR_SIZE;
542 de = (struct ufs_dir_entry *) dir_block->b_data;
543 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
544 ufs_set_de_type(sb, de, inode->i_mode);
545 ufs_set_de_namlen(sb, de, 1);
546 de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
547 strcpy (de->d_name, ".");
548 de = (struct ufs_dir_entry *)
549 ((char *)de + fs16_to_cpu(sb, de->d_reclen));
550 de->d_ino = cpu_to_fs32(sb, dir->i_ino);
551 ufs_set_de_type(sb, de, dir->i_mode);
552 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
553 ufs_set_de_namlen(sb, de, 2);
554 strcpy (de->d_name, "..");
555 mark_buffer_dirty(dir_block);
556 brelse (dir_block);
557 mark_inode_dirty(inode);
558 return 0;
559}
560
561/*
562 * routine to check that the specified directory is empty (for rmdir)
563 */
564int ufs_empty_dir (struct inode * inode)
565{
566 struct super_block * sb;
567 unsigned long offset;
568 struct buffer_head * bh;
569 struct ufs_dir_entry * de, * de1;
570 int err;
571
572 sb = inode->i_sb;
573
574 if (inode->i_size < UFS_DIR_REC_LEN(1) + UFS_DIR_REC_LEN(2) ||
575 !(bh = ufs_bread (inode, 0, 0, &err))) {
576 ufs_warning (inode->i_sb, "empty_dir",
577 "bad directory (dir #%lu) - no data block",
578 inode->i_ino);
579 return 1;
580 }
581 de = (struct ufs_dir_entry *) bh->b_data;
582 de1 = (struct ufs_dir_entry *)
583 ((char *)de + fs16_to_cpu(sb, de->d_reclen));
584 if (fs32_to_cpu(sb, de->d_ino) != inode->i_ino || de1->d_ino == 0 ||
585 strcmp (".", de->d_name) || strcmp ("..", de1->d_name)) {
586 ufs_warning (inode->i_sb, "empty_dir",
587 "bad directory (dir #%lu) - no `.' or `..'",
588 inode->i_ino);
589 return 1;
590 }
591 offset = fs16_to_cpu(sb, de->d_reclen) + fs16_to_cpu(sb, de1->d_reclen);
592 de = (struct ufs_dir_entry *)
593 ((char *)de1 + fs16_to_cpu(sb, de1->d_reclen));
594 while (offset < inode->i_size ) {
595 if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
596 brelse (bh);
597 bh = ufs_bread (inode, offset >> sb->s_blocksize_bits, 1, &err);
598 if (!bh) {
599 ufs_error (sb, "empty_dir",
600 "directory #%lu contains a hole at offset %lu",
601 inode->i_ino, offset);
602 offset += sb->s_blocksize;
603 continue;
604 }
605 de = (struct ufs_dir_entry *) bh->b_data;
606 }
607 if (!ufs_check_dir_entry ("empty_dir", inode, de, bh, offset)) {
608 brelse (bh);
609 return 1;
610 }
611 if (de->d_ino) {
612 brelse (bh);
613 return 0;
614 }
615 offset += fs16_to_cpu(sb, de->d_reclen);
616 de = (struct ufs_dir_entry *)
617 ((char *)de + fs16_to_cpu(sb, de->d_reclen));
618 }
619 brelse (bh);
620 return 1;
621}
622
623struct file_operations ufs_dir_operations = {
624 .read = generic_read_dir,
625 .readdir = ufs_readdir,
626 .fsync = file_fsync,
627};
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
new file mode 100644
index 000000000000..ed69d7fe1b5d
--- /dev/null
+++ b/fs/ufs/file.c
@@ -0,0 +1,55 @@
1/*
2 * linux/fs/ufs/file.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * from
9 *
10 * linux/fs/ext2/file.c
11 *
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
16 *
17 * from
18 *
19 * linux/fs/minix/file.c
20 *
21 * Copyright (C) 1991, 1992 Linus Torvalds
22 *
23 * ext2 fs regular file handling primitives
24 */
25
26#include <asm/uaccess.h>
27#include <asm/system.h>
28
29#include <linux/errno.h>
30#include <linux/fs.h>
31#include <linux/ufs_fs.h>
32#include <linux/fcntl.h>
33#include <linux/time.h>
34#include <linux/stat.h>
35#include <linux/mm.h>
36#include <linux/pagemap.h>
37#include <linux/smp_lock.h>
38
39/*
40 * We have mostly NULL's here: the current defaults are ok for
41 * the ufs filesystem.
42 */
43
44struct file_operations ufs_file_operations = {
45 .llseek = generic_file_llseek,
46 .read = generic_file_read,
47 .write = generic_file_write,
48 .mmap = generic_file_mmap,
49 .open = generic_file_open,
50 .sendfile = generic_file_sendfile,
51};
52
53struct inode_operations ufs_file_inode_operations = {
54 .truncate = ufs_truncate,
55};
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
new file mode 100644
index 000000000000..61a6b1542fc5
--- /dev/null
+++ b/fs/ufs/ialloc.c
@@ -0,0 +1,302 @@
1/*
2 * linux/fs/ufs/ialloc.c
3 *
4 * Copyright (c) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * from
9 *
10 * linux/fs/ext2/ialloc.c
11 *
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
16 *
17 * BSD ufs-inspired inode and directory allocation by
18 * Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
19 * Big-endian to little-endian byte-swapping/bitmaps by
20 * David S. Miller (davem@caip.rutgers.edu), 1995
21 */
22
23#include <linux/fs.h>
24#include <linux/ufs_fs.h>
25#include <linux/time.h>
26#include <linux/stat.h>
27#include <linux/string.h>
28#include <linux/quotaops.h>
29#include <linux/buffer_head.h>
30#include <linux/sched.h>
31#include <linux/bitops.h>
32#include <asm/byteorder.h>
33
34#include "swab.h"
35#include "util.h"
36
37#undef UFS_IALLOC_DEBUG
38
39#ifdef UFS_IALLOC_DEBUG
40#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
41#else
42#define UFSD(x)
43#endif
44
45/*
46 * NOTE! When we get the inode, we're the only people
47 * that have access to it, and as such there are no
48 * race conditions we have to worry about. The inode
49 * is not on the hash-lists, and it cannot be reached
50 * through the filesystem because the directory entry
51 * has been deleted earlier.
52 *
53 * HOWEVER: we must make sure that we get no aliases,
54 * which means that we have to call "clear_inode()"
55 * _before_ we mark the inode not in use in the inode
56 * bitmaps. Otherwise a newly created file might use
57 * the same inode number (not actually the same pointer
58 * though), and then we'd have two inodes sharing the
59 * same inode number and space on the harddisk.
60 */
61void ufs_free_inode (struct inode * inode)
62{
63 struct super_block * sb;
64 struct ufs_sb_private_info * uspi;
65 struct ufs_super_block_first * usb1;
66 struct ufs_cg_private_info * ucpi;
67 struct ufs_cylinder_group * ucg;
68 int is_directory;
69 unsigned ino, cg, bit;
70
71 UFSD(("ENTER, ino %lu\n", inode->i_ino))
72
73 sb = inode->i_sb;
74 uspi = UFS_SB(sb)->s_uspi;
75 usb1 = ubh_get_usb_first(USPI_UBH);
76
77 ino = inode->i_ino;
78
79 lock_super (sb);
80
81 if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
82 ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
83 unlock_super (sb);
84 return;
85 }
86
87 cg = ufs_inotocg (ino);
88 bit = ufs_inotocgoff (ino);
89 ucpi = ufs_load_cylinder (sb, cg);
90 if (!ucpi) {
91 unlock_super (sb);
92 return;
93 }
94 ucg = ubh_get_ucg(UCPI_UBH);
95 if (!ufs_cg_chkmagic(sb, ucg))
96		ufs_panic (sb, "ufs_free_inode", "internal error, bad cg magic number");
97
98 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
99
100 is_directory = S_ISDIR(inode->i_mode);
101
102 DQUOT_FREE_INODE(inode);
103 DQUOT_DROP(inode);
104
105 clear_inode (inode);
106
107 if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
108 ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
109 else {
110 ubh_clrbit (UCPI_UBH, ucpi->c_iusedoff, bit);
111 if (ino < ucpi->c_irotor)
112 ucpi->c_irotor = ino;
113 fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
114 fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1);
115 fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);
116
117 if (is_directory) {
118 fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
119 fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1);
120 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
121 }
122 }
123
124 ubh_mark_buffer_dirty (USPI_UBH);
125 ubh_mark_buffer_dirty (UCPI_UBH);
126 if (sb->s_flags & MS_SYNCHRONOUS) {
127 ubh_wait_on_buffer (UCPI_UBH);
128 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
129 ubh_wait_on_buffer (UCPI_UBH);
130 }
131
132 sb->s_dirt = 1;
133 unlock_super (sb);
134 UFSD(("EXIT\n"))
135}
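The bookkeeping in ufs_free_inode() boils down to splitting the inode number into a cylinder group and a bit offset, clearing the in-use bit, and crediting the free-inode summary counters. The sketch below repeats that arithmetic in plain user-space C; the group count, the inodes-per-group value, and the flat arrays are hypothetical stand-ins for the ubh and fs32 helpers used above.

#include <stdio.h>
#include <string.h>

struct cg_summary { unsigned nifree; unsigned ndir; };

enum { NGROUPS = 4, IPG = 1024 };		/* uspi->s_ncg, uspi->s_ipg */

static unsigned char	 imap[NGROUPS][IPG / 8];	/* per-group inode bitmaps */
static struct cg_summary cgs[NGROUPS];
static unsigned		 total_nifree;

static void free_inode_sketch(unsigned ino, int is_directory)
{
	unsigned group = ino / IPG;	/* ufs_inotocg()    */
	unsigned bit   = ino % IPG;	/* ufs_inotocgoff() */

	imap[group][bit / 8] &= ~(1u << (bit % 8));	/* ubh_clrbit()     */
	cgs[group].nifree++;				/* cg_cs.cs_nifree  */
	total_nifree++;					/* fs_cstotal       */
	if (is_directory)
		cgs[group].ndir--;

	printf("freed inode %u: group %u, bit %u\n", ino, group, bit);
}

int main(void)
{
	memset(imap, 0xff, sizeof(imap));	/* pretend every inode is in use */
	cgs[2].ndir = 3;
	free_inode_sketch(2100, 0);
	printf("group 2 now has %u free inodes\n", cgs[2].nifree);
	return 0;
}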
136
137/*
138 * There are two policies for allocating an inode. If the new inode is
139 * a directory, then a forward search is made for a block group with both
140 * free space and a low directory-to-inode ratio; if that fails, then of
141 * the groups with above-average free space, that group with the fewest
142 * directories already is chosen.
143 *
144 * For other inodes, search forward from the parent directory's block
145 * group to find a free inode.
146 */
147struct inode * ufs_new_inode(struct inode * dir, int mode)
148{
149 struct super_block * sb;
150 struct ufs_sb_info * sbi;
151 struct ufs_sb_private_info * uspi;
152 struct ufs_super_block_first * usb1;
153 struct ufs_cg_private_info * ucpi;
154 struct ufs_cylinder_group * ucg;
155 struct inode * inode;
156 unsigned cg, bit, i, j, start;
157 struct ufs_inode_info *ufsi;
158
159 UFSD(("ENTER\n"))
160
161 /* Cannot create files in a deleted directory */
162 if (!dir || !dir->i_nlink)
163 return ERR_PTR(-EPERM);
164 sb = dir->i_sb;
165 inode = new_inode(sb);
166 if (!inode)
167 return ERR_PTR(-ENOMEM);
168 ufsi = UFS_I(inode);
169 sbi = UFS_SB(sb);
170 uspi = sbi->s_uspi;
171 usb1 = ubh_get_usb_first(USPI_UBH);
172
173 lock_super (sb);
174
175 /*
176 * Try to place the inode in its parent directory
177 */
178 i = ufs_inotocg(dir->i_ino);
179 if (sbi->fs_cs(i).cs_nifree) {
180 cg = i;
181 goto cg_found;
182 }
183
184 /*
185 * Use a quadratic hash to find a group with a free inode
186 */
187 for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
188 i += j;
189 if (i >= uspi->s_ncg)
190 i -= uspi->s_ncg;
191 if (sbi->fs_cs(i).cs_nifree) {
192 cg = i;
193 goto cg_found;
194 }
195 }
196
197 /*
198 * That failed: try linear search for a free inode
199 */
200 i = ufs_inotocg(dir->i_ino) + 1;
201 for (j = 2; j < uspi->s_ncg; j++) {
202 i++;
203 if (i >= uspi->s_ncg)
204 i = 0;
205 if (sbi->fs_cs(i).cs_nifree) {
206 cg = i;
207 goto cg_found;
208 }
209 }
210
211 goto failed;
212
213cg_found:
214 ucpi = ufs_load_cylinder (sb, cg);
215 if (!ucpi)
216 goto failed;
217 ucg = ubh_get_ucg(UCPI_UBH);
218 if (!ufs_cg_chkmagic(sb, ucg))
219 ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");
220
221 start = ucpi->c_irotor;
222 bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
223 if (!(bit < uspi->s_ipg)) {
224 bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
225 if (!(bit < start)) {
226 ufs_error (sb, "ufs_new_inode",
227 "cylinder group %u corrupted - error in inode bitmap\n", cg);
228 goto failed;
229 }
230 }
231 UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
232 if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
233 ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
234 else {
235 ufs_panic (sb, "ufs_new_inode", "internal error");
236 goto failed;
237 }
238
239 fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
240 fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
241 fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
242
243 if (S_ISDIR(mode)) {
244 fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
245 fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
246 fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
247 }
248
249 ubh_mark_buffer_dirty (USPI_UBH);
250 ubh_mark_buffer_dirty (UCPI_UBH);
251 if (sb->s_flags & MS_SYNCHRONOUS) {
252 ubh_wait_on_buffer (UCPI_UBH);
253 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
254 ubh_wait_on_buffer (UCPI_UBH);
255 }
256 sb->s_dirt = 1;
257
258 inode->i_mode = mode;
259 inode->i_uid = current->fsuid;
260 if (dir->i_mode & S_ISGID) {
261 inode->i_gid = dir->i_gid;
262 if (S_ISDIR(mode))
263 inode->i_mode |= S_ISGID;
264 } else
265 inode->i_gid = current->fsgid;
266
267 inode->i_ino = cg * uspi->s_ipg + bit;
268 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
269 inode->i_blocks = 0;
270 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
271 ufsi->i_flags = UFS_I(dir)->i_flags;
272 ufsi->i_lastfrag = 0;
273 ufsi->i_gen = 0;
274 ufsi->i_shadow = 0;
275 ufsi->i_osync = 0;
276 ufsi->i_oeftflag = 0;
277 memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
278
279 insert_inode_hash(inode);
280 mark_inode_dirty(inode);
281
282 unlock_super (sb);
283
284 if (DQUOT_ALLOC_INODE(inode)) {
285 DQUOT_DROP(inode);
286 inode->i_flags |= S_NOQUOTA;
287 inode->i_nlink = 0;
288 iput(inode);
289 return ERR_PTR(-EDQUOT);
290 }
291
292 UFSD(("allocating inode %lu\n", inode->i_ino))
293 UFSD(("EXIT\n"))
294 return inode;
295
296failed:
297 unlock_super (sb);
298 make_bad_inode(inode);
299 iput (inode);
300 UFSD(("EXIT (FAILED)\n"))
301 return ERR_PTR(-ENOSPC);
302}
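The cylinder-group selection in ufs_new_inode() tries the parent's group first, then probes with a quadratic hash whose step doubles on every miss, and finally falls back to a linear sweep. A minimal user-space sketch of that search order, with nifree[] standing in for sbi->fs_cs(i).cs_nifree, is:

#include <stdio.h>

static int pick_group(unsigned parent_cg, unsigned ncg, const unsigned *nifree)
{
	unsigned i, j;

	if (nifree[parent_cg])			/* parent's group first */
		return parent_cg;

	i = parent_cg;
	for (j = 1; j < ncg; j <<= 1) {		/* quadratic hash */
		i += j;
		if (i >= ncg)
			i -= ncg;
		if (nifree[i])
			return i;
	}

	i = parent_cg + 1;			/* linear fallback */
	for (j = 2; j < ncg; j++) {
		i++;
		if (i >= ncg)
			i = 0;
		if (nifree[i])
			return i;
	}
	return -1;				/* no free inode anywhere */
}

int main(void)
{
	unsigned nifree[8] = { 0, 0, 0, 5, 0, 0, 0, 0 };

	printf("chose group %d\n", pick_group(1, 8, nifree));
	return 0;
}

With only group 3 holding free inodes, the quadratic probes miss and the linear sweep finds it, so the program prints "chose group 3".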
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
new file mode 100644
index 000000000000..718627ca8b5c
--- /dev/null
+++ b/fs/ufs/inode.c
@@ -0,0 +1,816 @@
1/*
2 * linux/fs/ufs/inode.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * from
9 *
10 * linux/fs/ext2/inode.c
11 *
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
16 *
17 * from
18 *
19 * linux/fs/minix/inode.c
20 *
21 * Copyright (C) 1991, 1992 Linus Torvalds
22 *
23 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24 * Big-endian to little-endian byte-swapping/bitmaps by
25 * David S. Miller (davem@caip.rutgers.edu), 1995
26 */
27
28#include <asm/uaccess.h>
29#include <asm/system.h>
30
31#include <linux/errno.h>
32#include <linux/fs.h>
33#include <linux/ufs_fs.h>
34#include <linux/time.h>
35#include <linux/stat.h>
36#include <linux/string.h>
37#include <linux/mm.h>
38#include <linux/smp_lock.h>
39#include <linux/buffer_head.h>
40
41#include "swab.h"
42#include "util.h"
43
44#undef UFS_INODE_DEBUG
45#undef UFS_INODE_DEBUG_MORE
46
47#ifdef UFS_INODE_DEBUG
48#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
49#else
50#define UFSD(x)
51#endif
52
53static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
54{
55 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
56 int ptrs = uspi->s_apb;
57 int ptrs_bits = uspi->s_apbshift;
58 const long direct_blocks = UFS_NDADDR,
59 indirect_blocks = ptrs,
60 double_blocks = (1 << (ptrs_bits * 2));
61 int n = 0;
62
63
64 UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%d \n",ptrs,double_blocks));
65 if (i_block < 0) {
66 ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
67 } else if (i_block < direct_blocks) {
68 offsets[n++] = i_block;
69 } else if ((i_block -= direct_blocks) < indirect_blocks) {
70 offsets[n++] = UFS_IND_BLOCK;
71 offsets[n++] = i_block;
72 } else if ((i_block -= indirect_blocks) < double_blocks) {
73 offsets[n++] = UFS_DIND_BLOCK;
74 offsets[n++] = i_block >> ptrs_bits;
75 offsets[n++] = i_block & (ptrs - 1);
76 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
77 offsets[n++] = UFS_TIND_BLOCK;
78 offsets[n++] = i_block >> (ptrs_bits * 2);
79 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
80 offsets[n++] = i_block & (ptrs - 1);
81 } else {
82 ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
83 }
84 return n;
85}
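ufs_block_to_path() turns a file-relative block number into up to four array indices: a direct slot, or an indirect-pointer slot followed by offsets inside one, two, or three levels of indirect blocks. The sketch below redoes the same subtract-and-shift arithmetic in user space; the twelve direct pointers and 1024 pointers per indirect block are sample values, not read from any superblock.

#include <stdio.h>

enum { NDADDR = 12, PTRS_BITS = 10, PTRS = 1 << PTRS_BITS };
enum { IND_BLOCK = NDADDR, DIND_BLOCK = NDADDR + 1, TIND_BLOCK = NDADDR + 2 };

static int block_to_path(long b, long offsets[4])
{
	int n = 0;

	if (b < NDADDR) {
		offsets[n++] = b;
	} else if ((b -= NDADDR) < PTRS) {
		offsets[n++] = IND_BLOCK;
		offsets[n++] = b;
	} else if ((b -= PTRS) < (1L << (PTRS_BITS * 2))) {
		offsets[n++] = DIND_BLOCK;
		offsets[n++] = b >> PTRS_BITS;
		offsets[n++] = b & (PTRS - 1);
	} else if (((b -= 1L << (PTRS_BITS * 2)) >> (PTRS_BITS * 2)) < PTRS) {
		offsets[n++] = TIND_BLOCK;
		offsets[n++] = b >> (PTRS_BITS * 2);
		offsets[n++] = (b >> PTRS_BITS) & (PTRS - 1);
		offsets[n++] = b & (PTRS - 1);
	}
	return n;	/* 0 means the block number is out of range */
}

int main(void)
{
	long off[4];
	int i, depth = block_to_path(NDADDR + PTRS + 5, off);

	for (i = 0; i < depth; i++)
		printf("level %d -> index %ld\n", i, off[i]);
	return 0;
}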
86
87/*
88 * Returns the location of the fragment from
89 * the beginning of the filesystem.
90 */
91
92u64 ufs_frag_map(struct inode *inode, sector_t frag)
93{
94 struct ufs_inode_info *ufsi = UFS_I(inode);
95 struct super_block *sb = inode->i_sb;
96 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
97 u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
98 int shift = uspi->s_apbshift-uspi->s_fpbshift;
99 sector_t offsets[4], *p;
100 int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
101 u64 ret = 0L;
102 __fs32 block;
103 __fs64 u2_block = 0L;
104 unsigned flags = UFS_SB(sb)->s_flags;
105 u64 temp = 0L;
106
107 UFSD((": frag = %lu depth = %d\n",frag,depth));
108 UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask));
109
110 if (depth == 0)
111 return 0;
112
113 p = offsets;
114
115 lock_kernel();
116 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
117 goto ufs2;
118
119 block = ufsi->i_u1.i_data[*p++];
120 if (!block)
121 goto out;
122 while (--depth) {
123 struct buffer_head *bh;
124 sector_t n = *p++;
125
126 bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
127 if (!bh)
128 goto out;
129 block = ((__fs32 *) bh->b_data)[n & mask];
130 brelse (bh);
131 if (!block)
132 goto out;
133 }
134 ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
135 goto out;
136ufs2:
137 u2_block = ufsi->i_u1.u2_i_data[*p++];
138 if (!u2_block)
139 goto out;
140
141
142 while (--depth) {
143 struct buffer_head *bh;
144 sector_t n = *p++;
145
146
147 temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
148 bh = sb_bread(sb, temp +(u64) (n>>shift));
149 if (!bh)
150 goto out;
151 u2_block = ((__fs64 *)bh->b_data)[n & mask];
152 brelse(bh);
153 if (!u2_block)
154 goto out;
155 }
156 temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
157 ret = temp + (u64) (frag & uspi->s_fpbmask);
158
159out:
160 unlock_kernel();
161 return ret;
162}
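ufs_frag_map() works in fragments rather than blocks: the high bits of the fragment number (frag >> s_fpbshift) select the file block whose address is found by walking the indirect tree, and the low bits (frag & s_fpbmask) are added back to pick the fragment inside that block. Below is a toy version of that split, assuming eight fragments per block; disk_block_of() is a fake contiguous mapping standing in for the tree walk.

#include <stdio.h>

enum { FPB_SHIFT = 3, FPB_MASK = (1 << FPB_SHIFT) - 1 };

static unsigned long long disk_block_of(unsigned long file_block)
{
	return 1000 + file_block;	/* fake mapping for illustration */
}

static unsigned long long frag_map(unsigned long frag)
{
	unsigned long file_block = frag >> FPB_SHIFT;	/* which block    */
	unsigned      in_block   = frag &  FPB_MASK;	/* which fragment */

	return disk_block_of(file_block) + in_block;
}

int main(void)
{
	printf("fragment 21 -> device fragment %llu\n", frag_map(21));
	return 0;
}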
163
164static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
165 unsigned int fragment, unsigned int new_fragment,
166 unsigned int required, int *err, int metadata, long *phys, int *new)
167{
168 struct ufs_inode_info *ufsi = UFS_I(inode);
169 struct super_block * sb;
170 struct ufs_sb_private_info * uspi;
171 struct buffer_head * result;
172 unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
173 unsigned tmp, goal;
174 __fs32 * p, * p2;
175 unsigned flags = 0;
176
177 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
178 inode->i_ino, fragment, new_fragment, required))
179
180 sb = inode->i_sb;
181 uspi = UFS_SB(sb)->s_uspi;
182
183 flags = UFS_SB(sb)->s_flags;
184 /* TODO : to be done for write support
185 if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
186 goto ufs2;
187 */
188
189 block = ufs_fragstoblks (fragment);
190 blockoff = ufs_fragnum (fragment);
191 p = ufsi->i_u1.i_data + block;
192 goal = 0;
193
194repeat:
195 tmp = fs32_to_cpu(sb, *p);
196 lastfrag = ufsi->i_lastfrag;
197 if (tmp && fragment < lastfrag) {
198 if (metadata) {
199 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
200 if (tmp == fs32_to_cpu(sb, *p)) {
201 UFSD(("EXIT, result %u\n", tmp + blockoff))
202 return result;
203 }
204 brelse (result);
205 goto repeat;
206 } else {
207 *phys = tmp;
208 return NULL;
209 }
210 }
211
212 lastblock = ufs_fragstoblks (lastfrag);
213 lastblockoff = ufs_fragnum (lastfrag);
214 /*
215 * We will extend file into new block beyond last allocated block
216 */
217 if (lastblock < block) {
218 /*
219 * We must reallocate last allocated block
220 */
221 if (lastblockoff) {
222 p2 = ufsi->i_u1.i_data + lastblock;
223 tmp = ufs_new_fragments (inode, p2, lastfrag,
224 fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err);
225 if (!tmp) {
226 if (lastfrag != ufsi->i_lastfrag)
227 goto repeat;
228 else
229 return NULL;
230 }
231 lastfrag = ufsi->i_lastfrag;
232
233 }
234 goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
235 tmp = ufs_new_fragments (inode, p, fragment - blockoff,
236 goal, required + blockoff, err);
237 }
238 /*
239 * We will extend last allocated block
240 */
241 else if (lastblock == block) {
242 tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff),
243 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err);
244 }
245 /*
246 * We will allocate new block before last allocated block
247 */
248 else /* (lastblock > block) */ {
249 if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
250 goal = tmp + uspi->s_fpb;
251 tmp = ufs_new_fragments (inode, p, fragment - blockoff,
252 goal, uspi->s_fpb, err);
253 }
254 if (!tmp) {
255 if ((!blockoff && *p) ||
256 (blockoff && lastfrag != ufsi->i_lastfrag))
257 goto repeat;
258 *err = -ENOSPC;
259 return NULL;
260 }
261
262	/* The nullification of fragments done in ufs/balloc.c is
263	 * something I don't have the stomach to move into here right
264 * now. -DaveM
265 */
266 if (metadata) {
267 result = sb_getblk(inode->i_sb, tmp + blockoff);
268 } else {
269 *phys = tmp;
270 result = NULL;
271 *err = 0;
272 *new = 1;
273 }
274
275 inode->i_ctime = CURRENT_TIME_SEC;
276 if (IS_SYNC(inode))
277 ufs_sync_inode (inode);
278 mark_inode_dirty(inode);
279 UFSD(("EXIT, result %u\n", tmp + blockoff))
280 return result;
281
282 /* This part : To be implemented ....
283 Required only for writing, not required for READ-ONLY.
284ufs2:
285
286 u2_block = ufs_fragstoblks(fragment);
287 u2_blockoff = ufs_fragnum(fragment);
288 p = ufsi->i_u1.u2_i_data + block;
289 goal = 0;
290
291repeat2:
292 tmp = fs32_to_cpu(sb, *p);
293 lastfrag = ufsi->i_lastfrag;
294
295 */
296}
297
298static struct buffer_head * ufs_block_getfrag (struct inode *inode,
299 struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment,
300 unsigned int blocksize, int * err, int metadata, long *phys, int *new)
301{
302 struct super_block * sb;
303 struct ufs_sb_private_info * uspi;
304 struct buffer_head * result;
305 unsigned tmp, goal, block, blockoff;
306 __fs32 * p;
307
308 sb = inode->i_sb;
309 uspi = UFS_SB(sb)->s_uspi;
310 block = ufs_fragstoblks (fragment);
311 blockoff = ufs_fragnum (fragment);
312
313 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment))
314
315 result = NULL;
316 if (!bh)
317 goto out;
318 if (!buffer_uptodate(bh)) {
319 ll_rw_block (READ, 1, &bh);
320 wait_on_buffer (bh);
321 if (!buffer_uptodate(bh))
322 goto out;
323 }
324
325 p = (__fs32 *) bh->b_data + block;
326repeat:
327 tmp = fs32_to_cpu(sb, *p);
328 if (tmp) {
329 if (metadata) {
330 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
331 if (tmp == fs32_to_cpu(sb, *p))
332 goto out;
333 brelse (result);
334 goto repeat;
335 } else {
336 *phys = tmp;
337 goto out;
338 }
339 }
340
341	if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1])))
342 goal = tmp + uspi->s_fpb;
343 else
344 goal = bh->b_blocknr + uspi->s_fpb;
345 tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err);
346 if (!tmp) {
347 if (fs32_to_cpu(sb, *p))
348 goto repeat;
349 goto out;
350 }
351
352	/* The nullification of fragments done in ufs/balloc.c is
353	 * something I don't have the stomach to move into here right
354 * now. -DaveM
355 */
356 if (metadata) {
357 result = sb_getblk(sb, tmp + blockoff);
358 } else {
359 *phys = tmp;
360 *new = 1;
361 }
362
363 mark_buffer_dirty(bh);
364 if (IS_SYNC(inode))
365 sync_dirty_buffer(bh);
366 inode->i_ctime = CURRENT_TIME_SEC;
367 mark_inode_dirty(inode);
368out:
369 brelse (bh);
370 UFSD(("EXIT, result %u\n", tmp + blockoff))
371 return result;
372}
373
374/*
375 * This function gets the block which contains the fragment.
376 */
377
378static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
379{
380 struct super_block * sb = inode->i_sb;
381 struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
382 struct buffer_head * bh;
383 int ret, err, new;
384 unsigned long ptr,phys;
385 u64 phys64 = 0;
386
387 if (!create) {
388 phys64 = ufs_frag_map(inode, fragment);
389 UFSD(("phys64 = %lu \n",phys64));
390 if (phys64)
391 map_bh(bh_result, sb, phys64);
392 return 0;
393 }
394
395 /* This code entered only while writing ....? */
396
397 err = -EIO;
398 new = 0;
399 ret = 0;
400 bh = NULL;
401
402 lock_kernel();
403
404 UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
405 if (fragment < 0)
406 goto abort_negative;
407 if (fragment >
408 ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
409 << uspi->s_fpbshift))
410 goto abort_too_big;
411
412 err = 0;
413 ptr = fragment;
414
415 /*
416 * ok, these macros clean the logic up a bit and make
417 * it much more readable:
418 */
419#define GET_INODE_DATABLOCK(x) \
420 ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
421#define GET_INODE_PTR(x) \
422 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
423#define GET_INDIRECT_DATABLOCK(x) \
424 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
425 &err, 0, &phys, &new);
426#define GET_INDIRECT_PTR(x) \
427 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
428 &err, 1, NULL, NULL);
429
430 if (ptr < UFS_NDIR_FRAGMENT) {
431 bh = GET_INODE_DATABLOCK(ptr);
432 goto out;
433 }
434 ptr -= UFS_NDIR_FRAGMENT;
435 if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
436 bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
437 goto get_indirect;
438 }
439 ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
440 if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
441 bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
442 goto get_double;
443 }
444 ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
445 bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
446 bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
447get_double:
448 bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
449get_indirect:
450 bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
451
452#undef GET_INODE_DATABLOCK
453#undef GET_INODE_PTR
454#undef GET_INDIRECT_DATABLOCK
455#undef GET_INDIRECT_PTR
456
457out:
458 if (err)
459 goto abort;
460 if (new)
461 set_buffer_new(bh_result);
462 map_bh(bh_result, sb, phys);
463abort:
464 unlock_kernel();
465 return err;
466
467abort_negative:
468 ufs_warning(sb, "ufs_get_block", "block < 0");
469 goto abort;
470
471abort_too_big:
472 ufs_warning(sb, "ufs_get_block", "block > big");
473 goto abort;
474}
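Before dispatching to the GET_* helpers, ufs_getfrag_block() classifies the requested fragment by successively subtracting the size of the direct area and of each indirection level, where each level covers 1 << (level * s_apbshift + s_fpbshift) fragments. The range test looks roughly like the user-space sketch below; the shift values and the number of direct fragments are hypothetical.

#include <stdio.h>

enum { APB_SHIFT = 10, FPB_SHIFT = 3, NDIR_FRAGMENT = 12 << FPB_SHIFT };

static const char *classify(unsigned long ptr)
{
	if (ptr < NDIR_FRAGMENT)
		return "direct";
	ptr -= NDIR_FRAGMENT;
	if (ptr < (1UL << (APB_SHIFT + FPB_SHIFT)))
		return "single indirect";
	ptr -= 1UL << (APB_SHIFT + FPB_SHIFT);
	if (ptr < (1UL << (2 * APB_SHIFT + FPB_SHIFT)))
		return "double indirect";
	return "triple indirect";
}

int main(void)
{
	printf("%s\n", classify(40));		/* falls in the direct area */
	printf("%s\n", classify(200000));	/* needs two indirections   */
	return 0;
}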
475
476struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
477 int create, int *err)
478{
479 struct buffer_head dummy;
480 int error;
481
482 dummy.b_state = 0;
483 dummy.b_blocknr = -1000;
484 error = ufs_getfrag_block(inode, fragment, &dummy, create);
485 *err = error;
486 if (!error && buffer_mapped(&dummy)) {
487 struct buffer_head *bh;
488 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
489 if (buffer_new(&dummy)) {
490 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
491 set_buffer_uptodate(bh);
492 mark_buffer_dirty(bh);
493 }
494 return bh;
495 }
496 return NULL;
497}
498
499struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
500 int create, int * err)
501{
502 struct buffer_head * bh;
503
504 UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
505 bh = ufs_getfrag (inode, fragment, create, err);
506 if (!bh || buffer_uptodate(bh))
507 return bh;
508 ll_rw_block (READ, 1, &bh);
509 wait_on_buffer (bh);
510 if (buffer_uptodate(bh))
511 return bh;
512 brelse (bh);
513 *err = -EIO;
514 return NULL;
515}
516
517static int ufs_writepage(struct page *page, struct writeback_control *wbc)
518{
519 return block_write_full_page(page,ufs_getfrag_block,wbc);
520}
521static int ufs_readpage(struct file *file, struct page *page)
522{
523 return block_read_full_page(page,ufs_getfrag_block);
524}
525static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
526{
527 return block_prepare_write(page,from,to,ufs_getfrag_block);
528}
529static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
530{
531 return generic_block_bmap(mapping,block,ufs_getfrag_block);
532}
533struct address_space_operations ufs_aops = {
534 .readpage = ufs_readpage,
535 .writepage = ufs_writepage,
536 .sync_page = block_sync_page,
537 .prepare_write = ufs_prepare_write,
538 .commit_write = generic_commit_write,
539 .bmap = ufs_bmap
540};
541
542void ufs_read_inode (struct inode * inode)
543{
544 struct ufs_inode_info *ufsi = UFS_I(inode);
545 struct super_block * sb;
546 struct ufs_sb_private_info * uspi;
547 struct ufs_inode * ufs_inode;
548 struct ufs2_inode *ufs2_inode;
549 struct buffer_head * bh;
550 mode_t mode;
551 unsigned i;
552 unsigned flags;
553
554 UFSD(("ENTER, ino %lu\n", inode->i_ino))
555
556 sb = inode->i_sb;
557 uspi = UFS_SB(sb)->s_uspi;
558 flags = UFS_SB(sb)->s_flags;
559
560 if (inode->i_ino < UFS_ROOTINO ||
561 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
562 ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
563 goto bad_inode;
564 }
565
566 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
567 if (!bh) {
568 ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
569 goto bad_inode;
570 }
571 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
572 goto ufs2_inode;
573
574 ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));
575
576 /*
577 * Copy data to the in-core inode.
578 */
579 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
580 inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
581 if (inode->i_nlink == 0)
582 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
583
584 /*
585 * Linux now has 32-bit uid and gid, so we can support EFT.
586 */
587 inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
588 inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);
589
590 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
591 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
592 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
593 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
594 inode->i_mtime.tv_nsec = 0;
595 inode->i_atime.tv_nsec = 0;
596 inode->i_ctime.tv_nsec = 0;
597 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
598 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */
599 inode->i_version++;
600 ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
601 ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
602 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
603 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
604 ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
605
606 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
607 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
608 ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
609 }
610 else {
611 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
612 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
613 }
614 ufsi->i_osync = 0;
615
616 if (S_ISREG(inode->i_mode)) {
617 inode->i_op = &ufs_file_inode_operations;
618 inode->i_fop = &ufs_file_operations;
619 inode->i_mapping->a_ops = &ufs_aops;
620 } else if (S_ISDIR(inode->i_mode)) {
621 inode->i_op = &ufs_dir_inode_operations;
622 inode->i_fop = &ufs_dir_operations;
623 } else if (S_ISLNK(inode->i_mode)) {
624 if (!inode->i_blocks)
625 inode->i_op = &ufs_fast_symlink_inode_operations;
626 else {
627 inode->i_op = &page_symlink_inode_operations;
628 inode->i_mapping->a_ops = &ufs_aops;
629 }
630 } else
631 init_special_inode(inode, inode->i_mode,
632 ufs_get_inode_dev(sb, ufsi));
633
634 brelse (bh);
635
636 UFSD(("EXIT\n"))
637 return;
638
639bad_inode:
640 make_bad_inode(inode);
641 return;
642
643ufs2_inode :
644 UFSD(("Reading ufs2 inode, ino %lu\n", inode->i_ino))
645
646 ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
647
648 /*
649 * Copy data to the in-core inode.
650 */
651 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
652 inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
653 if (inode->i_nlink == 0)
654 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
655
656 /*
657 * Linux now has 32-bit uid and gid, so we can support EFT.
658 */
659 inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
660 inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);
661
662 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
663 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec);
664 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec);
665 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec);
666 inode->i_mtime.tv_nsec = 0;
667 inode->i_atime.tv_nsec = 0;
668 inode->i_ctime.tv_nsec = 0;
669 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
670 inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/
671
672 inode->i_version++;
673 ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
674 ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
675 /*
676 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
677 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
678 */
679 ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;
680
681 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
682 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
683 ufsi->i_u1.u2_i_data[i] =
684 ufs2_inode->ui_u2.ui_addr.ui_db[i];
685 }
686 else {
687 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
688 ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
689 }
690 ufsi->i_osync = 0;
691
692 if (S_ISREG(inode->i_mode)) {
693 inode->i_op = &ufs_file_inode_operations;
694 inode->i_fop = &ufs_file_operations;
695 inode->i_mapping->a_ops = &ufs_aops;
696 } else if (S_ISDIR(inode->i_mode)) {
697 inode->i_op = &ufs_dir_inode_operations;
698 inode->i_fop = &ufs_dir_operations;
699 } else if (S_ISLNK(inode->i_mode)) {
700 if (!inode->i_blocks)
701 inode->i_op = &ufs_fast_symlink_inode_operations;
702 else {
703 inode->i_op = &page_symlink_inode_operations;
704 inode->i_mapping->a_ops = &ufs_aops;
705 }
706 } else /* TODO : here ...*/
707 init_special_inode(inode, inode->i_mode,
708 ufs_get_inode_dev(sb, ufsi));
709
710 brelse(bh);
711
712 UFSD(("EXIT\n"))
713 return;
714}
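One derived field set above deserves a worked example: i_lastfrag rounds the byte size up to whole fragments with (i_size + s_fsize - 1) >> s_fshift. The snippet below assumes a 1024-byte fragment; the numbers are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned long long i_size = 5000;	/* file length in bytes */
	unsigned fsize = 1024, fshift = 10;
	unsigned long long lastfrag = (i_size + fsize - 1) >> fshift;

	printf("a %llu byte file occupies %llu fragments\n", i_size, lastfrag);
	return 0;
}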
715
716static int ufs_update_inode(struct inode * inode, int do_sync)
717{
718 struct ufs_inode_info *ufsi = UFS_I(inode);
719 struct super_block * sb;
720 struct ufs_sb_private_info * uspi;
721 struct buffer_head * bh;
722 struct ufs_inode * ufs_inode;
723 unsigned i;
724 unsigned flags;
725
726 UFSD(("ENTER, ino %lu\n", inode->i_ino))
727
728 sb = inode->i_sb;
729 uspi = UFS_SB(sb)->s_uspi;
730 flags = UFS_SB(sb)->s_flags;
731
732 if (inode->i_ino < UFS_ROOTINO ||
733 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
734		ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
735 return -1;
736 }
737
738 bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
739 if (!bh) {
740		ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
741 return -1;
742 }
743 ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode));
744
745 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
746 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
747
748 ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
749 ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);
750
751 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
752 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
753 ufs_inode->ui_atime.tv_usec = 0;
754 ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
755 ufs_inode->ui_ctime.tv_usec = 0;
756 ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
757 ufs_inode->ui_mtime.tv_usec = 0;
758 ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
759 ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
760 ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen);
761
762 if ((flags & UFS_UID_MASK) == UFS_UID_EFT) {
763 ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
764 ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
765 }
766
767 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
768 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
769 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
770 } else if (inode->i_blocks) {
771 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
772 ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
773 }
774 else {
775 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
776 ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
777 }
778
779 if (!inode->i_nlink)
780 memset (ufs_inode, 0, sizeof(struct ufs_inode));
781
782 mark_buffer_dirty(bh);
783 if (do_sync)
784 sync_dirty_buffer(bh);
785 brelse (bh);
786
787 UFSD(("EXIT\n"))
788 return 0;
789}
790
791int ufs_write_inode (struct inode * inode, int wait)
792{
793 int ret;
794 lock_kernel();
795 ret = ufs_update_inode (inode, wait);
796 unlock_kernel();
797 return ret;
798}
799
800int ufs_sync_inode (struct inode *inode)
801{
802 return ufs_update_inode (inode, 1);
803}
804
805void ufs_delete_inode (struct inode * inode)
806{
807 /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
808 lock_kernel();
809 mark_inode_dirty(inode);
810 ufs_update_inode(inode, IS_SYNC(inode));
811 inode->i_size = 0;
812 if (inode->i_blocks)
813 ufs_truncate (inode);
814 ufs_free_inode (inode);
815 unlock_kernel();
816}
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
new file mode 100644
index 000000000000..2958cde7d3d6
--- /dev/null
+++ b/fs/ufs/namei.c
@@ -0,0 +1,375 @@
1/*
2 * linux/fs/ufs/namei.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * from
9 *
10 * linux/fs/ext2/namei.c
11 *
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
16 *
17 * from
18 *
19 * linux/fs/minix/namei.c
20 *
21 * Copyright (C) 1991, 1992 Linus Torvalds
22 *
23 * Big-endian to little-endian byte-swapping/bitmaps by
24 * David S. Miller (davem@caip.rutgers.edu), 1995
25 */
26
27#include <linux/time.h>
28#include <linux/fs.h>
29#include <linux/ufs_fs.h>
30#include <linux/smp_lock.h>
31#include <linux/buffer_head.h>
32#include "swab.h" /* will go away - see comment in mknod() */
33#include "util.h"
34
35/*
36#undef UFS_NAMEI_DEBUG
37*/
38#define UFS_NAMEI_DEBUG
39
40#ifdef UFS_NAMEI_DEBUG
41#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
42#else
43#define UFSD(x)
44#endif
45
46static inline void ufs_inc_count(struct inode *inode)
47{
48 inode->i_nlink++;
49 mark_inode_dirty(inode);
50}
51
52static inline void ufs_dec_count(struct inode *inode)
53{
54 inode->i_nlink--;
55 mark_inode_dirty(inode);
56}
57
58static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
59{
60 int err = ufs_add_link(dentry, inode);
61 if (!err) {
62 d_instantiate(dentry, inode);
63 return 0;
64 }
65 ufs_dec_count(inode);
66 iput(inode);
67 return err;
68}
69
70static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
71{
72 struct inode * inode = NULL;
73 ino_t ino;
74
75 if (dentry->d_name.len > UFS_MAXNAMLEN)
76 return ERR_PTR(-ENAMETOOLONG);
77
78 lock_kernel();
79 ino = ufs_inode_by_name(dir, dentry);
80 if (ino) {
81 inode = iget(dir->i_sb, ino);
82 if (!inode) {
83 unlock_kernel();
84 return ERR_PTR(-EACCES);
85 }
86 }
87 unlock_kernel();
88 d_add(dentry, inode);
89 return NULL;
90}
91
92/*
93 * By the time this is called, we already have created
94 * the directory cache entry for the new file, but it
95 * is so far negative - it has no inode.
96 *
97 * If the create succeeds, we fill in the inode information
98 * with d_instantiate().
99 */
100static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
101 struct nameidata *nd)
102{
103 struct inode * inode = ufs_new_inode(dir, mode);
104 int err = PTR_ERR(inode);
105 if (!IS_ERR(inode)) {
106 inode->i_op = &ufs_file_inode_operations;
107 inode->i_fop = &ufs_file_operations;
108 inode->i_mapping->a_ops = &ufs_aops;
109 mark_inode_dirty(inode);
110 lock_kernel();
111 err = ufs_add_nondir(dentry, inode);
112 unlock_kernel();
113 }
114 return err;
115}
116
117static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
118{
119 struct inode *inode;
120 int err;
121
122 if (!old_valid_dev(rdev))
123 return -EINVAL;
124 inode = ufs_new_inode(dir, mode);
125 err = PTR_ERR(inode);
126 if (!IS_ERR(inode)) {
127 init_special_inode(inode, mode, rdev);
128 /* NOTE: that'll go when we get wide dev_t */
129 ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev);
130 mark_inode_dirty(inode);
131 lock_kernel();
132 err = ufs_add_nondir(dentry, inode);
133 unlock_kernel();
134 }
135 return err;
136}
137
138static int ufs_symlink (struct inode * dir, struct dentry * dentry,
139 const char * symname)
140{
141 struct super_block * sb = dir->i_sb;
142 int err = -ENAMETOOLONG;
143 unsigned l = strlen(symname)+1;
144 struct inode * inode;
145
146 if (l > sb->s_blocksize)
147 goto out;
148
149 lock_kernel();
150 inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
151 err = PTR_ERR(inode);
152 if (IS_ERR(inode))
153 goto out;
154
155 if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
156 /* slow symlink */
157 inode->i_op = &page_symlink_inode_operations;
158 inode->i_mapping->a_ops = &ufs_aops;
159 err = page_symlink(inode, symname, l);
160 if (err)
161 goto out_fail;
162 } else {
163 /* fast symlink */
164 inode->i_op = &ufs_fast_symlink_inode_operations;
165 memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l);
166 inode->i_size = l-1;
167 }
168 mark_inode_dirty(inode);
169
170 err = ufs_add_nondir(dentry, inode);
171out:
172 unlock_kernel();
173 return err;
174
175out_fail:
176 ufs_dec_count(inode);
177 iput(inode);
178 goto out;
179}
180
181static int ufs_link (struct dentry * old_dentry, struct inode * dir,
182 struct dentry *dentry)
183{
184 struct inode *inode = old_dentry->d_inode;
185 int error;
186
187 lock_kernel();
188 if (inode->i_nlink >= UFS_LINK_MAX) {
189 unlock_kernel();
190 return -EMLINK;
191 }
192
193 inode->i_ctime = CURRENT_TIME_SEC;
194 ufs_inc_count(inode);
195 atomic_inc(&inode->i_count);
196
197 error = ufs_add_nondir(dentry, inode);
198 unlock_kernel();
199 return error;
200}
201
202static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
203{
204 struct inode * inode;
205 int err = -EMLINK;
206
207 if (dir->i_nlink >= UFS_LINK_MAX)
208 goto out;
209
210 lock_kernel();
211 ufs_inc_count(dir);
212
213 inode = ufs_new_inode(dir, S_IFDIR|mode);
214 err = PTR_ERR(inode);
215 if (IS_ERR(inode))
216 goto out_dir;
217
218 inode->i_op = &ufs_dir_inode_operations;
219 inode->i_fop = &ufs_dir_operations;
220
221 ufs_inc_count(inode);
222
223 err = ufs_make_empty(inode, dir);
224 if (err)
225 goto out_fail;
226
227 err = ufs_add_link(dentry, inode);
228 if (err)
229 goto out_fail;
230 unlock_kernel();
231
232 d_instantiate(dentry, inode);
233out:
234 return err;
235
236out_fail:
237 ufs_dec_count(inode);
238 ufs_dec_count(inode);
239 iput (inode);
240out_dir:
241 ufs_dec_count(dir);
242 unlock_kernel();
243 goto out;
244}
245
246static int ufs_unlink(struct inode * dir, struct dentry *dentry)
247{
248 struct inode * inode = dentry->d_inode;
249 struct buffer_head * bh;
250 struct ufs_dir_entry * de;
251 int err = -ENOENT;
252
253 lock_kernel();
254 de = ufs_find_entry (dentry, &bh);
255 if (!de)
256 goto out;
257
258 err = ufs_delete_entry (dir, de, bh);
259 if (err)
260 goto out;
261
262 inode->i_ctime = dir->i_ctime;
263 ufs_dec_count(inode);
264 err = 0;
265out:
266 unlock_kernel();
267 return err;
268}
269
270static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
271{
272 struct inode * inode = dentry->d_inode;
273 int err= -ENOTEMPTY;
274
275 lock_kernel();
276 if (ufs_empty_dir (inode)) {
277 err = ufs_unlink(dir, dentry);
278 if (!err) {
279 inode->i_size = 0;
280 ufs_dec_count(inode);
281 ufs_dec_count(dir);
282 }
283 }
284 unlock_kernel();
285 return err;
286}
287
288static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
289 struct inode * new_dir, struct dentry * new_dentry )
290{
291 struct inode *old_inode = old_dentry->d_inode;
292 struct inode *new_inode = new_dentry->d_inode;
293 struct buffer_head *dir_bh = NULL;
294 struct ufs_dir_entry *dir_de = NULL;
295 struct buffer_head *old_bh;
296 struct ufs_dir_entry *old_de;
297 int err = -ENOENT;
298
299 lock_kernel();
300 old_de = ufs_find_entry (old_dentry, &old_bh);
301 if (!old_de)
302 goto out;
303
304 if (S_ISDIR(old_inode->i_mode)) {
305 err = -EIO;
306 dir_de = ufs_dotdot(old_inode, &dir_bh);
307 if (!dir_de)
308 goto out_old;
309 }
310
311 if (new_inode) {
312 struct buffer_head *new_bh;
313 struct ufs_dir_entry *new_de;
314
315 err = -ENOTEMPTY;
316 if (dir_de && !ufs_empty_dir (new_inode))
317 goto out_dir;
318 err = -ENOENT;
319 new_de = ufs_find_entry (new_dentry, &new_bh);
320 if (!new_de)
321 goto out_dir;
322 ufs_inc_count(old_inode);
323 ufs_set_link(new_dir, new_de, new_bh, old_inode);
324 new_inode->i_ctime = CURRENT_TIME_SEC;
325 if (dir_de)
326 new_inode->i_nlink--;
327 ufs_dec_count(new_inode);
328 } else {
329 if (dir_de) {
330 err = -EMLINK;
331 if (new_dir->i_nlink >= UFS_LINK_MAX)
332 goto out_dir;
333 }
334 ufs_inc_count(old_inode);
335 err = ufs_add_link(new_dentry, old_inode);
336 if (err) {
337 ufs_dec_count(old_inode);
338 goto out_dir;
339 }
340 if (dir_de)
341 ufs_inc_count(new_dir);
342 }
343
344 ufs_delete_entry (old_dir, old_de, old_bh);
345
346 ufs_dec_count(old_inode);
347
348 if (dir_de) {
349 ufs_set_link(old_inode, dir_de, dir_bh, new_dir);
350 ufs_dec_count(old_dir);
351 }
352 unlock_kernel();
353 return 0;
354
355out_dir:
356 if (dir_de)
357 brelse(dir_bh);
358out_old:
359 brelse (old_bh);
360out:
361 unlock_kernel();
362 return err;
363}
364
365struct inode_operations ufs_dir_inode_operations = {
366 .create = ufs_create,
367 .lookup = ufs_lookup,
368 .link = ufs_link,
369 .unlink = ufs_unlink,
370 .symlink = ufs_symlink,
371 .mkdir = ufs_mkdir,
372 .rmdir = ufs_rmdir,
373 .mknod = ufs_mknod,
374 .rename = ufs_rename,
375};
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
new file mode 100644
index 000000000000..f036d694ba5a
--- /dev/null
+++ b/fs/ufs/super.c
@@ -0,0 +1,1347 @@
1/*
2 * linux/fs/ufs/super.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 */
8
9/* Derived from
10 *
11 * linux/fs/ext2/super.c
12 *
13 * Copyright (C) 1992, 1993, 1994, 1995
14 * Remy Card (card@masi.ibp.fr)
15 * Laboratoire MASI - Institut Blaise Pascal
16 * Universite Pierre et Marie Curie (Paris VI)
17 *
18 * from
19 *
20 * linux/fs/minix/inode.c
21 *
22 * Copyright (C) 1991, 1992 Linus Torvalds
23 *
24 * Big-endian to little-endian byte-swapping/bitmaps by
25 * David S. Miller (davem@caip.rutgers.edu), 1995
26 */
27
28/*
29 * Inspired by
30 *
31 * linux/fs/ufs/super.c
32 *
33 * Copyright (C) 1996
34 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
35 * Laboratory for Computer Science Research Computing Facility
36 * Rutgers, The State University of New Jersey
37 *
38 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
39 *
40 * Kernel module support added on 96/04/26 by
41 * Stefan Reinauer <stepan@home.culture.mipt.ru>
42 *
43 * Module usage counts added on 96/04/29 by
44 * Gertjan van Wingerde <gertjan@cs.vu.nl>
45 *
46 * Clean swab support on 19970406 by
47 * Francois-Rene Rideau <fare@tunes.org>
48 *
49 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
50 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
51 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
52 *
53 * NeXTstep support added on February 5th 1998 by
54 * Niels Kristian Bech Jensen <nkbj@image.dk>.
55 *
56 * write support Daniel Pirkl <daniel.pirkl@email.cz> 1998
57 *
58 * HP/UX hfs filesystem support added by
59 * Martin K. Petersen <mkp@mkp.net>, August 1999
60 *
61 * UFS2 (of FreeBSD 5.x) support added by
62 * Niraj Kumar <niraj17@iitbombay.org>, Jan 2004
63 *
64 */
65
66
67#include <linux/config.h>
68#include <linux/module.h>
69#include <linux/bitops.h>
70
71#include <stdarg.h>
72
73#include <asm/uaccess.h>
74#include <asm/system.h>
75
76#include <linux/errno.h>
77#include <linux/fs.h>
78#include <linux/ufs_fs.h>
79#include <linux/slab.h>
80#include <linux/time.h>
81#include <linux/stat.h>
82#include <linux/string.h>
83#include <linux/blkdev.h>
84#include <linux/init.h>
85#include <linux/parser.h>
86#include <linux/smp_lock.h>
87#include <linux/buffer_head.h>
88#include <linux/vfs.h>
89
90#include "swab.h"
91#include "util.h"
92
93#undef UFS_SUPER_DEBUG
94#undef UFS_SUPER_DEBUG_MORE
95
96
97#undef UFS_SUPER_DEBUG_MORE
98#ifdef UFS_SUPER_DEBUG
99#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
100#else
101#define UFSD(x)
102#endif
103
104#ifdef UFS_SUPER_DEBUG_MORE
105/*
106 * Print contents of ufs_super_block, useful for debugging
107 */
108void ufs_print_super_stuff(struct super_block *sb,
109 struct ufs_super_block_first * usb1,
110 struct ufs_super_block_second * usb2,
111 struct ufs_super_block_third * usb3)
112{
113 printk("ufs_print_super_stuff\n");
114 printk("size of usb: %u\n", sizeof(struct ufs_super_block));
115 printk(" magic: 0x%x\n", fs32_to_cpu(sb, usb3->fs_magic));
116 printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno));
117 printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno));
118 printk(" iblkno: %u\n", fs32_to_cpu(sb, usb1->fs_iblkno));
119 printk(" dblkno: %u\n", fs32_to_cpu(sb, usb1->fs_dblkno));
120 printk(" cgoffset: %u\n", fs32_to_cpu(sb, usb1->fs_cgoffset));
121 printk(" ~cgmask: 0x%x\n", ~fs32_to_cpu(sb, usb1->fs_cgmask));
122 printk(" size: %u\n", fs32_to_cpu(sb, usb1->fs_size));
123 printk(" dsize: %u\n", fs32_to_cpu(sb, usb1->fs_dsize));
124 printk(" ncg: %u\n", fs32_to_cpu(sb, usb1->fs_ncg));
125 printk(" bsize: %u\n", fs32_to_cpu(sb, usb1->fs_bsize));
126 printk(" fsize: %u\n", fs32_to_cpu(sb, usb1->fs_fsize));
127 printk(" frag: %u\n", fs32_to_cpu(sb, usb1->fs_frag));
128 printk(" fragshift: %u\n", fs32_to_cpu(sb, usb1->fs_fragshift));
129 printk(" ~fmask: %u\n", ~fs32_to_cpu(sb, usb1->fs_fmask));
130 printk(" fshift: %u\n", fs32_to_cpu(sb, usb1->fs_fshift));
131 printk(" sbsize: %u\n", fs32_to_cpu(sb, usb1->fs_sbsize));
132 printk(" spc: %u\n", fs32_to_cpu(sb, usb1->fs_spc));
133 printk(" cpg: %u\n", fs32_to_cpu(sb, usb1->fs_cpg));
134 printk(" ipg: %u\n", fs32_to_cpu(sb, usb1->fs_ipg));
135 printk(" fpg: %u\n", fs32_to_cpu(sb, usb1->fs_fpg));
136 printk(" csaddr: %u\n", fs32_to_cpu(sb, usb1->fs_csaddr));
137 printk(" cssize: %u\n", fs32_to_cpu(sb, usb1->fs_cssize));
138 printk(" cgsize: %u\n", fs32_to_cpu(sb, usb1->fs_cgsize));
139 printk(" fstodb: %u\n", fs32_to_cpu(sb, usb1->fs_fsbtodb));
140 printk(" contigsumsize: %d\n", fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize));
141 printk(" postblformat: %u\n", fs32_to_cpu(sb, usb3->fs_postblformat));
142 printk(" nrpos: %u\n", fs32_to_cpu(sb, usb3->fs_nrpos));
143 printk(" ndir %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir));
144 printk(" nifree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree));
145 printk(" nbfree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree));
146 printk(" nffree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree));
147 printk("\n");
148}
149
150/*
151 * Print contents of ufs2 ufs_super_block, useful for debugging
152 */
153void ufs2_print_super_stuff(
154 struct super_block *sb,
155 struct ufs_super_block *usb)
156{
157 printk("ufs_print_super_stuff\n");
158 printk("size of usb: %u\n", sizeof(struct ufs_super_block));
159 printk(" magic: 0x%x\n", fs32_to_cpu(sb, usb->fs_magic));
160 printk(" fs_size: %u\n",fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size));
161 printk(" fs_dsize: %u\n",fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize));
162	printk(" bsize: %u\n", fs32_to_cpu(sb, usb->fs_bsize));
163	printk(" fsize: %u\n", fs32_to_cpu(sb, usb->fs_fsize));
164 printk(" fs_volname: %s\n", usb->fs_u11.fs_u2.fs_volname);
165 printk(" fs_fsmnt: %s\n", usb->fs_u11.fs_u2.fs_fsmnt);
166 printk(" fs_sblockloc: %u\n",fs64_to_cpu(sb,
167 usb->fs_u11.fs_u2.fs_sblockloc));
168 printk(" cs_ndir(No of dirs): %u\n",fs64_to_cpu(sb,
169 usb->fs_u11.fs_u2.fs_cstotal.cs_ndir));
170 printk(" cs_nbfree(No of free blocks): %u\n",fs64_to_cpu(sb,
171 usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree));
172 printk("\n");
173}
174
175/*
176 * Print contents of ufs_cylinder_group, useful for debugging
177 */
178void ufs_print_cylinder_stuff(struct super_block *sb, struct ufs_cylinder_group *cg)
179{
180 printk("\nufs_print_cylinder_stuff\n");
181 printk("size of ucg: %u\n", sizeof(struct ufs_cylinder_group));
182 printk(" magic: %x\n", fs32_to_cpu(sb, cg->cg_magic));
183 printk(" time: %u\n", fs32_to_cpu(sb, cg->cg_time));
184 printk(" cgx: %u\n", fs32_to_cpu(sb, cg->cg_cgx));
185 printk(" ncyl: %u\n", fs16_to_cpu(sb, cg->cg_ncyl));
186 printk(" niblk: %u\n", fs16_to_cpu(sb, cg->cg_niblk));
187 printk(" ndblk: %u\n", fs32_to_cpu(sb, cg->cg_ndblk));
188 printk(" cs_ndir: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_ndir));
189 printk(" cs_nbfree: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nbfree));
190 printk(" cs_nifree: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nifree));
191 printk(" cs_nffree: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nffree));
192 printk(" rotor: %u\n", fs32_to_cpu(sb, cg->cg_rotor));
193 printk(" frotor: %u\n", fs32_to_cpu(sb, cg->cg_frotor));
194 printk(" irotor: %u\n", fs32_to_cpu(sb, cg->cg_irotor));
195 printk(" frsum: %u, %u, %u, %u, %u, %u, %u, %u\n",
196 fs32_to_cpu(sb, cg->cg_frsum[0]), fs32_to_cpu(sb, cg->cg_frsum[1]),
197 fs32_to_cpu(sb, cg->cg_frsum[2]), fs32_to_cpu(sb, cg->cg_frsum[3]),
198 fs32_to_cpu(sb, cg->cg_frsum[4]), fs32_to_cpu(sb, cg->cg_frsum[5]),
199 fs32_to_cpu(sb, cg->cg_frsum[6]), fs32_to_cpu(sb, cg->cg_frsum[7]));
200 printk(" btotoff: %u\n", fs32_to_cpu(sb, cg->cg_btotoff));
201 printk(" boff: %u\n", fs32_to_cpu(sb, cg->cg_boff));
202 printk(" iuseoff: %u\n", fs32_to_cpu(sb, cg->cg_iusedoff));
203 printk(" freeoff: %u\n", fs32_to_cpu(sb, cg->cg_freeoff));
204 printk(" nextfreeoff: %u\n", fs32_to_cpu(sb, cg->cg_nextfreeoff));
205 printk(" clustersumoff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clustersumoff));
206 printk(" clusteroff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clusteroff));
207 printk(" nclusterblks %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_nclusterblks));
208 printk("\n");
209}
210#endif /* UFS_SUPER_DEBUG_MORE */
211
212static struct super_operations ufs_super_ops;
213
214static char error_buf[1024];
215
216void ufs_error (struct super_block * sb, const char * function,
217 const char * fmt, ...)
218{
219 struct ufs_sb_private_info * uspi;
220 struct ufs_super_block_first * usb1;
221 va_list args;
222
223 uspi = UFS_SB(sb)->s_uspi;
224 usb1 = ubh_get_usb_first(USPI_UBH);
225
226 if (!(sb->s_flags & MS_RDONLY)) {
227 usb1->fs_clean = UFS_FSBAD;
228 ubh_mark_buffer_dirty(USPI_UBH);
229 sb->s_dirt = 1;
230 sb->s_flags |= MS_RDONLY;
231 }
232 va_start (args, fmt);
233 vsprintf (error_buf, fmt, args);
234 va_end (args);
235 switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
236 case UFS_MOUNT_ONERROR_PANIC:
237 panic ("UFS-fs panic (device %s): %s: %s\n",
238 sb->s_id, function, error_buf);
239
240 case UFS_MOUNT_ONERROR_LOCK:
241 case UFS_MOUNT_ONERROR_UMOUNT:
242 case UFS_MOUNT_ONERROR_REPAIR:
243 printk (KERN_CRIT "UFS-fs error (device %s): %s: %s\n",
244 sb->s_id, function, error_buf);
245 }
246}
247
248void ufs_panic (struct super_block * sb, const char * function,
249 const char * fmt, ...)
250{
251 struct ufs_sb_private_info * uspi;
252 struct ufs_super_block_first * usb1;
253 va_list args;
254
255 uspi = UFS_SB(sb)->s_uspi;
256 usb1 = ubh_get_usb_first(USPI_UBH);
257
258 if (!(sb->s_flags & MS_RDONLY)) {
259 usb1->fs_clean = UFS_FSBAD;
260 ubh_mark_buffer_dirty(USPI_UBH);
261 sb->s_dirt = 1;
262 }
263 va_start (args, fmt);
264 vsprintf (error_buf, fmt, args);
265 va_end (args);
266 sb->s_flags |= MS_RDONLY;
267 printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
268 sb->s_id, function, error_buf);
269}
270
271void ufs_warning (struct super_block * sb, const char * function,
272 const char * fmt, ...)
273{
274 va_list args;
275
276 va_start (args, fmt);
277 vsprintf (error_buf, fmt, args);
278 va_end (args);
279 printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
280 sb->s_id, function, error_buf);
281}
282
283enum {
284 Opt_type_old, Opt_type_sunx86, Opt_type_sun, Opt_type_44bsd,
285 Opt_type_ufs2, Opt_type_hp, Opt_type_nextstepcd, Opt_type_nextstep,
286 Opt_type_openstep, Opt_onerror_panic, Opt_onerror_lock,
287 Opt_onerror_umount, Opt_onerror_repair, Opt_err
288};
289
290static match_table_t tokens = {
291 {Opt_type_old, "ufstype=old"},
292 {Opt_type_sunx86, "ufstype=sunx86"},
293 {Opt_type_sun, "ufstype=sun"},
294 {Opt_type_44bsd, "ufstype=44bsd"},
295 {Opt_type_ufs2, "ufstype=ufs2"},
296 {Opt_type_ufs2, "ufstype=5xbsd"},
297 {Opt_type_hp, "ufstype=hp"},
298 {Opt_type_nextstepcd, "ufstype=nextstep-cd"},
299 {Opt_type_nextstep, "ufstype=nextstep"},
300 {Opt_type_openstep, "ufstype=openstep"},
301 {Opt_onerror_panic, "onerror=panic"},
302 {Opt_onerror_lock, "onerror=lock"},
303 {Opt_onerror_umount, "onerror=umount"},
304 {Opt_onerror_repair, "onerror=repair"},
305 {Opt_err, NULL}
306};
307
308static int ufs_parse_options (char * options, unsigned * mount_options)
309{
310 char * p;
311
312 UFSD(("ENTER\n"))
313
314 if (!options)
315 return 1;
316
317 while ((p = strsep(&options, ",")) != NULL) {
318 substring_t args[MAX_OPT_ARGS];
319 int token;
320 if (!*p)
321 continue;
322
323 token = match_token(p, tokens, args);
324 switch (token) {
325 case Opt_type_old:
326 ufs_clear_opt (*mount_options, UFSTYPE);
327 ufs_set_opt (*mount_options, UFSTYPE_OLD);
328 break;
329 case Opt_type_sunx86:
330 ufs_clear_opt (*mount_options, UFSTYPE);
331 ufs_set_opt (*mount_options, UFSTYPE_SUNx86);
332 break;
333 case Opt_type_sun:
334 ufs_clear_opt (*mount_options, UFSTYPE);
335 ufs_set_opt (*mount_options, UFSTYPE_SUN);
336 break;
337 case Opt_type_44bsd:
338 ufs_clear_opt (*mount_options, UFSTYPE);
339 ufs_set_opt (*mount_options, UFSTYPE_44BSD);
340 break;
341 case Opt_type_ufs2:
342 ufs_clear_opt(*mount_options, UFSTYPE);
343 ufs_set_opt(*mount_options, UFSTYPE_UFS2);
344 break;
345 case Opt_type_hp:
346 ufs_clear_opt (*mount_options, UFSTYPE);
347 ufs_set_opt (*mount_options, UFSTYPE_HP);
348 break;
349 case Opt_type_nextstepcd:
350 ufs_clear_opt (*mount_options, UFSTYPE);
351 ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP_CD);
352 break;
353 case Opt_type_nextstep:
354 ufs_clear_opt (*mount_options, UFSTYPE);
355 ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP);
356 break;
357 case Opt_type_openstep:
358 ufs_clear_opt (*mount_options, UFSTYPE);
359 ufs_set_opt (*mount_options, UFSTYPE_OPENSTEP);
360 break;
361 case Opt_onerror_panic:
362 ufs_clear_opt (*mount_options, ONERROR);
363 ufs_set_opt (*mount_options, ONERROR_PANIC);
364 break;
365 case Opt_onerror_lock:
366 ufs_clear_opt (*mount_options, ONERROR);
367 ufs_set_opt (*mount_options, ONERROR_LOCK);
368 break;
369 case Opt_onerror_umount:
370 ufs_clear_opt (*mount_options, ONERROR);
371 ufs_set_opt (*mount_options, ONERROR_UMOUNT);
372 break;
373 case Opt_onerror_repair:
374 printk("UFS-fs: Unable to do repair on error, "
375			"will lock instead\n");
376 ufs_clear_opt (*mount_options, ONERROR);
377 ufs_set_opt (*mount_options, ONERROR_REPAIR);
378 break;
379 default:
380 printk("UFS-fs: Invalid option: \"%s\" "
381 "or missing value\n", p);
382 return 0;
383 }
384 }
385 return 1;
386}
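ufs_parse_options() walks the comma-separated option string with strsep() and maps each token onto a flag through the match_table above. The user-space sketch below reproduces the strsep() loop but matches tokens with plain strcmp(), since match_token() is kernel-only; the flag values are made up for illustration and only a few of the option names are handled. Note that strsep() (a glibc/BSD extension) modifies its argument, so the string must be writable.

#include <stdio.h>
#include <string.h>

/* Hypothetical flag bits standing in for the UFS_MOUNT_* values. */
#define OPT_TYPE_OLD	  0x01
#define OPT_TYPE_SUN	  0x02
#define OPT_ONERROR_PANIC 0x10

static int parse_options(char *options, unsigned *mount_options)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		if (strcmp(p, "ufstype=old") == 0)
			*mount_options |= OPT_TYPE_OLD;
		else if (strcmp(p, "ufstype=sun") == 0)
			*mount_options |= OPT_TYPE_SUN;
		else if (strcmp(p, "onerror=panic") == 0)
			*mount_options |= OPT_ONERROR_PANIC;
		else {
			printf("invalid option: \"%s\"\n", p);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	char opts[] = "ufstype=sun,onerror=panic";
	unsigned flags = 0;

	if (parse_options(opts, &flags))
		printf("mount flags: 0x%x\n", flags);
	return 0;
}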
387
388/*
389 * Read on-disk structures associated with cylinder groups
390 */
391static int ufs_read_cylinder_structures (struct super_block *sb) {
392 struct ufs_sb_info * sbi = UFS_SB(sb);
393 struct ufs_sb_private_info * uspi;
394 struct ufs_super_block *usb;
395 struct ufs_buffer_head * ubh;
396 unsigned char * base, * space;
397 unsigned size, blks, i;
398 unsigned flags = 0;
399
400 UFSD(("ENTER\n"))
401
402 uspi = sbi->s_uspi;
403
404 usb = (struct ufs_super_block *)
405 ((struct ufs_buffer_head *)uspi)->bh[0]->b_data;
406
407 flags = UFS_SB(sb)->s_flags;
408
409 /*
410 * Read cs structures from (usually) first data block
411 * on the device.
412 */
413 size = uspi->s_cssize;
414 blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
415 base = space = kmalloc(size, GFP_KERNEL);
416 if (!base)
417 goto failed;
418 for (i = 0; i < blks; i += uspi->s_fpb) {
419 size = uspi->s_bsize;
420 if (i + uspi->s_fpb > blks)
421 size = (blks - i) * uspi->s_fsize;
422
423 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
424 ubh = ubh_bread(sb,
425 fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_csaddr) + i, size);
426 if (!ubh)
427 goto failed;
428 ubh_ubhcpymem (space, ubh, size);
429 sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
430 }
431 else {
432 ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
433 if (!ubh)
434 goto failed;
435 ubh_ubhcpymem(space, ubh, size);
436 sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
437 }
438 space += size;
439 ubh_brelse (ubh);
440 ubh = NULL;
441 }
442
443 /*
444 * Read cylinder group (we read only first fragment from block
445 * at this time) and prepare internal data structures for cg caching.
446 */
447 if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL)))
448 goto failed;
449 for (i = 0; i < uspi->s_ncg; i++)
450 sbi->s_ucg[i] = NULL;
451 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
452 sbi->s_ucpi[i] = NULL;
453 sbi->s_cgno[i] = UFS_CGNO_EMPTY;
454 }
455 for (i = 0; i < uspi->s_ncg; i++) {
456 UFSD(("read cg %u\n", i))
457 if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
458 goto failed;
459 if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
460 goto failed;
461#ifdef UFS_SUPER_DEBUG_MORE
462 ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
463#endif
464 }
465 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
466 if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL)))
467 goto failed;
468 sbi->s_cgno[i] = UFS_CGNO_EMPTY;
469 }
470 sbi->s_cg_loaded = 0;
471 UFSD(("EXIT\n"))
472 return 1;
473
474failed:
475 if (base) kfree (base);
476 if (sbi->s_ucg) {
477 for (i = 0; i < uspi->s_ncg; i++)
478 if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]);
479 kfree (sbi->s_ucg);
480 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
481 if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]);
482 }
483 UFSD(("EXIT (FAILED)\n"))
484 return 0;
485}
486
487/*
488 * Put on-disk structures associated with cylinder groups and
489 * write them back to disk
490 */
491static void ufs_put_cylinder_structures (struct super_block *sb) {
492 struct ufs_sb_info * sbi = UFS_SB(sb);
493 struct ufs_sb_private_info * uspi;
494 struct ufs_buffer_head * ubh;
495 unsigned char * base, * space;
496 unsigned blks, size, i;
497
498 UFSD(("ENTER\n"))
499
500 uspi = sbi->s_uspi;
501
502 size = uspi->s_cssize;
503 blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
504 base = space = (char*) sbi->s_csp[0];
505 for (i = 0; i < blks; i += uspi->s_fpb) {
506 size = uspi->s_bsize;
507 if (i + uspi->s_fpb > blks)
508 size = (blks - i) * uspi->s_fsize;
509 ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
510 ubh_memcpyubh (ubh, space, size);
511 space += size;
512 ubh_mark_buffer_uptodate (ubh, 1);
513 ubh_mark_buffer_dirty (ubh);
514 ubh_brelse (ubh);
515 }
516 for (i = 0; i < sbi->s_cg_loaded; i++) {
517 ufs_put_cylinder (sb, i);
518 kfree (sbi->s_ucpi[i]);
519 }
520 for (; i < UFS_MAX_GROUP_LOADED; i++)
521 kfree (sbi->s_ucpi[i]);
522 for (i = 0; i < uspi->s_ncg; i++)
523 brelse (sbi->s_ucg[i]);
524 kfree (sbi->s_ucg);
525 kfree (base);
526 UFSD(("EXIT\n"))
527}
528
529static int ufs_fill_super(struct super_block *sb, void *data, int silent)
530{
531 struct ufs_sb_info * sbi;
532 struct ufs_sb_private_info * uspi;
533 struct ufs_super_block_first * usb1;
534 struct ufs_super_block_second * usb2;
535 struct ufs_super_block_third * usb3;
536 struct ufs_super_block *usb;
537 struct ufs_buffer_head * ubh;
538 struct inode *inode;
539 unsigned block_size, super_block_size;
540 unsigned flags;
541
542 uspi = NULL;
543 ubh = NULL;
544 flags = 0;
545
546 UFSD(("ENTER\n"))
547
548 sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
549 if (!sbi)
550 goto failed_nomem;
551 sb->s_fs_info = sbi;
552 memset(sbi, 0, sizeof(struct ufs_sb_info));
553
554 UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY)))
555
556#ifndef CONFIG_UFS_FS_WRITE
557 if (!(sb->s_flags & MS_RDONLY)) {
558 printk("ufs was compiled with read-only support, "
559 "can't be mounted as read-write\n");
560 goto failed;
561 }
562#endif
563 /*
564 * Set default mount options
565 * Parse mount options
566 */
567 sbi->s_mount_opt = 0;
568 ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK);
569 if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) {
570 printk("wrong mount options\n");
571 goto failed;
572 }
573 if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) {
574 if (!silent)
575 printk("You didn't specify the type of your ufs filesystem\n\n"
576 "mount -t ufs -o ufstype="
577 "sun|sunx86|44bsd|ufs2|5xbsd|old|hp|nextstep|nextstep-cd|openstep ...\n\n"
578 ">>>WARNING<<< Wrong ufstype may corrupt your filesystem, "
579 "default is ufstype=old\n");
580 ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD);
581 }
582
583 sbi->s_uspi = uspi =
584 kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL);
585 if (!uspi)
586 goto failed;
587
588 /* Keep the 2Gig file limit. Some UFS variants need to override
589 this, but as I don't know which, I'll let those in the know loosen
590 the rules */
591
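	/*
	 * The fragment and superblock sizes set below are only initial
	 * guesses used to locate the superblock; they are corrected from
	 * the on-disk values once it has been read.
	 */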
592 switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
593 case UFS_MOUNT_UFSTYPE_44BSD:
594 UFSD(("ufstype=44bsd\n"))
595 uspi->s_fsize = block_size = 512;
596 uspi->s_fmask = ~(512 - 1);
597 uspi->s_fshift = 9;
598 uspi->s_sbsize = super_block_size = 1536;
599 uspi->s_sbbase = 0;
600 flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
601 break;
602 case UFS_MOUNT_UFSTYPE_UFS2:
603 UFSD(("ufstype=ufs2\n"))
604 uspi->s_fsize = block_size = 512;
605 uspi->s_fmask = ~(512 - 1);
606 uspi->s_fshift = 9;
607 uspi->s_sbsize = super_block_size = 1536;
608 uspi->s_sbbase = 0;
609 flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
610 if (!(sb->s_flags & MS_RDONLY)) {
611 printk(KERN_INFO "ufstype=ufs2 is supported read-only\n");
612 sb->s_flags |= MS_RDONLY;
613 }
614 break;
615
616 case UFS_MOUNT_UFSTYPE_SUN:
617 UFSD(("ufstype=sun\n"))
618 uspi->s_fsize = block_size = 1024;
619 uspi->s_fmask = ~(1024 - 1);
620 uspi->s_fshift = 10;
621 uspi->s_sbsize = super_block_size = 2048;
622 uspi->s_sbbase = 0;
623 uspi->s_maxsymlinklen = 56;
624 flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUN | UFS_CG_SUN;
625 break;
626
627 case UFS_MOUNT_UFSTYPE_SUNx86:
628 UFSD(("ufstype=sunx86\n"))
629 uspi->s_fsize = block_size = 1024;
630 uspi->s_fmask = ~(1024 - 1);
631 uspi->s_fshift = 10;
632 uspi->s_sbsize = super_block_size = 2048;
633 uspi->s_sbbase = 0;
634 uspi->s_maxsymlinklen = 56;
635 flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUNx86 | UFS_CG_SUN;
636 break;
637
638 case UFS_MOUNT_UFSTYPE_OLD:
639 UFSD(("ufstype=old\n"))
640 uspi->s_fsize = block_size = 1024;
641 uspi->s_fmask = ~(1024 - 1);
642 uspi->s_fshift = 10;
643 uspi->s_sbsize = super_block_size = 2048;
644 uspi->s_sbbase = 0;
645 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
646 if (!(sb->s_flags & MS_RDONLY)) {
647 if (!silent)
648 printk(KERN_INFO "ufstype=old is supported read-only\n");
649 sb->s_flags |= MS_RDONLY;
650 }
651 break;
652
653 case UFS_MOUNT_UFSTYPE_NEXTSTEP:
654 UFSD(("ufstype=nextstep\n"))
655 uspi->s_fsize = block_size = 1024;
656 uspi->s_fmask = ~(1024 - 1);
657 uspi->s_fshift = 10;
658 uspi->s_sbsize = super_block_size = 2048;
659 uspi->s_sbbase = 0;
660 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
661 if (!(sb->s_flags & MS_RDONLY)) {
662 if (!silent)
663 printk(KERN_INFO "ufstype=nextstep is supported read-only\n");
664 sb->s_flags |= MS_RDONLY;
665 }
666 break;
667
668 case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
669 UFSD(("ufstype=nextstep-cd\n"))
670 uspi->s_fsize = block_size = 2048;
671 uspi->s_fmask = ~(2048 - 1);
672 uspi->s_fshift = 11;
673 uspi->s_sbsize = super_block_size = 2048;
674 uspi->s_sbbase = 0;
675 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
676 if (!(sb->s_flags & MS_RDONLY)) {
677 if (!silent)
678 printk(KERN_INFO "ufstype=nextstep-cd is supported read-only\n");
679 sb->s_flags |= MS_RDONLY;
680 }
681 break;
682
683 case UFS_MOUNT_UFSTYPE_OPENSTEP:
684 UFSD(("ufstype=openstep\n"))
685 uspi->s_fsize = block_size = 1024;
686 uspi->s_fmask = ~(1024 - 1);
687 uspi->s_fshift = 10;
688 uspi->s_sbsize = super_block_size = 2048;
689 uspi->s_sbbase = 0;
690 flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
691 if (!(sb->s_flags & MS_RDONLY)) {
692 if (!silent)
693 printk(KERN_INFO "ufstype=openstep is supported read-only\n");
694 sb->s_flags |= MS_RDONLY;
695 }
696 break;
697
698 case UFS_MOUNT_UFSTYPE_HP:
699 UFSD(("ufstype=hp\n"))
700 uspi->s_fsize = block_size = 1024;
701 uspi->s_fmask = ~(1024 - 1);
702 uspi->s_fshift = 10;
703 uspi->s_sbsize = super_block_size = 2048;
704 uspi->s_sbbase = 0;
705 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
706 if (!(sb->s_flags & MS_RDONLY)) {
707 if (!silent)
708 printk(KERN_INFO "ufstype=hp is supported read-only\n");
709 sb->s_flags |= MS_RDONLY;
710 }
711 break;
712 default:
713 if (!silent)
714 printk("unknown ufstype\n");
715 goto failed;
716 }
717
718again:
719 if (!sb_set_blocksize(sb, block_size)) {
720 printk(KERN_ERR "UFS: failed to set blocksize\n");
721 goto failed;
722 }
723
724 /*
725 * read ufs super block from device
726 */
727 if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
728 ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + SBLOCK_UFS2/block_size, super_block_size);
729 }
730 else {
731 ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
732 }
733 if (!ubh)
734 goto failed;
735
736
737 usb1 = ubh_get_usb_first(USPI_UBH);
738 usb2 = ubh_get_usb_second(USPI_UBH);
739 usb3 = ubh_get_usb_third(USPI_UBH);
740 usb = (struct ufs_super_block *)
741 ((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
742
743 /*
744 * Check ufs magic number
745 */
746 sbi->s_bytesex = BYTESEX_LE;
747 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
748 case UFS_MAGIC:
749 case UFS2_MAGIC:
750 case UFS_MAGIC_LFN:
751 case UFS_MAGIC_FEA:
752 case UFS_MAGIC_4GB:
753 goto magic_found;
754 }
755 sbi->s_bytesex = BYTESEX_BE;
756 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
757 case UFS_MAGIC:
758 case UFS2_MAGIC:
759 case UFS_MAGIC_LFN:
760 case UFS_MAGIC_FEA:
761 case UFS_MAGIC_4GB:
762 goto magic_found;
763 }
764
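	/*
	 * NeXTstep/OpenStep variants may keep the superblock further into
	 * the device, so probe forward in 8-fragment steps (up to base 256)
	 * before giving up on the magic number.
	 */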
765 if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP)
766 || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD)
767 || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP))
768 && uspi->s_sbbase < 256) {
769 ubh_brelse_uspi(uspi);
770 ubh = NULL;
771 uspi->s_sbbase += 8;
772 goto again;
773 }
774 if (!silent)
775 printk("ufs_read_super: bad magic number\n");
776 goto failed;
777
778magic_found:
779 /*
780 * Check block and fragment sizes
781 */
782 uspi->s_bsize = fs32_to_cpu(sb, usb1->fs_bsize);
783 uspi->s_fsize = fs32_to_cpu(sb, usb1->fs_fsize);
784 uspi->s_sbsize = fs32_to_cpu(sb, usb1->fs_sbsize);
785 uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
786 uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
787
788 if (uspi->s_fsize & (uspi->s_fsize - 1)) {
789 printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n",
790 uspi->s_fsize);
791 goto failed;
792 }
793 if (uspi->s_fsize < 512) {
794 printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n",
795 uspi->s_fsize);
796 goto failed;
797 }
798 if (uspi->s_fsize > 4096) {
799 printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n",
800 uspi->s_fsize);
801 goto failed;
802 }
803 if (uspi->s_bsize & (uspi->s_bsize - 1)) {
804 printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n",
805 uspi->s_bsize);
806 goto failed;
807 }
808 if (uspi->s_bsize < 4096) {
809 printk(KERN_ERR "ufs_read_super: block size %u is too small\n",
810 uspi->s_bsize);
811 goto failed;
812 }
813 if (uspi->s_bsize / uspi->s_fsize > 8) {
814 printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n",
815 uspi->s_bsize / uspi->s_fsize);
816 goto failed;
817 }
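	/*
	 * The superblock was read using guessed sizes; if the on-disk
	 * fragment or superblock size differs, go back and re-read it
	 * with the values the filesystem actually records.
	 */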
818 if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) {
819 ubh_brelse_uspi(uspi);
820 ubh = NULL;
821 block_size = uspi->s_fsize;
822 super_block_size = uspi->s_sbsize;
823 UFSD(("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size))
824 goto again;
825 }
826
827#ifdef UFS_SUPER_DEBUG_MORE
828 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
829 ufs2_print_super_stuff(sb,usb);
830 else
831 ufs_print_super_stuff(sb, usb1, usb2, usb3);
832#endif
833
834 /*
835 * Check if the file system was correctly unmounted.
836 * If not, make it read only.
837 */
838 if (((flags & UFS_ST_MASK) == UFS_ST_44BSD) ||
839 ((flags & UFS_ST_MASK) == UFS_ST_OLD) ||
840 (((flags & UFS_ST_MASK) == UFS_ST_SUN ||
841 (flags & UFS_ST_MASK) == UFS_ST_SUNx86) &&
842 (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) {
843 switch(usb1->fs_clean) {
844 case UFS_FSCLEAN:
845 UFSD(("fs is clean\n"))
846 break;
847 case UFS_FSSTABLE:
848 UFSD(("fs is stable\n"))
849 break;
850 case UFS_FSOSF1:
851 UFSD(("fs is DEC OSF/1\n"))
852 break;
853 case UFS_FSACTIVE:
854 printk("ufs_read_super: fs is active\n");
855 sb->s_flags |= MS_RDONLY;
856 break;
857 case UFS_FSBAD:
858 printk("ufs_read_super: fs is bad\n");
859 sb->s_flags |= MS_RDONLY;
860 break;
861 default:
862 printk("ufs_read_super: can't grok fs_clean 0x%x\n", usb1->fs_clean);
863 sb->s_flags |= MS_RDONLY;
864 break;
865 }
866 }
867 else {
868 printk("ufs_read_super: fs needs fsck\n");
869 sb->s_flags |= MS_RDONLY;
870 }
871
872 /*
873 * Read ufs_super_block into internal data structures
874 */
875 sb->s_op = &ufs_super_ops;
876 sb->dq_op = NULL; /***/
877 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
878
879 uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno);
880 uspi->s_cblkno = fs32_to_cpu(sb, usb1->fs_cblkno);
881 uspi->s_iblkno = fs32_to_cpu(sb, usb1->fs_iblkno);
882 uspi->s_dblkno = fs32_to_cpu(sb, usb1->fs_dblkno);
883 uspi->s_cgoffset = fs32_to_cpu(sb, usb1->fs_cgoffset);
884 uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
885
886 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
887 uspi->s_u2_size = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size);
888 uspi->s_u2_dsize = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize);
889 }
890 else {
891 uspi->s_size = fs32_to_cpu(sb, usb1->fs_size);
892 uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize);
893 }
894
895 uspi->s_ncg = fs32_to_cpu(sb, usb1->fs_ncg);
896 /* s_bsize already set */
897 /* s_fsize already set */
898 uspi->s_fpb = fs32_to_cpu(sb, usb1->fs_frag);
899 uspi->s_minfree = fs32_to_cpu(sb, usb1->fs_minfree);
900 uspi->s_bmask = fs32_to_cpu(sb, usb1->fs_bmask);
901 uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
902 uspi->s_bshift = fs32_to_cpu(sb, usb1->fs_bshift);
903 uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
904 UFSD(("uspi->s_bshift = %d,uspi->s_fshift = %d", uspi->s_bshift,
905 uspi->s_fshift));
906 uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift);
907 uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb);
908 /* s_sbsize already set */
909 uspi->s_csmask = fs32_to_cpu(sb, usb1->fs_csmask);
910 uspi->s_csshift = fs32_to_cpu(sb, usb1->fs_csshift);
911 uspi->s_nindir = fs32_to_cpu(sb, usb1->fs_nindir);
912 uspi->s_inopb = fs32_to_cpu(sb, usb1->fs_inopb);
913 uspi->s_nspf = fs32_to_cpu(sb, usb1->fs_nspf);
914 uspi->s_npsect = ufs_get_fs_npsect(sb, usb1, usb3);
915 uspi->s_interleave = fs32_to_cpu(sb, usb1->fs_interleave);
916 uspi->s_trackskew = fs32_to_cpu(sb, usb1->fs_trackskew);
917 uspi->s_csaddr = fs32_to_cpu(sb, usb1->fs_csaddr);
918 uspi->s_cssize = fs32_to_cpu(sb, usb1->fs_cssize);
919 uspi->s_cgsize = fs32_to_cpu(sb, usb1->fs_cgsize);
920 uspi->s_ntrak = fs32_to_cpu(sb, usb1->fs_ntrak);
921 uspi->s_nsect = fs32_to_cpu(sb, usb1->fs_nsect);
922 uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc);
923 uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg);
924 uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg);
925 uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_cpc);
926 uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize);
927 uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3);
928 uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3);
929 uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat);
930 uspi->s_nrpos = fs32_to_cpu(sb, usb3->fs_nrpos);
931 uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
932 uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
933
934 /*
935 * Compute other frequently used values
936 */
937 uspi->s_fpbmask = uspi->s_fpb - 1;
938 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
939 uspi->s_apbshift = uspi->s_bshift - 3;
940 }
941 else {
942 uspi->s_apbshift = uspi->s_bshift - 2;
943 }
944 uspi->s_2apbshift = uspi->s_apbshift * 2;
945 uspi->s_3apbshift = uspi->s_apbshift * 3;
946 uspi->s_apb = 1 << uspi->s_apbshift;
947 uspi->s_2apb = 1 << uspi->s_2apbshift;
948 uspi->s_3apb = 1 << uspi->s_3apbshift;
949 uspi->s_apbmask = uspi->s_apb - 1;
950 uspi->s_nspfshift = uspi->s_fshift - UFS_SECTOR_BITS;
951 uspi->s_nspb = uspi->s_nspf << uspi->s_fpbshift;
952 uspi->s_inopf = uspi->s_inopb >> uspi->s_fpbshift;
953 uspi->s_bpf = uspi->s_fsize << 3;
954 uspi->s_bpfshift = uspi->s_fshift + 3;
955 uspi->s_bpfmask = uspi->s_bpf - 1;
956 if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) ==
957 UFS_MOUNT_UFSTYPE_44BSD)
958 uspi->s_maxsymlinklen =
959 fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_maxsymlinklen);
960
961 sbi->s_flags = flags;
962
963 inode = iget(sb, UFS_ROOTINO);
964 if (!inode || is_bad_inode(inode))
965 goto failed;
966 sb->s_root = d_alloc_root(inode);
967 if (!sb->s_root)
968 goto dalloc_failed;
969
970
971 /*
972 * Read cylinder group structures
973 */
974 if (!(sb->s_flags & MS_RDONLY))
975 if (!ufs_read_cylinder_structures(sb))
976 goto failed;
977
978 UFSD(("EXIT\n"))
979 return 0;
980
981dalloc_failed:
982 iput(inode);
983failed:
984 if (ubh) ubh_brelse_uspi (uspi);
985 if (uspi) kfree (uspi);
986 if (sbi) kfree(sbi);
987 sb->s_fs_info = NULL;
988 UFSD(("EXIT (FAILED)\n"))
989 return -EINVAL;
990
991failed_nomem:
992 UFSD(("EXIT (NOMEM)\n"))
993 return -ENOMEM;
994}
995
996static void ufs_write_super (struct super_block *sb) {
997 struct ufs_sb_private_info * uspi;
998 struct ufs_super_block_first * usb1;
999 struct ufs_super_block_third * usb3;
1000 unsigned flags;
1001
1002 lock_kernel();
1003
1004 UFSD(("ENTER\n"))
1005 flags = UFS_SB(sb)->s_flags;
1006 uspi = UFS_SB(sb)->s_uspi;
1007 usb1 = ubh_get_usb_first(USPI_UBH);
1008 usb3 = ubh_get_usb_third(USPI_UBH);
1009
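	/*
	 * Sun-style superblocks encode the clean state relative to the
	 * timestamp, so both are refreshed together before writing back.
	 */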
1010 if (!(sb->s_flags & MS_RDONLY)) {
1011 usb1->fs_time = cpu_to_fs32(sb, get_seconds());
1012 if ((flags & UFS_ST_MASK) == UFS_ST_SUN
1013 || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
1014 ufs_set_fs_state(sb, usb1, usb3,
1015 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
1016 ubh_mark_buffer_dirty (USPI_UBH);
1017 }
1018 sb->s_dirt = 0;
1019 UFSD(("EXIT\n"))
1020 unlock_kernel();
1021}
1022
1023static void ufs_put_super (struct super_block *sb)
1024{
1025 struct ufs_sb_info * sbi = UFS_SB(sb);
1026
1027 UFSD(("ENTER\n"))
1028
1029 if (!(sb->s_flags & MS_RDONLY))
1030 ufs_put_cylinder_structures (sb);
1031
1032 ubh_brelse_uspi (sbi->s_uspi);
1033 kfree (sbi->s_uspi);
1034 kfree (sbi);
1035 sb->s_fs_info = NULL;
1036 return;
1037}
1038
1039
1040static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1041{
1042 struct ufs_sb_private_info * uspi;
1043 struct ufs_super_block_first * usb1;
1044 struct ufs_super_block_third * usb3;
1045 unsigned new_mount_opt, ufstype;
1046 unsigned flags;
1047
1048 uspi = UFS_SB(sb)->s_uspi;
1049 flags = UFS_SB(sb)->s_flags;
1050 usb1 = ubh_get_usb_first(USPI_UBH);
1051 usb3 = ubh_get_usb_third(USPI_UBH);
1052
1053 /*
1054 * Allow the "check" option to be passed as a remount option.
1055 * It is not possible to change the ufstype option during a remount.
1056 */
1057 ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
1058 new_mount_opt = 0;
1059 ufs_set_opt (new_mount_opt, ONERROR_LOCK);
1060 if (!ufs_parse_options (data, &new_mount_opt))
1061 return -EINVAL;
1062 if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
1063 new_mount_opt |= ufstype;
1064 }
1065 else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
1066 printk("ufstype can't be changed during remount\n");
1067 return -EINVAL;
1068 }
1069
1070 if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
1071 UFS_SB(sb)->s_mount_opt = new_mount_opt;
1072 return 0;
1073 }
1074
1075 /*
1076 * fs was mounted as rw, remounting ro
1077 */
1078 if (*mount_flags & MS_RDONLY) {
1079 ufs_put_cylinder_structures(sb);
1080 usb1->fs_time = cpu_to_fs32(sb, get_seconds());
1081 if ((flags & UFS_ST_MASK) == UFS_ST_SUN
1082 || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
1083 ufs_set_fs_state(sb, usb1, usb3,
1084 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
1085 ubh_mark_buffer_dirty (USPI_UBH);
1086 sb->s_dirt = 0;
1087 sb->s_flags |= MS_RDONLY;
1088 }
1089 /*
1090 * fs was mounted as ro, remounting rw
1091 */
1092 else {
1093#ifndef CONFIG_UFS_FS_WRITE
1094 printk("ufs was compiled with read-only support, "
1095 "can't be mounted as read-write\n");
1096 return -EINVAL;
1097#else
1098 if (ufstype != UFS_MOUNT_UFSTYPE_SUN &&
1099 ufstype != UFS_MOUNT_UFSTYPE_44BSD &&
1100 ufstype != UFS_MOUNT_UFSTYPE_SUNx86) {
1101 printk("this ufstype can only be mounted read-only\n");
1102 return -EINVAL;
1103 }
1104 if (!ufs_read_cylinder_structures (sb)) {
1105 printk("failed during remounting\n");
1106 return -EPERM;
1107 }
1108 sb->s_flags &= ~MS_RDONLY;
1109#endif
1110 }
1111 UFS_SB(sb)->s_mount_opt = new_mount_opt;
1112 return 0;
1113}
1114
1115static int ufs_statfs (struct super_block *sb, struct kstatfs *buf)
1116{
1117 struct ufs_sb_private_info * uspi;
1118 struct ufs_super_block_first * usb1;
1119 struct ufs_super_block * usb;
1120 unsigned flags = 0;
1121
1122 lock_kernel();
1123
1124 uspi = UFS_SB(sb)->s_uspi;
1125 usb1 = ubh_get_usb_first (USPI_UBH);
1126 usb = (struct ufs_super_block *)
1127 ((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
1128
1129 flags = UFS_SB(sb)->s_flags;
1130 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
1131 buf->f_type = UFS2_MAGIC;
1132 buf->f_blocks = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize);
1133 buf->f_bfree = ufs_blkstofrags(fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree)) +
1134 fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nffree);
1135 buf->f_ffree = fs64_to_cpu(sb,
1136 usb->fs_u11.fs_u2.fs_cstotal.cs_nifree);
1137 }
1138 else {
1139 buf->f_type = UFS_MAGIC;
1140 buf->f_blocks = uspi->s_dsize;
1141 buf->f_bfree = ufs_blkstofrags(fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree)) +
1142 fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree);
1143 buf->f_ffree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
1144 }
1145 buf->f_bsize = sb->s_blocksize;
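	/* Hide the minfree reservation from the space reported as available. */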
1146 buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
1147 ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
1148 buf->f_files = uspi->s_ncg * uspi->s_ipg;
1149 buf->f_namelen = UFS_MAXNAMLEN;
1150
1151 unlock_kernel();
1152
1153 return 0;
1154}
1155
1156static kmem_cache_t * ufs_inode_cachep;
1157
1158static struct inode *ufs_alloc_inode(struct super_block *sb)
1159{
1160 struct ufs_inode_info *ei;
1161 ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
1162 if (!ei)
1163 return NULL;
1164 ei->vfs_inode.i_version = 1;
1165 return &ei->vfs_inode;
1166}
1167
1168static void ufs_destroy_inode(struct inode *inode)
1169{
1170 kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
1171}
1172
1173static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
1174{
1175 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
1176
1177 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
1178 SLAB_CTOR_CONSTRUCTOR)
1179 inode_init_once(&ei->vfs_inode);
1180}
1181
1182static int init_inodecache(void)
1183{
1184 ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
1185 sizeof(struct ufs_inode_info),
1186 0, SLAB_RECLAIM_ACCOUNT,
1187 init_once, NULL);
1188 if (ufs_inode_cachep == NULL)
1189 return -ENOMEM;
1190 return 0;
1191}
1192
1193static void destroy_inodecache(void)
1194{
1195 if (kmem_cache_destroy(ufs_inode_cachep))
1196 printk(KERN_INFO "ufs_inode_cache: not all structures were freed\n");
1197}
1198
1199#ifdef CONFIG_QUOTA
1200static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t);
1201static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t);
1202#endif
1203
1204static struct super_operations ufs_super_ops = {
1205 .alloc_inode = ufs_alloc_inode,
1206 .destroy_inode = ufs_destroy_inode,
1207 .read_inode = ufs_read_inode,
1208 .write_inode = ufs_write_inode,
1209 .delete_inode = ufs_delete_inode,
1210 .put_super = ufs_put_super,
1211 .write_super = ufs_write_super,
1212 .statfs = ufs_statfs,
1213 .remount_fs = ufs_remount,
1214#ifdef CONFIG_QUOTA
1215 .quota_read = ufs_quota_read,
1216 .quota_write = ufs_quota_write,
1217#endif
1218};
1219
1220#ifdef CONFIG_QUOTA
1221
1222/* Read data from the quota file - avoid the pagecache and such because we cannot
1223 * afford to acquire the locks... As quota files are never truncated and the quota
1224 * code itself serializes the operations (and no one else should touch the files),
1225 * we don't have to be afraid of races */
1226static ssize_t ufs_quota_read(struct super_block *sb, int type, char *data,
1227 size_t len, loff_t off)
1228{
1229 struct inode *inode = sb_dqopt(sb)->files[type];
1230 sector_t blk = off >> sb->s_blocksize_bits;
1231 int err = 0;
1232 int offset = off & (sb->s_blocksize - 1);
1233 int tocopy;
1234 size_t toread;
1235 struct buffer_head *bh;
1236 loff_t i_size = i_size_read(inode);
1237
1238 if (off > i_size)
1239 return 0;
1240 if (off+len > i_size)
1241 len = i_size-off;
1242 toread = len;
1243 while (toread > 0) {
1244 tocopy = sb->s_blocksize - offset < toread ?
1245 sb->s_blocksize - offset : toread;
1246
1247 bh = ufs_bread(inode, blk, 0, &err);
1248 if (err)
1249 return err;
1250 if (!bh) /* A hole? */
1251 memset(data, 0, tocopy);
1252 else {
1253 memcpy(data, bh->b_data+offset, tocopy);
1254 brelse(bh);
1255 }
1256 offset = 0;
1257 toread -= tocopy;
1258 data += tocopy;
1259 blk++;
1260 }
1261 return len;
1262}
1263
1264/* Write to quotafile */
1265static ssize_t ufs_quota_write(struct super_block *sb, int type,
1266 const char *data, size_t len, loff_t off)
1267{
1268 struct inode *inode = sb_dqopt(sb)->files[type];
1269 sector_t blk = off >> sb->s_blocksize_bits;
1270 int err = 0;
1271 int offset = off & (sb->s_blocksize - 1);
1272 int tocopy;
1273 size_t towrite = len;
1274 struct buffer_head *bh;
1275
1276 down(&inode->i_sem);
1277 while (towrite > 0) {
1278 tocopy = sb->s_blocksize - offset < towrite ?
1279 sb->s_blocksize - offset : towrite;
1280
1281 bh = ufs_bread(inode, blk, 1, &err);
1282 if (!bh)
1283 goto out;
1284 lock_buffer(bh);
1285 memcpy(bh->b_data+offset, data, tocopy);
1286 flush_dcache_page(bh->b_page);
1287 set_buffer_uptodate(bh);
1288 mark_buffer_dirty(bh);
1289 unlock_buffer(bh);
1290 brelse(bh);
1291 offset = 0;
1292 towrite -= tocopy;
1293 data += tocopy;
1294 blk++;
1295 }
1296out:
1297 if (len == towrite)
1298 return err;
1299 if (inode->i_size < off+len-towrite)
1300 i_size_write(inode, off+len-towrite);
1301 inode->i_version++;
1302 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1303 mark_inode_dirty(inode);
1304 up(&inode->i_sem);
1305 return len - towrite;
1306}
1307
1308#endif
1309
1310static struct super_block *ufs_get_sb(struct file_system_type *fs_type,
1311 int flags, const char *dev_name, void *data)
1312{
1313 return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super);
1314}
1315
1316static struct file_system_type ufs_fs_type = {
1317 .owner = THIS_MODULE,
1318 .name = "ufs",
1319 .get_sb = ufs_get_sb,
1320 .kill_sb = kill_block_super,
1321 .fs_flags = FS_REQUIRES_DEV,
1322};
1323
1324static int __init init_ufs_fs(void)
1325{
1326 int err = init_inodecache();
1327 if (err)
1328 goto out1;
1329 err = register_filesystem(&ufs_fs_type);
1330 if (err)
1331 goto out;
1332 return 0;
1333out:
1334 destroy_inodecache();
1335out1:
1336 return err;
1337}
1338
1339static void __exit exit_ufs_fs(void)
1340{
1341 unregister_filesystem(&ufs_fs_type);
1342 destroy_inodecache();
1343}
1344
1345module_init(init_ufs_fs)
1346module_exit(exit_ufs_fs)
1347MODULE_LICENSE("GPL");
diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
new file mode 100644
index 000000000000..1683d2bee614
--- /dev/null
+++ b/fs/ufs/swab.h
@@ -0,0 +1,133 @@
1/*
2 * linux/fs/ufs/swab.h
3 *
4 * Copyright (C) 1997, 1998 Francois-Rene Rideau <fare@tunes.org>
5 * Copyright (C) 1998 Jakub Jelinek <jj@ultra.linux.cz>
6 * Copyright (C) 2001 Christoph Hellwig <hch@infradead.org>
7 */
8
9#ifndef _UFS_SWAB_H
10#define _UFS_SWAB_H
11
12/*
13 * Notes:
14 * HERE WE ASSUME EITHER BIG OR LITTLE ENDIAN UFSes
15 * in case there are ufs implementations that have strange bytesexes,
16 * you'll need to modify code here as well as in ufs_super.c and ufs_fs.h
17 * to support them.
18 */
19
20enum {
21 BYTESEX_LE,
22 BYTESEX_BE
23};
24
25static inline u64
26fs64_to_cpu(struct super_block *sbp, __fs64 n)
27{
28 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
29 return le64_to_cpu((__force __le64)n);
30 else
31 return be64_to_cpu((__force __be64)n);
32}
33
34static inline __fs64
35cpu_to_fs64(struct super_block *sbp, u64 n)
36{
37 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
38 return (__force __fs64)cpu_to_le64(n);
39 else
40 return (__force __fs64)cpu_to_be64(n);
41}
42
43static inline void
44fs64_add(struct super_block *sbp, __fs64 *n, int d)
45{
46 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
47 *(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)+d);
48 else
49 *(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)+d);
50}
51
52static inline void
53fs64_sub(struct super_block *sbp, __fs64 *n, int d)
54{
55 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56 *(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)-d);
57 else
58 *(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)-d);
59}
60
61static inline u32
62fs32_to_cpu(struct super_block *sbp, __fs32 n)
63{
64 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
65 return le32_to_cpu((__force __le32)n);
66 else
67 return be32_to_cpu((__force __be32)n);
68}
69
70static inline __fs32
71cpu_to_fs32(struct super_block *sbp, u32 n)
72{
73 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
74 return (__force __fs32)cpu_to_le32(n);
75 else
76 return (__force __fs32)cpu_to_be32(n);
77}
78
79static inline void
80fs32_add(struct super_block *sbp, __fs32 *n, int d)
81{
82 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
83 *(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
84 else
85 *(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
86}
87
88static inline void
89fs32_sub(struct super_block *sbp, __fs32 *n, int d)
90{
91 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
92 *(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
93 else
94 *(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
95}
96
97static inline u16
98fs16_to_cpu(struct super_block *sbp, __fs16 n)
99{
100 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
101 return le16_to_cpu((__force __le16)n);
102 else
103 return be16_to_cpu((__force __be16)n);
104}
105
106static inline __fs16
107cpu_to_fs16(struct super_block *sbp, u16 n)
108{
109 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
110 return (__force __fs16)cpu_to_le16(n);
111 else
112 return (__force __fs16)cpu_to_be16(n);
113}
114
115static inline void
116fs16_add(struct super_block *sbp, __fs16 *n, int d)
117{
118 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
119 *(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
120 else
121 *(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
122}
123
124static inline void
125fs16_sub(struct super_block *sbp, __fs16 *n, int d)
126{
127 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
128 *(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
129 else
130 *(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
131}
132
133#endif /* _UFS_SWAB_H */
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
new file mode 100644
index 000000000000..a0e49149098f
--- /dev/null
+++ b/fs/ufs/symlink.c
@@ -0,0 +1,42 @@
1/*
2 * linux/fs/ufs/symlink.c
3 *
4 * Only fast symlinks left here - the rest is done by generic code. AV, 1999
5 *
6 * Copyright (C) 1998
7 * Daniel Pirkl <daniel.pirkl@email.cz>
8 * Charles University, Faculty of Mathematics and Physics
9 *
10 * from
11 *
12 * linux/fs/ext2/symlink.c
13 *
14 * Copyright (C) 1992, 1993, 1994, 1995
15 * Remy Card (card@masi.ibp.fr)
16 * Laboratoire MASI - Institut Blaise Pascal
17 * Universite Pierre et Marie Curie (Paris VI)
18 *
19 * from
20 *
21 * linux/fs/minix/symlink.c
22 *
23 * Copyright (C) 1991, 1992 Linus Torvalds
24 *
25 * ext2 symlink handling code
26 */
27
28#include <linux/fs.h>
29#include <linux/namei.h>
30#include <linux/ufs_fs.h>
31
32static int ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
33{
34 struct ufs_inode_info *p = UFS_I(dentry->d_inode);
35 nd_set_link(nd, (char*)p->i_u1.i_symlink);
36 return 0;
37}
38
39struct inode_operations ufs_fast_symlink_inode_operations = {
40 .readlink = generic_readlink,
41 .follow_link = ufs_follow_link,
42};
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
new file mode 100644
index 000000000000..e312bf8bad9f
--- /dev/null
+++ b/fs/ufs/truncate.c
@@ -0,0 +1,477 @@
1/*
2 * linux/fs/ufs/truncate.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * from
9 *
10 * linux/fs/ext2/truncate.c
11 *
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
16 *
17 * from
18 *
19 * linux/fs/minix/truncate.c
20 *
21 * Copyright (C) 1991, 1992 Linus Torvalds
22 *
23 * Big-endian to little-endian byte-swapping/bitmaps by
24 * David S. Miller (davem@caip.rutgers.edu), 1995
25 */
26
27/*
28 * Real random numbers for secure rm added 94/02/18
29 * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
30 */
31
32#include <linux/errno.h>
33#include <linux/fs.h>
34#include <linux/ufs_fs.h>
35#include <linux/fcntl.h>
36#include <linux/time.h>
37#include <linux/stat.h>
38#include <linux/string.h>
39#include <linux/smp_lock.h>
40#include <linux/buffer_head.h>
41#include <linux/blkdev.h>
42#include <linux/sched.h>
43
44#include "swab.h"
45#include "util.h"
46
47#undef UFS_TRUNCATE_DEBUG
48
49#ifdef UFS_TRUNCATE_DEBUG
50#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
51#else
52#define UFSD(x)
53#endif
54
55/*
56 * Secure deletion currently doesn't work. It interacts very badly
57 * with buffers shared with memory mappings, and for that reason
58 * can't be done in the truncate() routines. It should instead be
59 * done separately in "release()" before calling the truncate routines
60 * that will release the actual file blocks.
61 *
62 * Linus
63 */
64
65#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
66#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
67
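/*
 * A data buffer is considered busy if somebody else still holds a
 * reference to it or it is locked for I/O; truncation then retries later.
 */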
68#define DATA_BUFFER_USED(bh) \
69 (atomic_read(&bh->b_count)>1 || buffer_locked(bh))
70
71static int ufs_trunc_direct (struct inode * inode)
72{
73 struct ufs_inode_info *ufsi = UFS_I(inode);
74 struct super_block * sb;
75 struct ufs_sb_private_info * uspi;
76 struct buffer_head * bh;
77 __fs32 * p;
78 unsigned frag1, frag2, frag3, frag4, block1, block2;
79 unsigned frag_to_free, free_count;
80 unsigned i, j, tmp;
81 int retry;
82
83 UFSD(("ENTER\n"))
84
85 sb = inode->i_sb;
86 uspi = UFS_SB(sb)->s_uspi;
87
88 frag_to_free = 0;
89 free_count = 0;
90 retry = 0;
91
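	/*
	 * Split the tail being truncated into a leading run of fragments
	 * inside one block (frag1..frag2), whole blocks (block1..block2)
	 * and a trailing run of fragments (frag3..frag4).
	 */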
92 frag1 = DIRECT_FRAGMENT;
93 frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
94 frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
95 frag3 = frag4 & ~uspi->s_fpbmask;
96 block1 = block2 = 0;
97 if (frag2 > frag3) {
98 frag2 = frag4;
99 frag3 = frag4 = 0;
100 }
101 else if (frag2 < frag3) {
102 block1 = ufs_fragstoblks (frag2);
103 block2 = ufs_fragstoblks (frag3);
104 }
105
106 UFSD(("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4))
107
108 if (frag1 >= frag2)
109 goto next1;
110
111 /*
112 * Free first free fragments
113 */
114 p = ufsi->i_u1.i_data + ufs_fragstoblks (frag1);
115 tmp = fs32_to_cpu(sb, *p);
116 if (!tmp )
117 ufs_panic (sb, "ufs_trunc_direct", "internal error");
118 frag1 = ufs_fragnum (frag1);
119 frag2 = ufs_fragnum (frag2);
120 for (j = frag1; j < frag2; j++) {
121 bh = sb_find_get_block (sb, tmp + j);
122 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
123 retry = 1;
124 brelse (bh);
125 goto next1;
126 }
127 bforget (bh);
128 }
129 inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift;
130 mark_inode_dirty(inode);
131 ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
132 frag_to_free = tmp + frag1;
133
134next1:
135 /*
136 * Free whole blocks
137 */
138 for (i = block1 ; i < block2; i++) {
139 p = ufsi->i_u1.i_data + i;
140 tmp = fs32_to_cpu(sb, *p);
141 if (!tmp)
142 continue;
143 for (j = 0; j < uspi->s_fpb; j++) {
144 bh = sb_find_get_block(sb, tmp + j);
145 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
146 retry = 1;
147 brelse (bh);
148 goto next2;
149 }
150 bforget (bh);
151 }
152 *p = 0;
153 inode->i_blocks -= uspi->s_nspb;
154 mark_inode_dirty(inode);
155 if (free_count == 0) {
156 frag_to_free = tmp;
157 free_count = uspi->s_fpb;
158 } else if (free_count > 0 && frag_to_free == tmp - free_count)
159 free_count += uspi->s_fpb;
160 else {
161 ufs_free_blocks (inode, frag_to_free, free_count);
162 frag_to_free = tmp;
163 free_count = uspi->s_fpb;
164 }
165next2:;
166 }
167
168 if (free_count > 0)
169 ufs_free_blocks (inode, frag_to_free, free_count);
170
171 if (frag3 >= frag4)
172 goto next3;
173
174 /*
175 * Free last free fragments
176 */
177 p = ufsi->i_u1.i_data + ufs_fragstoblks (frag3);
178 tmp = fs32_to_cpu(sb, *p);
179 if (!tmp )
180 ufs_panic(sb, "ufs_trunc_direct", "internal error");
181 frag4 = ufs_fragnum (frag4);
182 for (j = 0; j < frag4; j++) {
183 bh = sb_find_get_block (sb, tmp + j);
184 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
185 retry = 1;
186 brelse (bh);
187 goto next3;
188 }
189 bforget (bh);
190 }
191 *p = 0;
192 inode->i_blocks -= frag4 << uspi->s_nspfshift;
193 mark_inode_dirty(inode);
194 ufs_free_fragments (inode, tmp, frag4);
195 next3:
196
197 UFSD(("EXIT\n"))
198 return retry;
199}
200
201
202static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
203{
204 struct super_block * sb;
205 struct ufs_sb_private_info * uspi;
206 struct ufs_buffer_head * ind_ubh;
207 struct buffer_head * bh;
208 __fs32 * ind;
209 unsigned indirect_block, i, j, tmp;
210 unsigned frag_to_free, free_count;
211 int retry;
212
213 UFSD(("ENTER\n"))
214
215 sb = inode->i_sb;
216 uspi = UFS_SB(sb)->s_uspi;
217
218 frag_to_free = 0;
219 free_count = 0;
220 retry = 0;
221
222 tmp = fs32_to_cpu(sb, *p);
223 if (!tmp)
224 return 0;
225 ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
226 if (tmp != fs32_to_cpu(sb, *p)) {
227 ubh_brelse (ind_ubh);
228 return 1;
229 }
230 if (!ind_ubh) {
231 *p = 0;
232 return 0;
233 }
234
235 indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
236 for (i = indirect_block; i < uspi->s_apb; i++) {
237 ind = ubh_get_addr32 (ind_ubh, i);
238 tmp = fs32_to_cpu(sb, *ind);
239 if (!tmp)
240 continue;
241 for (j = 0; j < uspi->s_fpb; j++) {
242 bh = sb_find_get_block(sb, tmp + j);
243 if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
244 retry = 1;
245 brelse (bh);
246 goto next;
247 }
248 bforget (bh);
249 }
250 *ind = 0;
251 ubh_mark_buffer_dirty(ind_ubh);
252 if (free_count == 0) {
253 frag_to_free = tmp;
254 free_count = uspi->s_fpb;
255 } else if (free_count > 0 && frag_to_free == tmp - free_count)
256 free_count += uspi->s_fpb;
257 else {
258 ufs_free_blocks (inode, frag_to_free, free_count);
259 frag_to_free = tmp;
260 free_count = uspi->s_fpb;
261 }
262 inode->i_blocks -= uspi->s_nspb;
263 mark_inode_dirty(inode);
264next:;
265 }
266
267 if (free_count > 0) {
268 ufs_free_blocks (inode, frag_to_free, free_count);
269 }
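	/*
	 * If every slot in the indirect block is now clear, release the
	 * indirect block itself (unless its buffers are still referenced
	 * elsewhere, in which case the caller retries).
	 */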
270 for (i = 0; i < uspi->s_apb; i++)
271 if (*ubh_get_addr32(ind_ubh,i))
272 break;
273 if (i >= uspi->s_apb) {
274 if (ubh_max_bcount(ind_ubh) != 1) {
275 retry = 1;
276 }
277 else {
278 tmp = fs32_to_cpu(sb, *p);
279 *p = 0;
280 inode->i_blocks -= uspi->s_nspb;
281 mark_inode_dirty(inode);
282 ufs_free_blocks (inode, tmp, uspi->s_fpb);
283 ubh_bforget(ind_ubh);
284 ind_ubh = NULL;
285 }
286 }
287 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
288 ubh_wait_on_buffer (ind_ubh);
289 ubh_ll_rw_block (WRITE, 1, &ind_ubh);
290 ubh_wait_on_buffer (ind_ubh);
291 }
292 ubh_brelse (ind_ubh);
293
294 UFSD(("EXIT\n"))
295
296 return retry;
297}
298
299static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
300{
301 struct super_block * sb;
302 struct ufs_sb_private_info * uspi;
303 struct ufs_buffer_head * dind_bh;
304 unsigned i, tmp, dindirect_block;
305 __fs32 * dind;
306 int retry = 0;
307
308 UFSD(("ENTER\n"))
309
310 sb = inode->i_sb;
311 uspi = UFS_SB(sb)->s_uspi;
312
313 dindirect_block = (DIRECT_BLOCK > offset)
314 ? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
315 retry = 0;
316
317 tmp = fs32_to_cpu(sb, *p);
318 if (!tmp)
319 return 0;
320 dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
321 if (tmp != fs32_to_cpu(sb, *p)) {
322 ubh_brelse (dind_bh);
323 return 1;
324 }
325 if (!dind_bh) {
326 *p = 0;
327 return 0;
328 }
329
330 for (i = dindirect_block ; i < uspi->s_apb ; i++) {
331 dind = ubh_get_addr32 (dind_bh, i);
332 tmp = fs32_to_cpu(sb, *dind);
333 if (!tmp)
334 continue;
335 retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
336 ubh_mark_buffer_dirty(dind_bh);
337 }
338
339 for (i = 0; i < uspi->s_apb; i++)
340 if (*ubh_get_addr32 (dind_bh, i))
341 break;
342 if (i >= uspi->s_apb) {
343 if (ubh_max_bcount(dind_bh) != 1)
344 retry = 1;
345 else {
346 tmp = fs32_to_cpu(sb, *p);
347 *p = 0;
348 inode->i_blocks -= uspi->s_nspb;
349 mark_inode_dirty(inode);
350 ufs_free_blocks (inode, tmp, uspi->s_fpb);
351 ubh_bforget(dind_bh);
352 dind_bh = NULL;
353 }
354 }
355 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
356 ubh_wait_on_buffer (dind_bh);
357 ubh_ll_rw_block (WRITE, 1, &dind_bh);
358 ubh_wait_on_buffer (dind_bh);
359 }
360 ubh_brelse (dind_bh);
361
362 UFSD(("EXIT\n"))
363
364 return retry;
365}
366
367static int ufs_trunc_tindirect (struct inode * inode)
368{
369 struct ufs_inode_info *ufsi = UFS_I(inode);
370 struct super_block * sb;
371 struct ufs_sb_private_info * uspi;
372 struct ufs_buffer_head * tind_bh;
373 unsigned tindirect_block, tmp, i;
374 __fs32 * tind, * p;
375 int retry;
376
377 UFSD(("ENTER\n"))
378
379 sb = inode->i_sb;
380 uspi = UFS_SB(sb)->s_uspi;
381 retry = 0;
382
383 tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
384 ? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
385 p = ufsi->i_u1.i_data + UFS_TIND_BLOCK;
386 if (!(tmp = fs32_to_cpu(sb, *p)))
387 return 0;
388 tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
389 if (tmp != fs32_to_cpu(sb, *p)) {
390 ubh_brelse (tind_bh);
391 return 1;
392 }
393 if (!tind_bh) {
394 *p = 0;
395 return 0;
396 }
397
398 for (i = tindirect_block ; i < uspi->s_apb ; i++) {
399 tind = ubh_get_addr32 (tind_bh, i);
400 retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
401 uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
402 ubh_mark_buffer_dirty(tind_bh);
403 }
404 for (i = 0; i < uspi->s_apb; i++)
405 if (*ubh_get_addr32 (tind_bh, i))
406 break;
407 if (i >= uspi->s_apb) {
408 if (ubh_max_bcount(tind_bh) != 1)
409 retry = 1;
410 else {
411 tmp = fs32_to_cpu(sb, *p);
412 *p = 0;
413 inode->i_blocks -= uspi->s_nspb;
414 mark_inode_dirty(inode);
415 ufs_free_blocks (inode, tmp, uspi->s_fpb);
416 ubh_bforget(tind_bh);
417 tind_bh = NULL;
418 }
419 }
420 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
421 ubh_wait_on_buffer (tind_bh);
422 ubh_ll_rw_block (WRITE, 1, &tind_bh);
423 ubh_wait_on_buffer (tind_bh);
424 }
425 ubh_brelse (tind_bh);
426
427 UFSD(("EXIT\n"))
428 return retry;
429}
430
431void ufs_truncate (struct inode * inode)
432{
433 struct ufs_inode_info *ufsi = UFS_I(inode);
434 struct super_block * sb;
435 struct ufs_sb_private_info * uspi;
436 struct buffer_head * bh;
437 unsigned offset;
438 int err, retry;
439
440 UFSD(("ENTER\n"))
441 sb = inode->i_sb;
442 uspi = UFS_SB(sb)->s_uspi;
443
444 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
445 return;
446 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
447 return;
448 lock_kernel();
449 while (1) {
450 retry = ufs_trunc_direct(inode);
451 retry |= ufs_trunc_indirect (inode, UFS_IND_BLOCK,
452 (__fs32 *) &ufsi->i_u1.i_data[UFS_IND_BLOCK]);
453 retry |= ufs_trunc_dindirect (inode, UFS_IND_BLOCK + uspi->s_apb,
454 (__fs32 *) &ufsi->i_u1.i_data[UFS_DIND_BLOCK]);
455 retry |= ufs_trunc_tindirect (inode);
456 if (!retry)
457 break;
458 if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
459 ufs_sync_inode (inode);
460 blk_run_address_space(inode->i_mapping);
461 yield();
462 }
463 offset = inode->i_size & uspi->s_fshift;
464 if (offset) {
465 bh = ufs_bread (inode, inode->i_size >> uspi->s_fshift, 0, &err);
466 if (bh) {
467 memset (bh->b_data + offset, 0, uspi->s_fsize - offset);
468 mark_buffer_dirty (bh);
469 brelse (bh);
470 }
471 }
472 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
473 ufsi->i_lastfrag = DIRECT_FRAGMENT;
474 unlock_kernel();
475 mark_inode_dirty(inode);
476 UFSD(("EXIT\n"))
477}
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
new file mode 100644
index 000000000000..59acc8f073ac
--- /dev/null
+++ b/fs/ufs/util.c
@@ -0,0 +1,257 @@
1/*
2 * linux/fs/ufs/util.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 */
8
9#include <linux/string.h>
10#include <linux/slab.h>
11#include <linux/ufs_fs.h>
12#include <linux/buffer_head.h>
13
14#include "swab.h"
15#include "util.h"
16
17#undef UFS_UTILS_DEBUG
18
19#ifdef UFS_UTILS_DEBUG
20#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
21#else
22#define UFSD(x)
23#endif
24
25
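/*
 * A ufs_buffer_head aggregates the buffer_heads that cover every fragment
 * of one filesystem block, so callers can treat the block as a unit even
 * though it spans several device blocks.
 */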
26struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
27 struct super_block *sb, u64 fragment, u64 size)
28{
29 struct ufs_buffer_head * ubh;
30 unsigned i, j ;
31 u64 count = 0;
32 if (size & ~uspi->s_fmask)
33 return NULL;
34 count = size >> uspi->s_fshift;
35 if (count > UFS_MAXFRAG)
36 return NULL;
37 ubh = (struct ufs_buffer_head *)
38 kmalloc (sizeof (struct ufs_buffer_head), GFP_KERNEL);
39 if (!ubh)
40 return NULL;
41 ubh->fragment = fragment;
42 ubh->count = count;
43 for (i = 0; i < count; i++)
44 if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
45 goto failed;
46 for (; i < UFS_MAXFRAG; i++)
47 ubh->bh[i] = NULL;
48 return ubh;
49failed:
50 for (j = 0; j < i; j++)
51 brelse (ubh->bh[j]);
52 kfree(ubh);
53 return NULL;
54}
55
56struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
57 struct super_block *sb, u64 fragment, u64 size)
58{
59 unsigned i, j;
60 u64 count = 0;
61 if (size & ~uspi->s_fmask)
62 return NULL;
63 count = size >> uspi->s_fshift;
64 if (count <= 0 || count > UFS_MAXFRAG)
65 return NULL;
66 USPI_UBH->fragment = fragment;
67 USPI_UBH->count = count;
68 for (i = 0; i < count; i++)
69 if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i)))
70 goto failed;
71 for (; i < UFS_MAXFRAG; i++)
72 USPI_UBH->bh[i] = NULL;
73 return USPI_UBH;
74failed:
75 for (j = 0; j < i; j++)
76 brelse (USPI_UBH->bh[j]);
77 return NULL;
78}
79
80void ubh_brelse (struct ufs_buffer_head * ubh)
81{
82 unsigned i;
83 if (!ubh)
84 return;
85 for (i = 0; i < ubh->count; i++)
86 brelse (ubh->bh[i]);
87 kfree (ubh);
88}
89
90void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
91{
92 unsigned i;
93 if (!USPI_UBH)
94 return;
95 for ( i = 0; i < USPI_UBH->count; i++ ) {
96 brelse (USPI_UBH->bh[i]);
97 USPI_UBH->bh[i] = NULL;
98 }
99}
100
101void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
102{
103 unsigned i;
104 if (!ubh)
105 return;
106 for ( i = 0; i < ubh->count; i++ )
107 mark_buffer_dirty (ubh->bh[i]);
108}
109
110void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
111{
112 unsigned i;
113 if (!ubh)
114 return;
115 if (flag) {
116 for ( i = 0; i < ubh->count; i++ )
117 set_buffer_uptodate (ubh->bh[i]);
118 } else {
119 for ( i = 0; i < ubh->count; i++ )
120 clear_buffer_uptodate (ubh->bh[i]);
121 }
122}
123
124void ubh_ll_rw_block (int rw, unsigned nr, struct ufs_buffer_head * ubh[])
125{
126 unsigned i;
127 if (!ubh)
128 return;
129 for ( i = 0; i < nr; i++ )
130 ll_rw_block (rw, ubh[i]->count, ubh[i]->bh);
131}
132
133void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
134{
135 unsigned i;
136 if (!ubh)
137 return;
138 for ( i = 0; i < ubh->count; i++ )
139 wait_on_buffer (ubh->bh[i]);
140}
141
142unsigned ubh_max_bcount (struct ufs_buffer_head * ubh)
143{
144 unsigned i;
145 unsigned max = 0;
146 if (!ubh)
147 return 0;
148 for ( i = 0; i < ubh->count; i++ )
149 if ( atomic_read(&ubh->bh[i]->b_count) > max )
150 max = atomic_read(&ubh->bh[i]->b_count);
151 return max;
152}
153
154void ubh_bforget (struct ufs_buffer_head * ubh)
155{
156 unsigned i;
157 if (!ubh)
158 return;
159 for ( i = 0; i < ubh->count; i++ ) if ( ubh->bh[i] )
160 bforget (ubh->bh[i]);
161}
162
163int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
164{
165 unsigned i;
166 unsigned result = 0;
167 if (!ubh)
168 return 0;
169 for ( i = 0; i < ubh->count; i++ )
170 result |= buffer_dirty(ubh->bh[i]);
171 return result;
172}
173
174void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi,
175 unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
176{
177 unsigned len, bhno;
178 if (size > (ubh->count << uspi->s_fshift))
179 size = ubh->count << uspi->s_fshift;
180 bhno = 0;
181 while (size) {
182 len = min_t(unsigned int, size, uspi->s_fsize);
183 memcpy (mem, ubh->bh[bhno]->b_data, len);
184 mem += uspi->s_fsize;
185 size -= len;
186 bhno++;
187 }
188}
189
190void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
191 struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
192{
193 unsigned len, bhno;
194 if (size > (ubh->count << uspi->s_fshift))
195 size = ubh->count << uspi->s_fshift;
196 bhno = 0;
197 while (size) {
198 len = min_t(unsigned int, size, uspi->s_fsize);
199 memcpy (ubh->bh[bhno]->b_data, mem, len);
200 mem += uspi->s_fsize;
201 size -= len;
202 bhno++;
203 }
204}
205
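/*
 * Device numbers are stored in the first direct block slot (the second on
 * SunOS x86); Sun variants may use the 32-bit sysv encoding, all others the
 * old 16-bit encoding.
 */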
206dev_t
207ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
208{
209 __fs32 fs32;
210 dev_t dev;
211
212 if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
213 fs32 = ufsi->i_u1.i_data[1];
214 else
215 fs32 = ufsi->i_u1.i_data[0];
216 fs32 = fs32_to_cpu(sb, fs32);
217 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
218 case UFS_ST_SUNx86:
219 case UFS_ST_SUN:
220 if ((fs32 & 0xffff0000) == 0 ||
221 (fs32 & 0xffff0000) == 0xffff0000)
222 dev = old_decode_dev(fs32 & 0x7fff);
223 else
224 dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
225 break;
226
227 default:
228 dev = old_decode_dev(fs32);
229 break;
230 }
231 return dev;
232}
233
234void
235ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
236{
237 __fs32 fs32;
238
239 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
240 case UFS_ST_SUNx86:
241 case UFS_ST_SUN:
242 fs32 = sysv_encode_dev(dev);
243 if ((fs32 & 0xffff8000) == 0) {
244 fs32 = old_encode_dev(dev);
245 }
246 break;
247
248 default:
249 fs32 = old_encode_dev(dev);
250 break;
251 }
252 fs32 = cpu_to_fs32(sb, fs32);
253 if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
254 ufsi->i_u1.i_data[1] = fs32;
255 else
256 ufsi->i_u1.i_data[0] = fs32;
257}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
new file mode 100644
index 000000000000..b2640076679a
--- /dev/null
+++ b/fs/ufs/util.h
@@ -0,0 +1,526 @@
1/*
2 * linux/fs/ufs/util.h
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 */
8
9#include <linux/buffer_head.h>
10#include <linux/fs.h>
11#include "swab.h"
12
13
14/*
15 * some useful macros
16 */
17#define in_range(b,first,len) ((b)>=(first)&&(b)<(first)+(len))
18
19/*
20 * macros used for retyping
21 */
22#define UCPI_UBH ((struct ufs_buffer_head *)ucpi)
23#define USPI_UBH ((struct ufs_buffer_head *)uspi)
24
25
26
27/*
28 * macros used for accessing structures
29 */
30static inline s32
31ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
32 struct ufs_super_block_third *usb3)
33{
34 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
35 case UFS_ST_SUN:
36 return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state);
37 case UFS_ST_SUNx86:
38 return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
39 case UFS_ST_44BSD:
40 default:
41 return fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_state);
42 }
43}
44
45static inline void
46ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
47 struct ufs_super_block_third *usb3, s32 value)
48{
49 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
50 case UFS_ST_SUN:
51 usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value);
52 break;
53 case UFS_ST_SUNx86:
54 usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
55 break;
56 case UFS_ST_44BSD:
57 usb3->fs_u2.fs_44.fs_state = cpu_to_fs32(sb, value);
58 break;
59 }
60}
61
62static inline u32
63ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
64 struct ufs_super_block_third *usb3)
65{
66 if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
67 return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect);
68 else
69 return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
70}
71
72static inline u64
73ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
74{
75 __fs64 tmp;
76
77 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
78 case UFS_ST_SUN:
79 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0];
80 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1];
81 break;
82 case UFS_ST_SUNx86:
83 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qbmask[0];
84 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qbmask[1];
85 break;
86 case UFS_ST_44BSD:
87 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qbmask[0];
88 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qbmask[1];
89 break;
90 }
91
92 return fs64_to_cpu(sb, tmp);
93}
94
95static inline u64
96ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
97{
98 __fs64 tmp;
99
100 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
101 case UFS_ST_SUN:
102 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0];
103 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1];
104 break;
105 case UFS_ST_SUNx86:
106 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qfmask[0];
107 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qfmask[1];
108 break;
109 case UFS_ST_44BSD:
110 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qfmask[0];
111 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qfmask[1];
112 break;
113 }
114
115 return fs64_to_cpu(sb, tmp);
116}
117
118static inline u16
119ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
120{
121 if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
122 return fs16_to_cpu(sb, de->d_u.d_namlen);
123 else
124 return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
125}
126
127static inline void
128ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
129{
130 if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
131 de->d_u.d_namlen = cpu_to_fs16(sb, value);
132 else
133 de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
134}
135
136static inline void
137ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
138{
139 if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
140 return;
141
142 /*
143 * TODO turn this into a table lookup
144 */
145 switch (mode & S_IFMT) {
146 case S_IFSOCK:
147 de->d_u.d_44.d_type = DT_SOCK;
148 break;
149 case S_IFLNK:
150 de->d_u.d_44.d_type = DT_LNK;
151 break;
152 case S_IFREG:
153 de->d_u.d_44.d_type = DT_REG;
154 break;
155 case S_IFBLK:
156 de->d_u.d_44.d_type = DT_BLK;
157 break;
158 case S_IFDIR:
159 de->d_u.d_44.d_type = DT_DIR;
160 break;
161 case S_IFCHR:
162 de->d_u.d_44.d_type = DT_CHR;
163 break;
164 case S_IFIFO:
165 de->d_u.d_44.d_type = DT_FIFO;
166 break;
167 default:
168 de->d_u.d_44.d_type = DT_UNKNOWN;
169 }
170}
171
172static inline u32
173ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
174{
175 switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
176 case UFS_UID_EFT:
177 return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
178 case UFS_UID_44BSD:
179 return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
180 default:
181 return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
182 }
183}
184
185static inline void
186ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
187{
188 switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
189 case UFS_UID_EFT:
190 inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
191 break;
192 case UFS_UID_44BSD:
193 inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
194 break;
195 }
196 inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
197}
198
199static inline u32
200ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
201{
202 switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
203 case UFS_UID_EFT:
204 return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
205 case UFS_UID_44BSD:
206 return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
207 default:
208 return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
209 }
210}
211
212static inline void
213ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
214{
215 switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
216 case UFS_UID_EFT:
217 inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
218 break;
219 case UFS_UID_44BSD:
220 inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
221 break;
222 }
223 inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
224}
225
226extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
227extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
228
229/*
230 * These functions manipulate ufs buffers
231 */
232#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
233extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64 , u64);
234extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
235extern void ubh_brelse (struct ufs_buffer_head *);
236extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
237extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
238extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
239extern void ubh_ll_rw_block (int, unsigned, struct ufs_buffer_head **);
240extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
241extern unsigned ubh_max_bcount (struct ufs_buffer_head *);
242extern void ubh_bforget (struct ufs_buffer_head *);
243extern int ubh_buffer_dirty (struct ufs_buffer_head *);
244#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
245extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
246#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
247extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);
248
249
250
251/*
252 * macros to get important structures from ufs_buffer_head
253 */
254#define ubh_get_usb_first(ubh) \
255 ((struct ufs_super_block_first *)((ubh)->bh[0]->b_data))
256
257#define ubh_get_usb_second(ubh) \
258 ((struct ufs_super_block_second *)((ubh)-> \
259 bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask)))
260
261#define ubh_get_usb_third(ubh) \
262 ((struct ufs_super_block_third *)((ubh)-> \
263 bh[UFS_SECTOR_SIZE*2 >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE*2 & ~uspi->s_fmask)))
264
265#define ubh_get_ucg(ubh) \
266 ((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))
267
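/*
 * Illustrative note (not part of the original header): the second and
 * third parts of the on-disk superblock sit UFS_SECTOR_SIZE and
 * 2*UFS_SECTOR_SIZE bytes into the superblock area, which may fall into
 * different fragments and hence different buffer_heads.  The macros above
 * split that byte offset into a buffer index (offset >> s_fshift) and an
 * offset inside the buffer (offset & ~s_fmask).  Assuming UFS_SECTOR_SIZE
 * is 512 and the fragment size is also 512 (s_fshift == 9,
 * ~s_fmask == 511), the second part is found in bh[1] at offset 0 and the
 * third part in bh[2] at offset 0.
 */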
268
269/*
270 * Get the address of a byte, 16-bit or 32-bit item inside ufs_buffer_head
271 * Extract the bits for a block from a map inside ufs_buffer_head
272 */
273#define ubh_get_addr8(ubh,begin) \
274 ((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
275 ((begin) & ~uspi->s_fmask))
276
277#define ubh_get_addr16(ubh,begin) \
278	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
279	((begin) & ((uspi->s_fsize>>1) - 1)))
280
281#define ubh_get_addr32(ubh,begin) \
282 (((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
283 ((begin) & ((uspi->s_fsize>>2) - 1)))
284
285#define ubh_get_addr ubh_get_addr8
286
287#define ubh_blkmap(ubh,begin,bit) \
288 ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
289
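/*
 * Illustrative sketch (not part of the original header): ubh_get_addr8()
 * maps a byte offset covering the whole ufs_buffer_head to a buffer and
 * an offset inside it; with 1024-byte fragments (s_fshift == 10,
 * ~s_fmask == 1023) byte 2500 lives in bh[2] at offset 452.
 * ubh_blkmap() builds on it to read the s_fpb map bits describing one
 * block.  "begin" and "blkno" below are assumed caller context.
 */
#if 0	/* example only */
	unsigned char first_byte = *ubh_get_addr8(ubh, begin);
	unsigned blkmap = ubh_blkmap(ubh, begin, blkno * uspi->s_fpb);
#endif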
290
291/*
292 * Macros for access to superblock array structures
293 */
294#define ubh_postbl(ubh,cylno,i) \
295 ((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
296 ? (*(__s16*)(ubh_get_addr(ubh, \
297 (unsigned)(&((struct ufs_super_block *)0)->fs_opostbl) \
298 + (((cylno) * 16 + (i)) << 1) ) )) \
299 : (*(__s16*)(ubh_get_addr(ubh, \
300 uspi->s_postbloff + (((cylno) * uspi->s_nrpos + (i)) << 1) ))))
301
302#define ubh_rotbl(ubh,i) \
303 ((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
304 ? (*(__u8*)(ubh_get_addr(ubh, \
305 (unsigned)(&((struct ufs_super_block *)0)->fs_space) + (i)))) \
306 : (*(__u8*)(ubh_get_addr(ubh, uspi->s_rotbloff + (i)))))
307
308/*
309 * Determine the number of available frags given a
310 * percentage to hold in reserve.
311 */
312#define ufs_freespace(usb, percentreserved) \
313 (ufs_blkstofrags(fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nbfree)) + \
314 fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nffree) - (uspi->s_dsize * (percentreserved) / 100))
315
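/*
 * Illustrative example (not part of the original header), assuming
 * ufs_blkstofrags() converts blocks to fragments by multiplying with
 * s_fpb: with s_fpb == 8, 1000 free blocks, 500 free fragments,
 * s_dsize == 100000 and a 10% reserve, ufs_freespace() evaluates to
 * 8*1000 + 500 - 100000*10/100 = -1500, i.e. the reserve is exhausted
 * and callers can refuse further ordinary allocations.
 */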
316/*
317 * Macros to access cylinder group array structures
318 */
319#define ubh_cg_blktot(ucpi,cylno) \
320 (*((__fs32*)ubh_get_addr(UCPI_UBH, (ucpi)->c_btotoff + ((cylno) << 2))))
321
322#define ubh_cg_blks(ucpi,cylno,rpos) \
323 (*((__fs16*)ubh_get_addr(UCPI_UBH, \
324 (ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))
325
326/*
327 * Bitmap operations
328 * These macros work like classical bitmap operations.
329 * The difference is that the bitmap is not held in one contiguous
330 * chunk of memory, but is spread over several buffers.
331 * Each operation takes the ufs_buffer_head, the byte offset at which
332 * the bitmap starts inside it, and the number of the bit to act on.
333 */
334#define ubh_setbit(ubh,begin,bit) \
335 (*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))
336
337#define ubh_clrbit(ubh,begin,bit) \
338 (*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))
339
340#define ubh_isset(ubh,begin,bit) \
341 (*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))
342
343#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))
344
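/*
 * Illustrative sketch (not part of the original header): toggling one
 * fragment in a cylinder group's free-fragment map, assuming the usual
 * convention that a set bit means "free", that ucpi->c_freeoff is the
 * byte offset of that map, and that "bit", "ucpi" and UCPI_UBH are in
 * scope as elsewhere in this header.
 */
#if 0	/* example only */
	if (!ubh_isset(UCPI_UBH, ucpi->c_freeoff, bit))
		ubh_setbit(UCPI_UBH, ucpi->c_freeoff, bit);	/* mark fragment free */
	/* ... and when the fragment is allocated again ... */
	ubh_clrbit(UCPI_UBH, ucpi->c_freeoff, bit);
#endif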
345#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)
346
347#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
348static inline unsigned _ubh_find_next_zero_bit_(
349 struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
350 unsigned begin, unsigned size, unsigned offset)
351{
352 unsigned base, count, pos;
353
354 size -= offset;
355 begin <<= 3;
356 offset += begin;
357 base = offset >> uspi->s_bpfshift;
358 offset &= uspi->s_bpfmask;
359 for (;;) {
360 count = min_t(unsigned int, size + offset, uspi->s_bpf);
361 size -= count - offset;
362 pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
363 if (pos < count || !size)
364 break;
365 base++;
366 offset = 0;
367 }
368 return (base << uspi->s_bpfshift) + pos - begin;
369}
370
371static inline unsigned find_last_zero_bit (unsigned char * bitmap,
372 unsigned size, unsigned offset)
373{
374 unsigned bit, i;
375 unsigned char * mapp;
376 unsigned char map;
377
378 mapp = bitmap + (size >> 3);
379 map = *mapp--;
380 bit = 1 << (size & 7);
381 for (i = size; i > offset; i--) {
382 if ((map & bit) == 0)
383 break;
384 if ((i & 7) != 0) {
385 bit >>= 1;
386 } else {
387 map = *mapp--;
388 bit = 1 << 7;
389 }
390 }
391 return i;
392}
393
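/*
 * Clarifying note (not part of the original header): through the macro
 * below the "size" argument arrives as "start" and "offset" as "end", so
 * the helper scans backwards from bit (begin * 8 + size) towards bit
 * (begin * 8 + offset), i.e. over size - offset bits spread across the
 * buffers.
 */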
394#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
395static inline unsigned _ubh_find_last_zero_bit_(
396 struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
397 unsigned begin, unsigned start, unsigned end)
398{
399 unsigned base, count, pos, size;
400
401 size = start - end;
402 begin <<= 3;
403 start += begin;
404 base = start >> uspi->s_bpfshift;
405 start &= uspi->s_bpfmask;
406 for (;;) {
407 count = min_t(unsigned int,
408 size + (uspi->s_bpf - start), uspi->s_bpf)
409 - (uspi->s_bpf - start);
410 size -= count;
411 pos = find_last_zero_bit (ubh->bh[base]->b_data,
412 start, start - count);
413 if (pos > start - count || !size)
414 break;
415 base--;
416 start = uspi->s_bpf;
417 }
418 return (base << uspi->s_bpfshift) + pos - begin;
419}
420
421#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))
422
423#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
424static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
425 struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
426{
427 switch (uspi->s_fpb) {
428 case 8:
429 return (*ubh_get_addr (ubh, begin + block) == 0xff);
430 case 4:
431 return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
432 case 2:
433 return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
434 case 1:
435 return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
436 }
437 return 0;
438}
439
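/*
 * Illustrative note (not part of the original header): _ubh_isblockset_()
 * above and the clr/set helpers below treat the block map as s_fpb bits
 * per block packed into bytes.  With 4 fragments per block, block 5 uses
 * the high nibble of byte (begin + (5 >> 1)); a fully free block there
 * reads back as 0x0f << ((5 & 0x01) << 2) == 0xf0.
 */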
440#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
441static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
442 struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
443{
444 switch (uspi->s_fpb) {
445 case 8:
446 *ubh_get_addr (ubh, begin + block) = 0x00;
447 return;
448 case 4:
449 *ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
450 return;
451 case 2:
452 *ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
453 return;
454 case 1:
455 *ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
456 return;
457 }
458}
459
460#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
461static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
462 struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
463{
464 switch (uspi->s_fpb) {
465 case 8:
466 *ubh_get_addr(ubh, begin + block) = 0xff;
467 return;
468 case 4:
469 *ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
470 return;
471 case 2:
472 *ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
473 return;
474 case 1:
475 *ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
476 return;
477 }
478}
479
480static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
481 __fs32 * fraglist, int cnt)
482{
483 struct ufs_sb_private_info * uspi;
484 unsigned fragsize, pos;
485
486 uspi = UFS_SB(sb)->s_uspi;
487
488 fragsize = 0;
489 for (pos = 0; pos < uspi->s_fpb; pos++) {
490 if (blockmap & (1 << pos)) {
491 fragsize++;
492 }
493 else if (fragsize > 0) {
494 fs32_add(sb, &fraglist[fragsize], cnt);
495 fragsize = 0;
496 }
497 }
498 if (fragsize > 0 && fragsize < uspi->s_fpb)
499 fs32_add(sb, &fraglist[fragsize], cnt);
500}
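/*
 * Illustrative example (not part of the original function): with 8
 * fragments per block and blockmap 0x3a (bits 1, 3, 4 and 5 set),
 * ufs_fragacct() sees two runs of set (i.e. free) fragments, of lengths
 * 1 and 3, and adds "cnt" to fraglist[1] and fraglist[3]; a completely
 * free block (fragsize == s_fpb) is not accounted in the fragment lists.
 */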
501
502#define ubh_scanc(ubh,begin,size,table,mask) _ubh_scanc_(uspi,ubh,begin,size,table,mask)
503static inline unsigned _ubh_scanc_(struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
504 unsigned begin, unsigned size, unsigned char * table, unsigned char mask)
505{
506 unsigned rest, offset;
507 unsigned char * cp;
508
509
510 offset = begin & ~uspi->s_fmask;
511 begin >>= uspi->s_fshift;
512 for (;;) {
513 if ((offset + size) < uspi->s_fsize)
514 rest = size;
515 else
516 rest = uspi->s_fsize - offset;
517 size -= rest;
518 cp = ubh->bh[begin]->b_data + offset;
519 while ((table[*cp++] & mask) == 0 && --rest);
520 if (rest || !size)
521 break;
522 begin++;
523 offset = 0;
524 }
525 return (size + rest);
526}
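/*
 * Clarifying note (not part of the original function): _ubh_scanc_()
 * walks "size" bytes of the map starting at byte offset "begin" and stops
 * at the first byte whose table entry has one of the "mask" bits set; the
 * return value is the number of bytes left in the region counting the
 * matching byte, so 0 means no byte matched.  Callers are expected to
 * pass a lookup table indexed by map byte that encodes which free-run
 * lengths that byte contains.
 */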