Diffstat (limited to 'fs/ufs/dir.c')
-rw-r--r--  fs/ufs/dir.c | 989
1 file changed, 512 insertions(+), 477 deletions(-)
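The patch below replaces the buffer_head based directory code in fs/ufs/dir.c with page-cache based helpers (ufs_get_page()/ufs_put_page()), following the ext2 code base. Throughout the new code, a directory chunk is scanned by stepping d_reclen bytes from one ufs_dir_entry to the next, and a zero d_reclen is treated as corruption. The following is a minimal userspace sketch of that traversal pattern only; struct demo_dirent and walk_chunk are made-up names for illustration, and the layout is a simplified stand-in, not the on-disk UFS format or the kernel API.

/* Illustrative sketch only: a simplified stand-in for struct ufs_dir_entry,
 * not the on-disk UFS layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_dirent {
	uint32_t d_ino;      /* inode number; 0 marks an unused slot       */
	uint16_t d_reclen;   /* distance in bytes to the next entry        */
	uint8_t  d_namlen;   /* length of d_name                           */
	char     d_name[25]; /* name (NUL-terminated here for convenience) */
};

/* Walk one directory chunk the way ufs_next_entry()/ufs_readdir() do:
 * advance by d_reclen and bail out on a zero-length entry. */
static void walk_chunk(const char *chunk, size_t chunk_size)
{
	size_t offs = 0;

	while (offs + sizeof(struct demo_dirent) <= chunk_size) {
		const struct demo_dirent *de =
			(const struct demo_dirent *)(chunk + offs);

		if (de->d_reclen == 0) {
			/* corrupt chunk: stop instead of looping forever */
			fprintf(stderr, "zero-length entry at offset %zu\n", offs);
			return;
		}
		if (de->d_ino)
			printf("ino %u  name %.*s\n",
			       (unsigned)de->d_ino, (int)de->d_namlen, de->d_name);
		offs += de->d_reclen;
	}
}

int main(void)
{
	struct demo_dirent chunk[2];   /* aligned backing storage for the demo chunk */

	memset(chunk, 0, sizeof(chunk));

	chunk[0].d_ino = 2;            /* "." */
	chunk[0].d_reclen = sizeof(struct demo_dirent);
	chunk[0].d_namlen = 1;
	strcpy(chunk[0].d_name, ".");

	chunk[1].d_ino = 2;            /* ".." of the root points to itself */
	chunk[1].d_reclen = sizeof(struct demo_dirent);
	chunk[1].d_namlen = 2;
	strcpy(chunk[1].d_name, "..");

	walk_chunk((const char *)chunk, sizeof(chunk));
	return 0;
}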
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 1a561202d3f4..9473df5bff51 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -11,13 +11,15 @@
11 * 4.4BSD (FreeBSD) support added on February 1st 1998 by 11 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
12 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based 12 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
13 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>. 13 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
14 *
15 * Migration to usage of "page cache" on May 2006 by
16 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
14 */ 17 */
15 18
16#include <linux/time.h> 19#include <linux/time.h>
17#include <linux/fs.h> 20#include <linux/fs.h>
18#include <linux/ufs_fs.h> 21#include <linux/ufs_fs.h>
19#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
20#include <linux/buffer_head.h>
21#include <linux/sched.h> 23#include <linux/sched.h>
22 24
23#include "swab.h" 25#include "swab.h"
@@ -31,11 +33,6 @@
31#define UFSD(x) 33#define UFSD(x)
32#endif 34#endif
33 35
34static int
35ufs_check_dir_entry (const char *, struct inode *, struct ufs_dir_entry *,
36 struct buffer_head *, unsigned long);
37
38
39/* 36/*
40 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure. 37 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
41 * 38 *
@@ -51,495 +48,540 @@ static inline int ufs_match(struct super_block *sb, int len,
51 return !memcmp(name, de->d_name, len); 48 return !memcmp(name, de->d_name, len);
52} 49}
53 50
54/* 51static int ufs_commit_chunk(struct page *page, unsigned from, unsigned to)
55 * This is blatantly stolen from ext2fs
56 */
57static int
58ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
59{ 52{
60 struct inode *inode = filp->f_dentry->d_inode; 53 struct inode *dir = page->mapping->host;
61 int error = 0; 54 int err = 0;
62 unsigned long offset, lblk; 55 dir->i_version++;
63 int i, stored; 56 page->mapping->a_ops->commit_write(NULL, page, from, to);
64 struct buffer_head * bh; 57 if (IS_DIRSYNC(dir))
65 struct ufs_dir_entry * de; 58 err = write_one_page(page, 1);
66 struct super_block * sb; 59 else
67 int de_reclen; 60 unlock_page(page);
68 unsigned flags; 61 return err;
69 u64 blk= 0L; 62}
70
71 lock_kernel();
72
73 sb = inode->i_sb;
74 flags = UFS_SB(sb)->s_flags;
75
76 UFSD(("ENTER, ino %lu f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))
77
78 stored = 0;
79 bh = NULL;
80 offset = filp->f_pos & (sb->s_blocksize - 1);
81
82 while (!error && !stored && filp->f_pos < inode->i_size) {
83 lblk = (filp->f_pos) >> sb->s_blocksize_bits;
84 blk = ufs_frag_map(inode, lblk);
85 if (!blk || !(bh = sb_bread(sb, blk))) {
86 /* XXX - error - skip to the next block */
87 printk("ufs_readdir: "
88 "dir inode %lu has a hole at offset %lu\n",
89 inode->i_ino, (unsigned long int)filp->f_pos);
90 filp->f_pos += sb->s_blocksize - offset;
91 continue;
92 }
93
94revalidate:
95 /* If the dir block has changed since the last call to
96 * readdir(2), then we might be pointing to an invalid
97 * dirent right now. Scan from the start of the block
98 * to make sure. */
99 if (filp->f_version != inode->i_version) {
100 for (i = 0; i < sb->s_blocksize && i < offset; ) {
101 de = (struct ufs_dir_entry *)(bh->b_data + i);
102 /* It's too expensive to do a full
103 * dirent test each time round this
104 * loop, but we do have to test at
105 * least that it is non-zero. A
106 * failure will be detected in the
107 * dirent test below. */
108 de_reclen = fs16_to_cpu(sb, de->d_reclen);
109 if (de_reclen < 1)
110 break;
111 i += de_reclen;
112 }
113 offset = i;
114 filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
115 | offset;
116 filp->f_version = inode->i_version;
117 }
118 63
119 while (!error && filp->f_pos < inode->i_size 64static inline void ufs_put_page(struct page *page)
120 && offset < sb->s_blocksize) { 65{
121 de = (struct ufs_dir_entry *) (bh->b_data + offset); 66 kunmap(page);
122 /* XXX - put in a real ufs_check_dir_entry() */ 67 page_cache_release(page);
123 if ((de->d_reclen == 0) || (ufs_get_de_namlen(sb, de) == 0)) { 68}
124 filp->f_pos = (filp->f_pos &
125 (sb->s_blocksize - 1)) +
126 sb->s_blocksize;
127 brelse(bh);
128 unlock_kernel();
129 return stored;
130 }
131 if (!ufs_check_dir_entry ("ufs_readdir", inode, de,
132 bh, offset)) {
133 /* On error, skip the f_pos to the
134 next block. */
135 filp->f_pos = (filp->f_pos |
136 (sb->s_blocksize - 1)) +
137 1;
138 brelse (bh);
139 unlock_kernel();
140 return stored;
141 }
142 offset += fs16_to_cpu(sb, de->d_reclen);
143 if (de->d_ino) {
144 /* We might block in the next section
145 * if the data destination is
146 * currently swapped out. So, use a
147 * version stamp to detect whether or
148 * not the directory has been modified
149 * during the copy operation. */
150 unsigned long version = filp->f_version;
151 unsigned char d_type = DT_UNKNOWN;
152 69
153 UFSD(("filldir(%s,%u)\n", de->d_name, 70static inline unsigned long ufs_dir_pages(struct inode *inode)
154 fs32_to_cpu(sb, de->d_ino))) 71{
155 UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de))) 72 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
73}
156 74
157 if ((flags & UFS_DE_MASK) == UFS_DE_44BSD) 75ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
158 d_type = de->d_u.d_44.d_type; 76{
159 error = filldir(dirent, de->d_name, 77 ino_t res = 0;
160 ufs_get_de_namlen(sb, de), filp->f_pos, 78 struct ufs_dir_entry *de;
161 fs32_to_cpu(sb, de->d_ino), d_type); 79 struct page *page;
162 if (error) 80
163 break; 81 de = ufs_find_entry(dir, dentry, &page);
164 if (version != filp->f_version) 82 if (de) {
165 goto revalidate; 83 res = fs32_to_cpu(dir->i_sb, de->d_ino);
166 stored ++; 84 ufs_put_page(page);
167 }
168 filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
169 }
170 offset = 0;
171 brelse (bh);
172 } 85 }
173 unlock_kernel(); 86 return res;
174 return 0;
175} 87}
176 88
177/*
178 * define how far ahead to read directories while searching them.
179 */
180#define NAMEI_RA_CHUNKS 2
181#define NAMEI_RA_BLOCKS 4
182#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
183#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
184 89
185/* 90/* Releases the page */
186 * ufs_find_entry() 91void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
187 * 92 struct page *page, struct inode *inode)
188 * finds an entry in the specified directory with the wanted name. It
189 * returns the cache buffer in which the entry was found, and the entry
190 * itself (as a parameter - res_bh). It does NOT read the inode of the
191 * entry - you'll have to do that yourself if you want to.
192 */
193struct ufs_dir_entry * ufs_find_entry (struct dentry *dentry,
194 struct buffer_head ** res_bh)
195{ 93{
196 struct super_block * sb; 94 unsigned from = (char *) de - (char *) page_address(page);
197 struct buffer_head * bh_use[NAMEI_RA_SIZE]; 95 unsigned to = from + fs16_to_cpu(dir->i_sb, de->d_reclen);
198 struct buffer_head * bh_read[NAMEI_RA_SIZE]; 96 int err;
199 unsigned long offset;
200 int block, toread, i, err;
201 struct inode *dir = dentry->d_parent->d_inode;
202 const char *name = dentry->d_name.name;
203 int namelen = dentry->d_name.len;
204 97
205 UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen)) 98 lock_page(page);
206 99 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
207 *res_bh = NULL; 100 BUG_ON(err);
208 101 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
209 sb = dir->i_sb; 102 ufs_set_de_type(dir->i_sb, de, inode->i_mode);
210 103 err = ufs_commit_chunk(page, from, to);
211 if (namelen > UFS_MAXNAMLEN) 104 ufs_put_page(page);
212 return NULL; 105 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
106 mark_inode_dirty(dir);
107}
213 108
214 memset (bh_use, 0, sizeof (bh_use));
215 toread = 0;
216 for (block = 0; block < NAMEI_RA_SIZE; ++block) {
217 struct buffer_head * bh;
218 109
219 if ((block << sb->s_blocksize_bits) >= dir->i_size) 110static void ufs_check_page(struct page *page)
220 break; 111{
221 bh = ufs_getfrag (dir, block, 0, &err); 112 struct inode *dir = page->mapping->host;
222 bh_use[block] = bh; 113 struct super_block *sb = dir->i_sb;
223 if (bh && !buffer_uptodate(bh)) 114 char *kaddr = page_address(page);
224 bh_read[toread++] = bh; 115 unsigned offs, rec_len;
116 unsigned limit = PAGE_CACHE_SIZE;
117 struct ufs_dir_entry *p;
118 char *error;
119
120 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
121 limit = dir->i_size & ~PAGE_CACHE_MASK;
122 if (limit & (UFS_SECTOR_SIZE - 1))
123 goto Ebadsize;
124 if (!limit)
125 goto out;
225 } 126 }
127 for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
128 p = (struct ufs_dir_entry *)(kaddr + offs);
129 rec_len = fs16_to_cpu(sb, p->d_reclen);
130
131 if (rec_len < UFS_DIR_REC_LEN(1))
132 goto Eshort;
133 if (rec_len & 3)
134 goto Ealign;
135 if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
136 goto Enamelen;
137 if (((offs + rec_len - 1) ^ offs) & ~(UFS_SECTOR_SIZE-1))
138 goto Espan;
139 if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
140 UFS_SB(sb)->s_uspi->s_ncg))
141 goto Einumber;
142 }
143 if (offs != limit)
144 goto Eend;
145out:
146 SetPageChecked(page);
147 return;
148
149 /* Too bad, we had an error */
150
151Ebadsize:
152 ufs_error(sb, "ufs_check_page",
153 "size of directory #%lu is not a multiple of chunk size",
154 dir->i_ino
155 );
156 goto fail;
157Eshort:
158 error = "rec_len is smaller than minimal";
159 goto bad_entry;
160Ealign:
161 error = "unaligned directory entry";
162 goto bad_entry;
163Enamelen:
164 error = "rec_len is too small for name_len";
165 goto bad_entry;
166Espan:
167 error = "directory entry across blocks";
168 goto bad_entry;
169Einumber:
170 error = "inode out of bounds";
171bad_entry:
172 ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
173 "offset=%lu, rec_len=%d, name_len=%d",
174 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
175 rec_len, ufs_get_de_namlen(sb, p));
176 goto fail;
177Eend:
178 p = (struct ufs_dir_entry *)(kaddr + offs);
179 ufs_error (sb, "ext2_check_page",
180 "entry in directory #%lu spans the page boundary"
181 "offset=%lu",
182 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
183fail:
184 SetPageChecked(page);
185 SetPageError(page);
186}
226 187
227 for (block = 0, offset = 0; offset < dir->i_size; block++) { 188static struct page *ufs_get_page(struct inode *dir, unsigned long n)
228 struct buffer_head * bh; 189{
229 struct ufs_dir_entry * de; 190 struct address_space *mapping = dir->i_mapping;
230 char * dlimit; 191 struct page *page = read_cache_page(mapping, n,
231 192 (filler_t*)mapping->a_ops->readpage, NULL);
232 if ((block % NAMEI_RA_BLOCKS) == 0 && toread) { 193 if (!IS_ERR(page)) {
233 ll_rw_block (READ, toread, bh_read); 194 wait_on_page_locked(page);
234 toread = 0; 195 kmap(page);
235 } 196 if (!PageUptodate(page))
236 bh = bh_use[block % NAMEI_RA_SIZE]; 197 goto fail;
237 if (!bh) { 198 if (!PageChecked(page))
238 ufs_error (sb, "ufs_find_entry", 199 ufs_check_page(page);
239 "directory #%lu contains a hole at offset %lu", 200 if (PageError(page))
240 dir->i_ino, offset); 201 goto fail;
241 offset += sb->s_blocksize;
242 continue;
243 }
244 wait_on_buffer (bh);
245 if (!buffer_uptodate(bh)) {
246 /*
247 * read error: all bets are off
248 */
249 break;
250 }
251
252 de = (struct ufs_dir_entry *) bh->b_data;
253 dlimit = bh->b_data + sb->s_blocksize;
254 while ((char *) de < dlimit && offset < dir->i_size) {
255 /* this code is executed quadratically often */
256 /* do minimal checking by hand */
257 int de_len;
258
259 if ((char *) de + namelen <= dlimit &&
260 ufs_match(sb, namelen, name, de)) {
261 /* found a match -
262 just to be sure, do a full check */
263 if (!ufs_check_dir_entry("ufs_find_entry",
264 dir, de, bh, offset))
265 goto failed;
266 for (i = 0; i < NAMEI_RA_SIZE; ++i) {
267 if (bh_use[i] != bh)
268 brelse (bh_use[i]);
269 }
270 *res_bh = bh;
271 return de;
272 }
273 /* prevent looping on a bad block */
274 de_len = fs16_to_cpu(sb, de->d_reclen);
275 if (de_len <= 0)
276 goto failed;
277 offset += de_len;
278 de = (struct ufs_dir_entry *) ((char *) de + de_len);
279 }
280
281 brelse (bh);
282 if (((block + NAMEI_RA_SIZE) << sb->s_blocksize_bits ) >=
283 dir->i_size)
284 bh = NULL;
285 else
286 bh = ufs_getfrag (dir, block + NAMEI_RA_SIZE, 0, &err);
287 bh_use[block % NAMEI_RA_SIZE] = bh;
288 if (bh && !buffer_uptodate(bh))
289 bh_read[toread++] = bh;
290 } 202 }
203 return page;
291 204
292failed: 205fail:
293 for (i = 0; i < NAMEI_RA_SIZE; ++i) brelse (bh_use[i]); 206 ufs_put_page(page);
294 UFSD(("EXIT\n")) 207 return ERR_PTR(-EIO);
295 return NULL;
296} 208}
297 209
298static int 210/*
299ufs_check_dir_entry (const char *function, struct inode *dir, 211 * Return the offset into page `page_nr' of the last valid
300 struct ufs_dir_entry *de, struct buffer_head *bh, 212 * byte in that page, plus one.
301 unsigned long offset) 213 */
214static unsigned
215ufs_last_byte(struct inode *inode, unsigned long page_nr)
302{ 216{
303 struct super_block *sb = dir->i_sb; 217 unsigned last_byte = inode->i_size;
304 const char *error_msg = NULL; 218
305 int rlen = fs16_to_cpu(sb, de->d_reclen); 219 last_byte -= page_nr << PAGE_CACHE_SHIFT;
306 220 if (last_byte > PAGE_CACHE_SIZE)
307 if (rlen < UFS_DIR_REC_LEN(1)) 221 last_byte = PAGE_CACHE_SIZE;
308 error_msg = "reclen is smaller than minimal"; 222 return last_byte;
309 else if (rlen % 4 != 0)
310 error_msg = "reclen % 4 != 0";
311 else if (rlen < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)))
312 error_msg = "reclen is too small for namlen";
313 else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
314 error_msg = "directory entry across blocks";
315 else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
316 UFS_SB(sb)->s_uspi->s_ncg))
317 error_msg = "inode out of bounds";
318
319 if (error_msg != NULL)
320 ufs_error (sb, function, "bad entry in directory #%lu, size %Lu: %s - "
321 "offset=%lu, inode=%lu, reclen=%d, namlen=%d",
322 dir->i_ino, dir->i_size, error_msg, offset,
323 (unsigned long)fs32_to_cpu(sb, de->d_ino),
324 rlen, ufs_get_de_namlen(sb, de));
325
326 return (error_msg == NULL ? 1 : 0);
327} 223}
328 224
329struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct buffer_head **p) 225static inline struct ufs_dir_entry *
226ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
330{ 227{
331 int err; 228 return (struct ufs_dir_entry *)((char *)p +
332 struct buffer_head *bh = ufs_bread (dir, 0, 0, &err); 229 fs16_to_cpu(sb, p->d_reclen));
333 struct ufs_dir_entry *res = NULL;
334
335 if (bh) {
336 res = (struct ufs_dir_entry *) bh->b_data;
337 res = (struct ufs_dir_entry *)((char *)res +
338 fs16_to_cpu(dir->i_sb, res->d_reclen));
339 }
340 *p = bh;
341 return res;
342} 230}
343ino_t ufs_inode_by_name(struct inode * dir, struct dentry *dentry) 231
232struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
344{ 233{
345 ino_t res = 0; 234 struct page *page = ufs_get_page(dir, 0);
346 struct ufs_dir_entry * de; 235 struct ufs_dir_entry *de = NULL;
347 struct buffer_head *bh;
348 236
349 de = ufs_find_entry (dentry, &bh); 237 if (!IS_ERR(page)) {
350 if (de) { 238 de = ufs_next_entry(dir->i_sb,
351 res = fs32_to_cpu(dir->i_sb, de->d_ino); 239 (struct ufs_dir_entry *)page_address(page));
352 brelse(bh); 240 *p = page;
353 } 241 }
354 return res; 242 return de;
355} 243}
356 244
357void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, 245/*
358 struct buffer_head *bh, struct inode *inode) 246 * ufs_find_entry()
247 *
248 * finds an entry in the specified directory with the wanted name. It
249 * returns the page in which the entry was found, and the entry itself
250 * (as a parameter - res_dir). Page is returned mapped and unlocked.
251 * Entry is guaranteed to be valid.
252 */
253struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
254 struct page **res_page)
359{ 255{
360 dir->i_version++; 256 struct super_block *sb = dir->i_sb;
361 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); 257 const char *name = dentry->d_name.name;
362 mark_buffer_dirty(bh); 258 int namelen = dentry->d_name.len;
363 if (IS_DIRSYNC(dir)) 259 unsigned reclen = UFS_DIR_REC_LEN(namelen);
364 sync_dirty_buffer(bh); 260 unsigned long start, n;
365 brelse (bh); 261 unsigned long npages = ufs_dir_pages(dir);
262 struct page *page = NULL;
263 struct ufs_dir_entry *de;
264
265 UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen));
266
267 if (npages == 0 || namelen > UFS_MAXNAMLEN)
268 goto out;
269
270 /* OFFSET_CACHE */
271 *res_page = NULL;
272
273 /* start = ei->i_dir_start_lookup; */
274 start = 0;
275 if (start >= npages)
276 start = 0;
277 n = start;
278 do {
279 char *kaddr;
280 page = ufs_get_page(dir, n);
281 if (!IS_ERR(page)) {
282 kaddr = page_address(page);
283 de = (struct ufs_dir_entry *) kaddr;
284 kaddr += ufs_last_byte(dir, n) - reclen;
285 while ((char *) de <= kaddr) {
286 if (de->d_reclen == 0) {
287 ufs_error(dir->i_sb, __FUNCTION__,
288 "zero-length directory entry");
289 ufs_put_page(page);
290 goto out;
291 }
292 if (ufs_match(sb, namelen, name, de))
293 goto found;
294 de = ufs_next_entry(sb, de);
295 }
296 ufs_put_page(page);
297 }
298 if (++n >= npages)
299 n = 0;
300 } while (n != start);
301out:
302 return NULL;
303
304found:
305 *res_page = page;
306 /* ei->i_dir_start_lookup = n; */
307 return de;
366} 308}
367 309
368/* 310/*
369 * ufs_add_entry() 311 * Parent is locked.
370 *
371 * adds a file entry to the specified directory, using the same
372 * semantics as ufs_find_entry(). It returns NULL if it failed.
373 */ 312 */
374int ufs_add_link(struct dentry *dentry, struct inode *inode) 313int ufs_add_link(struct dentry *dentry, struct inode *inode)
375{ 314{
376 struct super_block * sb;
377 struct ufs_sb_private_info * uspi;
378 unsigned long offset;
379 unsigned fragoff;
380 unsigned short rec_len;
381 struct buffer_head * bh;
382 struct ufs_dir_entry * de, * de1;
383 struct inode *dir = dentry->d_parent->d_inode; 315 struct inode *dir = dentry->d_parent->d_inode;
384 const char *name = dentry->d_name.name; 316 const char *name = dentry->d_name.name;
385 int namelen = dentry->d_name.len; 317 int namelen = dentry->d_name.len;
318 struct super_block *sb = dir->i_sb;
319 unsigned reclen = UFS_DIR_REC_LEN(namelen);
320 unsigned short rec_len, name_len;
321 struct page *page = NULL;
322 struct ufs_dir_entry *de;
323 unsigned long npages = ufs_dir_pages(dir);
324 unsigned long n;
325 char *kaddr;
326 unsigned from, to;
386 int err; 327 int err;
387 328
388 UFSD(("ENTER, name %s, namelen %u\n", name, namelen)) 329 UFSD(("ENTER, name %s, namelen %u\n", name, namelen));
389 330
390 sb = dir->i_sb; 331 /*
391 uspi = UFS_SB(sb)->s_uspi; 332 * We take care of directory expansion in the same loop.
392 333 * This code plays outside i_size, so it locks the page
393 if (!namelen) 334 * to protect that region.
394 return -EINVAL; 335 */
395 bh = ufs_bread (dir, 0, 0, &err); 336 for (n = 0; n <= npages; n++) {
396 if (!bh) 337 char *dir_end;
397 return err; 338
398 rec_len = UFS_DIR_REC_LEN(namelen); 339 page = ufs_get_page(dir, n);
399 offset = 0; 340 err = PTR_ERR(page);
400 de = (struct ufs_dir_entry *) bh->b_data; 341 if (IS_ERR(page))
401 while (1) { 342 goto out;
402 if ((char *)de >= UFS_SECTOR_SIZE + bh->b_data) { 343 lock_page(page);
403 fragoff = offset & ~uspi->s_fmask; 344 kaddr = page_address(page);
404 if (fragoff != 0 && fragoff != UFS_SECTOR_SIZE) 345 dir_end = kaddr + ufs_last_byte(dir, n);
405 ufs_error (sb, "ufs_add_entry", "internal error" 346 de = (struct ufs_dir_entry *)kaddr;
406 " fragoff %u", fragoff); 347 kaddr += PAGE_CACHE_SIZE - reclen;
407 if (!fragoff) { 348 while ((char *)de <= kaddr) {
408 brelse (bh); 349 if ((char *)de == dir_end) {
409 bh = ufs_bread (dir, offset >> sb->s_blocksize_bits, 1, &err); 350 /* We hit i_size */
410 if (!bh) 351 name_len = 0;
411 return err; 352 rec_len = UFS_SECTOR_SIZE;
412 }
413 if (dir->i_size <= offset) {
414 if (dir->i_size == 0) {
415 brelse(bh);
416 return -ENOENT;
417 }
418 de = (struct ufs_dir_entry *) (bh->b_data + fragoff);
419 de->d_ino = 0;
420 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE); 353 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
421 ufs_set_de_namlen(sb, de, 0); 354 de->d_ino = 0;
422 dir->i_size = offset + UFS_SECTOR_SIZE; 355 goto got_it;
423 mark_inode_dirty(dir);
424 } else {
425 de = (struct ufs_dir_entry *) bh->b_data;
426 } 356 }
357 if (de->d_reclen == 0) {
358 ufs_error(dir->i_sb, __FUNCTION__,
359 "zero-length directory entry");
360 err = -EIO;
361 goto out_unlock;
362 }
363 err = -EEXIST;
364 if (ufs_match(sb, namelen, name, de))
365 goto out_unlock;
366 name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
367 rec_len = fs16_to_cpu(sb, de->d_reclen);
368 if (!de->d_ino && rec_len >= reclen)
369 goto got_it;
370 if (rec_len >= name_len + reclen)
371 goto got_it;
372 de = (struct ufs_dir_entry *) ((char *) de + rec_len);
427 } 373 }
428 if (!ufs_check_dir_entry ("ufs_add_entry", dir, de, bh, offset)) { 374 unlock_page(page);
429 brelse (bh); 375 ufs_put_page(page);
430 return -ENOENT;
431 }
432 if (ufs_match(sb, namelen, name, de)) {
433 brelse (bh);
434 return -EEXIST;
435 }
436 if (de->d_ino == 0 && fs16_to_cpu(sb, de->d_reclen) >= rec_len)
437 break;
438
439 if (fs16_to_cpu(sb, de->d_reclen) >=
440 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)) + rec_len)
441 break;
442 offset += fs16_to_cpu(sb, de->d_reclen);
443 de = (struct ufs_dir_entry *) ((char *) de + fs16_to_cpu(sb, de->d_reclen));
444 } 376 }
445 377 BUG();
378 return -EINVAL;
379
380got_it:
381 from = (char*)de - (char*)page_address(page);
382 to = from + rec_len;
383 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
384 if (err)
385 goto out_unlock;
446 if (de->d_ino) { 386 if (de->d_ino) {
447 de1 = (struct ufs_dir_entry *) ((char *) de + 387 struct ufs_dir_entry *de1 =
448 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de))); 388 (struct ufs_dir_entry *) ((char *) de + name_len);
449 de1->d_reclen = 389 de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
450 cpu_to_fs16(sb, fs16_to_cpu(sb, de->d_reclen) - 390 de->d_reclen = cpu_to_fs16(sb, name_len);
451 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de))); 391
452 de->d_reclen =
453 cpu_to_fs16(sb, UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
454 de = de1; 392 de = de1;
455 } 393 }
456 de->d_ino = 0; 394
457 ufs_set_de_namlen(sb, de, namelen); 395 ufs_set_de_namlen(sb, de, namelen);
458 memcpy (de->d_name, name, namelen + 1); 396 memcpy(de->d_name, name, namelen + 1);
459 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 397 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
460 ufs_set_de_type(sb, de, inode->i_mode); 398 ufs_set_de_type(sb, de, inode->i_mode);
461 mark_buffer_dirty(bh); 399
462 if (IS_DIRSYNC(dir)) 400 err = ufs_commit_chunk(page, from, to);
463 sync_dirty_buffer(bh);
464 brelse (bh);
465 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; 401 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
466 dir->i_version++; 402
467 mark_inode_dirty(dir); 403 mark_inode_dirty(dir);
404 /* OFFSET_CACHE */
405out_put:
406 ufs_put_page(page);
407out:
408 return err;
409out_unlock:
410 unlock_page(page);
411 goto out_put;
412}
413
414static inline unsigned
415ufs_validate_entry(struct super_block *sb, char *base,
416 unsigned offset, unsigned mask)
417{
418 struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
419 struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
420 while ((char*)p < (char*)de) {
421 if (p->d_reclen == 0)
422 break;
423 p = ufs_next_entry(sb, p);
424 }
425 return (char *)p - base;
426}
468 427
469 UFSD(("EXIT\n")) 428
429/*
430 * This is blatantly stolen from ext2fs
431 */
432static int
433ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
434{
435 loff_t pos = filp->f_pos;
436 struct inode *inode = filp->f_dentry->d_inode;
437 struct super_block *sb = inode->i_sb;
438 unsigned int offset = pos & ~PAGE_CACHE_MASK;
439 unsigned long n = pos >> PAGE_CACHE_SHIFT;
440 unsigned long npages = ufs_dir_pages(inode);
441 unsigned chunk_mask = ~(UFS_SECTOR_SIZE - 1);
442 int need_revalidate = filp->f_version != inode->i_version;
443 unsigned flags = UFS_SB(sb)->s_flags;
444
445 UFSD(("BEGIN"));
446
447 if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
448 return 0;
449
450 for ( ; n < npages; n++, offset = 0) {
451 char *kaddr, *limit;
452 struct ufs_dir_entry *de;
453
454 struct page *page = ufs_get_page(inode, n);
455
456 if (IS_ERR(page)) {
457 ufs_error(sb, __FUNCTION__,
458 "bad page in #%lu",
459 inode->i_ino);
460 filp->f_pos += PAGE_CACHE_SIZE - offset;
461 return -EIO;
462 }
463 kaddr = page_address(page);
464 if (unlikely(need_revalidate)) {
465 if (offset) {
466 offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
467 filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
468 }
469 filp->f_version = inode->i_version;
470 need_revalidate = 0;
471 }
472 de = (struct ufs_dir_entry *)(kaddr+offset);
473 limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
474 for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
475 if (de->d_reclen == 0) {
476 ufs_error(sb, __FUNCTION__,
477 "zero-length directory entry");
478 ufs_put_page(page);
479 return -EIO;
480 }
481 if (de->d_ino) {
482 int over;
483 unsigned char d_type = DT_UNKNOWN;
484
485 offset = (char *)de - kaddr;
486
487 UFSD(("filldir(%s,%u)\n", de->d_name,
488 fs32_to_cpu(sb, de->d_ino)));
489 UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de)));
490
491 if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
492 d_type = de->d_u.d_44.d_type;
493
494 over = filldir(dirent, de->d_name,
495 ufs_get_de_namlen(sb, de),
496 (n<<PAGE_CACHE_SHIFT) | offset,
497 fs32_to_cpu(sb, de->d_ino), d_type);
498 if (over) {
499 ufs_put_page(page);
500 return 0;
501 }
502 }
503 filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
504 }
505 ufs_put_page(page);
506 }
470 return 0; 507 return 0;
471} 508}
472 509
510
473/* 511/*
474 * ufs_delete_entry deletes a directory entry by merging it with the 512 * ufs_delete_entry deletes a directory entry by merging it with the
475 * previous entry. 513 * previous entry.
476 */ 514 */
477int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir, 515int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
478 struct buffer_head * bh ) 516 struct page * page)
479
480{ 517{
481 struct super_block * sb; 518 struct super_block *sb = inode->i_sb;
482 struct ufs_dir_entry * de, * pde; 519 struct address_space *mapping = page->mapping;
483 unsigned i; 520 char *kaddr = page_address(page);
484 521 unsigned from = ((char*)dir - kaddr) & ~(UFS_SECTOR_SIZE - 1);
485 UFSD(("ENTER\n")) 522 unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
523 struct ufs_dir_entry *pde = NULL;
524 struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
525 int err;
486 526
487 sb = inode->i_sb; 527 UFSD(("ENTER\n"));
488 i = 0;
489 pde = NULL;
490 de = (struct ufs_dir_entry *) bh->b_data;
491
492 UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
493 fs32_to_cpu(sb, de->d_ino),
494 fs16_to_cpu(sb, de->d_reclen),
495 ufs_get_de_namlen(sb, de), de->d_name))
496 528
497 while (i < bh->b_size) { 529 UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
498 if (!ufs_check_dir_entry ("ufs_delete_entry", inode, de, bh, i)) { 530 fs32_to_cpu(sb, de->d_ino),
499 brelse(bh); 531 fs16_to_cpu(sb, de->d_reclen),
500 return -EIO; 532 ufs_get_de_namlen(sb, de), de->d_name));
501 } 533
502 if (de == dir) { 534 while ((char*)de < (char*)dir) {
503 if (pde) 535 if (de->d_reclen == 0) {
504 fs16_add(sb, &pde->d_reclen, 536 ufs_error(inode->i_sb, __FUNCTION__,
505 fs16_to_cpu(sb, dir->d_reclen)); 537 "zero-length directory entry");
506 dir->d_ino = 0; 538 err = -EIO;
507 inode->i_version++; 539 goto out;
508 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
509 mark_inode_dirty(inode);
510 mark_buffer_dirty(bh);
511 if (IS_DIRSYNC(inode))
512 sync_dirty_buffer(bh);
513 brelse(bh);
514 UFSD(("EXIT\n"))
515 return 0;
516 } 540 }
517 i += fs16_to_cpu(sb, de->d_reclen); 541 pde = de;
518 if (i == UFS_SECTOR_SIZE) pde = NULL; 542 de = ufs_next_entry(sb, de);
519 else pde = de;
520 de = (struct ufs_dir_entry *)
521 ((char *) de + fs16_to_cpu(sb, de->d_reclen));
522 if (i == UFS_SECTOR_SIZE && de->d_reclen == 0)
523 break;
524 } 543 }
525 UFSD(("EXIT\n")) 544 if (pde)
526 brelse(bh); 545 from = (char*)pde - (char*)page_address(page);
527 return -ENOENT; 546 lock_page(page);
547 err = mapping->a_ops->prepare_write(NULL, page, from, to);
548 BUG_ON(err);
549 if (pde)
550 pde->d_reclen = cpu_to_fs16(sb, to-from);
551 dir->d_ino = 0;
552 err = ufs_commit_chunk(page, from, to);
553 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
554 mark_inode_dirty(inode);
555out:
556 ufs_put_page(page);
557 UFSD(("EXIT\n"));
558 return err;
528} 559}
529 560
530int ufs_make_empty(struct inode * inode, struct inode *dir) 561int ufs_make_empty(struct inode * inode, struct inode *dir)
531{ 562{
532 struct super_block * sb = dir->i_sb; 563 struct super_block * sb = dir->i_sb;
533 struct buffer_head * dir_block; 564 struct address_space *mapping = inode->i_mapping;
565 struct page *page = grab_cache_page(mapping, 0);
534 struct ufs_dir_entry * de; 566 struct ufs_dir_entry * de;
567 char *base;
535 int err; 568 int err;
536 569
537 dir_block = ufs_bread (inode, 0, 1, &err); 570 if (!page)
538 if (!dir_block) 571 return -ENOMEM;
539 return err; 572 kmap(page);
573 err = mapping->a_ops->prepare_write(NULL, page, 0, UFS_SECTOR_SIZE);
574 if (err) {
575 unlock_page(page);
576 goto fail;
577 }
578
579
580 base = (char*)page_address(page);
581 memset(base, 0, PAGE_CACHE_SIZE);
582
583 de = (struct ufs_dir_entry *) base;
540 584
541 inode->i_blocks = sb->s_blocksize / UFS_SECTOR_SIZE;
542 de = (struct ufs_dir_entry *) dir_block->b_data;
543 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 585 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
544 ufs_set_de_type(sb, de, inode->i_mode); 586 ufs_set_de_type(sb, de, inode->i_mode);
545 ufs_set_de_namlen(sb, de, 1); 587 ufs_set_de_namlen(sb, de, 1);
@@ -552,72 +594,65 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
552 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1)); 594 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
553 ufs_set_de_namlen(sb, de, 2); 595 ufs_set_de_namlen(sb, de, 2);
554 strcpy (de->d_name, ".."); 596 strcpy (de->d_name, "..");
555 mark_buffer_dirty(dir_block); 597
556 brelse (dir_block); 598 err = ufs_commit_chunk(page, 0, UFS_SECTOR_SIZE);
557 mark_inode_dirty(inode); 599fail:
558 return 0; 600 kunmap(page);
601 page_cache_release(page);
602 return err;
559} 603}
560 604
561/* 605/*
562 * routine to check that the specified directory is empty (for rmdir) 606 * routine to check that the specified directory is empty (for rmdir)
563 */ 607 */
564int ufs_empty_dir (struct inode * inode) 608int ufs_empty_dir(struct inode * inode)
565{ 609{
566 struct super_block * sb; 610 struct super_block *sb = inode->i_sb;
567 unsigned long offset; 611 struct page *page = NULL;
568 struct buffer_head * bh; 612 unsigned long i, npages = ufs_dir_pages(inode);
569 struct ufs_dir_entry * de, * de1; 613
570 int err; 614 for (i = 0; i < npages; i++) {
571 615 char *kaddr;
572 sb = inode->i_sb; 616 struct ufs_dir_entry *de;
573 617 page = ufs_get_page(inode, i);
574 if (inode->i_size < UFS_DIR_REC_LEN(1) + UFS_DIR_REC_LEN(2) || 618
575 !(bh = ufs_bread (inode, 0, 0, &err))) { 619 if (IS_ERR(page))
576 ufs_warning (inode->i_sb, "empty_dir", 620 continue;
577 "bad directory (dir #%lu) - no data block", 621
578 inode->i_ino); 622 kaddr = page_address(page);
579 return 1; 623 de = (struct ufs_dir_entry *)kaddr;
580 } 624 kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);
581 de = (struct ufs_dir_entry *) bh->b_data; 625
582 de1 = (struct ufs_dir_entry *) 626 while ((char *)de <= kaddr) {
583 ((char *)de + fs16_to_cpu(sb, de->d_reclen)); 627 if (de->d_reclen == 0) {
584 if (fs32_to_cpu(sb, de->d_ino) != inode->i_ino || de1->d_ino == 0 || 628 ufs_error(inode->i_sb, __FUNCTION__,
585 strcmp (".", de->d_name) || strcmp ("..", de1->d_name)) { 629 "zero-length directory entry: "
586 ufs_warning (inode->i_sb, "empty_dir", 630 "kaddr=%p, de=%p\n", kaddr, de);
587 "bad directory (dir #%lu) - no `.' or `..'", 631 goto not_empty;
588 inode->i_ino);
589 return 1;
590 }
591 offset = fs16_to_cpu(sb, de->d_reclen) + fs16_to_cpu(sb, de1->d_reclen);
592 de = (struct ufs_dir_entry *)
593 ((char *)de1 + fs16_to_cpu(sb, de1->d_reclen));
594 while (offset < inode->i_size ) {
595 if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
596 brelse (bh);
597 bh = ufs_bread (inode, offset >> sb->s_blocksize_bits, 1, &err);
598 if (!bh) {
599 ufs_error (sb, "empty_dir",
600 "directory #%lu contains a hole at offset %lu",
601 inode->i_ino, offset);
602 offset += sb->s_blocksize;
603 continue;
604 } 632 }
605 de = (struct ufs_dir_entry *) bh->b_data; 633 if (de->d_ino) {
606 } 634 u16 namelen=ufs_get_de_namlen(sb, de);
607 if (!ufs_check_dir_entry ("empty_dir", inode, de, bh, offset)) { 635 /* check for . and .. */
608 brelse (bh); 636 if (de->d_name[0] != '.')
609 return 1; 637 goto not_empty;
610 } 638 if (namelen > 2)
611 if (de->d_ino) { 639 goto not_empty;
612 brelse (bh); 640 if (namelen < 2) {
613 return 0; 641 if (inode->i_ino !=
642 fs32_to_cpu(sb, de->d_ino))
643 goto not_empty;
644 } else if (de->d_name[1] != '.')
645 goto not_empty;
646 }
647 de = ufs_next_entry(sb, de);
614 } 648 }
615 offset += fs16_to_cpu(sb, de->d_reclen); 649 ufs_put_page(page);
616 de = (struct ufs_dir_entry *)
617 ((char *)de + fs16_to_cpu(sb, de->d_reclen));
618 } 650 }
619 brelse (bh);
620 return 1; 651 return 1;
652
653not_empty:
654 ufs_put_page(page);
655 return 0;
621} 656}
622 657
623const struct file_operations ufs_dir_operations = { 658const struct file_operations ufs_dir_operations = {