path: root/fs/ufs/dir.c
author    Evgeniy Dushistov <dushistov@mail.ru>                 2007-02-08 17:20:25 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-09 12:25:46 -0500
commit    f336953bfdee8d5e7f69cb8e080704546541f04b
tree      03c86a0c0ce782edb4cf077fddea08f29dbdae84  /fs/ufs/dir.c
parent    cbb9450234fb28f60fea36520cd710a988ac5812
[PATCH] ufs: restore back support of openstep
This fixes a regression introduced around 2.6.16 by ufs-directory-and-page-cache-from-blocks-to-pages.patch: in addition to converting the directory code from the block to the page cache mechanism, that patch added new directory-integrity checks, one of which requires that a directory entry not cross a directory-chunk boundary. However, some flavours of UFS, namely OpenStep UFS and Apple UFS (which appear to be the same filesystem), use a different directory chunk size than the common UFS variants (BSD and Solaris UFS). This patch therefore makes the directory code work with a variable directory chunk size and sets the correct size for ufstype=openstep.

Tested on Darwin UFS.

Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
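The directory-integrity test that this patch parameterises is the XOR trick in ufs_check_page(): an entry starting at byte offset offs with length rec_len must keep its first and last byte inside the same directory chunk. Below is a minimal stand-alone sketch of that test (illustration only, not kernel code; the helper name entry_spans_chunk is invented here, and the chunk size is assumed to be a power of two, as it is when taken from s_dirblksize):

#include <stdbool.h>

/* Illustration of the check parameterised by this patch: the chunk size is
 * passed in rather than hard-coded to UFS_SECTOR_SIZE. */
bool entry_spans_chunk(unsigned offs, unsigned rec_len, unsigned chunk_size)
{
	unsigned chunk_mask = chunk_size - 1;	/* chunk_size is a power of two */

	/* the first and last byte of the entry must fall in the same chunk;
	 * if any bit above the chunk mask differs, the entry crosses a boundary */
	return (((offs + rec_len - 1) ^ offs) & ~chunk_mask) != 0;
}

With the chunk size read from the superblock, the same check accepts OpenStep/Apple UFS directories whose chunks are larger than UFS_SECTOR_SIZE.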
Diffstat (limited to 'fs/ufs/dir.c')
-rw-r--r--  fs/ufs/dir.c  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 433b6f68403a..a6c0ca9f48bf 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -106,12 +106,13 @@ static void ufs_check_page(struct page *page)
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
 	unsigned limit = PAGE_CACHE_SIZE;
+	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
 	struct ufs_dir_entry *p;
 	char *error;
 
 	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
 		limit = dir->i_size & ~PAGE_CACHE_MASK;
-		if (limit & (UFS_SECTOR_SIZE - 1))
+		if (limit & chunk_mask)
 			goto Ebadsize;
 		if (!limit)
 			goto out;
@@ -126,7 +127,7 @@ static void ufs_check_page(struct page *page)
 			goto Ealign;
 		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
 			goto Enamelen;
-		if (((offs + rec_len - 1) ^ offs) & ~(UFS_SECTOR_SIZE-1))
+		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
 			goto Espan;
 		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
 						UFS_SB(sb)->s_uspi->s_ncg))
@@ -310,6 +311,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
 	int namelen = dentry->d_name.len;
 	struct super_block *sb = dir->i_sb;
 	unsigned reclen = UFS_DIR_REC_LEN(namelen);
+	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
 	unsigned short rec_len, name_len;
 	struct page *page = NULL;
 	struct ufs_dir_entry *de;
@@ -342,8 +344,8 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
 		if ((char *)de == dir_end) {
 			/* We hit i_size */
 			name_len = 0;
-			rec_len = UFS_SECTOR_SIZE;
-			de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
+			rec_len = chunk_size;
+			de->d_reclen = cpu_to_fs16(sb, chunk_size);
 			de->d_ino = 0;
 			goto got_it;
 		}
@@ -431,7 +433,7 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	unsigned int offset = pos & ~PAGE_CACHE_MASK;
 	unsigned long n = pos >> PAGE_CACHE_SHIFT;
 	unsigned long npages = ufs_dir_pages(inode);
-	unsigned chunk_mask = ~(UFS_SECTOR_SIZE - 1);
+	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
 	int need_revalidate = filp->f_version != inode->i_version;
 	unsigned flags = UFS_SB(sb)->s_flags;
 
@@ -511,7 +513,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
 	struct super_block *sb = inode->i_sb;
 	struct address_space *mapping = page->mapping;
 	char *kaddr = page_address(page);
-	unsigned from = ((char*)dir - kaddr) & ~(UFS_SECTOR_SIZE - 1);
+	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
 	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
 	struct ufs_dir_entry *pde = NULL;
 	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
@@ -556,6 +558,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 	struct super_block * sb = dir->i_sb;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page = grab_cache_page(mapping, 0);
+	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
 	struct ufs_dir_entry * de;
 	char *base;
 	int err;
@@ -563,7 +566,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 	if (!page)
 		return -ENOMEM;
 	kmap(page);
-	err = mapping->a_ops->prepare_write(NULL, page, 0, UFS_SECTOR_SIZE);
+	err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
 	if (err) {
 		unlock_page(page);
 		goto fail;
@@ -584,11 +587,11 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 		((char *)de + fs16_to_cpu(sb, de->d_reclen));
 	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
 	ufs_set_de_type(sb, de, dir->i_mode);
-	de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
+	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
 	ufs_set_de_namlen(sb, de, 2);
 	strcpy (de->d_name, "..");
 
-	err = ufs_commit_chunk(page, 0, UFS_SECTOR_SIZE);
+	err = ufs_commit_chunk(page, 0, chunk_size);
 fail:
 	kunmap(page);
 	page_cache_release(page);
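For a concrete illustration of why the hard-coded chunk size rejected valid OpenStep/Apple UFS directories: an entry that legitimately crosses a 512-byte boundary but stays within a larger chunk used to trip the Espan check above. The numbers below are made up, and 1024 is used only as an example of a chunk size larger than UFS_SECTOR_SIZE, not as the value the patch configures:

#include <assert.h>

int main(void)
{
	/* hypothetical entry: starts at byte offset 500 of the page, 100 bytes long,
	 * so it occupies bytes 500..599 */
	unsigned offs = 500, rec_len = 100;

	/* old behaviour: with a hard-coded 512-byte chunk the entry crosses the
	 * boundary at byte 512, so ufs_check_page() would jump to Espan */
	assert((((offs + rec_len - 1) ^ offs) & ~(512u - 1)) != 0);

	/* new behaviour: with a larger per-superblock chunk size (1024 here,
	 * purely for illustration) the same entry stays inside one chunk */
	assert((((offs + rec_len - 1) ^ offs) & ~(1024u - 1)) == 0);

	return 0;
}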