Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/file.c | 6
-rw-r--r--  fs/bio.c | 25
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/buffer.c | 36
-rw-r--r--  fs/cifs/cifsfs.c | 18
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/inode.c | 6
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cramfs/inode.c | 31
-rw-r--r--  fs/dcache.c | 14
-rw-r--r--  fs/dcookies.c | 25
-rw-r--r--  fs/direct-io.c | 27
-rw-r--r--  fs/dnotify.c | 4
-rw-r--r--  fs/eventpoll.c | 6
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/ext2/inode.c | 14
-rw-r--r--  fs/ext3/balloc.c | 109
-rw-r--r--  fs/ext3/dir.c | 5
-rw-r--r--  fs/ext3/inode.c | 582
-rw-r--r--  fs/ext3/super.c | 6
-rw-r--r--  fs/fat/inode.c | 2
-rw-r--r--  fs/fcntl.c | 4
-rw-r--r--  fs/hfs/inode.c | 13
-rw-r--r--  fs/hfsplus/inode.c | 13
-rw-r--r--  fs/inode.c | 8
-rw-r--r--  fs/inotify.c | 12
-rw-r--r--  fs/jbd/transaction.c | 13
-rw-r--r--  fs/jffs2/compr_zlib.c | 19
-rw-r--r--  fs/jfs/inode.c | 5
-rw-r--r--  fs/jfs/jfs_logmgr.c | 27
-rw-r--r--  fs/jfs/jfs_metapage.c | 11
-rw-r--r--  fs/lockd/host.c | 19
-rw-r--r--  fs/lockd/svc.c | 17
-rw-r--r--  fs/lockd/svcsubs.c | 17
-rw-r--r--  fs/locks.c | 41
-rw-r--r--  fs/mpage.c | 104
-rw-r--r--  fs/namespace.c | 4
-rw-r--r--  fs/nfs/callback.c | 11
-rw-r--r--  fs/nfs/file.c | 3
-rw-r--r--  fs/nfs/read.c | 6
-rw-r--r--  fs/nfs/write.c | 12
-rw-r--r--  fs/nfsd/nfs4state.c | 47
-rw-r--r--  fs/ntfs/logfile.c | 4
-rw-r--r--  fs/ntfs/mft.c | 2
-rw-r--r--  fs/ntfs/ntfs.h | 29
-rw-r--r--  fs/ocfs2/aops.c | 2
-rw-r--r--  fs/ocfs2/journal.c | 10
-rw-r--r--  fs/ocfs2/namei.c | 5
-rw-r--r--  fs/partitions/devfs.c | 12
-rw-r--r--  fs/pipe.c | 2
-rw-r--r--  fs/proc/array.c | 5
-rw-r--r--  fs/proc/generic.c | 32
-rw-r--r--  fs/proc/proc_devtree.c | 2
-rw-r--r--  fs/reiserfs/inode.c | 9
-rw-r--r--  fs/reiserfs/prints.c | 2
-rw-r--r--  fs/super.c | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 15
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 5
58 files changed, 885 insertions, 603 deletions
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 150b19227922..7bb716887e29 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -28,7 +28,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
 #endif
 
 static int afs_file_readpage(struct file *file, struct page *page);
-static int afs_file_invalidatepage(struct page *page, unsigned long offset);
+static void afs_file_invalidatepage(struct page *page, unsigned long offset);
 static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
 
 struct inode_operations afs_file_inode_operations = {
@@ -212,7 +212,7 @@ int afs_cache_get_page_cookie(struct page *page,
 /*
  * invalidate part or all of a page
  */
-static int afs_file_invalidatepage(struct page *page, unsigned long offset)
+static void afs_file_invalidatepage(struct page *page, unsigned long offset)
 {
 	int ret = 1;
 
@@ -238,11 +238,11 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
 			if (!PageWriteback(page))
 				ret = page->mapping->a_ops->releasepage(page,
 									0);
+			/* possibly should BUG_ON(!ret); - neilb */
 		}
 	}
 
 	_leave(" = %d", ret);
-	return ret;
 } /* end afs_file_invalidatepage() */
 
 /*****************************************************************************/
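
This is the tree-wide pattern of the series: ->invalidatepage returned a release status that no caller consumed, so it becomes void and any releasepage attempt is advisory. A minimal sketch of a filesystem's invalidatepage under the new contract (myfs_invalidatepage is a hypothetical name, not part of this patch):

/* Sketch only: a ->invalidatepage under the new void contract. */
static void myfs_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(!PageLocked(page));
	/* offset == 0 means the whole page is going away */
	if (offset == 0 && !PageWriteback(page))
		try_to_release_page(page, 0);	/* result is advisory now */
}
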
diff --git a/fs/bio.c b/fs/bio.c
index 73e664c01d30..eb8fbc53f2cd 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
  * basically we just need to survive
  */
 #define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
 	int nr_vecs;
@@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
-	kfree(bp);
-}
-
 
 /*
  * create memory pools for biovec's in a bio_set.
@@ -1151,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 		if (i >= scale)
 			pool_entries >>= 1;
 
-		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
-					mempool_free_slab, bp->slab);
+		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
 	}
@@ -1189,9 +1178,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
 	if (!bs)
 		return NULL;
 
-	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
-			mempool_free_slab, bio_slab);
-
+	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
@@ -1254,8 +1241,8 @@ static int __init init_bio(void)
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+				sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
 
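
mempool_create_slab_pool() and mempool_create_kmalloc_pool() replace the open-coded allocator callback pairs seen above; they wrap mempool_create() with the standard slab or kmalloc alloc/free functions, which is why bio_pair_alloc()/bio_pair_free() can be deleted outright. A sketch of the conversion (my_cachep and struct my_obj are hypothetical):

/* Sketch: converting open-coded pools to the new helpers. */
mempool_t *pool;

/* was: mempool_create(16, mempool_alloc_slab, mempool_free_slab, my_cachep) */
pool = mempool_create_slab_pool(16, my_cachep);

/* was: mempool_create(16, my_alloc, my_free, NULL) with hand-written
 * kmalloc/kfree callbacks in the style of bio_pair_alloc() above */
pool = mempool_create_kmalloc_pool(16, sizeof(struct my_obj));
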
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 573fc8e0b67a..5983d42df015 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -131,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 
 static int
 blkdev_get_blocks(struct inode *inode, sector_t iblock,
-		unsigned long max_blocks, struct buffer_head *bh, int create)
+		struct buffer_head *bh, int create)
 {
 	sector_t end_block = max_block(I_BDEV(inode));
+	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
 
 	if ((iblock + max_blocks) > end_block) {
 		max_blocks = end_block - iblock;
@@ -234,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static kmem_cache_t * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
@@ -308,7 +309,7 @@ static struct file_system_type bd_type = {
 	.kill_sb	= kill_anon_super,
 };
 
-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
 struct super_block *blockdev_superblock;
 
 void __init bdev_cache_init(void)
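
With the separate get_blocks_t gone, a multi-block mapping request now travels in bh->b_size: the caller pre-loads it with the byte count it wants mapped, and the callee derives max_blocks from it exactly as blkdev_get_blocks() does above. A hedged sketch of the callee side (myfs_get_block and the identity mapping are illustrative only):

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh, int create)
{
	/* the caller set bh->b_size to the span it wants mapped */
	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
	sector_t phys = iblock;		/* identity map, for illustration */

	map_bh(bh, inode->i_sb, phys);
	/* report back how much was actually mapped, in bytes */
	bh->b_size = max_blocks << inode->i_blkbits;
	return 0;
}
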
diff --git a/fs/buffer.c b/fs/buffer.c
index 4342ab0ad99a..d597758dd129 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
 	if (all_mapped) {
 		printk("__find_get_block_slow() failed. "
 			"block=%llu, b_blocknr=%llu\n",
-			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
-		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+			(unsigned long long)block,
+			(unsigned long long)bh->b_blocknr);
+		printk("b_state=0x%08lx, b_size=%zu\n",
+			bh->b_state, bh->b_size);
 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
 	}
 out_unlock:
@@ -1590,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page);
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
 {
 	struct buffer_head *head, *bh, *next;
 	unsigned int curr_off = 0;
-	int ret = 1;
 
 	BUG_ON(!PageLocked(page));
 	if (!page_has_buffers(page))
@@ -1621,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset)
 	 * so real IO is not possible anymore.
 	 */
 	if (offset == 0)
-		ret = try_to_release_page(page, 0);
+		try_to_release_page(page, 0);
 out:
-	return ret;
+	return;
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-int do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned long offset)
 {
-	int (*invalidatepage)(struct page *, unsigned long);
-	invalidatepage = page->mapping->a_ops->invalidatepage;
-	if (invalidatepage == NULL)
-		invalidatepage = block_invalidatepage;
-	return (*invalidatepage)(page, offset);
+	void (*invalidatepage)(struct page *, unsigned long);
+	invalidatepage = page->mapping->a_ops->invalidatepage ? :
+		block_invalidatepage;
+	(*invalidatepage)(page, offset);
 }
 
 /*
@@ -1735,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	sector_t block;
 	sector_t last_block;
 	struct buffer_head *bh, *head;
+	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 
 	BUG_ON(!PageLocked(page));
@@ -1742,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 
 	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, 1 << inode->i_blkbits,
+		create_empty_buffers(page, blocksize,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
 	}
 
@@ -1777,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto recover;
@@ -1930,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (buffer_new(bh))
 			clear_buffer_new(bh);
 		if (!buffer_mapped(bh)) {
+			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				break;
@@ -2085,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 
 		fully_mapped = 0;
 		if (iblock < lblock) {
+			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, iblock, bh, 0);
 			if (err)
 				SetPageError(page);
@@ -2406,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 		create = 1;
 		if (block_start >= to)
 			create = 0;
+		map_bh.b_size = blocksize;
 		ret = get_block(inode, block_in_file + block_in_page,
 				&map_bh, create);
 		if (ret)
@@ -2666,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping,
 
 	err = 0;
 	if (!buffer_mapped(bh)) {
+		WARN_ON(bh->b_size != blocksize);
 		err = get_block(inode, iblock, bh, 0);
 		if (err)
 			goto unlock;
@@ -2752,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 	struct inode *inode = mapping->host;
 	tmp.b_state = 0;
 	tmp.b_blocknr = 0;
+	tmp.b_size = 1 << inode->i_blkbits;
 	get_block(inode, block, &tmp, 0);
 	return tmp.b_blocknr;
 }
@@ -3004,7 +3011,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
 {
 	struct address_space *mapping;
 
@@ -3012,7 +3019,6 @@ int block_sync_page(struct page *page)
 	mapping = page_mapping(page);
 	if (mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, page);
-	return 0;
 }
 
 /*
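
The buffer.c side enforces the other half of the b_size contract: every buffer_head handed to a get_block callback must carry an initialized b_size, which is what the new WARN_ON(bh->b_size != blocksize) checks and the assignments in nobh_prepare_write()/generic_block_bmap() above are about. A sketch of the caller-side idiom, modeled on generic_block_bmap():

struct buffer_head tmp;

tmp.b_state = 0;
tmp.b_blocknr = 0;
tmp.b_size = 1 << inode->i_blkbits;	/* one block's worth, now required */
get_block(inode, block, &tmp, 0);
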
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 221b3334b737..6b99b51d6694 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -738,10 +738,8 @@ cifs_init_request_bufs(void)
 		cERROR(1,("cifs_min_rcv set to maximum (64)"));
 	}
 
-	cifs_req_poolp = mempool_create(cifs_min_rcv,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					cifs_req_cachep);
+	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
+						cifs_req_cachep);
 
 	if(cifs_req_poolp == NULL) {
 		kmem_cache_destroy(cifs_req_cachep);
@@ -771,10 +769,8 @@ cifs_init_request_bufs(void)
 		cFYI(1,("cifs_min_small set to maximum (256)"));
 	}
 
-	cifs_sm_req_poolp = mempool_create(cifs_min_small,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					cifs_sm_req_cachep);
+	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
+						cifs_sm_req_cachep);
 
 	if(cifs_sm_req_poolp == NULL) {
 		mempool_destroy(cifs_req_poolp);
@@ -808,10 +804,8 @@ cifs_init_mids(void)
 	if (cifs_mid_cachep == NULL)
 		return -ENOMEM;
 
-	cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					cifs_mid_cachep);
+	/* 3 is a reasonable minimum number of simultaneous operations */
+	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
 	if(cifs_mid_poolp == NULL) {
 		kmem_cache_destroy(cifs_mid_cachep);
 		return -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 165d67426381..fb49aef1f2ec 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1339,7 +1339,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
 	return rc;
 }
 
-/* static int cifs_sync_page(struct page *page)
+/* static void cifs_sync_page(struct page *page)
 {
 	struct address_space *mapping;
 	struct inode *inode;
@@ -1353,16 +1353,18 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
 		return 0;
 	inode = mapping->host;
 	if (!inode)
-		return 0; */
+		return; */
 
 /* fill in rpages then
    result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
 
 /*	cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
 
+#if 0
 	if (rc < 0)
 		return rc;
 	return 0;
+#endif
 } */
 
 /*
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ff93a9f81d1c..598eec9778f6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -163,9 +163,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 
 	if (num_of_bytes < end_of_file)
 		cFYI(1, ("allocation size less than end of file"));
-	cFYI(1,
-	     ("Size %ld and blocks %ld",
-	      (unsigned long) inode->i_size, inode->i_blocks));
+	cFYI(1, ("Size %ld and blocks %llu",
+		(unsigned long) inode->i_size,
+		(unsigned long long)inode->i_blocks));
 	if (S_ISREG(inode->i_mode)) {
 		cFYI(1, ("File inode"));
 		inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index edb3b6eb34bc..488bd0d81dcf 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -197,10 +197,10 @@ static void fill_in_inode(struct inode *tmp_inode,
 
 	if (allocation_size < end_of_file)
 		cFYI(1, ("May be sparse file, allocation less than file size"));
-	cFYI(1,
-	     ("File Size %ld and blocks %ld and blocksize %ld",
-	      (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks,
-	      tmp_inode->i_blksize));
+	cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld",
+		(unsigned long)tmp_inode->i_size,
+		(unsigned long long)tmp_inode->i_blocks,
+		tmp_inode->i_blksize));
 	if (S_ISREG(tmp_inode->i_mode)) {
 		cFYI(1, ("File inode"));
 		tmp_inode->i_op = &cifs_file_inode_ops;
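
Both cifs hunks fix the same printk bug: i_blocks is not unsigned long on every configuration, so %ld was wrong. The portable idiom, which the patch adopts, is an explicit widening cast:

/* i_blocks may be 32 or 64 bits depending on arch/config;
 * cast to the widest type and print with %llu */
printk(KERN_DEBUG "size %lu, blocks %llu\n",
	(unsigned long)inode->i_size,
	(unsigned long long)inode->i_blocks);
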
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 8ad52f5bf255..acc1b2c10a86 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -22,6 +22,7 @@
 #include <linux/cramfs_fs_sb.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
+#include <linux/mutex.h>
 #include <asm/semaphore.h>
 
 #include <asm/uaccess.h>
@@ -31,7 +32,7 @@ static struct inode_operations cramfs_dir_inode_operations;
 static struct file_operations cramfs_directory_operations;
 static struct address_space_operations cramfs_aops;
 
-static DECLARE_MUTEX(read_mutex);
+static DEFINE_MUTEX(read_mutex);
 
 
 /* These two macros may change in future, to provide better st_ino
@@ -250,20 +251,20 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 	memset(sbi, 0, sizeof(struct cramfs_sb_info));
 
 	/* Invalidate the read buffers on mount: think disk change.. */
-	down(&read_mutex);
+	mutex_lock(&read_mutex);
 	for (i = 0; i < READ_BUFFERS; i++)
 		buffer_blocknr[i] = -1;
 
 	/* Read the first block and get the superblock from it */
 	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
-	up(&read_mutex);
+	mutex_unlock(&read_mutex);
 
 	/* Do sanity checks on the superblock */
 	if (super.magic != CRAMFS_MAGIC) {
 		/* check at 512 byte offset */
-		down(&read_mutex);
+		mutex_lock(&read_mutex);
 		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
-		up(&read_mutex);
+		mutex_unlock(&read_mutex);
 		if (super.magic != CRAMFS_MAGIC) {
 			if (!silent)
 				printk(KERN_ERR "cramfs: wrong magic\n");
@@ -366,7 +367,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		mode_t mode;
 		int namelen, error;
 
-		down(&read_mutex);
+		mutex_lock(&read_mutex);
 		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
 		name = (char *)(de+1);
 
@@ -379,7 +380,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		memcpy(buf, name, namelen);
 		ino = CRAMINO(de);
 		mode = de->mode;
-		up(&read_mutex);
+		mutex_unlock(&read_mutex);
 		nextoffset = offset + sizeof(*de) + namelen;
 		for (;;) {
 			if (!namelen) {
@@ -410,7 +411,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 	unsigned int offset = 0;
 	int sorted;
 
-	down(&read_mutex);
+	mutex_lock(&read_mutex);
 	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
 	while (offset < dir->i_size) {
 		struct cramfs_inode *de;
@@ -433,7 +434,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 
 		for (;;) {
 			if (!namelen) {
-				up(&read_mutex);
+				mutex_unlock(&read_mutex);
 				return ERR_PTR(-EIO);
 			}
 			if (name[namelen-1])
@@ -447,7 +448,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 				continue;
 			if (!retval) {
 				struct cramfs_inode entry = *de;
-				up(&read_mutex);
+				mutex_unlock(&read_mutex);
 				d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
 				return NULL;
 			}
@@ -455,7 +456,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 		if (sorted)
 			break;
 	}
-	up(&read_mutex);
+	mutex_unlock(&read_mutex);
 	d_add(dentry, NULL);
 	return NULL;
 }
@@ -474,21 +475,21 @@ static int cramfs_readpage(struct file *file, struct page * page)
 		u32 start_offset, compr_len;
 
 		start_offset = OFFSET(inode) + maxblock*4;
-		down(&read_mutex);
+		mutex_lock(&read_mutex);
 		if (page->index)
 			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4);
 		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
-		up(&read_mutex);
+		mutex_unlock(&read_mutex);
 		pgdata = kmap(page);
 		if (compr_len == 0)
 			; /* hole */
 		else {
-			down(&read_mutex);
+			mutex_lock(&read_mutex);
 			bytes_filled = cramfs_uncompress_block(pgdata,
 				 PAGE_CACHE_SIZE,
 				 cramfs_read(sb, start_offset, compr_len),
 				 compr_len);
-			up(&read_mutex);
+			mutex_unlock(&read_mutex);
 		}
 	} else
 		pgdata = kmap(page);
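
The cramfs change is a mechanical semaphore-to-mutex conversion: a binary semaphore used purely for mutual exclusion becomes a struct mutex, which is cheaper and gains debug checking. The whole recipe, as applied above:

#include <linux/mutex.h>

static DEFINE_MUTEX(read_mutex);	/* was: static DECLARE_MUTEX(read_mutex); */

	mutex_lock(&read_mutex);	/* was: down(&read_mutex) */
	/* ... critical section ... */
	mutex_unlock(&read_mutex);	/* was: up(&read_mutex)   */
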
diff --git a/fs/dcache.c b/fs/dcache.c
index 0778f49f993b..19458d399502 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -35,7 +35,7 @@
 #include <linux/bootmem.h>
 
 
-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -43,7 +43,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -58,9 +58,9 @@ static kmem_cache_t *dentry_cache;
 #define D_HASHBITS     d_hash_shift
 #define D_HASHMASK     d_hash_mask
 
-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
 static LIST_HEAD(dentry_unused);
 
 /* Statistics gathering. */
@@ -1710,10 +1710,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
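
__read_mostly places a variable in a dedicated read-mostly data section so that write-once, read-often data does not share cache lines with frequently written data; dcache.c tags its caches and hash-table globals because they are set at boot and then only read on every path lookup. The annotation is purely a declaration attribute (foo_cachep is a hypothetical name):

/* written once during init, read constantly afterwards */
static kmem_cache_t *foo_cachep __read_mostly;
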
diff --git a/fs/dcookies.c b/fs/dcookies.c
index f8274a8f83bd..8749339bf4f6 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/dcookies.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 
 /* The dcookies are allocated from a kmem_cache and
@@ -36,10 +37,10 @@ struct dcookie_struct {
 };
 
 static LIST_HEAD(dcookie_users);
-static DECLARE_MUTEX(dcookie_sem);
-static kmem_cache_t * dcookie_cache;
-static struct list_head * dcookie_hashtable;
-static size_t hash_size;
+static DEFINE_MUTEX(dcookie_mutex);
+static kmem_cache_t *dcookie_cache __read_mostly;
+static struct list_head *dcookie_hashtable __read_mostly;
+static size_t hash_size __read_mostly;
 
 static inline int is_live(void)
 {
@@ -114,7 +115,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
 	int err = 0;
 	struct dcookie_struct * dcs;
 
-	down(&dcookie_sem);
+	mutex_lock(&dcookie_mutex);
 
 	if (!is_live()) {
 		err = -EINVAL;
@@ -134,7 +135,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
 	*cookie = dcookie_value(dcs);
 
 out:
-	up(&dcookie_sem);
+	mutex_unlock(&dcookie_mutex);
 	return err;
 }
 
@@ -157,7 +158,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	down(&dcookie_sem);
+	mutex_lock(&dcookie_mutex);
 
 	if (!is_live()) {
 		err = -EINVAL;
@@ -192,7 +193,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
 out_free:
 	kfree(kbuf);
 out:
-	up(&dcookie_sem);
+	mutex_unlock(&dcookie_mutex);
 	return err;
 }
 
@@ -290,7 +291,7 @@ struct dcookie_user * dcookie_register(void)
 {
 	struct dcookie_user * user;
 
-	down(&dcookie_sem);
+	mutex_lock(&dcookie_mutex);
 
 	user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
 	if (!user)
@@ -302,7 +303,7 @@ struct dcookie_user * dcookie_register(void)
 	list_add(&user->next, &dcookie_users);
 
 out:
-	up(&dcookie_sem);
+	mutex_unlock(&dcookie_mutex);
 	return user;
 out_free:
 	kfree(user);
@@ -313,7 +314,7 @@ out_free:
 
 void dcookie_unregister(struct dcookie_user * user)
 {
-	down(&dcookie_sem);
+	mutex_lock(&dcookie_mutex);
 
 	list_del(&user->next);
 	kfree(user);
@@ -321,7 +322,7 @@ void dcookie_unregister(struct dcookie_user * user)
 	if (!is_live())
 		dcookie_exit();
 
-	up(&dcookie_sem);
+	mutex_unlock(&dcookie_mutex);
 }
 
 EXPORT_SYMBOL_GPL(dcookie_register);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 235ed8d1f11e..9d1d2aa73e42 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -86,12 +86,12 @@ struct dio {
 	unsigned first_block_in_page;	/* doesn't change, Used only once */
 	int boundary;			/* prev block is at a boundary */
 	int reap_counter;		/* rate limit reaping */
-	get_blocks_t *get_blocks;	/* block mapping function */
+	get_block_t *get_block;		/* block mapping function */
 	dio_iodone_t *end_io;		/* IO completion function */
 	sector_t final_block_in_bio;	/* current final block in bio + 1 */
 	sector_t next_block_for_io;	/* next block to be put under IO,
 					   in dio_blocks units */
-	struct buffer_head map_bh;	/* last get_blocks() result */
+	struct buffer_head map_bh;	/* last get_block() result */
 
 	/*
 	 * Deferred addition of a page to the dio.  These variables are
@@ -211,9 +211,9 @@ static struct page *dio_get_page(struct dio *dio)
 
 /*
  * Called when all DIO BIO I/O has been completed - let the filesystem
- * know, if it registered an interest earlier via get_blocks.  Pass the
+ * know, if it registered an interest earlier via get_block.  Pass the
  * private field of the map buffer_head so that filesystems can use it
- * to hold additional state between get_blocks calls and dio_complete.
+ * to hold additional state between get_block calls and dio_complete.
  */
 static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
 {
@@ -493,7 +493,7 @@ static int dio_bio_reap(struct dio *dio)
  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
  * it uses the passed inode-relative block number as the file offset, as usual.
 *
- * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
+ * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
@@ -506,7 +506,7 @@ static int dio_bio_reap(struct dio *dio)
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
- * block at a time - it will repeatedly call get_blocks() as it walks the hole.
+ * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
 static int get_more_blocks(struct dio *dio)
 {
@@ -548,7 +548,8 @@ static int get_more_blocks(struct dio *dio)
 		 * at a higher level for inside-i_size block-instantiating
 		 * writes.
 		 */
-		ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
+		map_bh->b_size = fs_count << dio->blkbits;
+		ret = (*dio->get_block)(dio->inode, fs_startblk,
 						map_bh, create);
 	}
 	return ret;
@@ -783,11 +784,11 @@ static void dio_zero_block(struct dio *dio, int end)
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
- * So what we do is to permit the ->get_blocks function to populate bh.b_size
+ * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
- * it should set b_size to PAGE_SIZE or more inside get_blocks().  This gives
+ * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
 static int do_direct_IO(struct dio *dio)
@@ -947,7 +948,7 @@ out:
 static ssize_t
 direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	const struct iovec *iov, loff_t offset, unsigned long nr_segs,
-	unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io,
+	unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
 	struct dio *dio)
 {
 	unsigned long user_addr;
@@ -969,7 +970,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 
 	dio->boundary = 0;
 	dio->reap_counter = 0;
-	dio->get_blocks = get_blocks;
+	dio->get_block = get_block;
 	dio->end_io = end_io;
 	dio->map_bh.b_private = NULL;
 	dio->final_block_in_bio = -1;
@@ -1177,7 +1178,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 ssize_t
 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
+	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
 	int dio_lock_type)
 {
 	int seg;
@@ -1273,7 +1274,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 		(end > i_size_read(inode)));
 
 	retval = direct_io_worker(rw, iocb, inode, iov, offset,
-				nr_segs, blkbits, get_blocks, end_io, dio);
+				nr_segs, blkbits, get_block, end_io, dio);
 
 	if (rw == READ && dio_lock_type == DIO_LOCKING)
 		release_i_mutex = 0;
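
The comment hunks above spell out the contract direct-io now relies on: the caller encodes how much it wants mapped in map_bh->b_size (fs_count << blkbits), and the filesystem answers in the same field, either with the mapped extent or, with buffer_mapped() clear, with the size of a hole. A deliberately degenerate sketch of the hole side (myfs_get_block is hypothetical and reports every block as a one-block hole, just to show the convention):

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh, int create)
{
	/* leave the buffer unmapped; b_size advertises the hole size,
	 * and direct-io will keep calling as it walks across it */
	clear_buffer_mapped(bh);
	bh->b_size = 1 << inode->i_blkbits;
	return 0;
}
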
diff --git a/fs/dnotify.c b/fs/dnotify.c
index f3b540dd5d11..f932591df5a4 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -21,9 +21,9 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 
-int dir_notify_enable = 1;
+int dir_notify_enable __read_mostly = 1;
 
-static kmem_cache_t *dn_cache;
+static kmem_cache_t *dn_cache __read_mostly;
 
 static void redo_inode_mask(struct inode *inode)
 {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a0f682cdd03e..e067a06c6464 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -281,13 +281,13 @@ static struct mutex epmutex;
 static struct poll_safewake psw;
 
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache;
+static kmem_cache_t *epi_cache __read_mostly;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache;
+static kmem_cache_t *pwq_cache __read_mostly;
 
 /* Virtual fs used to allocate inodes for eventpoll files */
-static struct vfsmount *eventpoll_mnt;
+static struct vfsmount *eventpoll_mnt __read_mostly;
 
 /* File callbacks that implement the eventpoll file behaviour */
 static struct file_operations eventpoll_fops = {
diff --git a/fs/exec.c b/fs/exec.c
index 995cba3c62b8..c7397c46ad6d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -632,7 +632,7 @@ static int de_thread(struct task_struct *tsk)
 		 * synchronize with any firing (by calling del_timer_sync)
 		 * before we can safely let the old group leader die.
 		 */
-		sig->real_timer.data = current;
+		sig->tsk = current;
 		spin_unlock_irq(lock);
 		if (hrtimer_cancel(&sig->real_timer))
 			hrtimer_restart(&sig->real_timer);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index a717837f272e..04af9c45dce2 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -667,18 +667,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 	return generic_block_bmap(mapping,block,ext2_get_block);
 }
 
-static int
-ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
-			struct buffer_head *bh_result, int create)
-{
-	int ret;
-
-	ret = ext2_get_block(inode, iblock, bh_result, create);
-	if (ret == 0)
-		bh_result->b_size = (1 << inode->i_blkbits);
-	return ret;
-}
-
 static ssize_t
 ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 			loff_t offset, unsigned long nr_segs)
@@ -687,7 +675,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	struct inode *inode = file->f_mapping->host;
 
 	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				offset, nr_segs, ext2_get_blocks, NULL);
+				offset, nr_segs, ext2_get_block, NULL);
 }
 
 static int
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 46623f77666b..77927d6938f6 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
  */
 static int
 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-		struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
+		struct buffer_head *bitmap_bh, int goal,
+		unsigned long *count, struct ext3_reserve_window *my_rsv)
 {
 	int group_first_block, start, end;
+	unsigned long num = 0;
 
 	/* we do allocation within the reservation window if we have a window */
 	if (my_rsv) {
@@ -713,8 +715,18 @@ repeat:
 			goto fail_access;
 		goto repeat;
 	}
-	return goal;
+	num++;
+	goal++;
+	while (num < *count && goal < end
+		&& ext3_test_allocatable(goal, bitmap_bh)
+		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
+		num++;
+		goal++;
+	}
+	*count = num;
+	return goal - num;
 fail_access:
+	*count = num;
 	return -1;
 }
 
@@ -999,6 +1011,31 @@ retry:
 	goto retry;
 }
 
+static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
+			struct super_block *sb, int size)
+{
+	struct ext3_reserve_window_node *next_rsv;
+	struct rb_node *next;
+	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+
+	if (!spin_trylock(rsv_lock))
+		return;
+
+	next = rb_next(&my_rsv->rsv_node);
+
+	if (!next)
+		my_rsv->rsv_end += size;
+	else {
+		next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+
+		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
+			my_rsv->rsv_end += size;
+		else
+			my_rsv->rsv_end = next_rsv->rsv_start - 1;
+	}
+	spin_unlock(rsv_lock);
+}
+
 /*
  * This is the main function used to allocate a new block and its reservation
  * window.
@@ -1024,11 +1061,12 @@ static int
 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 			unsigned int group, struct buffer_head *bitmap_bh,
 			int goal, struct ext3_reserve_window_node * my_rsv,
-			int *errp)
+			unsigned long *count, int *errp)
 {
 	unsigned long group_first_block;
 	int ret = 0;
 	int fatal;
+	unsigned long num = *count;
 
 	*errp = 0;
 
@@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 	 * or last attempt to allocate a block with reservation turned on failed
 	 */
 	if (my_rsv == NULL ) {
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
+		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+						goal, count, NULL);
 		goto out;
 	}
 	/*
@@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 	while (1) {
 		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
 			!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
+			if (my_rsv->rsv_goal_size < *count)
+				my_rsv->rsv_goal_size = *count;
 			ret = alloc_new_reservation(my_rsv, goal, sb,
 							group, bitmap_bh);
 			if (ret < 0)
@@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 
 			if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
 				goal = -1;
-		}
+		} else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count)
+			try_to_extend_reservation(my_rsv, sb,
+					*count-my_rsv->rsv_end + goal - 1);
+
 		if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
 		    || (my_rsv->rsv_end < group_first_block))
 			BUG();
 		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
-					&my_rsv->rsv_window);
+					&num, &my_rsv->rsv_window);
 		if (ret >= 0) {
-			my_rsv->rsv_alloc_hit++;
+			my_rsv->rsv_alloc_hit += num;
+			*count = num;
 			break;			/* succeed */
 		}
+		num = *count;
 	}
 out:
 	if (ret >= 0) {
@@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
-int ext3_new_block(handle_t *handle, struct inode *inode,
-			unsigned long goal, int *errp)
+int ext3_new_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, unsigned long *count, int *errp)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct buffer_head *gdp_bh;
@@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
 	static int goal_hits, goal_attempts;
 #endif
 	unsigned long ngroups;
+	unsigned long num = *count;
 
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
@@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+	if (DQUOT_ALLOC_BLOCK(inode, num)) {
 		*errp = -EDQUOT;
 		return 0;
 	}
@@ -1244,7 +1291,7 @@ retry:
 	if (!bitmap_bh)
 		goto io_error;
 	ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-				bitmap_bh, ret_block, my_rsv, &fatal);
+				bitmap_bh, ret_block, my_rsv, &num, &fatal);
 	if (fatal)
 		goto out;
 	if (ret_block >= 0)
@@ -1281,7 +1328,7 @@ retry:
 		if (!bitmap_bh)
 			goto io_error;
 		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-					bitmap_bh, -1, my_rsv, &fatal);
+					bitmap_bh, -1, my_rsv, &num, &fatal);
 		if (fatal)
 			goto out;
 		if (ret_block >= 0)
@@ -1316,13 +1363,15 @@ allocated:
 	target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
 				+ le32_to_cpu(es->s_first_data_block);
 
-	if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
-	    target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) ||
+	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) ||
 	    in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
+		      EXT3_SB(sb)->s_itb_per_group) ||
+	    in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
 		      EXT3_SB(sb)->s_itb_per_group))
 		ext3_error(sb, "ext3_new_block",
 			    "Allocating block in system zone - "
-			    "block = %u", target_block);
+			    "blocks from %u, length %lu", target_block, num);
 
 	performed_allocation = 1;
 
@@ -1341,10 +1390,14 @@ allocated:
 	jbd_lock_bh_state(bitmap_bh);
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
-		if (ext3_test_bit(ret_block,
-				bh2jh(bitmap_bh)->b_committed_data)) {
-			printk("%s: block was unexpectedly set in "
-				"b_committed_data\n", __FUNCTION__);
+		int i;
+
+		for (i = 0; i < num; i++) {
+			if (ext3_test_bit(ret_block,
+					bh2jh(bitmap_bh)->b_committed_data)) {
+				printk("%s: block was unexpectedly set in "
+					"b_committed_data\n", __FUNCTION__);
+			}
 		}
 	}
 	ext3_debug("found bit %d\n", ret_block);
@@ -1355,7 +1408,7 @@ allocated:
 	/* ret_block was blockgroup-relative.  Now it becomes fs-relative */
 	ret_block = target_block;
 
-	if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
+	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
 		ext3_error(sb, "ext3_new_block",
 			    "block(%d) >= blocks count(%d) - "
 			    "block_group = %d, es == %p ", ret_block,
@@ -1373,9 +1426,9 @@ allocated:
 
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	gdp->bg_free_blocks_count =
-			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
+			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
+	percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
 
 	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
 	err = ext3_journal_dirty_metadata(handle, gdp_bh);
@@ -1388,6 +1441,8 @@ allocated:
 
 	*errp = 0;
 	brelse(bitmap_bh);
+	DQUOT_FREE_BLOCK(inode, *count-num);
+	*count = num;
 	return ret_block;
 
 io_error:
@@ -1401,11 +1456,19 @@ out:
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		DQUOT_FREE_BLOCK(inode, 1);
+		DQUOT_FREE_BLOCK(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
 
+int ext3_new_block(handle_t *handle, struct inode *inode,
+			unsigned long goal, int *errp)
+{
+	unsigned long count = 1;
+
+	return ext3_new_blocks(handle, inode, goal, &count, errp);
+}
+
 unsigned long ext3_count_free_blocks(struct super_block *sb)
 {
 	unsigned long desc_count;
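
ext3_new_block() survives as a one-block wrapper, so existing callers are untouched while new callers can ask for extents; `count` is both the request and the answer. A hedged usage sketch (handle, inode and goal assumed to be in scope):

unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
int err;
int first = ext3_new_blocks(handle, inode, goal, &count, &err);

if (first == 0)
	return err;	/* nothing allocated; err says why */
/* blocks first .. first + count - 1 are now allocated; count may have
 * been trimmed to what the bitmap walk actually claimed */
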
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 773459164bb2..38bd3f6ec147 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp,
 		struct buffer_head *bh = NULL;
 
 		map_bh.b_state = 0;
-		err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
-		if (!err) {
+		err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+						&map_bh, 0, 0);
+		if (err > 0) {
 			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
 				&filp->f_ra,
 				filp,
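
ext3_get_blocks_handle() changes the success convention: it returns the number of blocks mapped, so a successful single-block lookup now yields err > 0 rather than err == 0, which is why the readahead test flips from !err to err > 0. A hedged sketch of the convention as it appears to work here (0 meaning nothing mapped, negative meaning failure):

err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0, 0);
if (err > 0) {
	/* map_bh describes `err` mapped blocks starting at blk */
} else if (err == 0) {
	/* nothing mapped at blk (a hole) */
} else {
	/* negative errno */
}
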
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2c361377e0a5..48ae0339af17 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode);
 /*
  * Test whether an inode is a fast symlink.
  */
-static inline int ext3_inode_is_fast_symlink(struct inode *inode)
+static int ext3_inode_is_fast_symlink(struct inode *inode)
 {
 	int ea_blocks = EXT3_I(inode)->i_file_acl ?
 		(inode->i_sb->s_blocksize >> 9) : 0;
 
-	return (S_ISLNK(inode->i_mode) &&
-		inode->i_blocks - ea_blocks == 0);
+	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 }
 
-/* The ext3 forget function must perform a revoke if we are freeing data
+/*
+ * The ext3 forget function must perform a revoke if we are freeing data
  * which has been journaled.  Metadata (eg. indirect blocks) must be
  * revoked in all cases.
  *
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode)
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
-
-int ext3_forget(handle_t *handle, int is_metadata,
-		struct inode *inode, struct buffer_head *bh,
-		int blocknr)
+int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+		struct buffer_head *bh, int blocknr)
 {
 	int err;
 
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata,
 }
 
 /*
- * Work out how many blocks we need to progress with the next chunk of a
+ * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
-
 static unsigned long blocks_for_truncate(struct inode *inode)
 {
 	unsigned long needed;
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode)
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
-
 static handle_t *start_transaction(struct inode *inode)
 {
 	handle_t *result;
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode)
 
 	handle = start_transaction(inode);
 	if (IS_ERR(handle)) {
-		/* If we're going to skip the normal cleanup, we still
-		 * need to make sure that the in-core orphan linked list
-		 * is properly cleaned up. */
+		/*
+		 * If we're going to skip the normal cleanup, we still need to
+		 * make sure that the in-core orphan linked list is properly
+		 * cleaned up.
+		 */
 		ext3_orphan_del(NULL, inode);
 		goto no_delete;
 	}
@@ -235,16 +233,6 @@ no_delete:
 	clear_inode(inode);	/* We must guarantee clearing of inode... */
 }
 
-static int ext3_alloc_block (handle_t *handle,
-			struct inode * inode, unsigned long goal, int *err)
-{
-	unsigned long result;
-
-	result = ext3_new_block(handle, inode, goal, err);
-	return result;
-}
-
-
 typedef struct {
249 __le32 *p; 237 __le32 *p;
250 __le32 key; 238 __le32 key;
@@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
257 p->bh = bh; 245 p->bh = bh;
258} 246}
259 247
260static inline int verify_chain(Indirect *from, Indirect *to) 248static int verify_chain(Indirect *from, Indirect *to)
261{ 249{
262 while (from <= to && from->key == *from->p) 250 while (from <= to && from->key == *from->p)
263 from++; 251 from++;
@@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode,
327 offsets[n++] = i_block & (ptrs - 1); 315 offsets[n++] = i_block & (ptrs - 1);
328 final = ptrs; 316 final = ptrs;
329 } else { 317 } else {
330 ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big"); 318 ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
331 } 319 }
332 if (boundary) 320 if (boundary)
333 *boundary = (i_block & (ptrs - 1)) == (final - 1); 321 *boundary = final - 1 - (i_block & (ptrs - 1));
334 return n; 322 return n;
335} 323}
336 324
@@ -419,7 +407,6 @@ no_block:
419 * 407 *
420 * Caller must make sure that @ind is valid and will stay that way. 408 * Caller must make sure that @ind is valid and will stay that way.
421 */ 409 */
422
423static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) 410static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
424{ 411{
425 struct ext3_inode_info *ei = EXT3_I(inode); 412 struct ext3_inode_info *ei = EXT3_I(inode);
@@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
429 unsigned long colour; 416 unsigned long colour;
430 417
431 /* Try to find previous block */ 418 /* Try to find previous block */
432 for (p = ind->p - 1; p >= start; p--) 419 for (p = ind->p - 1; p >= start; p--) {
433 if (*p) 420 if (*p)
434 return le32_to_cpu(*p); 421 return le32_to_cpu(*p);
422 }
435 423
436 /* No such thing, so let's try location of indirect block */ 424 /* No such thing, so let's try location of indirect block */
437 if (ind->bh) 425 if (ind->bh)
438 return ind->bh->b_blocknr; 426 return ind->bh->b_blocknr;
439 427
440 /* 428 /*
441 * It is going to be refered from inode itself? OK, just put it into 429 * It is going to be referred to from the inode itself? OK, just put it
442 * the same cylinder group then. 430 * into the same cylinder group then.
443 */ 431 */
444 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + 432 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
445 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); 433 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
@@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
463static unsigned long ext3_find_goal(struct inode *inode, long block, 451static unsigned long ext3_find_goal(struct inode *inode, long block,
464 Indirect chain[4], Indirect *partial) 452 Indirect chain[4], Indirect *partial)
465{ 453{
466 struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; 454 struct ext3_block_alloc_info *block_i;
455
456 block_i = EXT3_I(inode)->i_block_alloc_info;
467 457
468 /* 458 /*
469 * try the heuristic for sequential allocation, 459 * try the heuristic for sequential allocation,
@@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
478} 468}
479 469
480/** 470/**
471 * ext3_blks_to_allocate: Look up the block map and count the number
472 * of direct blocks that need to be allocated for the given branch.
473 *
474 * @branch: chain of indirect blocks
475 * @k: number of blocks needed for indirect blocks
476 * @blks: number of data blocks to be mapped.
477 * @blocks_to_boundary: the offset in the indirect block
478 *
479 * return the total number of blocks to be allocated, including the
480 * direct and indirect blocks.
481 */
482static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
483 int blocks_to_boundary)
484{
485 unsigned long count = 0;
486
487 /*
488 * Simple case: the [t,d]indirect block(s) have not been allocated
489 * yet, so blocks on that path clearly have not been allocated either.
490 */
491 if (k > 0) {
492 /* right now we don't handle cross boundary allocation */
493 if (blks < blocks_to_boundary + 1)
494 count += blks;
495 else
496 count += blocks_to_boundary + 1;
497 return count;
498 }
499
500 count++;
501 while (count < blks && count <= blocks_to_boundary &&
502 le32_to_cpu(*(branch[0].p + count)) == 0) {
503 count++;
504 }
505 return count;
506}
507
508/**
509 * ext3_alloc_blocks: allocate multiple blocks needed for a branch
510 * @indirect_blks: the number of blocks to allocate for indirect
511 * blocks
512 *
513 * @new_blocks: on return it will store the new block numbers for
514 * the indirect blocks(if needed) and the first direct block,
515 * @blks: on return it will store the total number of allocated
516 * direct blocks
517 */
518static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
519 unsigned long goal, int indirect_blks, int blks,
520 unsigned long long new_blocks[4], int *err)
521{
522 int target, i;
523 unsigned long count = 0;
524 int index = 0;
525 unsigned long current_block = 0;
526 int ret = 0;
527
528 /*
529 * Here we try to allocate the requested multiple blocks at once,
530 * on a best-effort basis.
531 * To build a branch, we must allocate blocks for
532 * the indirect blocks (if not allocated yet), and at least
533 * the first direct block of this branch. That is the
534 * minimum number of blocks we need to allocate (required).
535 */
536 target = blks + indirect_blks;
537
538 while (1) {
539 count = target;
540 /* allocating blocks for indirect blocks and direct blocks */
541 current_block = ext3_new_blocks(handle, inode, goal, &count, err);
542 if (*err)
543 goto failed_out;
544
545 target -= count;
546 /* allocate blocks for indirect blocks */
547 while (index < indirect_blks && count) {
548 new_blocks[index++] = current_block++;
549 count--;
550 }
551
552 if (count > 0)
553 break;
554 }
555
556 /* save the new block number for the first direct block */
557 new_blocks[index] = current_block;
558
559 /* total number of blocks allocated for direct blocks */
560 ret = count;
561 *err = 0;
562 return ret;
563failed_out:
564 for (i = 0; i < index; i++)
565 ext3_free_blocks(handle, inode, new_blocks[i], 1);
566 return ret;
567}
568
569/**
481 * ext3_alloc_branch - allocate and set up a chain of blocks. 570 * ext3_alloc_branch - allocate and set up a chain of blocks.
482 * @inode: owner 571 * @inode: owner
483 * @num: depth of the chain (number of blocks to allocate) 572 * @indirect_blks: number of allocated indirect blocks
573 * @blks: number of allocated direct blocks
484 * @offsets: offsets (in the blocks) to store the pointers to next. 574 * @offsets: offsets (in the blocks) to store the pointers to next.
485 * @branch: place to store the chain in. 575 * @branch: place to store the chain in.
486 * 576 *
487 * This function allocates @num blocks, zeroes out all but the last one, 577 * This function allocates blocks, zeroes out all but the last one,
488 * links them into chain and (if we are synchronous) writes them to disk. 578 * links them into chain and (if we are synchronous) writes them to disk.
489 * In other words, it prepares a branch that can be spliced onto the 579 * In other words, it prepares a branch that can be spliced onto the
490 * inode. It stores the information about that chain in the branch[], in 580 * inode. It stores the information about that chain in the branch[], in
@@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
501 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain 591 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
502 * as described above and return 0. 592 * as described above and return 0.
503 */ 593 */
504
505static int ext3_alloc_branch(handle_t *handle, struct inode *inode, 594static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
506 int num, 595 int indirect_blks, int *blks, unsigned long goal,
507 unsigned long goal, 596 int *offsets, Indirect *branch)
508 int *offsets,
509 Indirect *branch)
510{ 597{
511 int blocksize = inode->i_sb->s_blocksize; 598 int blocksize = inode->i_sb->s_blocksize;
512 int n = 0, keys = 0; 599 int i, n = 0;
513 int err = 0; 600 int err = 0;
514 int i; 601 struct buffer_head *bh;
515 int parent = ext3_alloc_block(handle, inode, goal, &err); 602 int num;
516 603 unsigned long long new_blocks[4];
517 branch[0].key = cpu_to_le32(parent); 604 unsigned long long current_block;
518 if (parent) {
519 for (n = 1; n < num; n++) {
520 struct buffer_head *bh;
521 /* Allocate the next block */
522 int nr = ext3_alloc_block(handle, inode, parent, &err);
523 if (!nr)
524 break;
525 branch[n].key = cpu_to_le32(nr);
526 605
527 /* 606 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
528 * Get buffer_head for parent block, zero it out 607 *blks, new_blocks, &err);
529 * and set the pointer to new one, then send 608 if (err)
530 * parent to disk. 609 return err;
531 */
532 bh = sb_getblk(inode->i_sb, parent);
533 if (!bh)
534 break;
535 keys = n+1;
536 branch[n].bh = bh;
537 lock_buffer(bh);
538 BUFFER_TRACE(bh, "call get_create_access");
539 err = ext3_journal_get_create_access(handle, bh);
540 if (err) {
541 unlock_buffer(bh);
542 brelse(bh);
543 break;
544 }
545 610
546 memset(bh->b_data, 0, blocksize); 611 branch[0].key = cpu_to_le32(new_blocks[0]);
547 branch[n].p = (__le32*) bh->b_data + offsets[n]; 612 /*
548 *branch[n].p = branch[n].key; 613 * metadata blocks and data blocks are allocated.
549 BUFFER_TRACE(bh, "marking uptodate"); 614 */
550 set_buffer_uptodate(bh); 615 for (n = 1; n <= indirect_blks; n++) {
616 /*
617 * Get buffer_head for parent block, zero it out
618 * and set the pointer to new one, then send
619 * parent to disk.
620 */
621 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
622 branch[n].bh = bh;
623 lock_buffer(bh);
624 BUFFER_TRACE(bh, "call get_create_access");
625 err = ext3_journal_get_create_access(handle, bh);
626 if (err) {
551 unlock_buffer(bh); 627 unlock_buffer(bh);
628 brelse(bh);
629 goto failed;
630 }
552 631
553 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 632 memset(bh->b_data, 0, blocksize);
554 err = ext3_journal_dirty_metadata(handle, bh); 633 branch[n].p = (__le32 *) bh->b_data + offsets[n];
555 if (err) 634 branch[n].key = cpu_to_le32(new_blocks[n]);
556 break; 635 *branch[n].p = branch[n].key;
557 636 if (n == indirect_blks) {
558 parent = nr; 637 current_block = new_blocks[n];
638 /*
639 * End of chain: update the last new metablock of
640 * the chain to point to the newly allocated
641 * data block numbers
642 */
643 for (i = 1; i < num; i++)
644 *(branch[n].p + i) = cpu_to_le32(++current_block);
559 } 645 }
560 } 646 BUFFER_TRACE(bh, "marking uptodate");
561 if (n == num) 647 set_buffer_uptodate(bh);
562 return 0; 648 unlock_buffer(bh);
563 649
650 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
651 err = ext3_journal_dirty_metadata(handle, bh);
652 if (err)
653 goto failed;
654 }
655 *blks = num;
656 return err;
657failed:
564 /* Allocation failed, free what we already allocated */ 658 /* Allocation failed, free what we already allocated */
565 for (i = 1; i < keys; i++) { 659 for (i = 1; i <= n; i++) {
566 BUFFER_TRACE(branch[i].bh, "call journal_forget"); 660 BUFFER_TRACE(branch[i].bh, "call journal_forget");
567 ext3_journal_forget(handle, branch[i].bh); 661 ext3_journal_forget(handle, branch[i].bh);
568 } 662 }
569 for (i = 0; i < keys; i++) 663 for (i = 0; i < indirect_blks; i++)
570 ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1); 664 ext3_free_blocks(handle, inode, new_blocks[i], 1);
665
666 ext3_free_blocks(handle, inode, new_blocks[i], num);
667
571 return err; 668 return err;
572} 669}
573 670
574/** 671/**
575 * ext3_splice_branch - splice the allocated branch onto inode. 672 * ext3_splice_branch - splice the allocated branch onto inode.
576 * @inode: owner 673 * @inode: owner
577 * @block: (logical) number of block we are adding 674 * @block: (logical) number of block we are adding
578 * @chain: chain of indirect blocks (with a missing link - see 675 * @chain: chain of indirect blocks (with a missing link - see
579 * ext3_alloc_branch) 676 * ext3_alloc_branch)
580 * @where: location of missing link 677 * @where: location of missing link
581 * @num: number of blocks we are adding 678 * @num: number of indirect blocks we are adding
582 * 679 * @blks: number of direct blocks we are adding
583 * This function fills the missing link and does all housekeeping needed in 680 *
584 * inode (->i_blocks, etc.). In case of success we end up with the full 681 * This function fills the missing link and does all housekeeping needed in
585 * chain to new block and return 0. 682 * inode (->i_blocks, etc.). In case of success we end up with the full
683 * chain to new block and return 0.
586 */ 684 */
587 685static int ext3_splice_branch(handle_t *handle, struct inode *inode,
588static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, 686 long block, Indirect *where, int num, int blks)
589 Indirect chain[4], Indirect *where, int num)
590{ 687{
591 int i; 688 int i;
592 int err = 0; 689 int err = 0;
593 struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; 690 struct ext3_block_alloc_info *block_i;
691 unsigned long current_block;
594 692
693 block_i = EXT3_I(inode)->i_block_alloc_info;
595 /* 694 /*
596 * If we're splicing into a [td]indirect block (as opposed to the 695 * If we're splicing into a [td]indirect block (as opposed to the
597 * inode) then we need to get write access to the [td]indirect block 696 * inode) then we need to get write access to the [td]indirect block
@@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
608 *where->p = where->key; 707 *where->p = where->key;
609 708
610 /* 709 /*
710 * Update the host buffer_head or inode to point to the remaining
711 * just-allocated direct blocks
712 */
713 if (num == 0 && blks > 1) {
714 current_block = le32_to_cpu(where->key) + 1;
715 for (i = 1; i < blks; i++)
716 *(where->p + i) = cpu_to_le32(current_block++);
717 }
718
719 /*
611 * update the most recently allocated logical & physical block 720 * update the most recently allocated logical & physical block
612 * in i_block_alloc_info, to assist find the proper goal block for next 721 * in i_block_alloc_info, to assist find the proper goal block for next
613 * allocation 722 * allocation
614 */ 723 */
615 if (block_i) { 724 if (block_i) {
616 block_i->last_alloc_logical_block = block; 725 block_i->last_alloc_logical_block = block + blks - 1;
617 block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key); 726 block_i->last_alloc_physical_block =
727 le32_to_cpu(where[num].key) + blks - 1;
618 } 728 }
619 729
620 /* We are done with atomic stuff, now do the rest of housekeeping */ 730 /* We are done with atomic stuff, now do the rest of housekeeping */
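
When the run was spliced into an existing indirect block (num == 0), the remaining pointers are fanned out behind the first one, relying on ext3_alloc_blocks() having returned a physically contiguous run. A compilable toy version of the fan-out (endianness conversions elided, names invented):

#include <stdio.h>

/* Fill blks consecutive slots with consecutive block numbers, the way
 * ext3_splice_branch() extends a run inside an indirect block.
 * (cpu_to_le32()/le32_to_cpu() conversions are elided here.) */
static void fan_out(unsigned int *slot, unsigned int first_block,
		    unsigned long blks)
{
	unsigned long i;

	slot[0] = first_block;              /* the spliced link itself */
	for (i = 1; i < blks; i++)
		slot[i] = first_block + i;  /* contiguous run */
}

int main(void)
{
	unsigned int ind[4] = { 0 };

	fan_out(ind, 1000, 4);
	for (int i = 0; i < 4; i++)
		printf("slot %d -> block %u\n", i, ind[i]);
	return 0;
}
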
@@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
625 /* had we spliced it onto indirect block? */ 735 /* had we spliced it onto indirect block? */
626 if (where->bh) { 736 if (where->bh) {
627 /* 737 /*
628 * akpm: If we spliced it onto an indirect block, we haven't 738 * If we spliced it onto an indirect block, we haven't
629 * altered the inode. Note however that if it is being spliced 739 * altered the inode. Note however that if it is being spliced
630 * onto an indirect block at the very end of the file (the 740 * onto an indirect block at the very end of the file (the
631 * file is growing) then we *will* alter the inode to reflect 741 * file is growing) then we *will* alter the inode to reflect
@@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
647 return err; 757 return err;
648 758
649err_out: 759err_out:
650 for (i = 1; i < num; i++) { 760 for (i = 1; i <= num; i++) {
651 BUFFER_TRACE(where[i].bh, "call journal_forget"); 761 BUFFER_TRACE(where[i].bh, "call journal_forget");
652 ext3_journal_forget(handle, where[i].bh); 762 ext3_journal_forget(handle, where[i].bh);
763 ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
653 } 764 }
765 ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
766
654 return err; 767 return err;
655} 768}
656 769
@@ -666,26 +779,33 @@ err_out:
666 * allocations is needed - we simply release blocks and do not touch anything 779 * allocations is needed - we simply release blocks and do not touch anything
667 * reachable from inode. 780 * reachable from inode.
668 * 781 *
669 * akpm: `handle' can be NULL if create == 0. 782 * `handle' can be NULL if create == 0.
670 * 783 *
671 * The BKL may not be held on entry here. Be sure to take it early. 784 * The BKL may not be held on entry here. Be sure to take it early.
785 * return > 0, # of blocks mapped or allocated.
786 * return = 0, if plain lookup failed.
787 * return < 0, error case.
672 */ 788 */
673 789int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
674int 790 sector_t iblock, unsigned long maxblocks,
675ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, 791 struct buffer_head *bh_result,
676 struct buffer_head *bh_result, int create, int extend_disksize) 792 int create, int extend_disksize)
677{ 793{
678 int err = -EIO; 794 int err = -EIO;
679 int offsets[4]; 795 int offsets[4];
680 Indirect chain[4]; 796 Indirect chain[4];
681 Indirect *partial; 797 Indirect *partial;
682 unsigned long goal; 798 unsigned long goal;
683 int left; 799 int indirect_blks;
684 int boundary = 0; 800 int blocks_to_boundary = 0;
685 const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary); 801 int depth;
686 struct ext3_inode_info *ei = EXT3_I(inode); 802 struct ext3_inode_info *ei = EXT3_I(inode);
803 int count = 0;
804 unsigned long first_block = 0;
805
687 806
688 J_ASSERT(handle != NULL || create == 0); 807 J_ASSERT(handle != NULL || create == 0);
808 depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
689 809
690 if (depth == 0) 810 if (depth == 0)
691 goto out; 811 goto out;
@@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
694 814
695 /* Simplest case - block found, no allocation needed */ 815 /* Simplest case - block found, no allocation needed */
696 if (!partial) { 816 if (!partial) {
817 first_block = le32_to_cpu(chain[depth - 1].key);
697 clear_buffer_new(bh_result); 818 clear_buffer_new(bh_result);
698 goto got_it; 819 count++;
820 /* map more blocks */
821 while (count < maxblocks && count <= blocks_to_boundary) {
822 if (!verify_chain(chain, chain + depth - 1)) {
823 /*
824 * Indirect block might be removed by
825 * truncate while we were reading it.
826 * Handling of that case: forget what we've
827 * got now. Flag the error as EAGAIN, so that
828 * the caller will re-read the block.
829 */
830 err = -EAGAIN;
831 count = 0;
832 break;
833 }
834 if (le32_to_cpu(*(chain[depth-1].p + count)) ==
835 (first_block + count))
836 count++;
837 else
838 break;
839 }
840 if (err != -EAGAIN)
841 goto got_it;
699 } 842 }
700 843
701 /* Next simple case - plain lookup or failed read of indirect block */ 844 /* Next simple case - plain lookup or failed read of indirect block */
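
The "map more blocks" loop above is the read-side counterpart of ext3_blks_to_allocate(): after a lookup hit, it keeps extending the mapping while the stored block numbers remain consecutive, bailing out to -EAGAIN if truncate invalidates the chain mid-scan. A toy version of the consecutive-run scan (the verify_chain() revalidation and le32 conversions are omitted):

#include <stdio.h>

/* Extend a lookup hit while the stored block numbers stay consecutive,
 * as in the "map more blocks" loop of ext3_get_blocks_handle().  The
 * revalidation against concurrent truncate is left out of this toy. */
static unsigned long count_mapped_run(const unsigned int *ptrs,
				      unsigned long maxblocks,
				      unsigned long blocks_to_boundary)
{
	unsigned int first = ptrs[0];
	unsigned long count = 1;

	while (count < maxblocks && count <= blocks_to_boundary &&
	       ptrs[count] == first + count)
		count++;
	return count;
}

int main(void)
{
	/* 500,501,502 are consecutive; 700 breaks the run */
	unsigned int ptrs[] = { 500, 501, 502, 700, 701 };

	printf("%lu\n", count_mapped_run(ptrs, 5, 4));  /* prints 3 */
	return 0;
}
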
@@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
723 } 866 }
724 partial = ext3_get_branch(inode, depth, offsets, chain, &err); 867 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
725 if (!partial) { 868 if (!partial) {
869 count++;
726 mutex_unlock(&ei->truncate_mutex); 870 mutex_unlock(&ei->truncate_mutex);
727 if (err) 871 if (err)
728 goto cleanup; 872 goto cleanup;
@@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
740 884
741 goal = ext3_find_goal(inode, iblock, chain, partial); 885 goal = ext3_find_goal(inode, iblock, chain, partial);
742 886
743 left = (chain + depth) - partial; 887 /* the number of blocks we need to allocate for [d,t]indirect blocks */
888 indirect_blks = (chain + depth) - partial - 1;
744 889
745 /* 890 /*
891 * Next look up the indirect map to count the total number of
892 * direct blocks to allocate for this branch.
893 */
894 count = ext3_blks_to_allocate(partial, indirect_blks,
895 maxblocks, blocks_to_boundary);
896 /*
746 * Block out ext3_truncate while we alter the tree 897 * Block out ext3_truncate while we alter the tree
747 */ 898 */
748 err = ext3_alloc_branch(handle, inode, left, goal, 899 err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
749 offsets + (partial - chain), partial); 900 offsets + (partial - chain), partial);
750 901
751 /* 902 /*
@@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
756 * may need to return -EAGAIN upwards in the worst case. --sct 907 * may need to return -EAGAIN upwards in the worst case. --sct
757 */ 908 */
758 if (!err) 909 if (!err)
759 err = ext3_splice_branch(handle, inode, iblock, chain, 910 err = ext3_splice_branch(handle, inode, iblock,
760 partial, left); 911 partial, indirect_blks, count);
761 /* 912 /*
762 * i_disksize growing is protected by truncate_mutex. Don't forget to 913 * i_disksize growing is protected by truncate_mutex. Don't forget to
763 * protect it if you're about to implement concurrent 914 * protect it if you're about to implement concurrent
@@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
772 set_buffer_new(bh_result); 923 set_buffer_new(bh_result);
773got_it: 924got_it:
774 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); 925 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
775 if (boundary) 926 if (blocks_to_boundary == 0)
776 set_buffer_boundary(bh_result); 927 set_buffer_boundary(bh_result);
928 err = count;
777 /* Clean up and exit */ 929 /* Clean up and exit */
778 partial = chain + depth - 1; /* the whole chain */ 930 partial = chain + depth - 1; /* the whole chain */
779cleanup: 931cleanup:
@@ -787,34 +939,21 @@ out:
787 return err; 939 return err;
788} 940}
789 941
790static int ext3_get_block(struct inode *inode, sector_t iblock,
791 struct buffer_head *bh_result, int create)
792{
793 handle_t *handle = NULL;
794 int ret;
795
796 if (create) {
797 handle = ext3_journal_current_handle();
798 J_ASSERT(handle != 0);
799 }
800 ret = ext3_get_block_handle(handle, inode, iblock,
801 bh_result, create, 1);
802 return ret;
803}
804
805#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) 942#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
806 943
807static int 944static int ext3_get_block(struct inode *inode, sector_t iblock,
808ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, 945 struct buffer_head *bh_result, int create)
809 unsigned long max_blocks, struct buffer_head *bh_result,
810 int create)
811{ 946{
812 handle_t *handle = journal_current_handle(); 947 handle_t *handle = journal_current_handle();
813 int ret = 0; 948 int ret = 0;
949 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
814 950
815 if (!handle) 951 if (!create)
816 goto get_block; /* A read */ 952 goto get_block; /* A read */
817 953
954 if (max_blocks == 1)
955 goto get_block; /* A single block get */
956
818 if (handle->h_transaction->t_state == T_LOCKED) { 957 if (handle->h_transaction->t_state == T_LOCKED) {
819 /* 958 /*
820 * Huge direct-io writes can hold off commits for long 959 * Huge direct-io writes can hold off commits for long
@@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
841 } 980 }
842 981
843get_block: 982get_block:
844 if (ret == 0) 983 if (ret == 0) {
845 ret = ext3_get_block_handle(handle, inode, iblock, 984 ret = ext3_get_blocks_handle(handle, inode, iblock,
846 bh_result, create, 0); 985 max_blocks, bh_result, create, 0);
847 bh_result->b_size = (1 << inode->i_blkbits); 986 if (ret > 0) {
987 bh_result->b_size = (ret << inode->i_blkbits);
988 ret = 0;
989 }
990 }
848 return ret; 991 return ret;
849} 992}
850 993
851/* 994/*
852 * `handle' can be NULL if create is zero 995 * `handle' can be NULL if create is zero
853 */ 996 */
854struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, 997struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
855 long block, int create, int * errp) 998 long block, int create, int *errp)
856{ 999{
857 struct buffer_head dummy; 1000 struct buffer_head dummy;
858 int fatal = 0, err; 1001 int fatal = 0, err;
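
With the extra max_blocks parameter dropped from the get_block prototype, the request and the reply both travel through bh_result->b_size: callers encode the wanted span in bytes, the filesystem decodes it as max_blocks = b_size >> i_blkbits, and on success re-encodes the mapped span the same way, as the hunk above does. A minimal sketch of that encode/decode arithmetic with a toy buffer_head (everything apart from the b_size convention itself is invented):

#include <stdio.h>

/* The b_size convention used by the reworked get_block path:
 * request size in, mapped size out, both in bytes.  A toy buffer_head
 * with only the field that matters here. */
struct toy_bh { unsigned long b_size; };

#define BLKBITS 12                          /* 4096-byte blocks, say */

static int toy_get_block(struct toy_bh *bh)
{
	unsigned long max_blocks = bh->b_size >> BLKBITS;     /* decode */
	unsigned long mapped = max_blocks > 3 ? 3 : max_blocks; /* fs found 3 */

	bh->b_size = mapped << BLKBITS;     /* encode the reply */
	return (int)mapped;
}

int main(void)
{
	struct toy_bh bh = { .b_size = 8UL << BLKBITS };  /* ask for 8 blocks */
	int n = toy_get_block(&bh);

	printf("mapped %d blocks, %lu bytes\n", n, bh.b_size);
	return 0;
}
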
@@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
862 dummy.b_state = 0; 1005 dummy.b_state = 0;
863 dummy.b_blocknr = -1000; 1006 dummy.b_blocknr = -1000;
864 buffer_trace_init(&dummy.b_history); 1007 buffer_trace_init(&dummy.b_history);
865 *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1); 1008 err = ext3_get_blocks_handle(handle, inode, block, 1,
866 if (!*errp && buffer_mapped(&dummy)) { 1009 &dummy, create, 1);
1010 if (err == 1) {
1011 err = 0;
1012 } else if (err >= 0) {
1013 WARN_ON(1);
1014 err = -EIO;
1015 }
1016 *errp = err;
1017 if (!err && buffer_mapped(&dummy)) {
867 struct buffer_head *bh; 1018 struct buffer_head *bh;
868 bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 1019 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
869 if (!bh) { 1020 if (!bh) {
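
ext3_getblk() asks for exactly one block, so it must translate the count-style return of ext3_get_blocks_handle() back into 0/-errno, which is what the err == 1 / WARN_ON / -EIO ladder above does. A compact user-space sketch of that translation (warn() stands in for the kernel's WARN_ON(), and EIO is defined locally):

#include <stdio.h>

#define EIO 5

/* Map a "number of blocks mapped" return back to 0/-errno for a caller
 * that asked for exactly one block, as ext3_getblk() now does. */
static void warn(const char *what) { fprintf(stderr, "WARN: %s\n", what); }

static int one_block_errno(int ret)
{
	if (ret == 1)
		return 0;          /* the single block was mapped */
	if (ret < 0)
		return ret;        /* real error, pass it through */
	warn("get_blocks returned an impossible count");
	return -EIO;               /* 0 or >1 should never happen here */
}

int main(void)
{
	printf("%d %d %d\n", one_block_errno(1), one_block_errno(-EIO),
	       one_block_errno(2));
	return 0;
}
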
@@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
874 J_ASSERT(create != 0); 1025 J_ASSERT(create != 0);
875 J_ASSERT(handle != 0); 1026 J_ASSERT(handle != 0);
876 1027
877 /* Now that we do not always journal data, we 1028 /*
878 should keep in mind whether this should 1029 * Now that we do not always journal data, we should
879 always journal the new buffer as metadata. 1030 * keep in mind whether this should always journal the
880 For now, regular file writes use 1031 * new buffer as metadata. For now, regular file
881 ext3_get_block instead, so it's not a 1032 * writes use ext3_get_block instead, so it's not a
882 problem. */ 1033 * problem.
1034 */
883 lock_buffer(bh); 1035 lock_buffer(bh);
884 BUFFER_TRACE(bh, "call get_create_access"); 1036 BUFFER_TRACE(bh, "call get_create_access");
885 fatal = ext3_journal_get_create_access(handle, bh); 1037 fatal = ext3_journal_get_create_access(handle, bh);
886 if (!fatal && !buffer_uptodate(bh)) { 1038 if (!fatal && !buffer_uptodate(bh)) {
887 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 1039 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
888 set_buffer_uptodate(bh); 1040 set_buffer_uptodate(bh);
889 } 1041 }
890 unlock_buffer(bh); 1042 unlock_buffer(bh);
@@ -906,7 +1058,7 @@ err:
906 return NULL; 1058 return NULL;
907} 1059}
908 1060
909struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode, 1061struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
910 int block, int create, int *err) 1062 int block, int create, int *err)
911{ 1063{
912 struct buffer_head * bh; 1064 struct buffer_head * bh;
@@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle,
982 * is elevated. We'll still have enough credits for the tiny quotafile 1134 * is elevated. We'll still have enough credits for the tiny quotafile
983 * write. 1135 * write.
984 */ 1136 */
985 1137static int do_journal_get_write_access(handle_t *handle,
986static int do_journal_get_write_access(handle_t *handle, 1138 struct buffer_head *bh)
987 struct buffer_head *bh)
988{ 1139{
989 if (!buffer_mapped(bh) || buffer_freed(bh)) 1140 if (!buffer_mapped(bh) || buffer_freed(bh))
990 return 0; 1141 return 0;
@@ -1025,8 +1176,7 @@ out:
1025 return ret; 1176 return ret;
1026} 1177}
1027 1178
1028int 1179int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1029ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1030{ 1180{
1031 int err = journal_dirty_data(handle, bh); 1181 int err = journal_dirty_data(handle, bh);
1032 if (err) 1182 if (err)
@@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
1051 * ext3 never places buffers on inode->i_mapping->private_list. metadata 1201 * ext3 never places buffers on inode->i_mapping->private_list. metadata
1052 * buffers are managed internally. 1202 * buffers are managed internally.
1053 */ 1203 */
1054
1055static int ext3_ordered_commit_write(struct file *file, struct page *page, 1204static int ext3_ordered_commit_write(struct file *file, struct page *page,
1056 unsigned from, unsigned to) 1205 unsigned from, unsigned to)
1057{ 1206{
@@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1261 * we don't need to open a transaction here. 1410 * we don't need to open a transaction here.
1262 */ 1411 */
1263static int ext3_ordered_writepage(struct page *page, 1412static int ext3_ordered_writepage(struct page *page,
1264 struct writeback_control *wbc) 1413 struct writeback_control *wbc)
1265{ 1414{
1266 struct inode *inode = page->mapping->host; 1415 struct inode *inode = page->mapping->host;
1267 struct buffer_head *page_bufs; 1416 struct buffer_head *page_bufs;
@@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping,
1430 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); 1579 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1431} 1580}
1432 1581
1433static int ext3_invalidatepage(struct page *page, unsigned long offset) 1582static void ext3_invalidatepage(struct page *page, unsigned long offset)
1434{ 1583{
1435 journal_t *journal = EXT3_JOURNAL(page->mapping->host); 1584 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1436 1585
@@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
1440 if (offset == 0) 1589 if (offset == 0)
1441 ClearPageChecked(page); 1590 ClearPageChecked(page);
1442 1591
1443 return journal_invalidatepage(journal, page, offset); 1592 journal_invalidatepage(journal, page, offset);
1444} 1593}
1445 1594
1446static int ext3_releasepage(struct page *page, gfp_t wait) 1595static int ext3_releasepage(struct page *page, gfp_t wait)
@@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1492 1641
1493 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1642 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1494 offset, nr_segs, 1643 offset, nr_segs,
1495 ext3_direct_io_get_blocks, NULL); 1644 ext3_get_block, NULL);
1496 1645
1497 /* 1646 /*
1498 * Reacquire the handle: ext3_direct_io_get_block() can restart the 1647 * Reacquire the handle: ext3_get_block() can restart the transaction
1499 * transaction
1500 */ 1648 */
1501 handle = journal_current_handle(); 1649 handle = journal_current_handle();
1502 1650
@@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
1752 * c) free the subtrees growing from the inode past the @chain[0]. 1900 * c) free the subtrees growing from the inode past the @chain[0].
1753 * (no partially truncated stuff there). */ 1901 * (no partially truncated stuff there). */
1754 1902
1755static Indirect *ext3_find_shared(struct inode *inode, 1903static Indirect *ext3_find_shared(struct inode *inode, int depth,
1756 int depth, 1904 int offsets[4], Indirect chain[4], __le32 *top)
1757 int offsets[4],
1758 Indirect chain[4],
1759 __le32 *top)
1760{ 1905{
1761 Indirect *partial, *p; 1906 Indirect *partial, *p;
1762 int k, err; 1907 int k, err;
@@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
1795 } 1940 }
1796 /* Writer: end */ 1941 /* Writer: end */
1797 1942
1798 while(partial > p) 1943 while(partial > p) {
1799 {
1800 brelse(partial->bh); 1944 brelse(partial->bh);
1801 partial--; 1945 partial--;
1802 } 1946 }
@@ -1812,10 +1956,9 @@ no_top:
1812 * We release `count' blocks on disk, but (last - first) may be greater 1956 * We release `count' blocks on disk, but (last - first) may be greater
1813 * than `count' because there can be holes in there. 1957 * than `count' because there can be holes in there.
1814 */ 1958 */
1815static void 1959static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
1816ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, 1960 struct buffer_head *bh, unsigned long block_to_free,
1817 unsigned long block_to_free, unsigned long count, 1961 unsigned long count, __le32 *first, __le32 *last)
1818 __le32 *first, __le32 *last)
1819{ 1962{
1820 __le32 *p; 1963 __le32 *p;
1821 if (try_to_extend_transaction(handle, inode)) { 1964 if (try_to_extend_transaction(handle, inode)) {
@@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2076 * that's fine - as long as they are linked from the inode, the post-crash 2219 * that's fine - as long as they are linked from the inode, the post-crash
2077 * ext3_truncate() run will find them and release them. 2220 * ext3_truncate() run will find them and release them.
2078 */ 2221 */
2079 2222void ext3_truncate(struct inode *inode)
2080void ext3_truncate(struct inode * inode)
2081{ 2223{
2082 handle_t *handle; 2224 handle_t *handle;
2083 struct ext3_inode_info *ei = EXT3_I(inode); 2225 struct ext3_inode_info *ei = EXT3_I(inode);
@@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode)
2201do_indirects: 2343do_indirects:
2202 /* Kill the remaining (whole) subtrees */ 2344 /* Kill the remaining (whole) subtrees */
2203 switch (offsets[0]) { 2345 switch (offsets[0]) {
2204 default: 2346 default:
2205 nr = i_data[EXT3_IND_BLOCK]; 2347 nr = i_data[EXT3_IND_BLOCK];
2206 if (nr) { 2348 if (nr) {
2207 ext3_free_branches(handle, inode, NULL, 2349 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2208 &nr, &nr+1, 1); 2350 i_data[EXT3_IND_BLOCK] = 0;
2209 i_data[EXT3_IND_BLOCK] = 0; 2351 }
2210 } 2352 case EXT3_IND_BLOCK:
2211 case EXT3_IND_BLOCK: 2353 nr = i_data[EXT3_DIND_BLOCK];
2212 nr = i_data[EXT3_DIND_BLOCK]; 2354 if (nr) {
2213 if (nr) { 2355 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2214 ext3_free_branches(handle, inode, NULL, 2356 i_data[EXT3_DIND_BLOCK] = 0;
2215 &nr, &nr+1, 2); 2357 }
2216 i_data[EXT3_DIND_BLOCK] = 0; 2358 case EXT3_DIND_BLOCK:
2217 } 2359 nr = i_data[EXT3_TIND_BLOCK];
2218 case EXT3_DIND_BLOCK: 2360 if (nr) {
2219 nr = i_data[EXT3_TIND_BLOCK]; 2361 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2220 if (nr) { 2362 i_data[EXT3_TIND_BLOCK] = 0;
2221 ext3_free_branches(handle, inode, NULL, 2363 }
2222 &nr, &nr+1, 3); 2364 case EXT3_TIND_BLOCK:
2223 i_data[EXT3_TIND_BLOCK] = 0; 2365 ;
2224 }
2225 case EXT3_TIND_BLOCK:
2226 ;
2227 } 2366 }
2228 2367
2229 ext3_discard_reservation(inode); 2368 ext3_discard_reservation(inode);
@@ -2232,8 +2371,10 @@ do_indirects:
2232 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2371 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2233 ext3_mark_inode_dirty(handle, inode); 2372 ext3_mark_inode_dirty(handle, inode);
2234 2373
2235 /* In a multi-transaction truncate, we only make the final 2374 /*
2236 * transaction synchronous */ 2375 * In a multi-transaction truncate, we only make the final transaction
2376 * synchronous
2377 */
2237 if (IS_SYNC(inode)) 2378 if (IS_SYNC(inode))
2238 handle->h_sync = 1; 2379 handle->h_sync = 1;
2239out_stop: 2380out_stop:
@@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
2259 struct ext3_group_desc * gdp; 2400 struct ext3_group_desc * gdp;
2260 2401
2261 2402
2262 if ((ino != EXT3_ROOT_INO && 2403 if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO &&
2263 ino != EXT3_JOURNAL_INO && 2404 ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) ||
2264 ino != EXT3_RESIZE_INO && 2405 ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) {
2265 ino < EXT3_FIRST_INO(sb)) || 2406 ext3_error(sb, "ext3_get_inode_block",
2266 ino > le32_to_cpu(
2267 EXT3_SB(sb)->s_es->s_inodes_count)) {
2268 ext3_error (sb, "ext3_get_inode_block",
2269 "bad inode number: %lu", ino); 2407 "bad inode number: %lu", ino);
2270 return 0; 2408 return 0;
2271 } 2409 }
2272 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 2410 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2273 if (block_group >= EXT3_SB(sb)->s_groups_count) { 2411 if (block_group >= EXT3_SB(sb)->s_groups_count) {
2274 ext3_error (sb, "ext3_get_inode_block", 2412 ext3_error(sb, "ext3_get_inode_block", "group >= groups count");
2275 "group >= groups count");
2276 return 0; 2413 return 0;
2277 } 2414 }
2278 smp_rmb(); 2415 smp_rmb();
@@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
2285 return 0; 2422 return 0;
2286 } 2423 }
2287 2424
2288 gdp = (struct ext3_group_desc *) bh->b_data; 2425 gdp = (struct ext3_group_desc *)bh->b_data;
2289 /* 2426 /*
2290 * Figure out the offset within the block group inode table 2427 * Figure out the offset within the block group inode table
2291 */ 2428 */
@@ -2834,7 +2971,7 @@ err_out:
2834 2971
2835 2972
2836/* 2973/*
2837 * akpm: how many blocks doth make a writepage()? 2974 * How many blocks doth make a writepage()?
2838 * 2975 *
2839 * With N blocks per page, it may be: 2976 * With N blocks per page, it may be:
2840 * N data blocks 2977 * N data blocks
@@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
2924} 3061}
2925 3062
2926/* 3063/*
2927 * akpm: What we do here is to mark the in-core inode as clean 3064 * What we do here is to mark the in-core inode as clean with respect to inode
2928 * with respect to inode dirtiness (it may still be data-dirty). 3065 * dirtiness (it may still be data-dirty).
2929 * This means that the in-core inode may be reaped by prune_icache 3066 * This means that the in-core inode may be reaped by prune_icache
2930 * without having to perform any I/O. This is a very good thing, 3067 * without having to perform any I/O. This is a very good thing,
2931 * because *any* task may call prune_icache - even ones which 3068 * because *any* task may call prune_icache - even ones which
@@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
2957} 3094}
2958 3095
2959/* 3096/*
2960 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty() 3097 * ext3_dirty_inode() is called from __mark_inode_dirty()
2961 * 3098 *
2962 * We're really interested in the case where a file is being extended. 3099 * We're really interested in the case where a file is being extended.
2963 * i_size has been changed by generic_commit_write() and we thus need 3100 * i_size has been changed by generic_commit_write() and we thus need
@@ -2993,7 +3130,7 @@ out:
2993 return; 3130 return;
2994} 3131}
2995 3132
2996#ifdef AKPM 3133#if 0
2997/* 3134/*
2998 * Bind an inode's backing buffer_head into this transaction, to prevent 3135 * Bind an inode's backing buffer_head into this transaction, to prevent
2999 * it from being flushed to disk early. Unlike 3136 * it from being flushed to disk early. Unlike
@@ -3001,8 +3138,7 @@ out:
3001 * returns no iloc structure, so the caller needs to repeat the iloc 3138 * returns no iloc structure, so the caller needs to repeat the iloc
3002 * lookup to mark the inode dirty later. 3139 * lookup to mark the inode dirty later.
3003 */ 3140 */
3004static inline int 3141static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3005ext3_pin_inode(handle_t *handle, struct inode *inode)
3006{ 3142{
3007 struct ext3_iloc iloc; 3143 struct ext3_iloc iloc;
3008 3144
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 86e443182de4..f8a5266ea1ff 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1678 } 1678 }
1679 1679
1680 if (test_opt(sb, NOBH)) { 1680 if (test_opt(sb, NOBH)) {
1681 if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) {
1682 printk(KERN_WARNING "EXT3-fs: Ignoring nobh option "
1683 "since filesystem blocksize doesn't match "
1684 "pagesize\n");
1685 clear_opt(sbi->s_mount_opt, NOBH);
1686 }
1687 if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { 1681 if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
1688 printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " 1682 printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - "
1689 "its supported only with writeback mode\n"); 1683 "its supported only with writeback mode\n");
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 297300fe81c2..404bfc9f7385 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -101,11 +101,11 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
101} 101}
102 102
103static int fat_get_blocks(struct inode *inode, sector_t iblock, 103static int fat_get_blocks(struct inode *inode, sector_t iblock,
104 unsigned long max_blocks,
105 struct buffer_head *bh_result, int create) 104 struct buffer_head *bh_result, int create)
106{ 105{
107 struct super_block *sb = inode->i_sb; 106 struct super_block *sb = inode->i_sb;
108 int err; 107 int err;
108 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
109 109
110 err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create); 110 err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
111 if (err) 111 if (err)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 03c789560fb8..2a2479196f96 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -412,7 +412,7 @@ out:
412 412
413/* Table to convert sigio signal codes into poll band bitmaps */ 413/* Table to convert sigio signal codes into poll band bitmaps */
414 414
415static long band_table[NSIGPOLL] = { 415static const long band_table[NSIGPOLL] = {
416 POLLIN | POLLRDNORM, /* POLL_IN */ 416 POLLIN | POLLRDNORM, /* POLL_IN */
417 POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ 417 POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
418 POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ 418 POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
531} 531}
532 532
533static DEFINE_RWLOCK(fasync_lock); 533static DEFINE_RWLOCK(fasync_lock);
534static kmem_cache_t *fasync_cache; 534static kmem_cache_t *fasync_cache __read_mostly;
535 535
536/* 536/*
537 * fasync_helper() is used by some character device drivers (mainly mice) 537 * fasync_helper() is used by some character device drivers (mainly mice)
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 39fd85b9b916..2c564701724f 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -98,17 +98,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
98 return res ? try_to_free_buffers(page) : 0; 98 return res ? try_to_free_buffers(page) : 0;
99} 99}
100 100
101static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
102 struct buffer_head *bh_result, int create)
103{
104 int ret;
105
106 ret = hfs_get_block(inode, iblock, bh_result, create);
107 if (!ret)
108 bh_result->b_size = (1 << inode->i_blkbits);
109 return ret;
110}
111
112static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, 101static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
113 const struct iovec *iov, loff_t offset, unsigned long nr_segs) 102 const struct iovec *iov, loff_t offset, unsigned long nr_segs)
114{ 103{
@@ -116,7 +105,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
116 struct inode *inode = file->f_dentry->d_inode->i_mapping->host; 105 struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
117 106
118 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 107 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
119 offset, nr_segs, hfs_get_blocks, NULL); 108 offset, nr_segs, hfs_get_block, NULL);
120} 109}
121 110
122static int hfs_writepages(struct address_space *mapping, 111static int hfs_writepages(struct address_space *mapping,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 12ed2b7d046b..9fbe4d2aeece 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -93,17 +93,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
93 return res ? try_to_free_buffers(page) : 0; 93 return res ? try_to_free_buffers(page) : 0;
94} 94}
95 95
96static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
97 struct buffer_head *bh_result, int create)
98{
99 int ret;
100
101 ret = hfsplus_get_block(inode, iblock, bh_result, create);
102 if (!ret)
103 bh_result->b_size = (1 << inode->i_blkbits);
104 return ret;
105}
106
107static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, 96static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
108 const struct iovec *iov, loff_t offset, unsigned long nr_segs) 97 const struct iovec *iov, loff_t offset, unsigned long nr_segs)
109{ 98{
@@ -111,7 +100,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
111 struct inode *inode = file->f_dentry->d_inode->i_mapping->host; 100 struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
112 101
113 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 102 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
114 offset, nr_segs, hfsplus_get_blocks, NULL); 103 offset, nr_segs, hfsplus_get_block, NULL);
115} 104}
116 105
117static int hfsplus_writepages(struct address_space *mapping, 106static int hfsplus_writepages(struct address_space *mapping,
diff --git a/fs/inode.c b/fs/inode.c
index 85da11044adc..1fddf2803af8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
56#define I_HASHBITS i_hash_shift 56#define I_HASHBITS i_hash_shift
57#define I_HASHMASK i_hash_mask 57#define I_HASHMASK i_hash_mask
58 58
59static unsigned int i_hash_mask; 59static unsigned int i_hash_mask __read_mostly;
60static unsigned int i_hash_shift; 60static unsigned int i_hash_shift __read_mostly;
61 61
62/* 62/*
63 * Each inode can be on two separate lists. One is 63 * Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
73 73
74LIST_HEAD(inode_in_use); 74LIST_HEAD(inode_in_use);
75LIST_HEAD(inode_unused); 75LIST_HEAD(inode_unused);
76static struct hlist_head *inode_hashtable; 76static struct hlist_head *inode_hashtable __read_mostly;
77 77
78/* 78/*
79 * A simple spinlock to protect the list manipulations. 79 * A simple spinlock to protect the list manipulations.
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
98 */ 98 */
99struct inodes_stat_t inodes_stat; 99struct inodes_stat_t inodes_stat;
100 100
101static kmem_cache_t * inode_cachep; 101static kmem_cache_t * inode_cachep __read_mostly;
102 102
103static struct inode *alloc_inode(struct super_block *sb) 103static struct inode *alloc_inode(struct super_block *sb)
104{ 104{
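
The __read_mostly annotations in this and the neighbouring hunks are a cache-placement hint rather than a behavioural change: write-once, read-often globals are collected into their own section so they do not share cache lines with frequently written data. A user-space approximation of the mechanism; the kernel's real macro is arch-specific and the section name below is only illustrative:

#include <stdio.h>

/* User-space stand-in for the kernel's __read_mostly attribute: group
 * write-once variables in their own section so hot read-only data is
 * not falsely shared with write-heavy neighbours.  The real macro and
 * section name are arch-specific; this one is only for illustration. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

static unsigned int hash_mask __read_mostly;
static unsigned int hash_shift __read_mostly;

int main(void)
{
	hash_shift = 14;                 /* set once at "boot" */
	hash_mask = (1u << hash_shift) - 1;
	printf("mask=%#x shift=%u\n", hash_mask, hash_shift);
	return 0;
}
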
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e17853..f48a3dae0712 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@
39 39
40static atomic_t inotify_cookie; 40static atomic_t inotify_cookie;
41 41
42static kmem_cache_t *watch_cachep; 42static kmem_cache_t *watch_cachep __read_mostly;
43static kmem_cache_t *event_cachep; 43static kmem_cache_t *event_cachep __read_mostly;
44 44
45static struct vfsmount *inotify_mnt; 45static struct vfsmount *inotify_mnt __read_mostly;
46 46
47/* these are configurable via /proc/sys/fs/inotify/ */ 47/* these are configurable via /proc/sys/fs/inotify/ */
48int inotify_max_user_instances; 48int inotify_max_user_instances __read_mostly;
49int inotify_max_user_watches; 49int inotify_max_user_watches __read_mostly;
50int inotify_max_queued_events; 50int inotify_max_queued_events __read_mostly;
51 51
52/* 52/*
53 * Lock ordering: 53 * Lock ordering:
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index ada31fa272e3..c609f5034fcd 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1873,16 +1873,15 @@ zap_buffer_unlocked:
1873} 1873}
1874 1874
1875/** 1875/**
1876 * int journal_invalidatepage() 1876 * void journal_invalidatepage()
1877 * @journal: journal to use for flush... 1877 * @journal: journal to use for flush...
1878 * @page: page to flush 1878 * @page: page to flush
1879 * @offset: length of page to invalidate. 1879 * @offset: length of page to invalidate.
1880 * 1880 *
1881 * Reap page buffers containing data after offset in page. 1881 * Reap page buffers containing data after offset in page.
1882 * 1882 *
1883 * Return non-zero if the page's buffers were successfully reaped.
1884 */ 1883 */
1885int journal_invalidatepage(journal_t *journal, 1884void journal_invalidatepage(journal_t *journal,
1886 struct page *page, 1885 struct page *page,
1887 unsigned long offset) 1886 unsigned long offset)
1888{ 1887{
@@ -1893,7 +1892,7 @@ int journal_invalidatepage(journal_t *journal,
1893 if (!PageLocked(page)) 1892 if (!PageLocked(page))
1894 BUG(); 1893 BUG();
1895 if (!page_has_buffers(page)) 1894 if (!page_has_buffers(page))
1896 return 1; 1895 return;
1897 1896
1898 /* We will potentially be playing with lists other than just the 1897 /* We will potentially be playing with lists other than just the
1899 * data lists (especially for journaled data mode), so be 1898 * data lists (especially for journaled data mode), so be
@@ -1916,11 +1915,9 @@ int journal_invalidatepage(journal_t *journal,
1916 } while (bh != head); 1915 } while (bh != head);
1917 1916
1918 if (!offset) { 1917 if (!offset) {
1919 if (!may_free || !try_to_free_buffers(page)) 1918 if (may_free && try_to_free_buffers(page))
1920 return 0; 1919 J_ASSERT(!page_has_buffers(page));
1921 J_ASSERT(!page_has_buffers(page));
1922 } 1920 }
1923 return 1;
1924} 1921}
1925 1922
1926/* 1923/*
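
The journal_invalidatepage() hunk combines two things: the unused int return is dropped, and the final guard is rewritten by De Morgan from "if (!may_free || !try_to_free_buffers(page)) return 0;" into "if (may_free && try_to_free_buffers(page))". A small sketch showing that both shapes reach the assertion under exactly the same condition (free_ok stands in for try_to_free_buffers(); all names are illustrative):

#include <stdio.h>

/* Both forms run the assertion under the same condition; the second
 * simply has no return value to fabricate. */
static void assert_no_buffers(void)
{
	printf("  J_ASSERT(!page_has_buffers(page))\n");
}

static int old_form(int may_free, int free_ok)
{
	if (!may_free || !free_ok)
		return 0;
	assert_no_buffers();
	return 1;
}

static void new_form(int may_free, int free_ok)
{
	if (may_free && free_ok)
		assert_no_buffers();
}

int main(void)
{
	for (int m = 0; m <= 1; m++)
		for (int f = 0; f <= 1; f++) {
			printf("may_free=%d free_ok=%d:\n", m, f);
			old_form(m, f);
			new_form(m, f);  /* asserts iff old_form did */
		}
	return 0;
}
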
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 4db8be8e90cc..5c63e0cdcf4c 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -33,13 +33,14 @@
33 */ 33 */
34#define STREAM_END_SPACE 12 34#define STREAM_END_SPACE 12
35 35
36static DECLARE_MUTEX(deflate_sem); 36static DEFINE_MUTEX(deflate_mutex);
37static DECLARE_MUTEX(inflate_sem); 37static DEFINE_MUTEX(inflate_mutex);
38static z_stream inf_strm, def_strm; 38static z_stream inf_strm, def_strm;
39 39
40#ifdef __KERNEL__ /* Linux-only */ 40#ifdef __KERNEL__ /* Linux-only */
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/mutex.h>
43 44
44static int __init alloc_workspaces(void) 45static int __init alloc_workspaces(void)
45{ 46{
@@ -79,11 +80,11 @@ static int jffs2_zlib_compress(unsigned char *data_in,
79 if (*dstlen <= STREAM_END_SPACE) 80 if (*dstlen <= STREAM_END_SPACE)
80 return -1; 81 return -1;
81 82
82 down(&deflate_sem); 83 mutex_lock(&deflate_mutex);
83 84
84 if (Z_OK != zlib_deflateInit(&def_strm, 3)) { 85 if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
85 printk(KERN_WARNING "deflateInit failed\n"); 86 printk(KERN_WARNING "deflateInit failed\n");
86 up(&deflate_sem); 87 mutex_unlock(&deflate_mutex);
87 return -1; 88 return -1;
88 } 89 }
89 90
@@ -104,7 +105,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
104 if (ret != Z_OK) { 105 if (ret != Z_OK) {
105 D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); 106 D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
106 zlib_deflateEnd(&def_strm); 107 zlib_deflateEnd(&def_strm);
107 up(&deflate_sem); 108 mutex_unlock(&deflate_mutex);
108 return -1; 109 return -1;
109 } 110 }
110 } 111 }
@@ -133,7 +134,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
133 *sourcelen = def_strm.total_in; 134 *sourcelen = def_strm.total_in;
134 ret = 0; 135 ret = 0;
135 out: 136 out:
136 up(&deflate_sem); 137 mutex_unlock(&deflate_mutex);
137 return ret; 138 return ret;
138} 139}
139 140
@@ -145,7 +146,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
145 int ret; 146 int ret;
146 int wbits = MAX_WBITS; 147 int wbits = MAX_WBITS;
147 148
148 down(&inflate_sem); 149 mutex_lock(&inflate_mutex);
149 150
150 inf_strm.next_in = data_in; 151 inf_strm.next_in = data_in;
151 inf_strm.avail_in = srclen; 152 inf_strm.avail_in = srclen;
@@ -173,7 +174,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
173 174
174 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { 175 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
175 printk(KERN_WARNING "inflateInit failed\n"); 176 printk(KERN_WARNING "inflateInit failed\n");
176 up(&inflate_sem); 177 mutex_unlock(&inflate_mutex);
177 return 1; 178 return 1;
178 } 179 }
179 180
@@ -183,7 +184,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
183 printk(KERN_NOTICE "inflate returned %d\n", ret); 184 printk(KERN_NOTICE "inflate returned %d\n", ret);
184 } 185 }
185 zlib_inflateEnd(&inf_strm); 186 zlib_inflateEnd(&inf_strm);
186 up(&inflate_sem); 187 mutex_unlock(&inflate_mutex);
187 return 0; 188 return 0;
188} 189}
189 190
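
The jffs2 hunk is one instance of the tree-wide semaphore-to-mutex conversion: a DECLARE_MUTEX() semaphore used purely for mutual exclusion becomes DEFINE_MUTEX(), with down()/up() replaced by mutex_lock()/mutex_unlock(). A user-space mirror of the after-shape using POSIX threads, so the pattern compiles outside the kernel (build with -lpthread; the names are illustrative):

/* Kernel:  DECLARE_MUTEX(sem); down(&sem); ... up(&sem);
 * becomes: DEFINE_MUTEX(m);   mutex_lock(&m); ... mutex_unlock(&m); */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t deflate_mutex = PTHREAD_MUTEX_INITIALIZER;
static int stream_owner;                 /* the shared z_stream stands in */

static int compress_one(int id)
{
	pthread_mutex_lock(&deflate_mutex);   /* was: down(&deflate_sem) */
	stream_owner = id;                    /* exclusive use of the stream */
	printf("worker %d owns the stream\n", stream_owner);
	pthread_mutex_unlock(&deflate_mutex); /* was: up(&deflate_sem) */
	return 0;
}

int main(void)
{
	compress_one(1);
	compress_one(2);
	return 0;
}
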
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 51a5fed90cca..04eb78f1252e 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
258static int jfs_get_block(struct inode *ip, sector_t lblock, 258static int jfs_get_block(struct inode *ip, sector_t lblock,
259 struct buffer_head *bh_result, int create) 259 struct buffer_head *bh_result, int create)
260{ 260{
261 return jfs_get_blocks(ip, lblock, 1, bh_result, create); 261 return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
262 bh_result, create);
262} 263}
263 264
264static int jfs_writepage(struct page *page, struct writeback_control *wbc) 265static int jfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -301,7 +302,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
301 struct inode *inode = file->f_mapping->host; 302 struct inode *inode = file->f_mapping->host;
302 303
303 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 304 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
304 offset, nr_segs, jfs_get_blocks, NULL); 305 offset, nr_segs, jfs_get_block, NULL);
305} 306}
306 307
307struct address_space_operations jfs_aops = { 308struct address_space_operations jfs_aops = {
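jfs_get_block() now derives the number of blocks to map from bh_result->b_size rather than hard-coding a single block: the caller preloads b_size with the byte count it would like mapped, and the filesystem reports the extent it actually mapped back through the same field. A hedged sketch of that calling convention (helper name hypothetical):

	/* Ask for up to "max" blocks at lblock; relies on a b_size-aware
	 * get_block implementation such as jfs_get_block above. */
	static int map_extent(struct inode *inode, sector_t lblock, unsigned max,
			      struct buffer_head *bh, get_block_t *get_block)
	{
		bh->b_state = 0;
		bh->b_size = max << inode->i_blkbits;	/* request size, in bytes */
		return get_block(inode, lblock, bh, 0);	/* b_size now = mapped bytes */
	}

This is also why the direct_IO path above can pass plain jfs_get_block instead of the old multi-block jfs_get_blocks wrapper.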
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 0b348b13b551..3315f0b1fbc0 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -69,6 +69,7 @@
69#include <linux/bio.h> 69#include <linux/bio.h>
70#include <linux/suspend.h> 70#include <linux/suspend.h>
71#include <linux/delay.h> 71#include <linux/delay.h>
72#include <linux/mutex.h>
72#include "jfs_incore.h" 73#include "jfs_incore.h"
73#include "jfs_filsys.h" 74#include "jfs_filsys.h"
74#include "jfs_metapage.h" 75#include "jfs_metapage.h"
@@ -165,7 +166,7 @@ do { \
165 */ 166 */
166static LIST_HEAD(jfs_external_logs); 167static LIST_HEAD(jfs_external_logs);
167static struct jfs_log *dummy_log = NULL; 168static struct jfs_log *dummy_log = NULL;
168static DECLARE_MUTEX(jfs_log_sem); 169static DEFINE_MUTEX(jfs_log_mutex);
169 170
170/* 171/*
171 * forward references 172 * forward references
@@ -1085,20 +1086,20 @@ int lmLogOpen(struct super_block *sb)
1085 if (sbi->mntflag & JFS_INLINELOG) 1086 if (sbi->mntflag & JFS_INLINELOG)
1086 return open_inline_log(sb); 1087 return open_inline_log(sb);
1087 1088
1088 down(&jfs_log_sem); 1089 mutex_lock(&jfs_log_mutex);
1089 list_for_each_entry(log, &jfs_external_logs, journal_list) { 1090 list_for_each_entry(log, &jfs_external_logs, journal_list) {
1090 if (log->bdev->bd_dev == sbi->logdev) { 1091 if (log->bdev->bd_dev == sbi->logdev) {
1091 if (memcmp(log->uuid, sbi->loguuid, 1092 if (memcmp(log->uuid, sbi->loguuid,
1092 sizeof(log->uuid))) { 1093 sizeof(log->uuid))) {
1093 jfs_warn("wrong uuid on JFS journal\n"); 1094 jfs_warn("wrong uuid on JFS journal\n");
1094 up(&jfs_log_sem); 1095 mutex_unlock(&jfs_log_mutex);
1095 return -EINVAL; 1096 return -EINVAL;
1096 } 1097 }
1097 /* 1098 /*
1098 * add file system to log active file system list 1099 * add file system to log active file system list
1099 */ 1100 */
1100 if ((rc = lmLogFileSystem(log, sbi, 1))) { 1101 if ((rc = lmLogFileSystem(log, sbi, 1))) {
1101 up(&jfs_log_sem); 1102 mutex_unlock(&jfs_log_mutex);
1102 return rc; 1103 return rc;
1103 } 1104 }
1104 goto journal_found; 1105 goto journal_found;
@@ -1106,7 +1107,7 @@ int lmLogOpen(struct super_block *sb)
1106 } 1107 }
1107 1108
1108 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { 1109 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
1109 up(&jfs_log_sem); 1110 mutex_unlock(&jfs_log_mutex);
1110 return -ENOMEM; 1111 return -ENOMEM;
1111 } 1112 }
1112 INIT_LIST_HEAD(&log->sb_list); 1113 INIT_LIST_HEAD(&log->sb_list);
@@ -1151,7 +1152,7 @@ journal_found:
1151 sbi->log = log; 1152 sbi->log = log;
1152 LOG_UNLOCK(log); 1153 LOG_UNLOCK(log);
1153 1154
1154 up(&jfs_log_sem); 1155 mutex_unlock(&jfs_log_mutex);
1155 return 0; 1156 return 0;
1156 1157
1157 /* 1158 /*
@@ -1168,7 +1169,7 @@ journal_found:
1168 blkdev_put(bdev); 1169 blkdev_put(bdev);
1169 1170
1170 free: /* free log descriptor */ 1171 free: /* free log descriptor */
1171 up(&jfs_log_sem); 1172 mutex_unlock(&jfs_log_mutex);
1172 kfree(log); 1173 kfree(log);
1173 1174
1174 jfs_warn("lmLogOpen: exit(%d)", rc); 1175 jfs_warn("lmLogOpen: exit(%d)", rc);
@@ -1212,11 +1213,11 @@ static int open_dummy_log(struct super_block *sb)
1212{ 1213{
1213 int rc; 1214 int rc;
1214 1215
1215 down(&jfs_log_sem); 1216 mutex_lock(&jfs_log_mutex);
1216 if (!dummy_log) { 1217 if (!dummy_log) {
1217 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); 1218 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
1218 if (!dummy_log) { 1219 if (!dummy_log) {
1219 up(&jfs_log_sem); 1220 mutex_unlock(&jfs_log_mutex);
1220 return -ENOMEM; 1221 return -ENOMEM;
1221 } 1222 }
1222 INIT_LIST_HEAD(&dummy_log->sb_list); 1223 INIT_LIST_HEAD(&dummy_log->sb_list);
@@ -1229,7 +1230,7 @@ static int open_dummy_log(struct super_block *sb)
1229 if (rc) { 1230 if (rc) {
1230 kfree(dummy_log); 1231 kfree(dummy_log);
1231 dummy_log = NULL; 1232 dummy_log = NULL;
1232 up(&jfs_log_sem); 1233 mutex_unlock(&jfs_log_mutex);
1233 return rc; 1234 return rc;
1234 } 1235 }
1235 } 1236 }
@@ -1238,7 +1239,7 @@ static int open_dummy_log(struct super_block *sb)
1238 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); 1239 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
1239 JFS_SBI(sb)->log = dummy_log; 1240 JFS_SBI(sb)->log = dummy_log;
1240 LOG_UNLOCK(dummy_log); 1241 LOG_UNLOCK(dummy_log);
1241 up(&jfs_log_sem); 1242 mutex_unlock(&jfs_log_mutex);
1242 1243
1243 return 0; 1244 return 0;
1244} 1245}
@@ -1466,7 +1467,7 @@ int lmLogClose(struct super_block *sb)
1466 1467
1467 jfs_info("lmLogClose: log:0x%p", log); 1468 jfs_info("lmLogClose: log:0x%p", log);
1468 1469
1469 down(&jfs_log_sem); 1470 mutex_lock(&jfs_log_mutex);
1470 LOG_LOCK(log); 1471 LOG_LOCK(log);
1471 list_del(&sbi->log_list); 1472 list_del(&sbi->log_list);
1472 LOG_UNLOCK(log); 1473 LOG_UNLOCK(log);
@@ -1516,7 +1517,7 @@ int lmLogClose(struct super_block *sb)
1516 kfree(log); 1517 kfree(log);
1517 1518
1518 out: 1519 out:
1519 up(&jfs_log_sem); 1520 mutex_unlock(&jfs_log_mutex);
1520 jfs_info("lmLogClose: exit(%d)", rc); 1521 jfs_info("lmLogClose: exit(%d)", rc);
1521 return rc; 1522 return rc;
1522} 1523}
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 5fbaeaadccd3..f28696f235c4 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -220,8 +220,8 @@ int __init metapage_init(void)
220 if (metapage_cache == NULL) 220 if (metapage_cache == NULL)
221 return -ENOMEM; 221 return -ENOMEM;
222 222
223 metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab, 223 metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
224 mempool_free_slab, metapage_cache); 224 metapage_cache);
225 225
226 if (metapage_mempool == NULL) { 226 if (metapage_mempool == NULL) {
227 kmem_cache_destroy(metapage_cache); 227 kmem_cache_destroy(metapage_cache);
@@ -578,14 +578,13 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
578 return 0; 578 return 0;
579} 579}
580 580
581static int metapage_invalidatepage(struct page *page, unsigned long offset) 581static void metapage_invalidatepage(struct page *page, unsigned long offset)
582{ 582{
583 BUG_ON(offset); 583 BUG_ON(offset);
584 584
585 if (PageWriteback(page)) 585 BUG_ON(PageWriteback(page));
586 return 0;
587 586
588 return metapage_releasepage(page, 0); 587 metapage_releasepage(page, 0);
589} 588}
590 589
591struct address_space_operations jfs_metapage_aops = { 590struct address_space_operations jfs_metapage_aops = {
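mempool_create_slab_pool(min_nr, cache) is a convenience wrapper equivalent to mempool_create() with the mempool_alloc_slab/mempool_free_slab callbacks, so that boilerplate pair disappears here and again in the nfs read/write hunks below. The idiom, sketched (the cache itself is assumed to be created elsewhere):

	#include <linux/mempool.h>

	static kmem_cache_t *example_cache;	/* assumed: set up via kmem_cache_create() */
	static mempool_t *example_pool;

	static int example_init(void)
	{
		/* guarantee at least 4 preallocated objects backed by example_cache */
		example_pool = mempool_create_slab_pool(4, example_cache);
		return example_pool ? 0 : -ENOMEM;
	}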
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 112ebf8b8dfe..729ac427d359 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -16,6 +16,7 @@
16#include <linux/sunrpc/svc.h> 16#include <linux/sunrpc/svc.h>
17#include <linux/lockd/lockd.h> 17#include <linux/lockd/lockd.h>
18#include <linux/lockd/sm_inter.h> 18#include <linux/lockd/sm_inter.h>
19#include <linux/mutex.h>
19 20
20 21
21#define NLMDBG_FACILITY NLMDBG_HOSTCACHE 22#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
@@ -30,7 +31,7 @@
30static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; 31static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
31static unsigned long next_gc; 32static unsigned long next_gc;
32static int nrhosts; 33static int nrhosts;
33static DECLARE_MUTEX(nlm_host_sema); 34static DEFINE_MUTEX(nlm_host_mutex);
34 35
35 36
36static void nlm_gc_hosts(void); 37static void nlm_gc_hosts(void);
@@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
71 hash = NLM_ADDRHASH(sin->sin_addr.s_addr); 72 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
72 73
73 /* Lock hash table */ 74 /* Lock hash table */
74 down(&nlm_host_sema); 75 mutex_lock(&nlm_host_mutex);
75 76
76 if (time_after_eq(jiffies, next_gc)) 77 if (time_after_eq(jiffies, next_gc))
77 nlm_gc_hosts(); 78 nlm_gc_hosts();
@@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
91 nlm_hosts[hash] = host; 92 nlm_hosts[hash] = host;
92 } 93 }
93 nlm_get_host(host); 94 nlm_get_host(host);
94 up(&nlm_host_sema); 95 mutex_unlock(&nlm_host_mutex);
95 return host; 96 return host;
96 } 97 }
97 } 98 }
@@ -130,7 +131,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
130 next_gc = 0; 131 next_gc = 0;
131 132
132nohost: 133nohost:
133 up(&nlm_host_sema); 134 mutex_unlock(&nlm_host_mutex);
134 return host; 135 return host;
135} 136}
136 137
@@ -141,19 +142,19 @@ nlm_find_client(void)
141 * and return it 142 * and return it
142 */ 143 */
143 int hash; 144 int hash;
144 down(&nlm_host_sema); 145 mutex_lock(&nlm_host_mutex);
145 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { 146 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
146 struct nlm_host *host, **hp; 147 struct nlm_host *host, **hp;
147 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { 148 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
148 if (host->h_server && 149 if (host->h_server &&
149 host->h_killed == 0) { 150 host->h_killed == 0) {
150 nlm_get_host(host); 151 nlm_get_host(host);
151 up(&nlm_host_sema); 152 mutex_unlock(&nlm_host_mutex);
152 return host; 153 return host;
153 } 154 }
154 } 155 }
155 } 156 }
156 up(&nlm_host_sema); 157 mutex_unlock(&nlm_host_mutex);
157 return NULL; 158 return NULL;
158} 159}
159 160
@@ -265,7 +266,7 @@ nlm_shutdown_hosts(void)
265 int i; 266 int i;
266 267
267 dprintk("lockd: shutting down host module\n"); 268 dprintk("lockd: shutting down host module\n");
268 down(&nlm_host_sema); 269 mutex_lock(&nlm_host_mutex);
269 270
270 /* First, make all hosts eligible for gc */ 271 /* First, make all hosts eligible for gc */
271 dprintk("lockd: nuking all hosts...\n"); 272 dprintk("lockd: nuking all hosts...\n");
@@ -276,7 +277,7 @@ nlm_shutdown_hosts(void)
276 277
277 /* Then, perform a garbage collection pass */ 278 /* Then, perform a garbage collection pass */
278 nlm_gc_hosts(); 279 nlm_gc_hosts();
279 up(&nlm_host_sema); 280 mutex_unlock(&nlm_host_mutex);
280 281
281 /* complain if any hosts are left */ 282 /* complain if any hosts are left */
282 if (nrhosts) { 283 if (nrhosts) {
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5e85bde6c123..fd56c8872f34 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/smp.h> 26#include <linux/smp.h>
27#include <linux/smp_lock.h> 27#include <linux/smp_lock.h>
28#include <linux/mutex.h>
28 29
29#include <linux/sunrpc/types.h> 30#include <linux/sunrpc/types.h>
30#include <linux/sunrpc/stats.h> 31#include <linux/sunrpc/stats.h>
@@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program;
43struct nlmsvc_binding * nlmsvc_ops; 44struct nlmsvc_binding * nlmsvc_ops;
44EXPORT_SYMBOL(nlmsvc_ops); 45EXPORT_SYMBOL(nlmsvc_ops);
45 46
46static DECLARE_MUTEX(nlmsvc_sema); 47static DEFINE_MUTEX(nlmsvc_mutex);
47static unsigned int nlmsvc_users; 48static unsigned int nlmsvc_users;
48static pid_t nlmsvc_pid; 49static pid_t nlmsvc_pid;
49int nlmsvc_grace_period; 50int nlmsvc_grace_period;
50unsigned long nlmsvc_timeout; 51unsigned long nlmsvc_timeout;
51 52
52static DECLARE_MUTEX_LOCKED(lockd_start); 53static DECLARE_COMPLETION(lockd_start_done);
53static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); 54static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
54 55
55/* 56/*
@@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp)
112 * Let our maker know we're running. 113 * Let our maker know we're running.
113 */ 114 */
114 nlmsvc_pid = current->pid; 115 nlmsvc_pid = current->pid;
115 up(&lockd_start); 116 complete(&lockd_start_done);
116 117
117 daemonize("lockd"); 118 daemonize("lockd");
118 119
@@ -215,7 +216,7 @@ lockd_up(void)
215 struct svc_serv * serv; 216 struct svc_serv * serv;
216 int error = 0; 217 int error = 0;
217 218
218 down(&nlmsvc_sema); 219 mutex_lock(&nlmsvc_mutex);
219 /* 220 /*
220 * Unconditionally increment the user count ... this is 221 * Unconditionally increment the user count ... this is
221 * the number of clients who _want_ a lockd process. 222 * the number of clients who _want_ a lockd process.
@@ -263,7 +264,7 @@ lockd_up(void)
263 "lockd_up: create thread failed, error=%d\n", error); 264 "lockd_up: create thread failed, error=%d\n", error);
264 goto destroy_and_out; 265 goto destroy_and_out;
265 } 266 }
266 down(&lockd_start); 267 wait_for_completion(&lockd_start_done);
267 268
268 /* 269 /*
269 * Note: svc_serv structures have an initial use count of 1, 270 * Note: svc_serv structures have an initial use count of 1,
@@ -272,7 +273,7 @@ lockd_up(void)
272destroy_and_out: 273destroy_and_out:
273 svc_destroy(serv); 274 svc_destroy(serv);
274out: 275out:
275 up(&nlmsvc_sema); 276 mutex_unlock(&nlmsvc_mutex);
276 return error; 277 return error;
277} 278}
278EXPORT_SYMBOL(lockd_up); 279EXPORT_SYMBOL(lockd_up);
@@ -285,7 +286,7 @@ lockd_down(void)
285{ 286{
286 static int warned; 287 static int warned;
287 288
288 down(&nlmsvc_sema); 289 mutex_lock(&nlmsvc_mutex);
289 if (nlmsvc_users) { 290 if (nlmsvc_users) {
290 if (--nlmsvc_users) 291 if (--nlmsvc_users)
291 goto out; 292 goto out;
@@ -315,7 +316,7 @@ lockd_down(void)
315 recalc_sigpending(); 316 recalc_sigpending();
316 spin_unlock_irq(&current->sighand->siglock); 317 spin_unlock_irq(&current->sighand->siglock);
317out: 318out:
318 up(&nlmsvc_sema); 319 mutex_unlock(&nlmsvc_mutex);
319} 320}
320EXPORT_SYMBOL(lockd_down); 321EXPORT_SYMBOL(lockd_down);
321 322
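DECLARE_MUTEX_LOCKED() declared a semaphore born locked that the new thread would up exactly once; that is a completion in disguise, which the lockd_start_done conversion above makes explicit. The handshake, reduced to essentials (function names hypothetical; lockd itself starts its thread through svc_create_thread rather than directly):

	#include <linux/completion.h>
	#include <linux/sched.h>	/* kernel_thread(), pre-kthread API of this era */

	static DECLARE_COMPLETION(start_done);

	static int worker(void *arg)
	{
		/* ... record pid, initialise per-thread state ... */
		complete(&start_done);		/* was: up(&locked_sem); */
		/* ... main service loop ... */
		return 0;
	}

	static int start_worker(void)
	{
		if (kernel_thread(worker, NULL, 0) < 0)
			return -ENOMEM;
		wait_for_completion(&start_done);	/* was: down(&locked_sem); */
		return 0;
	}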
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index c7a6e3ae44d6..a570e5c8a930 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/in.h> 13#include <linux/in.h>
14#include <linux/mutex.h>
14#include <linux/sunrpc/svc.h> 15#include <linux/sunrpc/svc.h>
15#include <linux/sunrpc/clnt.h> 16#include <linux/sunrpc/clnt.h>
16#include <linux/nfsd/nfsfh.h> 17#include <linux/nfsd/nfsfh.h>
@@ -28,7 +29,7 @@
28#define FILE_HASH_BITS 5 29#define FILE_HASH_BITS 5
29#define FILE_NRHASH (1<<FILE_HASH_BITS) 30#define FILE_NRHASH (1<<FILE_HASH_BITS)
30static struct nlm_file * nlm_files[FILE_NRHASH]; 31static struct nlm_file * nlm_files[FILE_NRHASH];
31static DECLARE_MUTEX(nlm_file_sema); 32static DEFINE_MUTEX(nlm_file_mutex);
32 33
33#ifdef NFSD_DEBUG 34#ifdef NFSD_DEBUG
34static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) 35static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
@@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
91 hash = file_hash(f); 92 hash = file_hash(f);
92 93
93 /* Lock file table */ 94 /* Lock file table */
94 down(&nlm_file_sema); 95 mutex_lock(&nlm_file_mutex);
95 96
96 for (file = nlm_files[hash]; file; file = file->f_next) 97 for (file = nlm_files[hash]; file; file = file->f_next)
97 if (!nfs_compare_fh(&file->f_handle, f)) 98 if (!nfs_compare_fh(&file->f_handle, f))
@@ -130,7 +131,7 @@ found:
130 nfserr = 0; 131 nfserr = 0;
131 132
132out_unlock: 133out_unlock:
133 up(&nlm_file_sema); 134 mutex_unlock(&nlm_file_mutex);
134 return nfserr; 135 return nfserr;
135 136
136out_free: 137out_free:
@@ -239,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action)
239 struct nlm_file *file, **fp; 240 struct nlm_file *file, **fp;
240 int i; 241 int i;
241 242
242 down(&nlm_file_sema); 243 mutex_lock(&nlm_file_mutex);
243 for (i = 0; i < FILE_NRHASH; i++) { 244 for (i = 0; i < FILE_NRHASH; i++) {
244 fp = nlm_files + i; 245 fp = nlm_files + i;
245 while ((file = *fp) != NULL) { 246 while ((file = *fp) != NULL) {
246 /* Traverse locks, blocks and shares of this file 247 /* Traverse locks, blocks and shares of this file
247 * and update file->f_locks count */ 248 * and update file->f_locks count */
248 if (nlm_inspect_file(host, file, action)) { 249 if (nlm_inspect_file(host, file, action)) {
249 up(&nlm_file_sema); 250 mutex_unlock(&nlm_file_mutex);
250 return 1; 251 return 1;
251 } 252 }
252 253
@@ -261,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
261 } 262 }
262 } 263 }
263 } 264 }
264 up(&nlm_file_sema); 265 mutex_unlock(&nlm_file_mutex);
265 return 0; 266 return 0;
266} 267}
267 268
@@ -281,7 +282,7 @@ nlm_release_file(struct nlm_file *file)
281 file, file->f_count); 282 file, file->f_count);
282 283
283 /* Lock file table */ 284 /* Lock file table */
284 down(&nlm_file_sema); 285 mutex_lock(&nlm_file_mutex);
285 286
286 /* If there are no more locks etc, delete the file */ 287 /* If there are no more locks etc, delete the file */
287 if(--file->f_count == 0) { 288 if(--file->f_count == 0) {
@@ -289,7 +290,7 @@ nlm_release_file(struct nlm_file *file)
289 nlm_delete_file(file); 290 nlm_delete_file(file);
290 } 291 }
291 292
292 up(&nlm_file_sema); 293 mutex_unlock(&nlm_file_mutex);
293} 294}
294 295
295/* 296/*
diff --git a/fs/locks.c b/fs/locks.c
index 56f996e98bbc..4d9e71d43e7e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,7 +142,7 @@ int lease_break_time = 45;
142static LIST_HEAD(file_lock_list); 142static LIST_HEAD(file_lock_list);
143static LIST_HEAD(blocked_list); 143static LIST_HEAD(blocked_list);
144 144
145static kmem_cache_t *filelock_cache; 145static kmem_cache_t *filelock_cache __read_mostly;
146 146
147/* Allocate an empty lock structure. */ 147/* Allocate an empty lock structure. */
148static struct file_lock *locks_alloc_lock(void) 148static struct file_lock *locks_alloc_lock(void)
@@ -533,12 +533,7 @@ static void locks_delete_block(struct file_lock *waiter)
533static void locks_insert_block(struct file_lock *blocker, 533static void locks_insert_block(struct file_lock *blocker,
534 struct file_lock *waiter) 534 struct file_lock *waiter)
535{ 535{
536 if (!list_empty(&waiter->fl_block)) { 536 BUG_ON(!list_empty(&waiter->fl_block));
537 printk(KERN_ERR "locks_insert_block: removing duplicated lock "
538 "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
539 waiter->fl_start, waiter->fl_end, waiter->fl_type);
540 __locks_delete_block(waiter);
541 }
542 list_add_tail(&waiter->fl_block, &blocker->fl_block); 537 list_add_tail(&waiter->fl_block, &blocker->fl_block);
543 waiter->fl_next = blocker; 538 waiter->fl_next = blocker;
544 if (IS_POSIX(blocker)) 539 if (IS_POSIX(blocker))
@@ -797,9 +792,7 @@ out:
797 return error; 792 return error;
798} 793}
799 794
800EXPORT_SYMBOL(posix_lock_file); 795static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
801
802static int __posix_lock_file(struct inode *inode, struct file_lock *request)
803{ 796{
804 struct file_lock *fl; 797 struct file_lock *fl;
805 struct file_lock *new_fl, *new_fl2; 798 struct file_lock *new_fl, *new_fl2;
@@ -823,6 +816,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
823 continue; 816 continue;
824 if (!posix_locks_conflict(request, fl)) 817 if (!posix_locks_conflict(request, fl))
825 continue; 818 continue;
819 if (conflock)
820 locks_copy_lock(conflock, fl);
826 error = -EAGAIN; 821 error = -EAGAIN;
827 if (!(request->fl_flags & FL_SLEEP)) 822 if (!(request->fl_flags & FL_SLEEP))
828 goto out; 823 goto out;
@@ -992,8 +987,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
992 */ 987 */
993int posix_lock_file(struct file *filp, struct file_lock *fl) 988int posix_lock_file(struct file *filp, struct file_lock *fl)
994{ 989{
995 return __posix_lock_file(filp->f_dentry->d_inode, fl); 990 return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL);
991}
992EXPORT_SYMBOL(posix_lock_file);
993
994/**
995 * posix_lock_file_conf - Apply a POSIX-style lock to a file
996 * @filp: The file to apply the lock to
997 * @fl: The lock to be applied
998 * @conflock: Place to return a copy of the conflicting lock, if found.
999 *
1000 * Except for the conflock parameter, acts just like posix_lock_file.
1001 */
1002int posix_lock_file_conf(struct file *filp, struct file_lock *fl,
1003 struct file_lock *conflock)
1004{
1005 return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock);
996} 1006}
1007EXPORT_SYMBOL(posix_lock_file_conf);
997 1008
998/** 1009/**
999 * posix_lock_file_wait - Apply a POSIX-style lock to a file 1010 * posix_lock_file_wait - Apply a POSIX-style lock to a file
@@ -1009,7 +1020,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1009 int error; 1020 int error;
1010 might_sleep (); 1021 might_sleep ();
1011 for (;;) { 1022 for (;;) {
1012 error = __posix_lock_file(filp->f_dentry->d_inode, fl); 1023 error = posix_lock_file(filp, fl);
1013 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1024 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
1014 break; 1025 break;
1015 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1026 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1081,7 +1092,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
1081 fl.fl_end = offset + count - 1; 1092 fl.fl_end = offset + count - 1;
1082 1093
1083 for (;;) { 1094 for (;;) {
1084 error = __posix_lock_file(inode, &fl); 1095 error = __posix_lock_file_conf(inode, &fl, NULL);
1085 if (error != -EAGAIN) 1096 if (error != -EAGAIN)
1086 break; 1097 break;
1087 if (!(fl.fl_flags & FL_SLEEP)) 1098 if (!(fl.fl_flags & FL_SLEEP))
@@ -1694,7 +1705,7 @@ again:
1694 error = filp->f_op->lock(filp, cmd, file_lock); 1705 error = filp->f_op->lock(filp, cmd, file_lock);
1695 else { 1706 else {
1696 for (;;) { 1707 for (;;) {
1697 error = __posix_lock_file(inode, file_lock); 1708 error = posix_lock_file(filp, file_lock);
1698 if ((error != -EAGAIN) || (cmd == F_SETLK)) 1709 if ((error != -EAGAIN) || (cmd == F_SETLK))
1699 break; 1710 break;
1700 error = wait_event_interruptible(file_lock->fl_wait, 1711 error = wait_event_interruptible(file_lock->fl_wait,
@@ -1837,7 +1848,7 @@ again:
1837 error = filp->f_op->lock(filp, cmd, file_lock); 1848 error = filp->f_op->lock(filp, cmd, file_lock);
1838 else { 1849 else {
1839 for (;;) { 1850 for (;;) {
1840 error = __posix_lock_file(inode, file_lock); 1851 error = posix_lock_file(filp, file_lock);
1841 if ((error != -EAGAIN) || (cmd == F_SETLK64)) 1852 if ((error != -EAGAIN) || (cmd == F_SETLK64))
1842 break; 1853 break;
1843 error = wait_event_interruptible(file_lock->fl_wait, 1854 error = wait_event_interruptible(file_lock->fl_wait,
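posix_lock_file_conf() closes the test-then-lock race the nfsd hunk below used to have: the conflicting lock is copied out in the same pass that fails with -EAGAIN, instead of by a separate posix_test_lock() call afterwards. A hedged caller sketch (helper name hypothetical):

	/* Try to take "request"; on -EAGAIN, "conf" already describes the blocker. */
	static int lock_or_report(struct file *filp, struct file_lock *request,
				  struct file_lock *conf)
	{
		int error;

		conf->fl_ops = NULL;	/* keep locks_copy_lock() from invoking */
		conf->fl_lmops = NULL;	/* release hooks on stack garbage */
		error = posix_lock_file_conf(filp, request, conf);
		if (error == -EAGAIN)
			printk(KERN_DEBUG "blocked by pid %d, range %lld-%lld\n",
			       conf->fl_pid, (long long)conf->fl_start,
			       (long long)conf->fl_end);
		return error;
	}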
diff --git a/fs/mpage.c b/fs/mpage.c
index e431cb3878d6..9bf2eb30e6f4 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
163 } while (page_bh != head); 163 } while (page_bh != head);
164} 164}
165 165
166/*
167 * This is the worker routine which does all the work of mapping the disk
168 * blocks and constructing the largest possible bios; it submits them for IO
169 * when the blocks are not contiguous on the disk.
170 *
171 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
172 * represent the validity of its disk mapping and to decide when to do the next
173 * get_block() call.
174 */
166static struct bio * 175static struct bio *
167do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, 176do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
168 sector_t *last_block_in_bio, get_block_t get_block) 177 sector_t *last_block_in_bio, struct buffer_head *map_bh,
178 unsigned long *first_logical_block, get_block_t get_block)
169{ 179{
170 struct inode *inode = page->mapping->host; 180 struct inode *inode = page->mapping->host;
171 const unsigned blkbits = inode->i_blkbits; 181 const unsigned blkbits = inode->i_blkbits;
@@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
173 const unsigned blocksize = 1 << blkbits; 183 const unsigned blocksize = 1 << blkbits;
174 sector_t block_in_file; 184 sector_t block_in_file;
175 sector_t last_block; 185 sector_t last_block;
186 sector_t last_block_in_file;
176 sector_t blocks[MAX_BUF_PER_PAGE]; 187 sector_t blocks[MAX_BUF_PER_PAGE];
177 unsigned page_block; 188 unsigned page_block;
178 unsigned first_hole = blocks_per_page; 189 unsigned first_hole = blocks_per_page;
179 struct block_device *bdev = NULL; 190 struct block_device *bdev = NULL;
180 struct buffer_head bh;
181 int length; 191 int length;
182 int fully_mapped = 1; 192 int fully_mapped = 1;
193 unsigned nblocks;
194 unsigned relative_block;
183 195
184 if (page_has_buffers(page)) 196 if (page_has_buffers(page))
185 goto confused; 197 goto confused;
186 198
187 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 199 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
188 last_block = (i_size_read(inode) + blocksize - 1) >> blkbits; 200 last_block = block_in_file + nr_pages * blocks_per_page;
201 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
202 if (last_block > last_block_in_file)
203 last_block = last_block_in_file;
204 page_block = 0;
205
206 /*
207 * Map blocks using the result from the previous get_blocks call first.
208 */
209 nblocks = map_bh->b_size >> blkbits;
210 if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
211 block_in_file < (*first_logical_block + nblocks)) {
212 unsigned map_offset = block_in_file - *first_logical_block;
213 unsigned last = nblocks - map_offset;
214
215 for (relative_block = 0; ; relative_block++) {
216 if (relative_block == last) {
217 clear_buffer_mapped(map_bh);
218 break;
219 }
220 if (page_block == blocks_per_page)
221 break;
222 blocks[page_block] = map_bh->b_blocknr + map_offset +
223 relative_block;
224 page_block++;
225 block_in_file++;
226 }
227 bdev = map_bh->b_bdev;
228 }
229
230 /*
231 * Then do more get_blocks calls until we are done with this page.
232 */
233 map_bh->b_page = page;
234 while (page_block < blocks_per_page) {
235 map_bh->b_state = 0;
236 map_bh->b_size = 0;
189 237
190 bh.b_page = page;
191 for (page_block = 0; page_block < blocks_per_page;
192 page_block++, block_in_file++) {
193 bh.b_state = 0;
194 if (block_in_file < last_block) { 238 if (block_in_file < last_block) {
195 if (get_block(inode, block_in_file, &bh, 0)) 239 map_bh->b_size = (last_block-block_in_file) << blkbits;
240 if (get_block(inode, block_in_file, map_bh, 0))
196 goto confused; 241 goto confused;
242 *first_logical_block = block_in_file;
197 } 243 }
198 244
199 if (!buffer_mapped(&bh)) { 245 if (!buffer_mapped(map_bh)) {
200 fully_mapped = 0; 246 fully_mapped = 0;
201 if (first_hole == blocks_per_page) 247 if (first_hole == blocks_per_page)
202 first_hole = page_block; 248 first_hole = page_block;
249 page_block++;
250 block_in_file++;
251 clear_buffer_mapped(map_bh);
203 continue; 252 continue;
204 } 253 }
205 254
@@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
209 * we just collected from get_block into the page's buffers 258 * we just collected from get_block into the page's buffers
210 * so readpage doesn't have to repeat the get_block call 259 * so readpage doesn't have to repeat the get_block call
211 */ 260 */
212 if (buffer_uptodate(&bh)) { 261 if (buffer_uptodate(map_bh)) {
213 map_buffer_to_page(page, &bh, page_block); 262 map_buffer_to_page(page, map_bh, page_block);
214 goto confused; 263 goto confused;
215 } 264 }
216 265
@@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
218 goto confused; /* hole -> non-hole */ 267 goto confused; /* hole -> non-hole */
219 268
220 /* Contiguous blocks? */ 269 /* Contiguous blocks? */
221 if (page_block && blocks[page_block-1] != bh.b_blocknr-1) 270 if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
222 goto confused; 271 goto confused;
223 blocks[page_block] = bh.b_blocknr; 272 nblocks = map_bh->b_size >> blkbits;
224 bdev = bh.b_bdev; 273 for (relative_block = 0; ; relative_block++) {
274 if (relative_block == nblocks) {
275 clear_buffer_mapped(map_bh);
276 break;
277 } else if (page_block == blocks_per_page)
278 break;
279 blocks[page_block] = map_bh->b_blocknr+relative_block;
280 page_block++;
281 block_in_file++;
282 }
283 bdev = map_bh->b_bdev;
225 } 284 }
226 285
227 if (first_hole != blocks_per_page) { 286 if (first_hole != blocks_per_page) {
@@ -260,7 +319,7 @@ alloc_new:
260 goto alloc_new; 319 goto alloc_new;
261 } 320 }
262 321
263 if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) 322 if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
264 bio = mpage_bio_submit(READ, bio); 323 bio = mpage_bio_submit(READ, bio);
265 else 324 else
266 *last_block_in_bio = blocks[blocks_per_page - 1]; 325 *last_block_in_bio = blocks[blocks_per_page - 1];
@@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
331 unsigned page_idx; 390 unsigned page_idx;
332 sector_t last_block_in_bio = 0; 391 sector_t last_block_in_bio = 0;
333 struct pagevec lru_pvec; 392 struct pagevec lru_pvec;
393 struct buffer_head map_bh;
394 unsigned long first_logical_block = 0;
334 395
396 clear_buffer_mapped(&map_bh);
335 pagevec_init(&lru_pvec, 0); 397 pagevec_init(&lru_pvec, 0);
336 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 398 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
337 struct page *page = list_entry(pages->prev, struct page, lru); 399 struct page *page = list_entry(pages->prev, struct page, lru);
@@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
342 page->index, GFP_KERNEL)) { 404 page->index, GFP_KERNEL)) {
343 bio = do_mpage_readpage(bio, page, 405 bio = do_mpage_readpage(bio, page,
344 nr_pages - page_idx, 406 nr_pages - page_idx,
345 &last_block_in_bio, get_block); 407 &last_block_in_bio, &map_bh,
408 &first_logical_block,
409 get_block);
346 if (!pagevec_add(&lru_pvec, page)) 410 if (!pagevec_add(&lru_pvec, page))
347 __pagevec_lru_add(&lru_pvec); 411 __pagevec_lru_add(&lru_pvec);
348 } else { 412 } else {
@@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
364{ 428{
365 struct bio *bio = NULL; 429 struct bio *bio = NULL;
366 sector_t last_block_in_bio = 0; 430 sector_t last_block_in_bio = 0;
431 struct buffer_head map_bh;
432 unsigned long first_logical_block = 0;
367 433
368 bio = do_mpage_readpage(bio, page, 1, 434 clear_buffer_mapped(&map_bh);
369 &last_block_in_bio, get_block); 435 bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
436 &map_bh, &first_logical_block, get_block);
370 if (bio) 437 if (bio)
371 mpage_bio_submit(READ, bio); 438 mpage_bio_submit(READ, bio);
372 return 0; 439 return 0;
@@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
472 for (page_block = 0; page_block < blocks_per_page; ) { 539 for (page_block = 0; page_block < blocks_per_page; ) {
473 540
474 map_bh.b_state = 0; 541 map_bh.b_state = 0;
542 map_bh.b_size = 1 << blkbits;
475 if (get_block(inode, block_in_file, &map_bh, 1)) 543 if (get_block(inode, block_in_file, &map_bh, 1))
476 goto confused; 544 goto confused;
477 if (buffer_new(&map_bh)) 545 if (buffer_new(&map_bh))
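The mpage changes are the consumer side of the b_size-aware get_block() interface: do_mpage_readpage() now keeps map_bh and *first_logical_block alive across pages, so one get_block() call that mapped a multi-block extent can satisfy several consecutive pages with no further calls. The reuse test at the top of the function amounts to (a restatement of the logic above, not new code):

	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
	    block_in_file > *first_logical_block &&
	    block_in_file < *first_logical_block + nblocks) {
		/* hit: disk block = map_bh->b_blocknr +
		 *      (block_in_file - *first_logical_block) */
	} else {
		/* miss: set map_bh->b_size to the bytes still wanted
		 * and call get_block() again */
	}

Note the strict '>': the first blocks of a fresh mapping are consumed by the same invocation that created it, so only later pages take the reuse path.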
diff --git a/fs/namespace.c b/fs/namespace.c
index 71e75bcf4d28..e069a4c5e389 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
43 43
44static int event; 44static int event;
45 45
46static struct list_head *mount_hashtable; 46static struct list_head *mount_hashtable __read_mostly;
47static int hash_mask __read_mostly, hash_bits __read_mostly; 47static int hash_mask __read_mostly, hash_bits __read_mostly;
48static kmem_cache_t *mnt_cache; 48static kmem_cache_t *mnt_cache __read_mostly;
49static struct rw_semaphore namespace_sem; 49static struct rw_semaphore namespace_sem;
50 50
51/* /sys/fs */ 51/* /sys/fs */
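__read_mostly places a variable in a separate data section so that hot, frequently-written data cannot share a cache line with it; it suits objects like mount_hashtable and mnt_cache that are written once at boot and then only read. Sketch:

	#include <linux/cache.h>

	/* written once during init, read on every path lookup afterwards */
	static struct list_head *example_hashtable __read_mostly;

The same annotation is applied to filelock_cache and pipe_mnt elsewhere in this patch.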
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 99d2cfbce863..90c95adc8c1b 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
14#include <linux/sunrpc/svc.h> 14#include <linux/sunrpc/svc.h>
15#include <linux/sunrpc/svcsock.h> 15#include <linux/sunrpc/svcsock.h>
16#include <linux/nfs_fs.h> 16#include <linux/nfs_fs.h>
17#include <linux/mutex.h>
17 18
18#include <net/inet_sock.h> 19#include <net/inet_sock.h>
19 20
@@ -31,7 +32,7 @@ struct nfs_callback_data {
31}; 32};
32 33
33static struct nfs_callback_data nfs_callback_info; 34static struct nfs_callback_data nfs_callback_info;
34static DECLARE_MUTEX(nfs_callback_sema); 35static DEFINE_MUTEX(nfs_callback_mutex);
35static struct svc_program nfs4_callback_program; 36static struct svc_program nfs4_callback_program;
36 37
37unsigned int nfs_callback_set_tcpport; 38unsigned int nfs_callback_set_tcpport;
@@ -95,7 +96,7 @@ int nfs_callback_up(void)
95 int ret = 0; 96 int ret = 0;
96 97
97 lock_kernel(); 98 lock_kernel();
98 down(&nfs_callback_sema); 99 mutex_lock(&nfs_callback_mutex);
99 if (nfs_callback_info.users++ || nfs_callback_info.pid != 0) 100 if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
100 goto out; 101 goto out;
101 init_completion(&nfs_callback_info.started); 102 init_completion(&nfs_callback_info.started);
@@ -121,7 +122,7 @@ int nfs_callback_up(void)
121 nfs_callback_info.serv = serv; 122 nfs_callback_info.serv = serv;
122 wait_for_completion(&nfs_callback_info.started); 123 wait_for_completion(&nfs_callback_info.started);
123out: 124out:
124 up(&nfs_callback_sema); 125 mutex_unlock(&nfs_callback_mutex);
125 unlock_kernel(); 126 unlock_kernel();
126 return ret; 127 return ret;
127out_destroy: 128out_destroy:
@@ -139,7 +140,7 @@ int nfs_callback_down(void)
139 int ret = 0; 140 int ret = 0;
140 141
141 lock_kernel(); 142 lock_kernel();
142 down(&nfs_callback_sema); 143 mutex_lock(&nfs_callback_mutex);
143 nfs_callback_info.users--; 144 nfs_callback_info.users--;
144 do { 145 do {
145 if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0) 146 if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
@@ -147,7 +148,7 @@ int nfs_callback_down(void)
147 if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0) 148 if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
148 break; 149 break;
149 } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0); 150 } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
150 up(&nfs_callback_sema); 151 mutex_unlock(&nfs_callback_mutex);
151 unlock_kernel(); 152 unlock_kernel();
152 return ret; 153 return ret;
153} 154}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5263b2864a44..dee49a0cb995 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -318,10 +318,9 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
318 return status; 318 return status;
319} 319}
320 320
321static int nfs_invalidate_page(struct page *page, unsigned long offset) 321static void nfs_invalidate_page(struct page *page, unsigned long offset)
322{ 322{
323 /* FIXME: we really should cancel any unstarted writes on this page */ 323 /* FIXME: we really should cancel any unstarted writes on this page */
324 return 1;
325} 324}
326 325
327static int nfs_release_page(struct page *page, gfp_t gfp) 326static int nfs_release_page(struct page *page, gfp_t gfp)
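This nfs hunk shows the prototype change the whole series revolves around: ->invalidatepage() loses its int return value because no caller ever acted on it, so implementations (afs, jfs metapage, reiserfs, and this one) become void and any releasepage decision is made explicitly at the call site. Post-patch, the address_space operation reads:

	/* in struct address_space_operations (post-patch form): */
	void (*invalidatepage)(struct page *page, unsigned long offset);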
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 3961524fd4ab..624ca7146b6b 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -663,10 +663,8 @@ int nfs_init_readpagecache(void)
663 if (nfs_rdata_cachep == NULL) 663 if (nfs_rdata_cachep == NULL)
664 return -ENOMEM; 664 return -ENOMEM;
665 665
666 nfs_rdata_mempool = mempool_create(MIN_POOL_READ, 666 nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
667 mempool_alloc_slab, 667 nfs_rdata_cachep);
668 mempool_free_slab,
669 nfs_rdata_cachep);
670 if (nfs_rdata_mempool == NULL) 668 if (nfs_rdata_mempool == NULL)
671 return -ENOMEM; 669 return -ENOMEM;
672 670
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3f5225404c97..4cfada2cc09f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1521,17 +1521,13 @@ int nfs_init_writepagecache(void)
1521 if (nfs_wdata_cachep == NULL) 1521 if (nfs_wdata_cachep == NULL)
1522 return -ENOMEM; 1522 return -ENOMEM;
1523 1523
1524 nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE, 1524 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1525 mempool_alloc_slab, 1525 nfs_wdata_cachep);
1526 mempool_free_slab,
1527 nfs_wdata_cachep);
1528 if (nfs_wdata_mempool == NULL) 1526 if (nfs_wdata_mempool == NULL)
1529 return -ENOMEM; 1527 return -ENOMEM;
1530 1528
1531 nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT, 1529 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1532 mempool_alloc_slab, 1530 nfs_wdata_cachep);
1533 mempool_free_slab,
1534 nfs_wdata_cachep);
1535 if (nfs_commit_mempool == NULL) 1531 if (nfs_commit_mempool == NULL)
1536 return -ENOMEM; 1532 return -ENOMEM;
1537 1533
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6ab762bea99..47ec112b266c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,6 +49,7 @@
49#include <linux/nfsd/state.h> 49#include <linux/nfsd/state.h>
50#include <linux/nfsd/xdr4.h> 50#include <linux/nfsd/xdr4.h>
51#include <linux/namei.h> 51#include <linux/namei.h>
52#include <linux/mutex.h>
52 53
53#define NFSDDBG_FACILITY NFSDDBG_PROC 54#define NFSDDBG_FACILITY NFSDDBG_PROC
54 55
@@ -77,11 +78,11 @@ static void nfs4_set_recdir(char *recdir);
77 78
78/* Locking: 79/* Locking:
79 * 80 *
80 * client_sema: 81 * client_mutex:
81 * protects clientid_hashtbl[], clientstr_hashtbl[], 82 * protects clientid_hashtbl[], clientstr_hashtbl[],
82 * unconfstr_hashtbl[], unconfid_hashtbl[]. 83 * unconfstr_hashtbl[], unconfid_hashtbl[].
83 */ 84 */
84static DECLARE_MUTEX(client_sema); 85static DEFINE_MUTEX(client_mutex);
85 86
86static kmem_cache_t *stateowner_slab = NULL; 87static kmem_cache_t *stateowner_slab = NULL;
87static kmem_cache_t *file_slab = NULL; 88static kmem_cache_t *file_slab = NULL;
@@ -91,13 +92,13 @@ static kmem_cache_t *deleg_slab = NULL;
91void 92void
92nfs4_lock_state(void) 93nfs4_lock_state(void)
93{ 94{
94 down(&client_sema); 95 mutex_lock(&client_mutex);
95} 96}
96 97
97void 98void
98nfs4_unlock_state(void) 99nfs4_unlock_state(void)
99{ 100{
100 up(&client_sema); 101 mutex_unlock(&client_mutex);
101} 102}
102 103
103static inline u32 104static inline u32
@@ -2749,37 +2750,31 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2749 * Note: locks.c uses the BKL to protect the inode's lock list. 2750 * Note: locks.c uses the BKL to protect the inode's lock list.
2750 */ 2751 */
2751 2752
2752 status = posix_lock_file(filp, &file_lock); 2753 /* XXX?: Just to divert the locks_release_private at the start of
2753 dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status); 2754 * locks_copy_lock: */
2755 conflock.fl_ops = NULL;
2756 conflock.fl_lmops = NULL;
2757 status = posix_lock_file_conf(filp, &file_lock, &conflock);
2758 dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status);
2754 switch (-status) { 2759 switch (-status) {
2755 case 0: /* success! */ 2760 case 0: /* success! */
2756 update_stateid(&lock_stp->st_stateid); 2761 update_stateid(&lock_stp->st_stateid);
2757 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, 2762 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
2758 sizeof(stateid_t)); 2763 sizeof(stateid_t));
2759 goto out; 2764 break;
2760 case (EAGAIN): 2765 case (EAGAIN): /* conflock holds conflicting lock */
2761 goto conflicting_lock; 2766 status = nfserr_denied;
2767 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
2768 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
2769 break;
2762 case (EDEADLK): 2770 case (EDEADLK):
2763 status = nfserr_deadlock; 2771 status = nfserr_deadlock;
2764 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); 2772 break;
2765 goto out;
2766 default: 2773 default:
2767 status = nfserrno(status); 2774 dprintk("NFSD: nfsd4_lock: posix_lock_file_conf() failed! status %d\n",status);
2768 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); 2775 status = nfserr_resource;
2769 goto out; 2776 break;
2770 }
2771
2772conflicting_lock:
2773 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
2774 status = nfserr_denied;
2775 /* XXX There is a race here. Future patch needed to provide
2776 * an atomic posix_lock_and_test_file
2777 */
2778 if (!posix_test_lock(filp, &file_lock, &conflock)) {
2779 status = nfserr_serverfault;
2780 goto out;
2781 } 2777 }
2782 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
2783out: 2778out:
2784 if (status && lock->lk_is_new && lock_sop) 2779 if (status && lock->lk_is_new && lock_sop)
2785 release_stateowner(lock_sop); 2780 release_stateowner(lock_sop);
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 0fd70295cca6..4af2ad1193ec 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -515,10 +515,10 @@ BOOL ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
515 log_page_size = PAGE_CACHE_SIZE; 515 log_page_size = PAGE_CACHE_SIZE;
516 log_page_mask = log_page_size - 1; 516 log_page_mask = log_page_size - 1;
517 /* 517 /*
518 * Use generic_ffs() instead of ffs() to enable the compiler to 518 * Use ntfs_ffs() instead of ffs() to enable the compiler to
519 * optimize log_page_size and log_page_bits into constants. 519 * optimize log_page_size and log_page_bits into constants.
520 */ 520 */
521 log_page_bits = generic_ffs(log_page_size) - 1; 521 log_page_bits = ntfs_ffs(log_page_size) - 1;
522 size &= ~(s64)(log_page_size - 1); 522 size &= ~(s64)(log_page_size - 1);
523 /* 523 /*
524 * Ensure the log file is big enough to store at least the two restart 524 * Ensure the log file is big enough to store at least the two restart
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 4e72bc7afdf9..2438c00ec0ce 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2670,7 +2670,7 @@ mft_rec_already_initialized:
2670 ni->name_len = 4; 2670 ni->name_len = 4;
2671 2671
2672 ni->itype.index.block_size = 4096; 2672 ni->itype.index.block_size = 4096;
2673 ni->itype.index.block_size_bits = generic_ffs(4096) - 1; 2673 ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1;
2674 ni->itype.index.collation_rule = COLLATION_FILE_NAME; 2674 ni->itype.index.collation_rule = COLLATION_FILE_NAME;
2675 if (vol->cluster_size <= ni->itype.index.block_size) { 2675 if (vol->cluster_size <= ni->itype.index.block_size) {
2676 ni->itype.index.vcn_size = vol->cluster_size; 2676 ni->itype.index.vcn_size = vol->cluster_size;
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index 0624c8ef4d9c..166142960b53 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -132,4 +132,33 @@ extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
132/* From fs/ntfs/upcase.c */ 132/* From fs/ntfs/upcase.c */
133extern ntfschar *generate_default_upcase(void); 133extern ntfschar *generate_default_upcase(void);
134 134
135static inline int ntfs_ffs(int x)
136{
137 int r = 1;
138
139 if (!x)
140 return 0;
141 if (!(x & 0xffff)) {
142 x >>= 16;
143 r += 16;
144 }
145 if (!(x & 0xff)) {
146 x >>= 8;
147 r += 8;
148 }
149 if (!(x & 0xf)) {
150 x >>= 4;
151 r += 4;
152 }
153 if (!(x & 3)) {
154 x >>= 2;
155 r += 2;
156 }
157 if (!(x & 1)) {
158 x >>= 1;
159 r += 1;
160 }
161 return r;
162}
163
135#endif /* _LINUX_NTFS_H */ 164#endif /* _LINUX_NTFS_H */
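With generic_ffs() going away, ntfs carries a private ntfs_ffs(): a halving search that returns the 1-based index of the least significant set bit, or 0 for x == 0. For the call sites above, ntfs_ffs(4096) is 13, so block_size_bits becomes 12. A standalone userspace check of the copied helper (for illustration only):

	#include <assert.h>

	static int ntfs_ffs(int x)	/* same body as the helper added above */
	{
		int r = 1;

		if (!x)
			return 0;
		if (!(x & 0xffff)) { x >>= 16; r += 16; }
		if (!(x & 0xff))   { x >>= 8;  r += 8;  }
		if (!(x & 0xf))    { x >>= 4;  r += 4;  }
		if (!(x & 3))      { x >>= 2;  r += 2;  }
		if (!(x & 1))      { x >>= 1;  r += 1;  }
		return r;
	}

	int main(void)
	{
		assert(ntfs_ffs(0) == 0);
		assert(ntfs_ffs(1) == 1);
		assert(ntfs_ffs(4096) == 13);	/* block_size_bits = 13 - 1 = 12 */
		return 0;
	}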
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bf931ba1d364..0d858d0b25be 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -540,7 +540,6 @@ bail:
540 * fs_count, map_bh, dio->rw == WRITE); 540 * fs_count, map_bh, dio->rw == WRITE);
541 */ 541 */
542static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, 542static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
543 unsigned long max_blocks,
544 struct buffer_head *bh_result, int create) 543 struct buffer_head *bh_result, int create)
545{ 544{
546 int ret; 545 int ret;
@@ -548,6 +547,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
548 u64 p_blkno; 547 u64 p_blkno;
549 int contig_blocks; 548 int contig_blocks;
550 unsigned char blocksize_bits; 549 unsigned char blocksize_bits;
550 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
551 551
552 if (!inode || !bh_result) { 552 if (!inode || !bh_result) {
553 mlog(ML_ERROR, "inode or bh_result is null\n"); 553 mlog(ML_ERROR, "inode or bh_result is null\n");
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index ae3440ca083c..6a610ae53583 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -377,7 +377,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
377 BUG_ON(!bh); 377 BUG_ON(!bh);
378 BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); 378 BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
379 379
380 mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n", 380 mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
381 (unsigned long long)bh->b_blocknr, type, 381 (unsigned long long)bh->b_blocknr, type,
382 (type == OCFS2_JOURNAL_ACCESS_CREATE) ? 382 (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
383 "OCFS2_JOURNAL_ACCESS_CREATE" : 383 "OCFS2_JOURNAL_ACCESS_CREATE" :
@@ -582,7 +582,8 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
582 } 582 }
583 583
584 mlog(0, "inode->i_size = %lld\n", inode->i_size); 584 mlog(0, "inode->i_size = %lld\n", inode->i_size);
585 mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks); 585 mlog(0, "inode->i_blocks = %llu\n",
586 (unsigned long long)inode->i_blocks);
586 mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); 587 mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
587 588
588 /* call the kernels journal init function now */ 589 /* call the kernels journal init function now */
@@ -850,8 +851,9 @@ static int ocfs2_force_read_journal(struct inode *inode)
850 851
851 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); 852 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
852 853
853 mlog(0, "Force reading %lu blocks\n", 854 mlog(0, "Force reading %llu blocks\n",
854 (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9))); 855 (unsigned long long)(inode->i_blocks >>
856 (inode->i_sb->s_blocksize_bits - 9)));
855 857
856 v_blkno = 0; 858 v_blkno = 0;
857 while (v_blkno < 859 while (v_blkno <
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 274f61d0cda9..0673862c8bdd 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1444,8 +1444,9 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
1444 * write i_size + 1 bytes. */ 1444 * write i_size + 1 bytes. */
1445 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 1445 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
1446 1446
1447 mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n", 1447 mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n",
1448 inode->i_blocks, i_size_read(inode), blocks); 1448 (unsigned long long)inode->i_blocks,
1449 i_size_read(inode), blocks);
1449 1450
1450 /* Sanity check -- make sure we're going to fit. */ 1451 /* Sanity check -- make sure we're going to fit. */
1451 if (bytes_left > 1452 if (bytes_left >
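The ocfs2 format-string fixes are all one issue: i_blocks and b_size are not unsigned long on every configuration (i_blocks can be 64-bit, b_size is size_t), so %lu either warns or truncates. The portable idiom used above is an explicit cast plus the matching length modifier:

	/* i_blocks: cast up rather than guess the configured width */
	mlog(0, "inode->i_blocks = %llu\n", (unsigned long long)inode->i_blocks);
	/* b_size is size_t, so %zu needs no cast */
	mlog(0, "bh->b_size = %zu\n", bh->b_size);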
diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c
index 87f50444fd39..3f0a780c9cec 100644
--- a/fs/partitions/devfs.c
+++ b/fs/partitions/devfs.c
@@ -6,7 +6,7 @@
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/genhd.h> 7#include <linux/genhd.h>
8#include <linux/bitops.h> 8#include <linux/bitops.h>
9#include <asm/semaphore.h> 9#include <linux/mutex.h>
10 10
11 11
12struct unique_numspace { 12struct unique_numspace {
@@ -16,7 +16,7 @@ struct unique_numspace {
16 struct semaphore mutex; 16 struct semaphore mutex;
17}; 17};
18 18
19static DECLARE_MUTEX(numspace_mutex); 19static DEFINE_MUTEX(numspace_mutex);
20 20
21static int expand_numspace(struct unique_numspace *s) 21static int expand_numspace(struct unique_numspace *s)
22{ 22{
@@ -48,7 +48,7 @@ static int alloc_unique_number(struct unique_numspace *s)
48{ 48{
49 int rval = 0; 49 int rval = 0;
50 50
51 down(&numspace_mutex); 51 mutex_lock(&numspace_mutex);
52 if (s->num_free < 1) 52 if (s->num_free < 1)
53 rval = expand_numspace(s); 53 rval = expand_numspace(s);
54 if (!rval) { 54 if (!rval) {
@@ -56,7 +56,7 @@ static int alloc_unique_number(struct unique_numspace *s)
56 --s->num_free; 56 --s->num_free;
57 __set_bit(rval, s->bits); 57 __set_bit(rval, s->bits);
58 } 58 }
59 up(&numspace_mutex); 59 mutex_unlock(&numspace_mutex);
60 60
61 return rval; 61 return rval;
62} 62}
@@ -66,11 +66,11 @@ static void dealloc_unique_number(struct unique_numspace *s, int number)
66 int old_val; 66 int old_val;
67 67
68 if (number >= 0) { 68 if (number >= 0) {
69 down(&numspace_mutex); 69 mutex_lock(&numspace_mutex);
70 old_val = __test_and_clear_bit(number, s->bits); 70 old_val = __test_and_clear_bit(number, s->bits);
71 if (old_val) 71 if (old_val)
72 ++s->num_free; 72 ++s->num_free;
73 up(&numspace_mutex); 73 mutex_unlock(&numspace_mutex);
74 } 74 }
75} 75}
76 76
diff --git a/fs/pipe.c b/fs/pipe.c
index d976866a115b..4384c9290943 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -675,7 +675,7 @@ fail_page:
675 return NULL; 675 return NULL;
676} 676}
677 677
678static struct vfsmount *pipe_mnt; 678static struct vfsmount *pipe_mnt __read_mostly;
679static int pipefs_delete_dentry(struct dentry *dentry) 679static int pipefs_delete_dentry(struct dentry *dentry)
680{ 680{
681 return 1; 681 return 1;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7eb1bd7f800c..7a76ad570230 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -330,7 +330,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
330 unsigned long min_flt = 0, maj_flt = 0; 330 unsigned long min_flt = 0, maj_flt = 0;
331 cputime_t cutime, cstime, utime, stime; 331 cputime_t cutime, cstime, utime, stime;
332 unsigned long rsslim = 0; 332 unsigned long rsslim = 0;
333 DEFINE_KTIME(it_real_value);
334 struct task_struct *t; 333 struct task_struct *t;
335 char tcomm[sizeof(task->comm)]; 334 char tcomm[sizeof(task->comm)];
336 335
@@ -386,7 +385,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
386 utime = cputime_add(utime, task->signal->utime); 385 utime = cputime_add(utime, task->signal->utime);
387 stime = cputime_add(stime, task->signal->stime); 386 stime = cputime_add(stime, task->signal->stime);
388 } 387 }
389 it_real_value = task->signal->real_timer.expires;
390 } 388 }
391 ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0; 389 ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
392 read_unlock(&tasklist_lock); 390 read_unlock(&tasklist_lock);
@@ -413,7 +411,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
413 start_time = nsec_to_clock_t(start_time); 411 start_time = nsec_to_clock_t(start_time);
414 412
415 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ 413 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
416%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \ 414%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
417%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", 415%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
418 task->pid, 416 task->pid,
419 tcomm, 417 tcomm,
@@ -435,7 +433,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
435 priority, 433 priority,
436 nice, 434 nice,
437 num_threads, 435 num_threads,
438 (long) ktime_to_clock_t(it_real_value),
439 start_time, 436 start_time,
440 vsize, 437 vsize,
441 mm ? get_mm_rss(mm) : 0, 438 mm ? get_mm_rss(mm) : 0,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 20e5c4509a43..47b7a20d45eb 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,6 +19,7 @@
19#include <linux/idr.h> 19#include <linux/idr.h>
20#include <linux/namei.h> 20#include <linux/namei.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/spinlock.h>
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23 24
24#include "internal.h" 25#include "internal.h"
@@ -29,6 +30,8 @@ static ssize_t proc_file_write(struct file *file, const char __user *buffer,
29 size_t count, loff_t *ppos); 30 size_t count, loff_t *ppos);
30static loff_t proc_file_lseek(struct file *, loff_t, int); 31static loff_t proc_file_lseek(struct file *, loff_t, int);
31 32
33DEFINE_SPINLOCK(proc_subdir_lock);
34
32int proc_match(int len, const char *name, struct proc_dir_entry *de) 35int proc_match(int len, const char *name, struct proc_dir_entry *de)
33{ 36{
34 if (de->namelen != len) 37 if (de->namelen != len)
@@ -277,7 +280,9 @@ static int xlate_proc_name(const char *name,
277 const char *cp = name, *next; 280 const char *cp = name, *next;
278 struct proc_dir_entry *de; 281 struct proc_dir_entry *de;
279 int len; 282 int len;
283 int rtn = 0;
280 284
285 spin_lock(&proc_subdir_lock);
281 de = &proc_root; 286 de = &proc_root;
282 while (1) { 287 while (1) {
283 next = strchr(cp, '/'); 288 next = strchr(cp, '/');
@@ -289,13 +294,17 @@ static int xlate_proc_name(const char *name,
289 if (proc_match(len, cp, de)) 294 if (proc_match(len, cp, de))
290 break; 295 break;
291 } 296 }
292 if (!de) 297 if (!de) {
293 return -ENOENT; 298 rtn = -ENOENT;
299 goto out;
300 }
294 cp += len + 1; 301 cp += len + 1;
295 } 302 }
296 *residual = cp; 303 *residual = cp;
297 *ret = de; 304 *ret = de;
298 return 0; 305out:
306 spin_unlock(&proc_subdir_lock);
307 return rtn;
299} 308}
300 309
301static DEFINE_IDR(proc_inum_idr); 310static DEFINE_IDR(proc_inum_idr);
@@ -380,6 +389,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
380 int error = -ENOENT; 389 int error = -ENOENT;
381 390
382 lock_kernel(); 391 lock_kernel();
392 spin_lock(&proc_subdir_lock);
383 de = PDE(dir); 393 de = PDE(dir);
384 if (de) { 394 if (de) {
385 for (de = de->subdir; de ; de = de->next) { 395 for (de = de->subdir; de ; de = de->next) {
@@ -388,12 +398,15 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
388 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { 398 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
389 unsigned int ino = de->low_ino; 399 unsigned int ino = de->low_ino;
390 400
401 spin_unlock(&proc_subdir_lock);
391 error = -EINVAL; 402 error = -EINVAL;
392 inode = proc_get_inode(dir->i_sb, ino, de); 403 inode = proc_get_inode(dir->i_sb, ino, de);
404 spin_lock(&proc_subdir_lock);
393 break; 405 break;
394 } 406 }
395 } 407 }
396 } 408 }
409 spin_unlock(&proc_subdir_lock);
397 unlock_kernel(); 410 unlock_kernel();
398 411
399 if (inode) { 412 if (inode) {
@@ -447,11 +460,13 @@ int proc_readdir(struct file * filp,
447 filp->f_pos++; 460 filp->f_pos++;
448 /* fall through */ 461 /* fall through */
449 default: 462 default:
463 spin_lock(&proc_subdir_lock);
450 de = de->subdir; 464 de = de->subdir;
451 i -= 2; 465 i -= 2;
452 for (;;) { 466 for (;;) {
453 if (!de) { 467 if (!de) {
454 ret = 1; 468 ret = 1;
469 spin_unlock(&proc_subdir_lock);
455 goto out; 470 goto out;
456 } 471 }
457 if (!i) 472 if (!i)
@@ -461,12 +476,16 @@ int proc_readdir(struct file * filp,
461 } 476 }
462 477
463 do { 478 do {
479 /* filldir passes info to user space */
480 spin_unlock(&proc_subdir_lock);
464 if (filldir(dirent, de->name, de->namelen, filp->f_pos, 481 if (filldir(dirent, de->name, de->namelen, filp->f_pos,
465 de->low_ino, de->mode >> 12) < 0) 482 de->low_ino, de->mode >> 12) < 0)
466 goto out; 483 goto out;
484 spin_lock(&proc_subdir_lock);
467 filp->f_pos++; 485 filp->f_pos++;
468 de = de->next; 486 de = de->next;
469 } while (de); 487 } while (de);
488 spin_unlock(&proc_subdir_lock);
470 } 489 }
471 ret = 1; 490 ret = 1;
472out: unlock_kernel(); 491out: unlock_kernel();
@@ -500,9 +519,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
 	if (i == 0)
 		return -EAGAIN;
 	dp->low_ino = i;
+
+	spin_lock(&proc_subdir_lock);
 	dp->next = dir->subdir;
 	dp->parent = dir;
 	dir->subdir = dp;
+	spin_unlock(&proc_subdir_lock);
+
 	if (S_ISDIR(dp->mode)) {
 		if (dp->proc_iops == NULL) {
 			dp->proc_fops = &proc_dir_operations;
@@ -694,6 +717,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 	if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
 		goto out;
 	len = strlen(fn);
+
+	spin_lock(&proc_subdir_lock);
 	for (p = &parent->subdir; *p; p=&(*p)->next ) {
 		if (!proc_match(len, fn, *p))
 			continue;
@@ -714,6 +739,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 		}
 		break;
 	}
+	spin_unlock(&proc_subdir_lock);
 out:
 	return;
 }
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 9bdd077d6f55..596b4b4f1cc8 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -136,9 +136,11 @@ void proc_device_tree_add_node(struct device_node *np,
 	 * properties are quite unimportant for us though, thus we
 	 * simply "skip" them here, but we do have to check.
 	 */
+	spin_lock(&proc_subdir_lock);
 	for (ent = de->subdir; ent != NULL; ent = ent->next)
 		if (!strcmp(ent->name, pp->name))
 			break;
+	spin_unlock(&proc_subdir_lock);
 	if (ent != NULL) {
 		printk(KERN_WARNING "device-tree: property \"%s\" name"
 		       " conflicts with node in %s\n", pp->name,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index d60f6238c66a..9857e50f85e7 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -466,7 +466,6 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
 	   direct_IO request. */
 static int reiserfs_get_blocks_direct_io(struct inode *inode,
 					 sector_t iblock,
-					 unsigned long max_blocks,
 					 struct buffer_head *bh_result,
 					 int create)
 {
@@ -2793,7 +2792,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
 }
 
 /* clm -- taken from fs/buffer.c:block_invalidate_page */
-static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
+static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
 {
 	struct buffer_head *head, *bh, *next;
 	struct inode *inode = page->mapping->host;
@@ -2832,10 +2831,12 @@ static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
 	 * The get_block cached value has been unconditionally invalidated,
 	 * so real IO is not possible anymore.
 	 */
-	if (!offset && ret)
+	if (!offset && ret) {
 		ret = try_to_release_page(page, 0);
+		/* maybe should BUG_ON(!ret); - neilb */
+	}
       out:
-	return ret;
+	return;
 }
 
 static int reiserfs_set_page_dirty(struct page *page)
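
[Review note] This is one of several conversions in the series (afs, reiserfs, xfs) making ->invalidatepage return void: no caller ever consumed the return value, so the aop prototype loses it and any release work becomes best-effort. A hedged sketch of what a conforming aop looks like after the change; the myfs_* names are illustrative, not from this patch:

	#include <linux/mm.h>
	#include <linux/buffer_head.h>

	/* ->invalidatepage now returns void; the try_to_release_page()
	 * result is deliberately not propagated anywhere. */
	static void myfs_invalidatepage(struct page *page, unsigned long offset)
	{
		if (offset == 0 && page_has_buffers(page))
			try_to_release_page(page, 0);
	}

	static struct address_space_operations myfs_aops = {
		.invalidatepage	= myfs_invalidatepage,
	};
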
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 78b40621b88b..27bd3a1df2ad 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -143,7 +143,7 @@ static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
 	char b[BDEVNAME_SIZE];
 
 	sprintf(buf,
-		"dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+		"dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
 		bdevname(bh->b_bdev, b), bh->b_size,
 		(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
 		bh->b_state, bh->b_page,
diff --git a/fs/super.c b/fs/super.c
index 8743e9bbb297..a66f66bb8049 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -37,6 +37,7 @@
 #include <linux/writeback.h>		/* for the emergency remount stuff */
 #include <linux/idr.h>
 #include <linux/kobject.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 
 
@@ -380,9 +381,9 @@ restart:
 void sync_filesystems(int wait)
 {
 	struct super_block *sb;
-	static DECLARE_MUTEX(mutex);
+	static DEFINE_MUTEX(mutex);
 
-	down(&mutex);		/* Could be down_interruptible */
+	mutex_lock(&mutex);		/* Could be down_interruptible */
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (!sb->s_op->sync_fs)
@@ -411,7 +412,7 @@ restart:
 		goto restart;
 	}
 	spin_unlock(&sb_lock);
-	up(&mutex);
+	mutex_unlock(&mutex);
 }
 
 /**
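
[Review note] This hunk is part of the tree-wide semaphore-to-mutex migration: DECLARE_MUTEX() actually declared a semaphore with count 1, while DEFINE_MUTEX() uses the dedicated struct mutex, which is lighter and gains debug checking. The conversion is mechanical; a sketch of the before/after pattern, where serialize_work() is an illustrative wrapper rather than anything in this file:

	#include <linux/mutex.h>

	/* before:  static DECLARE_MUTEX(m);  down(&m); ... up(&m);   */
	/* after:   the dedicated mutex type, identical semantics here */
	static DEFINE_MUTEX(m);

	static void serialize_work(void (*work)(void))
	{
		mutex_lock(&m);		/* sleeps, like down() did */
		work();
		mutex_unlock(&m);
	}
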
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 97fc056130eb..c02f7c5b7462 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1310,20 +1310,21 @@ xfs_get_block(
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __xfs_get_block(inode, iblock, 0, bh_result,
-				create, 0, BMAPI_WRITE);
+	return __xfs_get_block(inode, iblock,
+				bh_result->b_size >> inode->i_blkbits,
+				bh_result, create, 0, BMAPI_WRITE);
 }
 
 STATIC int
 xfs_get_blocks_direct(
 	struct inode		*inode,
 	sector_t		iblock,
-	unsigned long		max_blocks,
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __xfs_get_block(inode, iblock, max_blocks, bh_result,
-				create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+	return __xfs_get_block(inode, iblock,
+				bh_result->b_size >> inode->i_blkbits,
+				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
 STATIC void
@@ -1442,14 +1443,14 @@ xfs_vm_readpages(
 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
 }
 
-STATIC int
+STATIC void
 xfs_vm_invalidatepage(
 	struct page		*page,
 	unsigned long		offset)
 {
 	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
 			page->mapping->host, page, offset);
-	return block_invalidatepage(page, offset);
+	block_invalidatepage(page, offset);
 }
 
 struct address_space_operations xfs_address_space_operations = {
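
[Review note] The xfs hunks above track the new get_block(s) convention used throughout this series: the separate max_blocks argument is gone, and the caller instead preloads bh_result->b_size with the size it wants mapped; the callee may map less and reports the actual size back the same way. A sketch of a callee under that convention; myfs_get_blocks and myfs_map_extent are illustrative names, not from this patch:

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/* hypothetical mapper: fills bh_result, returns blocks mapped (0 on failure) */
	static unsigned long myfs_map_extent(struct inode *inode, sector_t iblock,
					     unsigned long max_blocks,
					     struct buffer_head *bh_result, int create);

	static int myfs_get_blocks(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
	{
		/* requested length in fs blocks, exactly as the xfs hunks compute it */
		unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
		unsigned long mapped;

		mapped = myfs_map_extent(inode, iblock, max_blocks, bh_result, create);
		if (!mapped)
			return -EIO;
		/* report back how much was actually mapped */
		bh_result->b_size = mapped << inode->i_blkbits;
		return 0;
	}
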
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 8355faf8ffde..1884300417e3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -375,9 +375,8 @@ xfs_init_zones(void)
 	if (!xfs_ioend_zone)
 		goto out_destroy_vnode_zone;
 
-	xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
-					mempool_alloc_slab, mempool_free_slab,
-					xfs_ioend_zone);
+	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
+						  xfs_ioend_zone);
 	if (!xfs_ioend_pool)
 		goto out_free_ioend_zone;
 	return 0;
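
[Review note] mempool_create_slab_pool(min_nr, cachep) is the convenience wrapper equivalent to mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, cachep); the hunk above just switches to it. A minimal usage sketch against a private cache; the my_* names are illustrative, not from this patch:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/mempool.h>

	struct my_obj { int id; };

	static kmem_cache_t *my_cache;
	static mempool_t *my_pool;

	static int __init my_pools_init(void)
	{
		my_cache = kmem_cache_create("my_objs", sizeof(struct my_obj),
					     0, 0, NULL, NULL);
		if (!my_cache)
			return -ENOMEM;
		/* guarantee at least 4 objects even under memory pressure */
		my_pool = mempool_create_slab_pool(4, my_cache);
		if (!my_pool) {
			kmem_cache_destroy(my_cache);
			return -ENOMEM;
		}
		return 0;
	}
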