Diffstat (limited to 'fs')

 -rw-r--r--  fs/block_dev.c        | 114
 -rw-r--r--  fs/eventpoll.c        |   4
 -rw-r--r--  fs/exec.c             |  10
 -rw-r--r--  fs/ext2/super.c       |   2
 -rw-r--r--  fs/ext3/balloc.c      |   6
 -rw-r--r--  fs/ioprio.c           |  30
 -rw-r--r--  fs/jbd/commit.c       |   6
 -rw-r--r--  fs/jbd/journal.c      |  92
 -rw-r--r--  fs/jbd/transaction.c  |   9
 -rw-r--r--  fs/lockd/svcsubs.c    |  15
 -rw-r--r--  fs/minix/inode.c      |  13
 -rw-r--r--  fs/namei.c            |  11
 -rw-r--r--  fs/nfs/file.c         |   8
 -rw-r--r--  fs/nfs/idmap.c        |   4
 -rw-r--r--  fs/nfs/nfs4proc.c     |  29
 -rw-r--r--  fs/nfs/nfs4xdr.c      |  21
 -rw-r--r--  fs/nfs/read.c         |  23
 -rw-r--r--  fs/partitions/sun.c   |   2
 -rw-r--r--  fs/proc/proc_misc.c   |   2
 -rw-r--r--  fs/reiserfs/xattr.c   |   2
 -rw-r--r--  fs/udf/super.c        |   2
 -rw-r--r--  fs/udf/truncate.c     |  64
 -rw-r--r--  fs/ufs/inode.c        |  35
 -rw-r--r--  fs/ufs/truncate.c     |  77

 24 files changed, 364 insertions(+), 217 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c index 37534573960b..045f98854f14 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size) | |||
| 884 | } | 884 | } |
| 885 | EXPORT_SYMBOL(bd_set_size); | 885 | EXPORT_SYMBOL(bd_set_size); |
| 886 | 886 | ||
| 887 | static int __blkdev_put(struct block_device *bdev, unsigned int subclass) | ||
| 888 | { | ||
| 889 | int ret = 0; | ||
| 890 | struct inode *bd_inode = bdev->bd_inode; | ||
| 891 | struct gendisk *disk = bdev->bd_disk; | ||
| 892 | |||
| 893 | mutex_lock_nested(&bdev->bd_mutex, subclass); | ||
| 894 | lock_kernel(); | ||
| 895 | if (!--bdev->bd_openers) { | ||
| 896 | sync_blockdev(bdev); | ||
| 897 | kill_bdev(bdev); | ||
| 898 | } | ||
| 899 | if (bdev->bd_contains == bdev) { | ||
| 900 | if (disk->fops->release) | ||
| 901 | ret = disk->fops->release(bd_inode, NULL); | ||
| 902 | } else { | ||
| 903 | mutex_lock_nested(&bdev->bd_contains->bd_mutex, | ||
| 904 | subclass + 1); | ||
| 905 | bdev->bd_contains->bd_part_count--; | ||
| 906 | mutex_unlock(&bdev->bd_contains->bd_mutex); | ||
| 907 | } | ||
| 908 | if (!bdev->bd_openers) { | ||
| 909 | struct module *owner = disk->fops->owner; | ||
| 910 | |||
| 911 | put_disk(disk); | ||
| 912 | module_put(owner); | ||
| 913 | |||
| 914 | if (bdev->bd_contains != bdev) { | ||
| 915 | kobject_put(&bdev->bd_part->kobj); | ||
| 916 | bdev->bd_part = NULL; | ||
| 917 | } | ||
| 918 | bdev->bd_disk = NULL; | ||
| 919 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; | ||
| 920 | if (bdev != bdev->bd_contains) | ||
| 921 | __blkdev_put(bdev->bd_contains, subclass + 1); | ||
| 922 | bdev->bd_contains = NULL; | ||
| 923 | } | ||
| 924 | unlock_kernel(); | ||
| 925 | mutex_unlock(&bdev->bd_mutex); | ||
| 926 | bdput(bdev); | ||
| 927 | return ret; | ||
| 928 | } | ||
| 929 | |||
| 930 | int blkdev_put(struct block_device *bdev) | ||
| 931 | { | ||
| 932 | return __blkdev_put(bdev, BD_MUTEX_NORMAL); | ||
| 933 | } | ||
| 934 | EXPORT_SYMBOL(blkdev_put); | ||
| 935 | |||
| 936 | int blkdev_put_partition(struct block_device *bdev) | ||
| 937 | { | ||
| 938 | return __blkdev_put(bdev, BD_MUTEX_PARTITION); | ||
| 939 | } | ||
| 940 | EXPORT_SYMBOL(blkdev_put_partition); | ||
| 941 | |||
| 887 | static int | 942 | static int |
| 888 | blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); | 943 | blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); |
| 889 | 944 | ||
| @@ -980,7 +1035,7 @@ out_first: | |||
| 980 | bdev->bd_disk = NULL; | 1035 | bdev->bd_disk = NULL; |
| 981 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; | 1036 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; |
| 982 | if (bdev != bdev->bd_contains) | 1037 | if (bdev != bdev->bd_contains) |
| 983 | blkdev_put(bdev->bd_contains); | 1038 | __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE); |
| 984 | bdev->bd_contains = NULL; | 1039 | bdev->bd_contains = NULL; |
| 985 | put_disk(disk); | 1040 | put_disk(disk); |
| 986 | module_put(owner); | 1041 | module_put(owner); |
| @@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp) | |||
| 1079 | return res; | 1134 | return res; |
| 1080 | } | 1135 | } |
| 1081 | 1136 | ||
| 1082 | static int __blkdev_put(struct block_device *bdev, unsigned int subclass) | ||
| 1083 | { | ||
| 1084 | int ret = 0; | ||
| 1085 | struct inode *bd_inode = bdev->bd_inode; | ||
| 1086 | struct gendisk *disk = bdev->bd_disk; | ||
| 1087 | |||
| 1088 | mutex_lock_nested(&bdev->bd_mutex, subclass); | ||
| 1089 | lock_kernel(); | ||
| 1090 | if (!--bdev->bd_openers) { | ||
| 1091 | sync_blockdev(bdev); | ||
| 1092 | kill_bdev(bdev); | ||
| 1093 | } | ||
| 1094 | if (bdev->bd_contains == bdev) { | ||
| 1095 | if (disk->fops->release) | ||
| 1096 | ret = disk->fops->release(bd_inode, NULL); | ||
| 1097 | } else { | ||
| 1098 | mutex_lock_nested(&bdev->bd_contains->bd_mutex, | ||
| 1099 | subclass + 1); | ||
| 1100 | bdev->bd_contains->bd_part_count--; | ||
| 1101 | mutex_unlock(&bdev->bd_contains->bd_mutex); | ||
| 1102 | } | ||
| 1103 | if (!bdev->bd_openers) { | ||
| 1104 | struct module *owner = disk->fops->owner; | ||
| 1105 | |||
| 1106 | put_disk(disk); | ||
| 1107 | module_put(owner); | ||
| 1108 | |||
| 1109 | if (bdev->bd_contains != bdev) { | ||
| 1110 | kobject_put(&bdev->bd_part->kobj); | ||
| 1111 | bdev->bd_part = NULL; | ||
| 1112 | } | ||
| 1113 | bdev->bd_disk = NULL; | ||
| 1114 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; | ||
| 1115 | if (bdev != bdev->bd_contains) | ||
| 1116 | __blkdev_put(bdev->bd_contains, subclass + 1); | ||
| 1117 | bdev->bd_contains = NULL; | ||
| 1118 | } | ||
| 1119 | unlock_kernel(); | ||
| 1120 | mutex_unlock(&bdev->bd_mutex); | ||
| 1121 | bdput(bdev); | ||
| 1122 | return ret; | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | int blkdev_put(struct block_device *bdev) | ||
| 1126 | { | ||
| 1127 | return __blkdev_put(bdev, BD_MUTEX_NORMAL); | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | EXPORT_SYMBOL(blkdev_put); | ||
| 1131 | |||
| 1132 | int blkdev_put_partition(struct block_device *bdev) | ||
| 1133 | { | ||
| 1134 | return __blkdev_put(bdev, BD_MUTEX_PARTITION); | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | EXPORT_SYMBOL(blkdev_put_partition); | ||
| 1138 | |||
| 1139 | static int blkdev_close(struct inode * inode, struct file * filp) | 1137 | static int blkdev_close(struct inode * inode, struct file * filp) |
| 1140 | { | 1138 | { |
| 1141 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 1139 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); |
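
The block_dev.c hunks above move __blkdev_put() ahead of blkdev_get_whole() and give it a lockdep subclass argument: dropping the last reference to a partition must also put the containing whole disk while the partition's bd_mutex is still held, and the subclass keeps lockdep from reporting that as recursive locking of a single class. Below is a stand-alone user-space sketch of that nesting pattern; locking is reduced to printf() and the BD_MUTEX_* names are reused purely for illustration, not taken from a kernel header.

/*
 * Sketch (not kernel code): putting a partition holds its own "bd_mutex"
 * while the containing whole disk is put with subclass + 1, so the two
 * acquisitions of the same lock class are distinguishable.
 */
#include <stdio.h>

enum bd_mutex_subclass { BD_MUTEX_NORMAL, BD_MUTEX_WHOLE, BD_MUTEX_PARTITION };

struct blockdev {
        int openers;
        struct blockdev *contains;  /* whole disk for a partition, self otherwise */
        const char *name;
};

static void put_bdev(struct blockdev *bdev, unsigned int subclass)
{
        /* stands in for mutex_lock_nested(&bdev->bd_mutex, subclass) */
        printf("lock   %-4s (subclass %u)\n", bdev->name, subclass);
        if (--bdev->openers == 0 && bdev->contains != bdev)
                put_bdev(bdev->contains, subclass + 1); /* whole disk, deeper subclass */
        printf("unlock %s\n", bdev->name);
}

int main(void)
{
        struct blockdev disk = { 1, &disk, "sda" };
        struct blockdev part = { 1, &disk, "sda1" };

        put_bdev(&part, BD_MUTEX_PARTITION);
        return 0;
}

Running it shows both locks held at once, the partition at its own subclass and the whole disk one level deeper, which is exactly what mutex_lock_nested() tells lockdep to expect here.
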
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 19ffb043abbc..3a3567433b92 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi) | |||
| 1168 | eexit_1: | 1168 | eexit_1: |
| 1169 | 1169 | ||
| 1170 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", | 1170 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", |
| 1171 | current, ep, epi->file, error)); | 1171 | current, ep, epi->ffd.file, error)); |
| 1172 | 1172 | ||
| 1173 | return error; | 1173 | return error; |
| 1174 | } | 1174 | } |
| @@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
| 1236 | struct eventpoll *ep = epi->ep; | 1236 | struct eventpoll *ep = epi->ep; |
| 1237 | 1237 | ||
| 1238 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", | 1238 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", |
| 1239 | current, epi->file, epi, ep)); | 1239 | current, epi->ffd.file, epi, ep)); |
| 1240 | 1240 | ||
| 1241 | write_lock_irqsave(&ep->lock, flags); | 1241 | write_lock_irqsave(&ep->lock, flags); |
| 1242 | 1242 | ||
| @@ -486,8 +486,6 @@ struct file *open_exec(const char *name) | |||
| 486 | if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && | 486 | if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && |
| 487 | S_ISREG(inode->i_mode)) { | 487 | S_ISREG(inode->i_mode)) { |
| 488 | int err = vfs_permission(&nd, MAY_EXEC); | 488 | int err = vfs_permission(&nd, MAY_EXEC); |
| 489 | if (!err && !(inode->i_mode & 0111)) | ||
| 490 | err = -EACCES; | ||
| 491 | file = ERR_PTR(err); | 489 | file = ERR_PTR(err); |
| 492 | if (!err) { | 490 | if (!err) { |
| 493 | file = nameidata_to_filp(&nd, O_RDONLY); | 491 | file = nameidata_to_filp(&nd, O_RDONLY); |
| @@ -753,7 +751,7 @@ no_thread_group: | |||
| 753 | 751 | ||
| 754 | write_lock_irq(&tasklist_lock); | 752 | write_lock_irq(&tasklist_lock); |
| 755 | spin_lock(&oldsighand->siglock); | 753 | spin_lock(&oldsighand->siglock); |
| 756 | spin_lock(&newsighand->siglock); | 754 | spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING); |
| 757 | 755 | ||
| 758 | rcu_assign_pointer(current->sighand, newsighand); | 756 | rcu_assign_pointer(current->sighand, newsighand); |
| 759 | recalc_sigpending(); | 757 | recalc_sigpending(); |
| @@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm) | |||
| 922 | int retval; | 920 | int retval; |
| 923 | 921 | ||
| 924 | mode = inode->i_mode; | 922 | mode = inode->i_mode; |
| 925 | /* | ||
| 926 | * Check execute perms again - if the caller has CAP_DAC_OVERRIDE, | ||
| 927 | * generic_permission lets a non-executable through | ||
| 928 | */ | ||
| 929 | if (!(mode & 0111)) /* with at least _one_ execute bit set */ | ||
| 930 | return -EACCES; | ||
| 931 | if (bprm->file->f_op == NULL) | 923 | if (bprm->file->f_op == NULL) |
| 932 | return -EACCES; | 924 | return -EACCES; |
| 933 | 925 | ||
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index f2702cda9779..681dea8f9532 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
| @@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | |||
| 775 | if (EXT2_INODE_SIZE(sb) == 0) | 775 | if (EXT2_INODE_SIZE(sb) == 0) |
| 776 | goto cantfind_ext2; | 776 | goto cantfind_ext2; |
| 777 | sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); | 777 | sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); |
| 778 | if (sbi->s_inodes_per_block == 0) | 778 | if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0) |
| 779 | goto cantfind_ext2; | 779 | goto cantfind_ext2; |
| 780 | sbi->s_itb_per_group = sbi->s_inodes_per_group / | 780 | sbi->s_itb_per_group = sbi->s_inodes_per_group / |
| 781 | sbi->s_inodes_per_block; | 781 | sbi->s_inodes_per_block; |
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index a504a40d6d29..063d994bda0b 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
| @@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | |||
| 1269 | goal = le32_to_cpu(es->s_first_data_block); | 1269 | goal = le32_to_cpu(es->s_first_data_block); |
| 1270 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / | 1270 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / |
| 1271 | EXT3_BLOCKS_PER_GROUP(sb); | 1271 | EXT3_BLOCKS_PER_GROUP(sb); |
| 1272 | goal_group = group_no; | ||
| 1273 | retry_alloc: | ||
| 1272 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | 1274 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); |
| 1273 | if (!gdp) | 1275 | if (!gdp) |
| 1274 | goto io_error; | 1276 | goto io_error; |
| 1275 | 1277 | ||
| 1276 | goal_group = group_no; | ||
| 1277 | retry: | ||
| 1278 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | 1278 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); |
| 1279 | /* | 1279 | /* |
| 1280 | * if there is not enough free blocks to make a new resevation | 1280 | * if there is not enough free blocks to make a new resevation |
| @@ -1349,7 +1349,7 @@ retry: | |||
| 1349 | if (my_rsv) { | 1349 | if (my_rsv) { |
| 1350 | my_rsv = NULL; | 1350 | my_rsv = NULL; |
| 1351 | group_no = goal_group; | 1351 | group_no = goal_group; |
| 1352 | goto retry; | 1352 | goto retry_alloc; |
| 1353 | } | 1353 | } |
| 1354 | /* No space left on the device */ | 1354 | /* No space left on the device */ |
| 1355 | *errp = -ENOSPC; | 1355 | *errp = -ENOSPC; |
diff --git a/fs/ioprio.c b/fs/ioprio.c index 93aa5715f224..78b1deae3fa2 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c | |||
| @@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) | |||
| 44 | task->ioprio = ioprio; | 44 | task->ioprio = ioprio; |
| 45 | 45 | ||
| 46 | ioc = task->io_context; | 46 | ioc = task->io_context; |
| 47 | /* see wmb() in current_io_context() */ | ||
| 48 | smp_read_barrier_depends(); | ||
| 49 | |||
| 47 | if (ioc && ioc->set_ioprio) | 50 | if (ioc && ioc->set_ioprio) |
| 48 | ioc->set_ioprio(ioc, ioprio); | 51 | ioc->set_ioprio(ioc, ioprio); |
| 49 | 52 | ||
| @@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) | |||
| 111 | continue; | 114 | continue; |
| 112 | ret = set_task_ioprio(p, ioprio); | 115 | ret = set_task_ioprio(p, ioprio); |
| 113 | if (ret) | 116 | if (ret) |
| 114 | break; | 117 | goto free_uid; |
| 115 | } while_each_thread(g, p); | 118 | } while_each_thread(g, p); |
| 116 | 119 | free_uid: | |
| 117 | if (who) | 120 | if (who) |
| 118 | free_uid(user); | 121 | free_uid(user); |
| 119 | break; | 122 | break; |
| @@ -137,6 +140,29 @@ out: | |||
| 137 | return ret; | 140 | return ret; |
| 138 | } | 141 | } |
| 139 | 142 | ||
| 143 | int ioprio_best(unsigned short aprio, unsigned short bprio) | ||
| 144 | { | ||
| 145 | unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); | ||
| 146 | unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); | ||
| 147 | |||
| 148 | if (!ioprio_valid(aprio)) | ||
| 149 | return bprio; | ||
| 150 | if (!ioprio_valid(bprio)) | ||
| 151 | return aprio; | ||
| 152 | |||
| 153 | if (aclass == IOPRIO_CLASS_NONE) | ||
| 154 | aclass = IOPRIO_CLASS_BE; | ||
| 155 | if (bclass == IOPRIO_CLASS_NONE) | ||
| 156 | bclass = IOPRIO_CLASS_BE; | ||
| 157 | |||
| 158 | if (aclass == bclass) | ||
| 159 | return min(aprio, bprio); | ||
| 160 | if (aclass > bclass) | ||
| 161 | return bprio; | ||
| 162 | else | ||
| 163 | return aprio; | ||
| 164 | } | ||
| 165 | |||
| 140 | asmlinkage long sys_ioprio_get(int which, int who) | 166 | asmlinkage long sys_ioprio_get(int which, int who) |
| 141 | { | 167 | { |
| 142 | struct task_struct *g, *p; | 168 | struct task_struct *g, *p; |
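
ioprio_best(), added above, merges two I/O priorities: the stronger class wins (RT beats BE beats IDLE, with NONE treated as BE), and within the same class the numerically lower level wins. The following is a user-space restatement of the same rule; the IOPRIO_* encoding (a 3-bit class above a 13-bit level) is defined locally so the sketch compiles on its own, and the constants are copied here for illustration rather than included from a header.

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_CLASS(p)    ((p) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(c, d) (((c) << IOPRIO_CLASS_SHIFT) | (d))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

#define ioprio_valid(p)         (IOPRIO_PRIO_CLASS(p) != IOPRIO_CLASS_NONE)

static unsigned short ioprio_best(unsigned short aprio, unsigned short bprio)
{
        unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
        unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

        if (!ioprio_valid(aprio))
                return bprio;
        if (!ioprio_valid(bprio))
                return aprio;

        if (aclass == IOPRIO_CLASS_NONE)
                aclass = IOPRIO_CLASS_BE;
        if (bclass == IOPRIO_CLASS_NONE)
                bclass = IOPRIO_CLASS_BE;

        /* same class: the lower numeric level is the stronger priority */
        if (aclass == bclass)
                return aprio < bprio ? aprio : bprio;
        /* different classes: RT(1) beats BE(2) beats IDLE(3) */
        return aclass < bclass ? aprio : bprio;
}

int main(void)
{
        unsigned short rt4 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
        unsigned short be2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

        printf("winning class: %d\n",
               IOPRIO_PRIO_CLASS(ioprio_best(rt4, be2)));   /* prints 1 (RT) */
        return 0;
}
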
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 0971814c38b8..42da60784311 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
| @@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal) | |||
| 261 | struct buffer_head *bh = jh2bh(jh); | 261 | struct buffer_head *bh = jh2bh(jh); |
| 262 | 262 | ||
| 263 | jbd_lock_bh_state(bh); | 263 | jbd_lock_bh_state(bh); |
| 264 | kfree(jh->b_committed_data); | 264 | jbd_slab_free(jh->b_committed_data, bh->b_size); |
| 265 | jh->b_committed_data = NULL; | 265 | jh->b_committed_data = NULL; |
| 266 | jbd_unlock_bh_state(bh); | 266 | jbd_unlock_bh_state(bh); |
| 267 | } | 267 | } |
| @@ -745,14 +745,14 @@ restart_loop: | |||
| 745 | * Otherwise, we can just throw away the frozen data now. | 745 | * Otherwise, we can just throw away the frozen data now. |
| 746 | */ | 746 | */ |
| 747 | if (jh->b_committed_data) { | 747 | if (jh->b_committed_data) { |
| 748 | kfree(jh->b_committed_data); | 748 | jbd_slab_free(jh->b_committed_data, bh->b_size); |
| 749 | jh->b_committed_data = NULL; | 749 | jh->b_committed_data = NULL; |
| 750 | if (jh->b_frozen_data) { | 750 | if (jh->b_frozen_data) { |
| 751 | jh->b_committed_data = jh->b_frozen_data; | 751 | jh->b_committed_data = jh->b_frozen_data; |
| 752 | jh->b_frozen_data = NULL; | 752 | jh->b_frozen_data = NULL; |
| 753 | } | 753 | } |
| 754 | } else if (jh->b_frozen_data) { | 754 | } else if (jh->b_frozen_data) { |
| 755 | kfree(jh->b_frozen_data); | 755 | jbd_slab_free(jh->b_frozen_data, bh->b_size); |
| 756 | jh->b_frozen_data = NULL; | 756 | jh->b_frozen_data = NULL; |
| 757 | } | 757 | } |
| 758 | 758 | ||
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 8c9b28dff119..f66724ce443a 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
| @@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit); | |||
| 84 | 84 | ||
| 85 | static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); | 85 | static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); |
| 86 | static void __journal_abort_soft (journal_t *journal, int errno); | 86 | static void __journal_abort_soft (journal_t *journal, int errno); |
| 87 | static int journal_create_jbd_slab(size_t slab_size); | ||
| 87 | 88 | ||
| 88 | /* | 89 | /* |
| 89 | * Helper function used to manage commit timeouts | 90 | * Helper function used to manage commit timeouts |
| @@ -328,10 +329,10 @@ repeat: | |||
| 328 | char *tmp; | 329 | char *tmp; |
| 329 | 330 | ||
| 330 | jbd_unlock_bh_state(bh_in); | 331 | jbd_unlock_bh_state(bh_in); |
| 331 | tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); | 332 | tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS); |
| 332 | jbd_lock_bh_state(bh_in); | 333 | jbd_lock_bh_state(bh_in); |
| 333 | if (jh_in->b_frozen_data) { | 334 | if (jh_in->b_frozen_data) { |
| 334 | kfree(tmp); | 335 | jbd_slab_free(tmp, bh_in->b_size); |
| 335 | goto repeat; | 336 | goto repeat; |
| 336 | } | 337 | } |
| 337 | 338 | ||
| @@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal) | |||
| 1069 | int journal_load(journal_t *journal) | 1070 | int journal_load(journal_t *journal) |
| 1070 | { | 1071 | { |
| 1071 | int err; | 1072 | int err; |
| 1073 | journal_superblock_t *sb; | ||
| 1072 | 1074 | ||
| 1073 | err = load_superblock(journal); | 1075 | err = load_superblock(journal); |
| 1074 | if (err) | 1076 | if (err) |
| 1075 | return err; | 1077 | return err; |
| 1076 | 1078 | ||
| 1079 | sb = journal->j_superblock; | ||
| 1077 | /* If this is a V2 superblock, then we have to check the | 1080 | /* If this is a V2 superblock, then we have to check the |
| 1078 | * features flags on it. */ | 1081 | * features flags on it. */ |
| 1079 | 1082 | ||
| 1080 | if (journal->j_format_version >= 2) { | 1083 | if (journal->j_format_version >= 2) { |
| 1081 | journal_superblock_t *sb = journal->j_superblock; | ||
| 1082 | |||
| 1083 | if ((sb->s_feature_ro_compat & | 1084 | if ((sb->s_feature_ro_compat & |
| 1084 | ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || | 1085 | ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || |
| 1085 | (sb->s_feature_incompat & | 1086 | (sb->s_feature_incompat & |
| @@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal) | |||
| 1090 | } | 1091 | } |
| 1091 | } | 1092 | } |
| 1092 | 1093 | ||
| 1094 | /* | ||
| 1095 | * Create a slab for this blocksize | ||
| 1096 | */ | ||
| 1097 | err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize)); | ||
| 1098 | if (err) | ||
| 1099 | return err; | ||
| 1100 | |||
| 1093 | /* Let the recovery code check whether it needs to recover any | 1101 | /* Let the recovery code check whether it needs to recover any |
| 1094 | * data from the journal. */ | 1102 | * data from the journal. */ |
| 1095 | if (journal_recover(journal)) | 1103 | if (journal_recover(journal)) |
| @@ -1612,6 +1620,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) | |||
| 1612 | } | 1620 | } |
| 1613 | 1621 | ||
| 1614 | /* | 1622 | /* |
| 1623 | * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed | ||
| 1624 | * and allocate frozen and commit buffers from these slabs. | ||
| 1625 | * | ||
| 1626 | * Reason for doing this is to avoid, SLAB_DEBUG - since it could | ||
| 1627 | * cause bh to cross page boundary. | ||
| 1628 | */ | ||
| 1629 | |||
| 1630 | #define JBD_MAX_SLABS 5 | ||
| 1631 | #define JBD_SLAB_INDEX(size) (size >> 11) | ||
| 1632 | |||
| 1633 | static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; | ||
| 1634 | static const char *jbd_slab_names[JBD_MAX_SLABS] = { | ||
| 1635 | "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" | ||
| 1636 | }; | ||
| 1637 | |||
| 1638 | static void journal_destroy_jbd_slabs(void) | ||
| 1639 | { | ||
| 1640 | int i; | ||
| 1641 | |||
| 1642 | for (i = 0; i < JBD_MAX_SLABS; i++) { | ||
| 1643 | if (jbd_slab[i]) | ||
| 1644 | kmem_cache_destroy(jbd_slab[i]); | ||
| 1645 | jbd_slab[i] = NULL; | ||
| 1646 | } | ||
| 1647 | } | ||
| 1648 | |||
| 1649 | static int journal_create_jbd_slab(size_t slab_size) | ||
| 1650 | { | ||
| 1651 | int i = JBD_SLAB_INDEX(slab_size); | ||
| 1652 | |||
| 1653 | BUG_ON(i >= JBD_MAX_SLABS); | ||
| 1654 | |||
| 1655 | /* | ||
| 1656 | * Check if we already have a slab created for this size | ||
| 1657 | */ | ||
| 1658 | if (jbd_slab[i]) | ||
| 1659 | return 0; | ||
| 1660 | |||
| 1661 | /* | ||
| 1662 | * Create a slab and force alignment to be same as slabsize - | ||
| 1663 | * this will make sure that allocations won't cross the page | ||
| 1664 | * boundary. | ||
| 1665 | */ | ||
| 1666 | jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], | ||
| 1667 | slab_size, slab_size, 0, NULL, NULL); | ||
| 1668 | if (!jbd_slab[i]) { | ||
| 1669 | printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); | ||
| 1670 | return -ENOMEM; | ||
| 1671 | } | ||
| 1672 | return 0; | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | void * jbd_slab_alloc(size_t size, gfp_t flags) | ||
| 1676 | { | ||
| 1677 | int idx; | ||
| 1678 | |||
| 1679 | idx = JBD_SLAB_INDEX(size); | ||
| 1680 | BUG_ON(jbd_slab[idx] == NULL); | ||
| 1681 | return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL); | ||
| 1682 | } | ||
| 1683 | |||
| 1684 | void jbd_slab_free(void *ptr, size_t size) | ||
| 1685 | { | ||
| 1686 | int idx; | ||
| 1687 | |||
| 1688 | idx = JBD_SLAB_INDEX(size); | ||
| 1689 | BUG_ON(jbd_slab[idx] == NULL); | ||
| 1690 | kmem_cache_free(jbd_slab[idx], ptr); | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | /* | ||
| 1615 | * Journal_head storage management | 1694 | * Journal_head storage management |
| 1616 | */ | 1695 | */ |
| 1617 | static kmem_cache_t *journal_head_cache; | 1696 | static kmem_cache_t *journal_head_cache; |
| @@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh) | |||
| 1799 | printk(KERN_WARNING "%s: freeing " | 1878 | printk(KERN_WARNING "%s: freeing " |
| 1800 | "b_frozen_data\n", | 1879 | "b_frozen_data\n", |
| 1801 | __FUNCTION__); | 1880 | __FUNCTION__); |
| 1802 | kfree(jh->b_frozen_data); | 1881 | jbd_slab_free(jh->b_frozen_data, bh->b_size); |
| 1803 | } | 1882 | } |
| 1804 | if (jh->b_committed_data) { | 1883 | if (jh->b_committed_data) { |
| 1805 | printk(KERN_WARNING "%s: freeing " | 1884 | printk(KERN_WARNING "%s: freeing " |
| 1806 | "b_committed_data\n", | 1885 | "b_committed_data\n", |
| 1807 | __FUNCTION__); | 1886 | __FUNCTION__); |
| 1808 | kfree(jh->b_committed_data); | 1887 | jbd_slab_free(jh->b_committed_data, bh->b_size); |
| 1809 | } | 1888 | } |
| 1810 | bh->b_private = NULL; | 1889 | bh->b_private = NULL; |
| 1811 | jh->b_bh = NULL; /* debug, really */ | 1890 | jh->b_bh = NULL; /* debug, really */ |
| @@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void) | |||
| 1961 | journal_destroy_revoke_caches(); | 2040 | journal_destroy_revoke_caches(); |
| 1962 | journal_destroy_journal_head_cache(); | 2041 | journal_destroy_journal_head_cache(); |
| 1963 | journal_destroy_handle_cache(); | 2042 | journal_destroy_handle_cache(); |
| 2043 | journal_destroy_jbd_slabs(); | ||
| 1964 | } | 2044 | } |
| 1965 | 2045 | ||
| 1966 | static int __init journal_init(void) | 2046 | static int __init journal_init(void) |
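
The slab management added to fs/jbd/journal.c above creates one kmem cache per journal blocksize and aligns objects to that size, so frozen and commit buffers can no longer straddle a page boundary even when slab debugging pads allocations. JBD_SLAB_INDEX() is simply size >> 11, which is why the 1k/2k/4k/8k caches land on indices 0, 1, 2 and 4 and entry 3 of jbd_slab_names[] stays NULL. A trivial stand-alone check of that mapping:

#include <stdio.h>

#define JBD_SLAB_INDEX(size)  ((size) >> 11)

int main(void)
{
        size_t sizes[] = { 1024, 2048, 4096, 8192 };

        for (int i = 0; i < 4; i++)
                printf("blocksize %5zu -> jbd_slab[%zu]\n",
                       sizes[i], JBD_SLAB_INDEX(sizes[i]));
        /* prints indices 0, 1, 2, 4 */
        return 0;
}
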
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 508b2ea91f43..de2e4cbbf79a 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
| @@ -666,8 +666,9 @@ repeat: | |||
| 666 | if (!frozen_buffer) { | 666 | if (!frozen_buffer) { |
| 667 | JBUFFER_TRACE(jh, "allocate memory for buffer"); | 667 | JBUFFER_TRACE(jh, "allocate memory for buffer"); |
| 668 | jbd_unlock_bh_state(bh); | 668 | jbd_unlock_bh_state(bh); |
| 669 | frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, | 669 | frozen_buffer = |
| 670 | GFP_NOFS); | 670 | jbd_slab_alloc(jh2bh(jh)->b_size, |
| 671 | GFP_NOFS); | ||
| 671 | if (!frozen_buffer) { | 672 | if (!frozen_buffer) { |
| 672 | printk(KERN_EMERG | 673 | printk(KERN_EMERG |
| 673 | "%s: OOM for frozen_buffer\n", | 674 | "%s: OOM for frozen_buffer\n", |
| @@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh) | |||
| 879 | 880 | ||
| 880 | repeat: | 881 | repeat: |
| 881 | if (!jh->b_committed_data) { | 882 | if (!jh->b_committed_data) { |
| 882 | committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); | 883 | committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS); |
| 883 | if (!committed_data) { | 884 | if (!committed_data) { |
| 884 | printk(KERN_EMERG "%s: No memory for committed data\n", | 885 | printk(KERN_EMERG "%s: No memory for committed data\n", |
| 885 | __FUNCTION__); | 886 | __FUNCTION__); |
| @@ -906,7 +907,7 @@ repeat: | |||
| 906 | out: | 907 | out: |
| 907 | journal_put_journal_head(jh); | 908 | journal_put_journal_head(jh); |
| 908 | if (unlikely(committed_data)) | 909 | if (unlikely(committed_data)) |
| 909 | kfree(committed_data); | 910 | jbd_slab_free(committed_data, bh->b_size); |
| 910 | return err; | 911 | return err; |
| 911 | } | 912 | } |
| 912 | 913 | ||
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 2a4df9b3779a..01b4db9e5466 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c | |||
| @@ -237,19 +237,22 @@ static int | |||
| 237 | nlm_traverse_files(struct nlm_host *host, int action) | 237 | nlm_traverse_files(struct nlm_host *host, int action) |
| 238 | { | 238 | { |
| 239 | struct nlm_file *file, **fp; | 239 | struct nlm_file *file, **fp; |
| 240 | int i; | 240 | int i, ret = 0; |
| 241 | 241 | ||
| 242 | mutex_lock(&nlm_file_mutex); | 242 | mutex_lock(&nlm_file_mutex); |
| 243 | for (i = 0; i < FILE_NRHASH; i++) { | 243 | for (i = 0; i < FILE_NRHASH; i++) { |
| 244 | fp = nlm_files + i; | 244 | fp = nlm_files + i; |
| 245 | while ((file = *fp) != NULL) { | 245 | while ((file = *fp) != NULL) { |
| 246 | file->f_count++; | ||
| 247 | mutex_unlock(&nlm_file_mutex); | ||
| 248 | |||
| 246 | /* Traverse locks, blocks and shares of this file | 249 | /* Traverse locks, blocks and shares of this file |
| 247 | * and update file->f_locks count */ | 250 | * and update file->f_locks count */ |
| 248 | if (nlm_inspect_file(host, file, action)) { | 251 | if (nlm_inspect_file(host, file, action)) |
| 249 | mutex_unlock(&nlm_file_mutex); | 252 | ret = 1; |
| 250 | return 1; | ||
| 251 | } | ||
| 252 | 253 | ||
| 254 | mutex_lock(&nlm_file_mutex); | ||
| 255 | file->f_count--; | ||
| 253 | /* No more references to this file. Let go of it. */ | 256 | /* No more references to this file. Let go of it. */ |
| 254 | if (!file->f_blocks && !file->f_locks | 257 | if (!file->f_blocks && !file->f_locks |
| 255 | && !file->f_shares && !file->f_count) { | 258 | && !file->f_shares && !file->f_count) { |
| @@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action) | |||
| 262 | } | 265 | } |
| 263 | } | 266 | } |
| 264 | mutex_unlock(&nlm_file_mutex); | 267 | mutex_unlock(&nlm_file_mutex); |
| 265 | return 0; | 268 | return ret; |
| 266 | } | 269 | } |
| 267 | 270 | ||
| 268 | /* | 271 | /* |
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 9ea91c5eeb7b..330ff9fc7cf0 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
| @@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) | |||
| 204 | /* | 204 | /* |
| 205 | * Allocate the buffer map to keep the superblock small. | 205 | * Allocate the buffer map to keep the superblock small. |
| 206 | */ | 206 | */ |
| 207 | if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) | ||
| 208 | goto out_illegal_sb; | ||
| 207 | i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); | 209 | i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); |
| 208 | map = kmalloc(i, GFP_KERNEL); | 210 | map = kmalloc(i, GFP_KERNEL); |
| 209 | if (!map) | 211 | if (!map) |
| @@ -263,7 +265,7 @@ out_no_root: | |||
| 263 | 265 | ||
| 264 | out_no_bitmap: | 266 | out_no_bitmap: |
| 265 | printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); | 267 | printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); |
| 266 | out_freemap: | 268 | out_freemap: |
| 267 | for (i = 0; i < sbi->s_imap_blocks; i++) | 269 | for (i = 0; i < sbi->s_imap_blocks; i++) |
| 268 | brelse(sbi->s_imap[i]); | 270 | brelse(sbi->s_imap[i]); |
| 269 | for (i = 0; i < sbi->s_zmap_blocks; i++) | 271 | for (i = 0; i < sbi->s_zmap_blocks; i++) |
| @@ -276,11 +278,16 @@ out_no_map: | |||
| 276 | printk("MINIX-fs: can't allocate map\n"); | 278 | printk("MINIX-fs: can't allocate map\n"); |
| 277 | goto out_release; | 279 | goto out_release; |
| 278 | 280 | ||
| 281 | out_illegal_sb: | ||
| 282 | if (!silent) | ||
| 283 | printk("MINIX-fs: bad superblock\n"); | ||
| 284 | goto out_release; | ||
| 285 | |||
| 279 | out_no_fs: | 286 | out_no_fs: |
| 280 | if (!silent) | 287 | if (!silent) |
| 281 | printk("VFS: Can't find a Minix or Minix V2 filesystem " | 288 | printk("VFS: Can't find a Minix or Minix V2 filesystem " |
| 282 | "on device %s\n", s->s_id); | 289 | "on device %s\n", s->s_id); |
| 283 | out_release: | 290 | out_release: |
| 284 | brelse(bh); | 291 | brelse(bh); |
| 285 | goto out; | 292 | goto out; |
| 286 | 293 | ||
| @@ -290,7 +297,7 @@ out_bad_hblock: | |||
| 290 | 297 | ||
| 291 | out_bad_sb: | 298 | out_bad_sb: |
| 292 | printk("MINIX-fs: unable to read superblock\n"); | 299 | printk("MINIX-fs: unable to read superblock\n"); |
| 293 | out: | 300 | out: |
| 294 | s->s_fs_info = NULL; | 301 | s->s_fs_info = NULL; |
| 295 | kfree(sbi); | 302 | kfree(sbi); |
| 296 | return -EINVAL; | 303 | return -EINVAL; |
diff --git a/fs/namei.c b/fs/namei.c index 55a131230f94..432d6bc6fab0 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask, | |||
| 227 | 227 | ||
| 228 | int permission(struct inode *inode, int mask, struct nameidata *nd) | 228 | int permission(struct inode *inode, int mask, struct nameidata *nd) |
| 229 | { | 229 | { |
| 230 | umode_t mode = inode->i_mode; | ||
| 230 | int retval, submask; | 231 | int retval, submask; |
| 231 | 232 | ||
| 232 | if (mask & MAY_WRITE) { | 233 | if (mask & MAY_WRITE) { |
| 233 | umode_t mode = inode->i_mode; | ||
| 234 | 234 | ||
| 235 | /* | 235 | /* |
| 236 | * Nobody gets write access to a read-only fs. | 236 | * Nobody gets write access to a read-only fs. |
| @@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) | |||
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | 249 | ||
| 250 | /* | ||
| 251 | * MAY_EXEC on regular files requires special handling: We override | ||
| 252 | * filesystem execute permissions if the mode bits aren't set. | ||
| 253 | */ | ||
| 254 | if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) | ||
| 255 | return -EACCES; | ||
| 256 | |||
| 250 | /* Ordinary permission routines do not understand MAY_APPEND. */ | 257 | /* Ordinary permission routines do not understand MAY_APPEND. */ |
| 251 | submask = mask & ~MAY_APPEND; | 258 | submask = mask & ~MAY_APPEND; |
| 252 | if (inode->i_op && inode->i_op->permission) | 259 | if (inode->i_op && inode->i_op->permission) |
| @@ -1767,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) | |||
| 1767 | if (nd->last_type != LAST_NORM) | 1774 | if (nd->last_type != LAST_NORM) |
| 1768 | goto fail; | 1775 | goto fail; |
| 1769 | nd->flags &= ~LOOKUP_PARENT; | 1776 | nd->flags &= ~LOOKUP_PARENT; |
| 1777 | nd->flags |= LOOKUP_CREATE; | ||
| 1778 | nd->intent.open.flags = O_EXCL; | ||
| 1770 | 1779 | ||
| 1771 | /* | 1780 | /* |
| 1772 | * Do the final lookup. | 1781 | * Do the final lookup. |
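
The first fs/namei.c hunk above is the other half of the fs/exec.c change earlier in this diff: instead of open_exec() and prepare_binprm() each rejecting files with no execute bits, permission() now refuses MAY_EXEC on a regular file whose mode carries no x bit at all, before the filesystem or capability checks get a say. A minimal illustration of just that mode test follows; S_IXUGO is defined locally in case the libc headers do not provide it.

#include <sys/stat.h>
#include <stdio.h>

#ifndef S_IXUGO
#define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)
#endif

static int may_exec_mode_ok(mode_t mode)
{
        /* mirrors: if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
         *                  return -EACCES; */
        return !(S_ISREG(mode) && !(mode & S_IXUGO));
}

int main(void)
{
        printf("%d\n", may_exec_mode_ok(S_IFREG | 0644));  /* 0: refused (-EACCES)      */
        printf("%d\n", may_exec_mode_ok(S_IFREG | 0755));  /* 1: allowed                */
        printf("%d\n", may_exec_mode_ok(S_IFDIR | 0755));  /* 1: directories unaffected */
        return 0;
}
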
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cc2b874ad5a4..48e892880d5b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
| @@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) | |||
| 312 | 312 | ||
| 313 | static int nfs_release_page(struct page *page, gfp_t gfp) | 313 | static int nfs_release_page(struct page *page, gfp_t gfp) |
| 314 | { | 314 | { |
| 315 | return !nfs_wb_page(page->mapping->host, page); | 315 | if (gfp & __GFP_FS) |
| 316 | return !nfs_wb_page(page->mapping->host, page); | ||
| 317 | else | ||
| 318 | /* | ||
| 319 | * Avoid deadlock on nfs_wait_on_request(). | ||
| 320 | */ | ||
| 321 | return 0; | ||
| 316 | } | 322 | } |
| 317 | 323 | ||
| 318 | const struct address_space_operations nfs_file_aops = { | 324 | const struct address_space_operations nfs_file_aops = { |
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b81e7ed3c902..07a5dd57646e 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c | |||
| @@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp) | |||
| 130 | 130 | ||
| 131 | if (!idmap) | 131 | if (!idmap) |
| 132 | return; | 132 | return; |
| 133 | dput(idmap->idmap_dentry); | 133 | rpc_unlink(idmap->idmap_dentry); |
| 134 | idmap->idmap_dentry = NULL; | ||
| 135 | rpc_unlink(idmap->idmap_path); | ||
| 136 | clp->cl_idmap = NULL; | 134 | clp->cl_idmap = NULL; |
| 137 | kfree(idmap); | 135 | kfree(idmap); |
| 138 | } | 136 | } |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e6ee97f19d81..153898e1331f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -2668,7 +2668,7 @@ out: | |||
| 2668 | nfs4_set_cached_acl(inode, acl); | 2668 | nfs4_set_cached_acl(inode, acl); |
| 2669 | } | 2669 | } |
| 2670 | 2670 | ||
| 2671 | static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) | 2671 | static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) |
| 2672 | { | 2672 | { |
| 2673 | struct page *pages[NFS4ACL_MAXPAGES]; | 2673 | struct page *pages[NFS4ACL_MAXPAGES]; |
| 2674 | struct nfs_getaclargs args = { | 2674 | struct nfs_getaclargs args = { |
| @@ -2721,6 +2721,19 @@ out_free: | |||
| 2721 | return ret; | 2721 | return ret; |
| 2722 | } | 2722 | } |
| 2723 | 2723 | ||
| 2724 | static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) | ||
| 2725 | { | ||
| 2726 | struct nfs4_exception exception = { }; | ||
| 2727 | ssize_t ret; | ||
| 2728 | do { | ||
| 2729 | ret = __nfs4_get_acl_uncached(inode, buf, buflen); | ||
| 2730 | if (ret >= 0) | ||
| 2731 | break; | ||
| 2732 | ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); | ||
| 2733 | } while (exception.retry); | ||
| 2734 | return ret; | ||
| 2735 | } | ||
| 2736 | |||
| 2724 | static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) | 2737 | static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) |
| 2725 | { | 2738 | { |
| 2726 | struct nfs_server *server = NFS_SERVER(inode); | 2739 | struct nfs_server *server = NFS_SERVER(inode); |
| @@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) | |||
| 2737 | return nfs4_get_acl_uncached(inode, buf, buflen); | 2750 | return nfs4_get_acl_uncached(inode, buf, buflen); |
| 2738 | } | 2751 | } |
| 2739 | 2752 | ||
| 2740 | static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) | 2753 | static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) |
| 2741 | { | 2754 | { |
| 2742 | struct nfs_server *server = NFS_SERVER(inode); | 2755 | struct nfs_server *server = NFS_SERVER(inode); |
| 2743 | struct page *pages[NFS4ACL_MAXPAGES]; | 2756 | struct page *pages[NFS4ACL_MAXPAGES]; |
| @@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen | |||
| 2763 | return ret; | 2776 | return ret; |
| 2764 | } | 2777 | } |
| 2765 | 2778 | ||
| 2779 | static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) | ||
| 2780 | { | ||
| 2781 | struct nfs4_exception exception = { }; | ||
| 2782 | int err; | ||
| 2783 | do { | ||
| 2784 | err = nfs4_handle_exception(NFS_SERVER(inode), | ||
| 2785 | __nfs4_proc_set_acl(inode, buf, buflen), | ||
| 2786 | &exception); | ||
| 2787 | } while (exception.retry); | ||
| 2788 | return err; | ||
| 2789 | } | ||
| 2790 | |||
| 2766 | static int | 2791 | static int |
| 2767 | nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) | 2792 | nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) |
| 2768 | { | 2793 | { |
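
Both ACL paths above are split into a raw __nfs4_* helper plus a wrapper that loops on nfs4_handle_exception(), matching how the other NFSv4 procedures already retry errors the server reports as transient. A stripped-down model of that do/while pattern is sketched below; the names and the fake error code are illustrative only, not the kernel's API.

#include <stdio.h>

#define ERR_TRANSIENT (-11)   /* stand-in for an error the handler retries */

struct exception { int retry; int delay_budget; };

static int handle_exception(int err, struct exception *exc)
{
        if (err == ERR_TRANSIENT && exc->delay_budget-- > 0) {
                exc->retry = 1;         /* absorb the error, ask the caller to loop */
                return 0;
        }
        exc->retry = 0;
        return err;                     /* final answer: 0 or a hard error */
}

static int raw_set_acl(int *attempts)
{
        return (*attempts)++ < 2 ? ERR_TRANSIENT : 0;  /* fails twice, then works */
}

int main(void)
{
        struct exception exc = { 0, 5 };
        int attempts = 0, err;

        do {
                err = handle_exception(raw_set_acl(&attempts), &exc);
        } while (exc.retry);

        printf("result %d after %d attempts\n", err, attempts);  /* result 0 after 3 attempts */
        return 0;
}
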
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 1750d996f49f..730ec8fb31c6 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
| 3355 | struct kvec *iov = rcvbuf->head; | 3355 | struct kvec *iov = rcvbuf->head; |
| 3356 | unsigned int nr, pglen = rcvbuf->page_len; | 3356 | unsigned int nr, pglen = rcvbuf->page_len; |
| 3357 | uint32_t *end, *entry, *p, *kaddr; | 3357 | uint32_t *end, *entry, *p, *kaddr; |
| 3358 | uint32_t len, attrlen; | 3358 | uint32_t len, attrlen, xlen; |
| 3359 | int hdrlen, recvd, status; | 3359 | int hdrlen, recvd, status; |
| 3360 | 3360 | ||
| 3361 | status = decode_op_hdr(xdr, OP_READDIR); | 3361 | status = decode_op_hdr(xdr, OP_READDIR); |
| @@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
| 3377 | 3377 | ||
| 3378 | BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); | 3378 | BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); |
| 3379 | kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); | 3379 | kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); |
| 3380 | end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); | 3380 | end = p + ((pglen + readdir->pgbase) >> 2); |
| 3381 | entry = p; | 3381 | entry = p; |
| 3382 | for (nr = 0; *p++; nr++) { | 3382 | for (nr = 0; *p++; nr++) { |
| 3383 | if (p + 3 > end) | 3383 | if (end - p < 3) |
| 3384 | goto short_pkt; | 3384 | goto short_pkt; |
| 3385 | dprintk("cookie = %Lu, ", *((unsigned long long *)p)); | 3385 | dprintk("cookie = %Lu, ", *((unsigned long long *)p)); |
| 3386 | p += 2; /* cookie */ | 3386 | p += 2; /* cookie */ |
| @@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
| 3389 | printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); | 3389 | printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); |
| 3390 | goto err_unmap; | 3390 | goto err_unmap; |
| 3391 | } | 3391 | } |
| 3392 | dprintk("filename = %*s\n", len, (char *)p); | 3392 | xlen = XDR_QUADLEN(len); |
| 3393 | p += XDR_QUADLEN(len); | 3393 | if (end - p < xlen + 1) |
| 3394 | if (p + 1 > end) | ||
| 3395 | goto short_pkt; | 3394 | goto short_pkt; |
| 3395 | dprintk("filename = %*s\n", len, (char *)p); | ||
| 3396 | p += xlen; | ||
| 3396 | len = ntohl(*p++); /* bitmap length */ | 3397 | len = ntohl(*p++); /* bitmap length */ |
| 3397 | p += len; | 3398 | if (end - p < len + 1) |
| 3398 | if (p + 1 > end) | ||
| 3399 | goto short_pkt; | 3399 | goto short_pkt; |
| 3400 | p += len; | ||
| 3400 | attrlen = XDR_QUADLEN(ntohl(*p++)); | 3401 | attrlen = XDR_QUADLEN(ntohl(*p++)); |
| 3401 | p += attrlen; /* attributes */ | 3402 | if (end - p < attrlen + 2) |
| 3402 | if (p + 2 > end) | ||
| 3403 | goto short_pkt; | 3403 | goto short_pkt; |
| 3404 | p += attrlen; /* attributes */ | ||
| 3404 | entry = p; | 3405 | entry = p; |
| 3405 | } | 3406 | } |
| 3406 | if (!nr && (entry[0] != 0 || entry[1] == 0)) | 3407 | if (!nr && (entry[0] != 0 || entry[1] == 0)) |
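
The decode_readdir() changes above recompute 'end' in 32-bit words and replace every "p + n > end" test with "end - p < n", checking the remaining room before p is advanced by a length taken off the wire. The ordering matters: with a huge attacker-supplied length, advancing p first can push it far past the mapped page before the old check ever ran. A small sketch of the safe form, assuming only that lengths are counted in 32-bit words as in the decoder:

#include <stdint.h>
#include <stdio.h>

static int room_ok(const uint32_t *p, const uint32_t *end, uint32_t need)
{
        /* need is in 32-bit words; computing the room first cannot overflow p */
        return (size_t)(end - p) >= need;
}

int main(void)
{
        uint32_t page[1024];
        const uint32_t *p = page, *end = page + 1024;

        printf("%d\n", room_ok(p, end, 3));            /* 1: fits     */
        printf("%d\n", room_ok(p, end, 0xffffffffu));  /* 0: rejected */
        return 0;
}
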
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 65c0c5b32351..da9cf11c326f 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
| @@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) | |||
| 116 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; | 116 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; |
| 117 | base &= ~PAGE_CACHE_MASK; | 117 | base &= ~PAGE_CACHE_MASK; |
| 118 | pglen = PAGE_CACHE_SIZE - base; | 118 | pglen = PAGE_CACHE_SIZE - base; |
| 119 | if (pglen < remainder) | 119 | for (;;) { |
| 120 | if (remainder <= pglen) { | ||
| 121 | memclear_highpage_flush(*pages, base, remainder); | ||
| 122 | break; | ||
| 123 | } | ||
| 120 | memclear_highpage_flush(*pages, base, pglen); | 124 | memclear_highpage_flush(*pages, base, pglen); |
| 121 | else | 125 | pages++; |
| 122 | memclear_highpage_flush(*pages, base, remainder); | 126 | remainder -= pglen; |
| 127 | pglen = PAGE_CACHE_SIZE; | ||
| 128 | base = 0; | ||
| 129 | } | ||
| 123 | } | 130 | } |
| 124 | 131 | ||
| 125 | /* | 132 | /* |
| @@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) | |||
| 476 | unsigned int base = data->args.pgbase; | 483 | unsigned int base = data->args.pgbase; |
| 477 | struct page **pages; | 484 | struct page **pages; |
| 478 | 485 | ||
| 486 | if (data->res.eof) | ||
| 487 | count = data->args.count; | ||
| 479 | if (unlikely(count == 0)) | 488 | if (unlikely(count == 0)) |
| 480 | return; | 489 | return; |
| 481 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; | 490 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; |
| @@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) | |||
| 483 | count += base; | 492 | count += base; |
| 484 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) | 493 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) |
| 485 | SetPageUptodate(*pages); | 494 | SetPageUptodate(*pages); |
| 486 | /* | 495 | if (count != 0) |
| 487 | * Was this an eof or a short read? If the latter, don't mark the page | ||
| 488 | * as uptodate yet. | ||
| 489 | */ | ||
| 490 | if (count > 0 && (data->res.eof || data->args.count == data->res.count)) | ||
| 491 | SetPageUptodate(*pages); | 496 | SetPageUptodate(*pages); |
| 492 | } | 497 | } |
| 493 | 498 | ||
| @@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data) | |||
| 502 | count += base; | 507 | count += base; |
| 503 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) | 508 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) |
| 504 | SetPageError(*pages); | 509 | SetPageError(*pages); |
| 510 | if (count != 0) | ||
| 511 | SetPageError(*pages); | ||
| 505 | } | 512 | } |
| 506 | 513 | ||
| 507 | /* | 514 | /* |
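
The first fs/nfs/read.c hunk above turns a single memclear_highpage_flush() call into a loop, because the uninitialised tail of a short read can span several pages rather than just the page containing 'base'. Here is a user-space model of the same loop, with memset() standing in for memclear_highpage_flush() and an arbitrary 4 KiB page size.

#include <string.h>
#include <stdio.h>

#define PAGE_SZ 4096u

static void clear_tail(unsigned char **pages, unsigned int base,
                       unsigned int remainder)
{
        unsigned int pglen = PAGE_SZ - base;

        for (;;) {
                if (remainder <= pglen) {
                        memset(*pages + base, 0, remainder);
                        break;
                }
                memset(*pages + base, 0, pglen);
                pages++;
                remainder -= pglen;
                pglen = PAGE_SZ;
                base = 0;
        }
}

int main(void)
{
        static unsigned char p0[PAGE_SZ], p1[PAGE_SZ], p2[PAGE_SZ];
        unsigned char *pages[] = { p0, p1, p2 };

        memset(p0, 0xaa, sizeof(p0));
        memset(p1, 0xaa, sizeof(p1));
        memset(p2, 0xaa, sizeof(p2));

        /* 8192 stale bytes starting at offset 1000 of the first page */
        clear_tail(pages, 1000, 2 * PAGE_SZ);
        printf("%d %d %d\n", p0[999], p0[1000], p2[999]);  /* 170 0 0 */
        return 0;
}
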
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c index abe91ca03edf..0a5927c806ca 100644 --- a/fs/partitions/sun.c +++ b/fs/partitions/sun.c | |||
| @@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
| 74 | spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); | 74 | spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); |
| 75 | for (i = 0; i < 8; i++, p++) { | 75 | for (i = 0; i < 8; i++, p++) { |
| 76 | unsigned long st_sector; | 76 | unsigned long st_sector; |
| 77 | int num_sectors; | 77 | unsigned int num_sectors; |
| 78 | 78 | ||
| 79 | st_sector = be32_to_cpu(p->start_cylinder) * spc; | 79 | st_sector = be32_to_cpu(p->start_cylinder) * spc; |
| 80 | num_sectors = be32_to_cpu(p->num_sectors); | 80 | num_sectors = be32_to_cpu(p->num_sectors); |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 9f2cfc30f9cf..942156225447 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
| @@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
| 169 | "Mapped: %8lu kB\n" | 169 | "Mapped: %8lu kB\n" |
| 170 | "Slab: %8lu kB\n" | 170 | "Slab: %8lu kB\n" |
| 171 | "PageTables: %8lu kB\n" | 171 | "PageTables: %8lu kB\n" |
| 172 | "NFS Unstable: %8lu kB\n" | 172 | "NFS_Unstable: %8lu kB\n" |
| 173 | "Bounce: %8lu kB\n" | 173 | "Bounce: %8lu kB\n" |
| 174 | "CommitLimit: %8lu kB\n" | 174 | "CommitLimit: %8lu kB\n" |
| 175 | "Committed_AS: %8lu kB\n" | 175 | "Committed_AS: %8lu kB\n" |
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 39fedaa88a0c..d935fb9394e3 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
| @@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf) | |||
| 424 | int res = -ENOTDIR; | 424 | int res = -ENOTDIR; |
| 425 | if (!file->f_op || !file->f_op->readdir) | 425 | if (!file->f_op || !file->f_op->readdir) |
| 426 | goto out; | 426 | goto out; |
| 427 | mutex_lock(&inode->i_mutex); | 427 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR); |
| 428 | // down(&inode->i_zombie); | 428 | // down(&inode->i_zombie); |
| 429 | res = -ENOENT; | 429 | res = -ENOENT; |
| 430 | if (!IS_DEADDIR(inode)) { | 430 | if (!IS_DEADDIR(inode)) { |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 7de172efa084..fcce1a21a51b 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
| @@ -1659,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
| 1659 | iput(inode); | 1659 | iput(inode); |
| 1660 | goto error_out; | 1660 | goto error_out; |
| 1661 | } | 1661 | } |
| 1662 | sb->s_maxbytes = MAX_LFS_FILESIZE; | 1662 | sb->s_maxbytes = 1<<30; |
| 1663 | return 0; | 1663 | return 0; |
| 1664 | 1664 | ||
| 1665 | error_out: | 1665 | error_out: |
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index e1b0e8cfecb4..0abd66ce36ea 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c | |||
| @@ -239,37 +239,51 @@ void udf_truncate_extents(struct inode * inode) | |||
| 239 | { | 239 | { |
| 240 | if (offset) | 240 | if (offset) |
| 241 | { | 241 | { |
| 242 | extoffset -= adsize; | 242 | /* |
| 243 | etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); | 243 | * OK, there is not extent covering inode->i_size and |
| 244 | if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) | 244 | * no extent above inode->i_size => truncate is |
| 245 | { | 245 | * extending the file by 'offset'. |
| 246 | extoffset -= adsize; | 246 | */ |
| 247 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); | 247 | if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) || |
| 248 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); | 248 | (bh && extoffset == sizeof(struct allocExtDesc))) { |
| 249 | /* File has no extents at all! */ | ||
| 250 | memset(&eloc, 0x00, sizeof(kernel_lb_addr)); | ||
| 251 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; | ||
| 252 | udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); | ||
| 249 | } | 253 | } |
| 250 | else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) | 254 | else { |
| 251 | { | ||
| 252 | kernel_lb_addr neloc = { 0, 0 }; | ||
| 253 | extoffset -= adsize; | 255 | extoffset -= adsize; |
| 254 | nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | | 256 | etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); |
| 255 | ((elen + offset + inode->i_sb->s_blocksize - 1) & | 257 | if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) |
| 256 | ~(inode->i_sb->s_blocksize - 1)); | 258 | { |
| 257 | udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); | 259 | extoffset -= adsize; |
| 258 | udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); | 260 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); |
| 259 | } | 261 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); |
| 260 | else | 262 | } |
| 261 | { | 263 | else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) |
| 262 | if (elen & (inode->i_sb->s_blocksize - 1)) | ||
| 263 | { | 264 | { |
| 265 | kernel_lb_addr neloc = { 0, 0 }; | ||
| 264 | extoffset -= adsize; | 266 | extoffset -= adsize; |
| 265 | elen = EXT_RECORDED_ALLOCATED | | 267 | nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | |
| 266 | ((elen + inode->i_sb->s_blocksize - 1) & | 268 | ((elen + offset + inode->i_sb->s_blocksize - 1) & |
| 267 | ~(inode->i_sb->s_blocksize - 1)); | 269 | ~(inode->i_sb->s_blocksize - 1)); |
| 268 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); | 270 | udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); |
| 271 | udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); | ||
| 272 | } | ||
| 273 | else | ||
| 274 | { | ||
| 275 | if (elen & (inode->i_sb->s_blocksize - 1)) | ||
| 276 | { | ||
| 277 | extoffset -= adsize; | ||
| 278 | elen = EXT_RECORDED_ALLOCATED | | ||
| 279 | ((elen + inode->i_sb->s_blocksize - 1) & | ||
| 280 | ~(inode->i_sb->s_blocksize - 1)); | ||
| 281 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); | ||
| 282 | } | ||
| 283 | memset(&eloc, 0x00, sizeof(kernel_lb_addr)); | ||
| 284 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; | ||
| 285 | udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); | ||
| 269 | } | 286 | } |
| 270 | memset(&eloc, 0x00, sizeof(kernel_lb_addr)); | ||
| 271 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; | ||
| 272 | udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); | ||
| 273 | } | 287 | } |
| 274 | } | 288 | } |
| 275 | } | 289 | } |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index e7c8615beb65..30c6e8a9446c 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
| @@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) | |||
| 169 | 169 | ||
| 170 | static struct buffer_head * | 170 | static struct buffer_head * |
| 171 | ufs_clear_frags(struct inode *inode, sector_t beg, | 171 | ufs_clear_frags(struct inode *inode, sector_t beg, |
| 172 | unsigned int n) | 172 | unsigned int n, sector_t want) |
| 173 | { | 173 | { |
| 174 | struct buffer_head *res, *bh; | 174 | struct buffer_head *res = NULL, *bh; |
| 175 | sector_t end = beg + n; | 175 | sector_t end = beg + n; |
| 176 | 176 | ||
| 177 | res = sb_getblk(inode->i_sb, beg); | 177 | for (; beg < end; ++beg) { |
| 178 | ufs_clear_frag(inode, res); | ||
| 179 | for (++beg; beg < end; ++beg) { | ||
| 180 | bh = sb_getblk(inode->i_sb, beg); | 178 | bh = sb_getblk(inode->i_sb, beg); |
| 181 | ufs_clear_frag(inode, bh); | 179 | ufs_clear_frag(inode, bh); |
| 182 | brelse(bh); | 180 | if (want != beg) |
| 181 | brelse(bh); | ||
| 182 | else | ||
| 183 | res = bh; | ||
| 183 | } | 184 | } |
| 185 | BUG_ON(!res); | ||
| 184 | return res; | 186 | return res; |
| 185 | } | 187 | } |
| 186 | 188 | ||
| @@ -265,7 +267,9 @@ repeat: | |||
| 265 | lastfrag = ufsi->i_lastfrag; | 267 | lastfrag = ufsi->i_lastfrag; |
| 266 | 268 | ||
| 267 | } | 269 | } |
| 268 | goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; | 270 | tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]); |
| 271 | if (tmp) | ||
| 272 | goal = tmp + uspi->s_fpb; | ||
| 269 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, | 273 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, |
| 270 | goal, required + blockoff, | 274 | goal, required + blockoff, |
| 271 | err, locked_page); | 275 | err, locked_page); |
| @@ -277,13 +281,15 @@ repeat: | |||
| 277 | tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), | 281 | tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), |
| 278 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), | 282 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), |
| 279 | err, locked_page); | 283 | err, locked_page); |
| 280 | } | 284 | } else /* (lastblock > block) */ { |
| 281 | /* | 285 | /* |
| 282 | * We will allocate new block before last allocated block | 286 | * We will allocate new block before last allocated block |
| 283 | */ | 287 | */ |
| 284 | else /* (lastblock > block) */ { | 288 | if (block) { |
| 285 | if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) | 289 | tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]); |
| 286 | goal = tmp + uspi->s_fpb; | 290 | if (tmp) |
| 291 | goal = tmp + uspi->s_fpb; | ||
| 292 | } | ||
| 287 | tmp = ufs_new_fragments(inode, p, fragment - blockoff, | 293 | tmp = ufs_new_fragments(inode, p, fragment - blockoff, |
| 288 | goal, uspi->s_fpb, err, locked_page); | 294 | goal, uspi->s_fpb, err, locked_page); |
| 289 | } | 295 | } |
| @@ -296,7 +302,7 @@ repeat: | |||
| 296 | } | 302 | } |
| 297 | 303 | ||
| 298 | if (!phys) { | 304 | if (!phys) { |
| 299 | result = ufs_clear_frags(inode, tmp + blockoff, required); | 305 | result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); |
| 300 | } else { | 306 | } else { |
| 301 | *phys = tmp + blockoff; | 307 | *phys = tmp + blockoff; |
| 302 | result = NULL; | 308 | result = NULL; |
| @@ -383,7 +389,7 @@ repeat: | |||
| 383 | } | 389 | } |
| 384 | } | 390 | } |
| 385 | 391 | ||
| 386 | if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) | 392 | if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) |
| 387 | goal = tmp + uspi->s_fpb; | 393 | goal = tmp + uspi->s_fpb; |
| 388 | else | 394 | else |
| 389 | goal = bh->b_blocknr + uspi->s_fpb; | 395 | goal = bh->b_blocknr + uspi->s_fpb; |
| @@ -397,7 +403,8 @@ repeat: | |||
| 397 | 403 | ||
| 398 | 404 | ||
| 399 | if (!phys) { | 405 | if (!phys) { |
| 400 | result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); | 406 | result = ufs_clear_frags(inode, tmp, uspi->s_fpb, |
| 407 | tmp + blockoff); | ||
| 401 | } else { | 408 | } else { |
| 402 | *phys = tmp + blockoff; | 409 | *phys = tmp + blockoff; |
| 403 | *new = 1; | 410 | *new = 1; |
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index c9b55872079b..ea11d04c41a0 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
| @@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode) | |||
| 375 | int err = 0; | 375 | int err = 0; |
| 376 | struct address_space *mapping = inode->i_mapping; | 376 | struct address_space *mapping = inode->i_mapping; |
| 377 | struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; | 377 | struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; |
| 378 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
| 379 | unsigned lastfrag, i, end; | 378 | unsigned lastfrag, i, end; |
| 380 | struct page *lastpage; | 379 | struct page *lastpage; |
| 381 | struct buffer_head *bh; | 380 | struct buffer_head *bh; |
| 382 | 381 | ||
| 383 | lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; | 382 | lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; |
| 384 | 383 | ||
| 385 | if (!lastfrag) { | 384 | if (!lastfrag) |
| 386 | ufsi->i_lastfrag = 0; | ||
| 387 | goto out; | 385 | goto out; |
| 388 | } | 386 | |
| 389 | lastfrag--; | 387 | lastfrag--; |
| 390 | 388 | ||
| 391 | lastpage = ufs_get_locked_page(mapping, lastfrag >> | 389 | lastpage = ufs_get_locked_page(mapping, lastfrag >> |
| @@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode) | |||
| 400 | for (i = 0; i < end; ++i) | 398 | for (i = 0; i < end; ++i) |
| 401 | bh = bh->b_this_page; | 399 | bh = bh->b_this_page; |
| 402 | 400 | ||
| 403 | if (!buffer_mapped(bh)) { | 401 | |
| 404 | err = ufs_getfrag_block(inode, lastfrag, bh, 1); | 402 | err = ufs_getfrag_block(inode, lastfrag, bh, 1); |
| 405 | 403 | ||
| 406 | if (unlikely(err)) | 404 | if (unlikely(err)) |
| 407 | goto out_unlock; | 405 | goto out_unlock; |
| 408 | 406 | ||
| 409 | if (buffer_new(bh)) { | 407 | if (buffer_new(bh)) { |
| 410 | clear_buffer_new(bh); | 408 | clear_buffer_new(bh); |
| 411 | unmap_underlying_metadata(bh->b_bdev, | 409 | unmap_underlying_metadata(bh->b_bdev, |
| 412 | bh->b_blocknr); | 410 | bh->b_blocknr); |
| 413 | /* | 411 | /* |
| 414 | * we do not zeroize fragment, because of | 412 | * we do not zeroize fragment, because of |
| 415 | * if it maped to hole, it already contains zeroes | 413 | * if it maped to hole, it already contains zeroes |
| 416 | */ | 414 | */ |
| 417 | set_buffer_uptodate(bh); | 415 | set_buffer_uptodate(bh); |
| 418 | mark_buffer_dirty(bh); | 416 | mark_buffer_dirty(bh); |
| 419 | set_page_dirty(lastpage); | 417 | set_page_dirty(lastpage); |
| 420 | } | ||
| 421 | } | 418 | } |
| 419 | |||
| 422 | out_unlock: | 420 | out_unlock: |
| 423 | ufs_put_locked_page(lastpage); | 421 | ufs_put_locked_page(lastpage); |
| 424 | out: | 422 | out: |
| @@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) | |||
| 440 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | 438 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) |
| 441 | return -EPERM; | 439 | return -EPERM; |
| 442 | 440 | ||
| 443 | if (inode->i_size > old_i_size) { | 441 | err = ufs_alloc_lastblock(inode); |
| 444 | /* | ||
| 445 | * if we expand file we should care about | ||
| 446 | * allocation of block for last byte first of all | ||
| 447 | */ | ||
| 448 | err = ufs_alloc_lastblock(inode); | ||
| 449 | 442 | ||
| 450 | if (err) { | 443 | if (err) { |
| 451 | i_size_write(inode, old_i_size); | 444 | i_size_write(inode, old_i_size); |
| 452 | goto out; | 445 | goto out; |
| 453 | } | ||
| 454 | /* | ||
| 455 | * go away, because of we expand file, and we do not | ||
| 456 | * need free blocks, and zeroizes page | ||
| 457 | */ | ||
| 458 | lock_kernel(); | ||
| 459 | goto almost_end; | ||
| 460 | } | 446 | } |
| 461 | 447 | ||
| 462 | block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); | 448 | block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); |
| @@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) | |||
| 477 | yield(); | 463 | yield(); |
| 478 | } | 464 | } |
| 479 | 465 | ||
| 480 | if (inode->i_size < old_i_size) { | ||
| 481 | /* | ||
| 482 | * now we should have enough space | ||
| 483 | * to allocate block for last byte | ||
| 484 | */ | ||
| 485 | err = ufs_alloc_lastblock(inode); | ||
| 486 | if (err) | ||
| 487 | /* | ||
| 488 | * looks like all the same - we have no space, | ||
| 489 | * but we truncate file already | ||
| 490 | */ | ||
| 491 | inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize; | ||
| 492 | } | ||
| 493 | almost_end: | ||
| 494 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | 466 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; |
| 467 | ufsi->i_lastfrag = DIRECT_FRAGMENT; | ||
| 495 | unlock_kernel(); | 468 | unlock_kernel(); |
| 496 | mark_inode_dirty(inode); | 469 | mark_inode_dirty(inode); |
| 497 | out: | 470 | out: |
